// File: mev-beta/pkg/arbitrage/detection_engine.go

package arbitrage

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/fraktal/mev-beta/internal/logger"
	"github.com/fraktal/mev-beta/pkg/exchanges"
	"github.com/fraktal/mev-beta/pkg/math"
	"github.com/fraktal/mev-beta/pkg/types"
)

// ArbitrageDetectionEngine discovers profitable arbitrage opportunities in real-time
type ArbitrageDetectionEngine struct {
	registry         *exchanges.ExchangeRegistry
	calculator       *math.ArbitrageCalculator
	gasEstimator     math.GasEstimator
	logger           *logger.Logger
	decimalConverter *math.DecimalConverter

	// Configuration
	config DetectionConfig

	// State management
	runningMutex    sync.RWMutex
	isRunning       bool
	stopChan        chan struct{}
	opportunityChan chan *types.ArbitrageOpportunity

	// Performance tracking (scanCount and opportunityCount are updated with
	// sync/atomic; lastScanTime is guarded by runningMutex)
	scanCount        uint64
	opportunityCount uint64
	lastScanTime     time.Time

	// Worker pools
	scanWorkers *WorkerPool
	pathWorkers *WorkerPool
}

// DetectionConfig configures the arbitrage detection engine
type DetectionConfig struct {
	// Scanning parameters
	ScanInterval       time.Duration
	MaxConcurrentScans int
	MaxConcurrentPaths int

	// Opportunity criteria
	MinProfitThreshold *math.UniversalDecimal
	MaxPriceImpact     *math.UniversalDecimal
	MaxHops            int

	// Token filtering
	HighPriorityTokens []common.Address
	TokenWhitelist     []common.Address
	TokenBlacklist     []common.Address

	// Exchange filtering
	EnabledExchanges []math.ExchangeType
	ExchangeWeights  map[math.ExchangeType]float64

	// Performance settings
	CachePoolData bool
	CacheTTL      time.Duration
	BatchSize     int

	// Risk management
	MaxPositionSize    *math.UniversalDecimal
	RequiredConfidence float64
}
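
// Note: zero-value DetectionConfig fields are filled in with defaults by
// setDefaultConfig when the engine is constructed via NewArbitrageDetectionEngine,
// so callers only need to set the fields they want to override.
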
// WorkerPool manages concurrent workers for scanning
type WorkerPool struct {
	workers  int
	taskChan chan ScanTask
	wg       sync.WaitGroup
	ctx      context.Context
	cancel   context.CancelFunc
}

// ScanTask represents a scanning task
type ScanTask struct {
	TokenPair   exchanges.TokenPair
	Exchanges   []*exchanges.ExchangeConfig
	InputAmount *math.UniversalDecimal
	ResultChan  chan ScanResult
}

// ScanResult contains the result of a scanning task
type ScanResult struct {
	Opportunity *types.ArbitrageOpportunity
	Error       error
	ScanTime    time.Duration
}
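
// As produced by processScanTask, a ScanResult may carry a nil Opportunity when
// no path yielded a calculable opportunity for the given pair and input amount;
// consumers should check for nil before use (processScanBatch already does).
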
// NewArbitrageDetectionEngine creates a new arbitrage detection engine
func NewArbitrageDetectionEngine(
	registry *exchanges.ExchangeRegistry,
	gasEstimator math.GasEstimator,
	logger *logger.Logger,
	config DetectionConfig,
) *ArbitrageDetectionEngine {
	calculator := math.NewArbitrageCalculator(gasEstimator)
	engine := &ArbitrageDetectionEngine{
		registry:         registry,
		calculator:       calculator,
		gasEstimator:     gasEstimator,
		logger:           logger,
		decimalConverter: math.NewDecimalConverter(),
		config:           config,
		isRunning:        false,
		stopChan:         make(chan struct{}),
		opportunityChan:  make(chan *types.ArbitrageOpportunity, 1000), // Buffered channel
	}
	// Set default configuration if not provided
	engine.setDefaultConfig()
	return engine
}
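
// Illustrative usage (a sketch; assumes a populated exchange registry, a
// math.GasEstimator implementation, a *logger.Logger and a context are already
// available; a zero-value DetectionConfig works because defaults are applied):
//
//	engine := NewArbitrageDetectionEngine(registry, gasEstimator, log, DetectionConfig{})
//	if err := engine.Start(ctx); err != nil {
//		// handle the error
//	}
//	defer engine.Stop()
//	// ... engine.GetStats() reports scan and opportunity counters while running
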
// setDefaultConfig sets default configuration values
func (engine *ArbitrageDetectionEngine) setDefaultConfig() {
	if engine.config.ScanInterval == 0 {
		engine.config.ScanInterval = 1 * time.Second
	}
	if engine.config.MaxConcurrentScans == 0 {
		engine.config.MaxConcurrentScans = 10
	}
	if engine.config.MaxConcurrentPaths == 0 {
		engine.config.MaxConcurrentPaths = 50
	}
	if engine.config.MinProfitThreshold == nil {
		engine.config.MinProfitThreshold, _ = engine.decimalConverter.FromString("0.01", 18, "ETH")
	}
	if engine.config.MaxPriceImpact == nil {
		engine.config.MaxPriceImpact, _ = engine.decimalConverter.FromString("2", 4, "PERCENT")
	}
	if engine.config.MaxHops == 0 {
		engine.config.MaxHops = 3
	}
	if engine.config.CacheTTL == 0 {
		engine.config.CacheTTL = 30 * time.Second
	}
	if engine.config.BatchSize == 0 {
		engine.config.BatchSize = 20
	}
	if engine.config.RequiredConfidence == 0 {
		engine.config.RequiredConfidence = 0.7
	}
	if len(engine.config.EnabledExchanges) == 0 {
		// Enable all exchanges by default
		for _, exchangeConfig := range engine.registry.GetAllExchanges() {
			engine.config.EnabledExchanges = append(engine.config.EnabledExchanges, exchangeConfig.Type)
		}
	}
}
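
// Note: the EnabledExchanges default is derived from the registry at construction
// time, so exchanges must already be registered when NewArbitrageDetectionEngine
// is called for them to be enabled by default.
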
// Start begins the arbitrage detection process
func (engine *ArbitrageDetectionEngine) Start(ctx context.Context) error {
	engine.runningMutex.Lock()
	defer engine.runningMutex.Unlock()
	if engine.isRunning {
		return fmt.Errorf("detection engine is already running")
	}
	engine.logger.Info("Starting arbitrage detection engine...")
	engine.logger.Info(fmt.Sprintf("Configuration - Scan Interval: %v, Max Concurrent Scans: %d, Min Profit: %s ETH",
		engine.config.ScanInterval,
		engine.config.MaxConcurrentScans,
		engine.decimalConverter.ToHumanReadable(engine.config.MinProfitThreshold)))

	// Recreate the stop channel so the engine can be restarted after Stop()
	engine.stopChan = make(chan struct{})

	// Initialize worker pools
	if err := engine.initializeWorkerPools(ctx); err != nil {
		return fmt.Errorf("failed to initialize worker pools: %w", err)
	}
	engine.isRunning = true

	// Start main detection loop
	go engine.detectionLoop(ctx)
	// Start opportunity processing
	go engine.opportunityProcessor(ctx)

	engine.logger.Info("Arbitrage detection engine started successfully")
	return nil
}

// Stop halts the arbitrage detection process
func (engine *ArbitrageDetectionEngine) Stop() error {
	engine.runningMutex.Lock()
	defer engine.runningMutex.Unlock()
	if !engine.isRunning {
		return fmt.Errorf("detection engine is not running")
	}
	engine.logger.Info("Stopping arbitrage detection engine...")
	// Signal stop
	close(engine.stopChan)
	// Stop worker pools
	if engine.scanWorkers != nil {
		engine.scanWorkers.Stop()
	}
	if engine.pathWorkers != nil {
		engine.pathWorkers.Stop()
	}
	engine.isRunning = false
	engine.logger.Info(fmt.Sprintf("Detection engine stopped. Total scans: %d, Opportunities found: %d",
		atomic.LoadUint64(&engine.scanCount), atomic.LoadUint64(&engine.opportunityCount)))
	return nil
}

// initializeWorkerPools sets up worker pools for concurrent processing
func (engine *ArbitrageDetectionEngine) initializeWorkerPools(ctx context.Context) error {
	// Initialize scan worker pool
	engine.scanWorkers = NewWorkerPool(engine.config.MaxConcurrentScans, ctx)
	engine.scanWorkers.Start(engine.processScanTask)
	// Initialize path worker pool
	engine.pathWorkers = NewWorkerPool(engine.config.MaxConcurrentPaths, ctx)
	engine.pathWorkers.Start(engine.processPathTask)
	return nil
}

// detectionLoop runs the main detection logic
func (engine *ArbitrageDetectionEngine) detectionLoop(ctx context.Context) {
	ticker := time.NewTicker(engine.config.ScanInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			engine.logger.Info("Detection loop stopped due to context cancellation")
			return
		case <-engine.stopChan:
			engine.logger.Info("Detection loop stopped")
			return
		case <-ticker.C:
			engine.performScan(ctx)
		}
	}
}

// performScan executes a complete arbitrage scan
func (engine *ArbitrageDetectionEngine) performScan(ctx context.Context) {
	scanStart := time.Now()
	scanNumber := atomic.AddUint64(&engine.scanCount, 1)
	engine.logger.Debug(fmt.Sprintf("Starting arbitrage scan #%d", scanNumber))

	// Get token pairs to scan
	tokenPairs := engine.getTokenPairsToScan()
	// Get input amounts to test
	inputAmounts := engine.getInputAmountsToTest()

	// Create scan tasks
	scanTasks := make([]ScanTask, 0)
	for _, pair := range tokenPairs {
		// Get exchanges that support this pair
		supportingExchanges := engine.registry.GetExchangesForPair(
			common.HexToAddress(pair.Token0.Address),
			common.HexToAddress(pair.Token1.Address),
		)
		// Filter enabled exchanges
		enabledExchanges := engine.filterEnabledExchanges(supportingExchanges)
		if len(enabledExchanges) < 2 {
			continue // Need at least 2 exchanges for arbitrage
		}
		for _, inputAmount := range inputAmounts {
			task := ScanTask{
				TokenPair:   pair,
				Exchanges:   enabledExchanges,
				InputAmount: inputAmount,
				ResultChan:  make(chan ScanResult, 1),
			}
			scanTasks = append(scanTasks, task)
		}
	}
	engine.logger.Debug(fmt.Sprintf("Created %d scan tasks for %d token pairs", len(scanTasks), len(tokenPairs)))

	// Process scan tasks in batches
	engine.processScanTasksBatch(ctx, scanTasks)

	scanDuration := time.Since(scanStart)
	engine.runningMutex.Lock()
	engine.lastScanTime = time.Now()
	engine.runningMutex.Unlock()
	engine.logger.Debug(fmt.Sprintf("Completed arbitrage scan #%d in %v", scanNumber, scanDuration))
}

// getTokenPairsToScan returns token pairs to scan for arbitrage
func (engine *ArbitrageDetectionEngine) getTokenPairsToScan() []exchanges.TokenPair {
	// Get high priority tokens first
	highPriorityTokens := engine.registry.GetHighPriorityTokens(10)
	// Create pairs from high priority tokens
	pairs := make([]exchanges.TokenPair, 0)
	for i, token0 := range highPriorityTokens {
		for j, token1 := range highPriorityTokens {
			if i >= j {
				continue // Avoid duplicates and self-pairs
			}
			// Check if pair is supported
			if engine.registry.IsPairSupported(
				common.HexToAddress(token0.Address),
				common.HexToAddress(token1.Address),
			) {
				pairs = append(pairs, exchanges.TokenPair{
					Token0: token0,
					Token1: token1,
				})
			}
		}
	}
	return pairs
}

// getInputAmountsToTest returns different input amounts to test for arbitrage
func (engine *ArbitrageDetectionEngine) getInputAmountsToTest() []*math.UniversalDecimal {
	amounts := make([]*math.UniversalDecimal, 0)
	// Test different input amounts to find optimal arbitrage size
	testAmounts := []string{"0.1", "0.5", "1", "2", "5", "10"}
	for _, amountStr := range testAmounts {
		if amount, err := engine.decimalConverter.FromString(amountStr, 18, "ETH"); err == nil {
			amounts = append(amounts, amount)
		}
	}
	return amounts
}
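
// Note: these probe amounts are all parsed as 18-decimal, ETH-denominated values;
// sizing them per input token (or deriving them from pool depth) would be a
// possible refinement, not something the current code does.
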
// filterEnabledExchanges filters exchanges based on configuration
func (engine *ArbitrageDetectionEngine) filterEnabledExchanges(exchangeConfigs []*exchanges.ExchangeConfig) []*exchanges.ExchangeConfig {
	enabled := make([]*exchanges.ExchangeConfig, 0)
	enabledMap := make(map[math.ExchangeType]bool)
	for _, exchangeType := range engine.config.EnabledExchanges {
		enabledMap[exchangeType] = true
	}
	for _, exchange := range exchangeConfigs {
		if enabledMap[exchange.Type] {
			enabled = append(enabled, exchange)
		}
	}
	return enabled
}

// processScanTasksBatch processes scan tasks in batches for efficiency
func (engine *ArbitrageDetectionEngine) processScanTasksBatch(ctx context.Context, tasks []ScanTask) {
	batchSize := engine.config.BatchSize
	for i := 0; i < len(tasks); i += batchSize {
		end := i + batchSize
		if end > len(tasks) {
			end = len(tasks)
		}
		batch := tasks[i:end]
		engine.processScanBatch(ctx, batch)
		// Small delay between batches to avoid overwhelming the system
		select {
		case <-ctx.Done():
			return
		case <-time.After(10 * time.Millisecond):
		}
	}
}

// processScanBatch processes a batch of scan tasks concurrently
func (engine *ArbitrageDetectionEngine) processScanBatch(ctx context.Context, batch []ScanTask) {
	resultChans := make([]chan ScanResult, len(batch))
	// Submit tasks to worker pool
	for i, task := range batch {
		resultChans[i] = task.ResultChan
		select {
		case engine.scanWorkers.taskChan <- task:
		case <-ctx.Done():
			return
		}
	}
	// Collect results
	for _, resultChan := range resultChans {
		select {
		case result := <-resultChan:
			if result.Error != nil {
				engine.logger.Debug(fmt.Sprintf("Scan task error: %v", result.Error))
				continue
			}
			if result.Opportunity != nil && engine.calculator.IsOpportunityProfitable(result.Opportunity) {
				atomic.AddUint64(&engine.opportunityCount, 1)
				// Send opportunity to processing channel
				select {
				case engine.opportunityChan <- result.Opportunity:
					engine.logger.Info(fmt.Sprintf("🎯 Found profitable arbitrage: %s profit, %.1f%% confidence",
						result.Opportunity.NetProfit.String(),
						result.Opportunity.Confidence*100))
				default:
					engine.logger.Warn("Opportunity channel full, dropping opportunity")
				}
			}
		case <-ctx.Done():
			return
		case <-time.After(5 * time.Second):
			engine.logger.Warn("Scan task timed out")
		}
	}
}

// processScanTask processes a single scan task
func (engine *ArbitrageDetectionEngine) processScanTask(task ScanTask) {
	start := time.Now()
	// Find arbitrage paths between exchanges
	paths := engine.findArbitragePaths(task.TokenPair, task.Exchanges)
	var bestOpportunity *types.ArbitrageOpportunity
	for _, path := range paths {
		// Calculate arbitrage opportunity
		opportunity, err := engine.calculator.CalculateArbitrageOpportunity(
			path,
			task.InputAmount,
			math.TokenInfo{
				Address:  task.TokenPair.Token0.Address,
				Symbol:   task.TokenPair.Token0.Symbol,
				Decimals: task.TokenPair.Token0.Decimals,
			},
			math.TokenInfo{
				Address:  task.TokenPair.Token1.Address,
				Symbol:   task.TokenPair.Token1.Symbol,
				Decimals: task.TokenPair.Token1.Decimals,
			},
		)
		if err != nil {
			continue
		}
		// Check if this is the best opportunity so far
		if bestOpportunity == nil || engine.isOpportunityBetter(opportunity, bestOpportunity) {
			bestOpportunity = opportunity
		}
	}
	result := ScanResult{
		Opportunity: bestOpportunity,
		ScanTime:    time.Since(start),
	}
	task.ResultChan <- result
}

// findArbitragePaths finds possible arbitrage paths between exchanges
func (engine *ArbitrageDetectionEngine) findArbitragePaths(pair exchanges.TokenPair, exchangeConfigs []*exchanges.ExchangeConfig) [][]*math.PoolData {
	paths := make([][]*math.PoolData, 0)
	// For simplicity, we focus on 2-hop arbitrage (buy on exchange A, sell on exchange B)
	// A production implementation would include multi-hop paths
	token0Addr := common.HexToAddress(pair.Token0.Address)
	token1Addr := common.HexToAddress(pair.Token1.Address)
	for i, exchange1 := range exchangeConfigs {
		for j, exchange2 := range exchangeConfigs {
			if i == j {
				continue // Same exchange
			}
			// Find pools on each exchange
			pool1 := engine.findBestPool(exchange1, token0Addr, token1Addr)
			pool2 := engine.findBestPool(exchange2, token1Addr, token0Addr) // Reverse direction
			if pool1 != nil && pool2 != nil {
				path := []*math.PoolData{pool1, pool2}
				paths = append(paths, path)
			}
		}
	}
	return paths
}
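
// Note: every ordered pair of distinct exchanges is considered, so the number of
// candidate paths grows quadratically with the number of enabled exchanges for a
// pair. Each scan task evaluates its paths sequentially; concurrency is bounded by
// the scan worker pool and the configured BatchSize.
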
// findBestPool finds the best pool for a token pair on an exchange
func (engine *ArbitrageDetectionEngine) findBestPool(exchange *exchanges.ExchangeConfig, token0, token1 common.Address) *math.PoolData {
	// Get the pool detector and liquidity fetcher from the registry
	poolDetector := engine.registry.GetPoolDetector(exchange.Type)
	liquidityFetcher := engine.registry.GetLiquidityFetcher(exchange.Type)
	if poolDetector == nil || liquidityFetcher == nil {
		return nil
	}
	// Get pools for this pair
	pools, err := poolDetector.GetAllPools(token0, token1)
	if err != nil || len(pools) == 0 {
		return nil
	}
	// For now, return data for the first pool
	// A production implementation would compare liquidity and select the best
	poolData, err := liquidityFetcher.GetPoolData(pools[0])
	if err != nil {
		return nil
	}
	return poolData
}

// isOpportunityBetter compares two opportunities and returns true if the first is better
func (engine *ArbitrageDetectionEngine) isOpportunityBetter(opp1, opp2 *types.ArbitrageOpportunity) bool {
	// Compare net profit first
	if opp1.NetProfit.Cmp(opp2.NetProfit) > 0 {
		return true
	} else if opp1.NetProfit.Cmp(opp2.NetProfit) < 0 {
		return false
	}
	// If profits are equal, compare confidence
	return opp1.Confidence > opp2.Confidence
}

// processPathTask processes a path finding task
func (engine *ArbitrageDetectionEngine) processPathTask(task ScanTask) {
	// This would be used for more complex path finding algorithms
	// For now, defer to the main scan task processing
	engine.processScanTask(task)
}

// opportunityProcessor processes discovered opportunities
func (engine *ArbitrageDetectionEngine) opportunityProcessor(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-engine.stopChan:
			return
		case opportunity := <-engine.opportunityChan:
			engine.processOpportunity(opportunity)
		}
	}
}

// processOpportunity processes a discovered arbitrage opportunity
func (engine *ArbitrageDetectionEngine) processOpportunity(opportunity *types.ArbitrageOpportunity) {
	engine.logger.Info(fmt.Sprintf("Processing arbitrage opportunity: %s -> %s",
		opportunity.TokenIn.Hex()[:8],
		opportunity.TokenOut.Hex()[:8]))
	// Log detailed opportunity information
	engine.logger.Info(fmt.Sprintf(" Input Amount: %s", opportunity.AmountIn.String()))
	engine.logger.Info(fmt.Sprintf(" Input Token: %s", opportunity.TokenIn.Hex()))
	engine.logger.Info(fmt.Sprintf(" Net Profit: %s ETH", opportunity.NetProfit.String()))
	engine.logger.Info(fmt.Sprintf(" ROI: %.2f%%", opportunity.ROI))
	engine.logger.Info(fmt.Sprintf(" Price Impact: %.2f%%", opportunity.PriceImpact))
	engine.logger.Info(fmt.Sprintf(" Confidence: %.1f%%", opportunity.Confidence*100))
	engine.logger.Info(fmt.Sprintf(" Risk Level: %.2f", opportunity.Risk))
	engine.logger.Info(fmt.Sprintf(" Protocol: %s", opportunity.Protocol))
	engine.logger.Info(fmt.Sprintf(" Path length: %d", len(opportunity.Path)))
	// TODO: Send to execution engine for actual execution
	// This would integrate with Component 4: Flash Swap Execution System
}

// GetOpportunityChannel returns the channel for receiving opportunities
func (engine *ArbitrageDetectionEngine) GetOpportunityChannel() <-chan *types.ArbitrageOpportunity {
	return engine.opportunityChan
}
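
// Caution: while the engine is running, opportunityProcessor also receives from
// this channel, so an external consumer and the internal processor will each see
// only a subset of opportunities. The channel is never closed, so a range over it
// will not terminate when the engine stops.
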
// GetStats returns detection engine statistics
func (engine *ArbitrageDetectionEngine) GetStats() DetectionStats {
	engine.runningMutex.RLock()
	defer engine.runningMutex.RUnlock()
	return DetectionStats{
		IsRunning:           engine.isRunning,
		TotalScans:          atomic.LoadUint64(&engine.scanCount),
		OpportunitiesFound:  atomic.LoadUint64(&engine.opportunityCount),
		LastScanTime:        engine.lastScanTime,
		ScanInterval:        engine.config.ScanInterval,
		ConfiguredExchanges: len(engine.config.EnabledExchanges),
	}
}

// ScanOpportunities scans for arbitrage opportunities using the provided parameters
func (engine *ArbitrageDetectionEngine) ScanOpportunities(ctx context.Context, params []*DetectionParams) ([]*types.ArbitrageOpportunity, error) {
	engine.runningMutex.RLock()
	running := engine.isRunning
	engine.runningMutex.RUnlock()
	if !running {
		return nil, fmt.Errorf("detection engine is not running, call Start() first")
	}
	var opportunities []*types.ArbitrageOpportunity
	// Process each detection parameter
	for _, param := range params {
		// Stop early if the caller's context is cancelled
		select {
		case <-ctx.Done():
			return opportunities, ctx.Err()
		default:
		}
		// Create token info using a simplified approach for now
		// In production, this would query contract metadata
		token0Info := exchanges.TokenInfo{
			Address:  param.TokenA.Hex(),
			Symbol:   param.TokenA.Hex()[:8], // Placeholder symbol: "0x" plus the first 6 hex digits of the address
			Name:     "Unknown Token",
			Decimals: 18, // Standard ERC-20 decimals
		}
		token1Info := exchanges.TokenInfo{
			Address:  param.TokenB.Hex(),
			Symbol:   param.TokenB.Hex()[:8], // Placeholder symbol: "0x" plus the first 6 hex digits of the address
			Name:     "Unknown Token",
			Decimals: 18, // Standard ERC-20 decimals
		}
		tokenPair := exchanges.TokenPair{
			Token0: token0Info,
			Token1: token1Info,
		}
		// Get exchange configurations for this token pair
		exchangeConfigs := engine.registry.GetExchangesForPair(
			common.HexToAddress(tokenPair.Token0.Address),
			common.HexToAddress(tokenPair.Token1.Address),
		)
		if len(exchangeConfigs) < 2 {
			continue // Need at least 2 exchanges for arbitrage
		}
		// Find all possible arbitrage paths between the tokens
		paths := engine.findArbitragePaths(tokenPair, exchangeConfigs)
		// Calculate profitability for each path
		for _, path := range paths {
			if len(path) == 0 {
				continue
			}
			// Get token info for the first and last pools in the path
			tokenA := path[0].Token0
			tokenZ := path[len(path)-1].Token1
			if path[len(path)-1].Token0.Address == tokenA.Address {
				tokenZ = path[len(path)-1].Token0
			}
			// Test various input amounts to find the most profitable one
			inputAmounts := engine.getInputAmountsToTest()
			for _, inputAmount := range inputAmounts {
				// Calculate arbitrage opportunity using the calculator
				opportunity, err := engine.calculator.CalculateArbitrageOpportunity(path, inputAmount, tokenA, tokenZ)
				if err != nil {
					engine.logger.Debug(fmt.Sprintf("Failed to calculate opportunity for path: %v", err))
					continue
				}
				// Apply filters based on the parameters
				if opportunity.NetProfit.Cmp(param.MinProfit) < 0 {
					continue // Below minimum profit threshold
				}
				// Check slippage threshold
				if opportunity.PriceImpact > param.MaxSlippage {
					continue // Above maximum slippage tolerance
				}
				// Add to opportunities if it passes all checks
				opportunities = append(opportunities, opportunity)
				// For now, break after finding one good opportunity per path
				// to avoid too many similar results (can be made configurable)
				break
			}
		}
	}
	return opportunities, nil
}

// DetectionStats contains statistics about the detection engine
type DetectionStats struct {
	IsRunning           bool
	TotalScans          uint64
	OpportunitiesFound  uint64
	LastScanTime        time.Time
	ScanInterval        time.Duration
	ConfiguredExchanges int
}

// NewWorkerPool creates a new worker pool
func NewWorkerPool(workers int, ctx context.Context) *WorkerPool {
	ctx, cancel := context.WithCancel(ctx)
	return &WorkerPool{
		workers:  workers,
		taskChan: make(chan ScanTask, workers*2), // Buffered channel
		ctx:      ctx,
		cancel:   cancel,
	}
}

// Start starts the worker pool
func (wp *WorkerPool) Start(taskProcessor func(ScanTask)) {
	for i := 0; i < wp.workers; i++ {
		wp.wg.Add(1)
		go func() {
			defer wp.wg.Done()
			for {
				select {
				case <-wp.ctx.Done():
					return
				case task, ok := <-wp.taskChan:
					if !ok {
						return // Channel closed, nothing more to process
					}
					taskProcessor(task)
				}
			}
		}()
	}
}

// Stop stops the worker pool
func (wp *WorkerPool) Stop() {
	// Cancel the pool context and wait for in-flight tasks to finish.
	// The task channel is deliberately not closed: producers may still be
	// selecting on it, and sending on a closed channel would panic.
	wp.cancel()
	wp.wg.Wait()
}