// mev-beta/pkg/scanner/concurrent.go
//
// Concurrent market scanner: a bounded worker pool, a TTL cache of pool
// state, and event analysis for MEV opportunity detection.
package scanner
import (
"fmt"
"math/big"
"sync"
"time"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/events"
"github.com/fraktal/mev-beta/pkg/uniswap"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
"golang.org/x/sync/singleflight"
)
// MarketScanner scans markets for price movement opportunities with concurrency
type MarketScanner struct {
config *config.BotConfig // bot settings (MaxWorkers, MinProfitThreshold, RPCTimeout)
logger *logger.Logger // shared logger used by the scanner and its workers
workerPool chan chan EventDetails // job channels of idle workers (capacity MaxWorkers)
workers []*EventWorker // all spawned workers, retained so Stop can signal them
wg sync.WaitGroup // counts in-flight event-processing goroutines (Add in SubmitEvent, Done in Process)
cacheGroup singleflight.Group // collapses concurrent fetches of the same pool into one call
cache map[string]*CachedData // pool state keyed by "pool_<address>"
cacheMutex sync.RWMutex // guards cache
cacheTTL time.Duration // cache entry lifetime; derived from RPCTimeout seconds — TODO confirm reusing the RPC timeout as a TTL is intended
}
// EventWorker represents a worker that processes event details
type EventWorker struct {
ID int // worker index, used only in log messages
WorkerPool chan chan EventDetails // shared pool the worker registers its JobChannel into when idle
JobChannel chan EventDetails // unbuffered channel this worker receives jobs on
QuitChan chan bool // signal channel used by Stop to terminate the worker loop
scanner *MarketScanner // back-reference for the analyzers and the WaitGroup
}
// NewMarketScanner creates a new market scanner with concurrency support.
// It spawns cfg.MaxWorkers workers and a background cache-cleanup goroutine.
func NewMarketScanner(cfg *config.BotConfig, logger *logger.Logger) *MarketScanner {
	s := &MarketScanner{
		config:     cfg,
		logger:     logger,
		workerPool: make(chan chan EventDetails, cfg.MaxWorkers),
		workers:    make([]*EventWorker, 0, cfg.MaxWorkers),
		cache:      make(map[string]*CachedData),
		cacheTTL:   time.Duration(cfg.RPCTimeout) * time.Second,
	}
	// Spawn and start the fixed worker set.
	for id := 0; id < cfg.MaxWorkers; id++ {
		w := NewEventWorker(id, s.workerPool, s)
		s.workers = append(s.workers, w)
		w.Start()
	}
	// Background janitor that evicts stale cache entries.
	go s.cleanupCache()
	return s
}
// NewEventWorker creates a new event worker registered against the given
// worker pool and scanner.
func NewEventWorker(id int, workerPool chan chan EventDetails, scanner *MarketScanner) *EventWorker {
	return &EventWorker{
		ID:         id,
		scanner:    scanner,
		WorkerPool: workerPool,
		JobChannel: make(chan EventDetails), // unbuffered: job handoff is a synchronous rendezvous
		QuitChan:   make(chan bool),
	}
}
// Start begins the worker's dispatch loop in its own goroutine. On every
// iteration the worker advertises availability, then waits for either a job
// or a quit signal.
func (w *EventWorker) Start() {
	go func() {
		for {
			// Hand our job channel back to the pool to signal we are idle.
			w.WorkerPool <- w.JobChannel
			select {
			case event := <-w.JobChannel:
				// A dispatcher picked us; handle the event.
				w.Process(event)
			case <-w.QuitChan:
				// Shutdown requested.
				return
			}
		}
	}()
}
// Stop terminates the worker
//
// The send happens in a goroutine so Stop never blocks its caller; the worker
// picks the signal up the next time it reaches its select.
// NOTE(review): if the worker goroutine has already exited, this send blocks
// forever and leaks the goroutine — consider close(w.QuitChan) instead, after
// verifying Stop is never called twice on the same worker.
func (w *EventWorker) Stop() {
go func() {
w.QuitChan <- true
}()
}
// Process handles an event detail
//
// The event is dispatched to the scanner's type-specific analyzer inside a
// fresh goroutine so the worker can re-register with the pool immediately.
// NOTE(review): because every event gets its own goroutine, MaxWorkers does
// NOT bound the number of concurrent analyses — confirm this unbounded
// fan-out is intentional.
func (w *EventWorker) Process(event EventDetails) {
// Analyze the event in a separate goroutine to maintain throughput
go func() {
// Balances the wg.Add(1) performed in SubmitEvent.
defer w.scanner.wg.Done()
// Log the processing
w.scanner.logger.Debug(fmt.Sprintf("Worker %d processing %s event in pool %s from protocol %s",
w.ID, event.Type.String(), event.PoolAddress, event.Protocol))
// Analyze based on event type
switch event.Type {
case events.Swap:
w.scanner.analyzeSwapEvent(event)
case events.AddLiquidity:
w.scanner.analyzeLiquidityEvent(event, true)
case events.RemoveLiquidity:
w.scanner.analyzeLiquidityEvent(event, false)
case events.NewPool:
w.scanner.analyzeNewPoolEvent(event)
default:
// Unknown types are logged and dropped; wg is still decremented above.
w.scanner.logger.Debug(fmt.Sprintf("Worker %d received unknown event type: %d", w.ID, event.Type))
}
}()
}
// SubmitEvent submits an event for processing by the worker pool
//
// Blocks until an idle worker is available. The wg counter incremented here
// is decremented in EventWorker.Process once the analysis goroutine finishes,
// which is what Stop waits on.
func (s *MarketScanner) SubmitEvent(event EventDetails) {
s.wg.Add(1)
// Get an available worker job channel
jobChannel := <-s.workerPool
// Send the job to the worker
jobChannel <- event
}
// analyzeSwapEvent analyzes a swap event for arbitrage opportunities.
// Errors in data retrieval or price calculation are logged and abort the
// analysis for this event.
func (s *MarketScanner) analyzeSwapEvent(event EventDetails) {
	s.logger.Debug(fmt.Sprintf("Analyzing swap event in pool %s", event.PoolAddress))
	// Fetch (possibly cached) state for the pool the swap happened in.
	poolData, err := s.getPoolData(event.PoolAddress)
	if err != nil {
		s.logger.Error(fmt.Sprintf("Error getting pool data for %s: %v", event.PoolAddress, err))
		return
	}
	// Derive the price movement this swap caused.
	priceMovement, err := s.calculatePriceMovement(event, poolData)
	if err != nil {
		s.logger.Error(fmt.Sprintf("Error calculating price movement for pool %s: %v", event.PoolAddress, err))
		return
	}
	// Ignore movements below the configured profit threshold.
	if !s.isSignificantMovement(priceMovement, s.config.MinProfitThreshold) {
		s.logger.Debug(fmt.Sprintf("Price movement in pool %s is not significant: %f", event.PoolAddress, priceMovement.PriceImpact))
		return
	}
	s.logger.Info(fmt.Sprintf("Significant price movement detected in pool %s: %+v", event.PoolAddress, priceMovement))
	// Scan for arbitrage paths opened up by this movement.
	opportunities := s.findArbitrageOpportunities(event, priceMovement)
	if len(opportunities) == 0 {
		return
	}
	s.logger.Info(fmt.Sprintf("Found %d arbitrage opportunities for pool %s", len(opportunities), event.PoolAddress))
	for _, opp := range opportunities {
		s.logger.Info(fmt.Sprintf("Arbitrage opportunity: %+v", opp))
	}
}
// analyzeLiquidityEvent analyzes liquidity events (add/remove) and refreshes
// the cached view of the affected pool from the event payload.
func (s *MarketScanner) analyzeLiquidityEvent(event EventDetails, isAdd bool) {
	var action string
	if isAdd {
		action = "adding"
	} else {
		action = "removing"
	}
	s.logger.Debug(fmt.Sprintf("Analyzing liquidity event (%s) in pool %s", action, event.PoolAddress))
	// Push the event's liquidity/price state into the cache.
	s.updatePoolData(event)
	s.logger.Info(fmt.Sprintf("Liquidity %s event processed for pool %s", action, event.PoolAddress))
}
// analyzeNewPoolEvent analyzes new pool creation events. Currently it only
// logs the discovery; a real implementation would fetch and cache the new
// pool's state.
func (s *MarketScanner) analyzeNewPoolEvent(event EventDetails) {
	s.logger.Info(fmt.Sprintf("New pool created: %s (protocol: %s)", event.PoolAddress, event.Protocol))
	s.logger.Debug(fmt.Sprintf("Added new pool %s to monitoring", event.PoolAddress))
}
// calculatePriceMovement calculates the price movement from a swap event.
//
// It returns a PriceMovement populated with the pre-swap price/tick and a
// simplified price-impact estimate (amountOut / amountIn). Nil *big.Int
// amount fields on the event are treated as zero: big.Int arithmetic panics
// on nil operands, and non-swap event producers may leave these fields unset
// — TODO confirm against the event source.
func (s *MarketScanner) calculatePriceMovement(event EventDetails, poolData *CachedData) (*PriceMovement, error) {
	// orZero guards against nil *big.Int fields on the incoming event.
	orZero := func(v *big.Int) *big.Int {
		if v == nil {
			return new(big.Int)
		}
		return v
	}
	// Price before the swap, decoded from the pool's cached sqrtPriceX96.
	priceBefore := uniswap.SqrtPriceX96ToPrice(poolData.SqrtPriceX96.ToBig())
	priceMovement := &PriceMovement{
		Token0:      event.Token0,
		Token1:      event.Token1,
		Pool:        event.PoolAddress,
		Protocol:    event.Protocol,
		AmountIn:    new(big.Int).Add(orZero(event.Amount0In), orZero(event.Amount1In)),
		AmountOut:   new(big.Int).Add(orZero(event.Amount0Out), orZero(event.Amount1Out)),
		PriceBefore: priceBefore,
		TickBefore:  event.Tick,
		Timestamp:   event.Timestamp,
	}
	// Calculate price impact (simplified): the out/in ratio.
	// In practice, this would involve more complex calculations using Uniswap V3 math.
	if priceMovement.AmountIn.Sign() > 0 {
		impact := new(big.Float).Quo(
			new(big.Float).SetInt(priceMovement.AmountOut),
			new(big.Float).SetInt(priceMovement.AmountIn),
		)
		priceMovement.PriceImpact, _ = impact.Float64()
	}
	return priceMovement, nil
}
// isSignificantMovement reports whether a price movement's impact exceeds the
// given threshold and is therefore worth acting on.
func (s *MarketScanner) isSignificantMovement(movement *PriceMovement, threshold float64) bool {
	significant := movement.PriceImpact > threshold
	return significant
}
// findArbitrageOpportunities looks for arbitrage opportunities based on price
// movements.
//
// A full implementation would:
//  1. Compare prices across different pools for the same token pair
//  2. Calculate potential profit after gas costs
//  3. Identify triangular arbitrage opportunities
//  4. Check if the opportunity is profitable
//
// For now it returns a single mock opportunity for demonstration.
func (s *MarketScanner) findArbitrageOpportunities(event EventDetails, movement *PriceMovement) []ArbitrageOpportunity {
	s.logger.Debug(fmt.Sprintf("Searching for arbitrage opportunities for pool %s", event.PoolAddress))
	mock := ArbitrageOpportunity{
		Path:        []string{event.Token0, event.Token1},
		Pools:       []string{event.PoolAddress, "0xMockPoolAddress"},
		Profit:      big.NewInt(1000000000000000000), // 1 ETH
		GasEstimate: big.NewInt(200000000000000000),  // 0.2 ETH
		ROI:         5.0,                             // 500%
		Protocol:    event.Protocol,
	}
	return []ArbitrageOpportunity{mock}
}
// Stop stops the market scanner and all workers
//
// Signals every worker to exit, then blocks until all in-flight analysis
// goroutines (tracked by wg) have finished.
// NOTE(review): the cleanupCache goroutine started in NewMarketScanner has no
// shutdown signal and keeps running after Stop — add a quit channel if
// scanners are created and torn down repeatedly within one process.
func (s *MarketScanner) Stop() {
// Stop all workers
for _, worker := range s.workers {
worker.Stop()
}
// Wait for all jobs to complete
s.wg.Wait()
}
// ArbitrageOpportunity represents a potential arbitrage opportunity
// discovered by findArbitrageOpportunities.
type ArbitrageOpportunity struct {
Path []string // Token path for the arbitrage (token addresses, in trade order)
Pools []string // Pools involved in the arbitrage
Profit *big.Int // Estimated profit in wei
GasEstimate *big.Int // Estimated gas cost in wei
ROI float64 // Return on investment percentage (5.0 == 500%)
Protocol string // DEX protocol
}
// PriceMovement represents a potential price movement derived from a swap
// event by calculatePriceMovement.
type PriceMovement struct {
Token0 string // Token address
Token1 string // Token address
Pool string // Pool address
Protocol string // DEX protocol
AmountIn *big.Int // Amount of token being swapped in (sum of both token inputs)
AmountOut *big.Int // Amount of token being swapped out (sum of both token outputs)
PriceBefore *big.Float // Price before the swap
PriceAfter *big.Float // Price after the swap (to be calculated; currently never set)
PriceImpact float64 // Calculated price impact (simplified out/in ratio)
TickBefore int // Tick before the swap
TickAfter int // Tick after the swap (to be calculated; currently never set)
Timestamp time.Time // Event timestamp
}
// EventDetails contains details about a detected event submitted to the
// worker pool via SubmitEvent.
type EventDetails struct {
Type events.EventType // event kind (Swap, AddLiquidity, RemoveLiquidity, NewPool, ...)
Protocol string // DEX protocol name
PoolAddress string // hex address of the pool the event occurred in
Token0 string // hex address of token0
Token1 string // hex address of token1
Amount0In *big.Int // token0 input amount; may be nil for non-swap events — TODO confirm with event producers
Amount0Out *big.Int // token0 output amount; may be nil — see above
Amount1In *big.Int // token1 input amount; may be nil — see above
Amount1Out *big.Int // token1 output amount; may be nil — see above
SqrtPriceX96 *uint256.Int // pool sqrt price after the event
Liquidity *uint256.Int // pool liquidity after the event
Tick int // pool tick after the event
Timestamp time.Time // when the event was observed
TransactionHash common.Hash // transaction that emitted the event
}
// CachedData represents cached pool data, stored in MarketScanner.cache and
// evicted by cleanupCache once older than cacheTTL.
type CachedData struct {
Address common.Address // pool contract address
Token0 common.Address // token0 contract address
Token1 common.Address // token1 contract address
Fee int64 // fee tier in hundredths of a bip (3000 == 0.3%)
Liquidity *uint256.Int // current pool liquidity
SqrtPriceX96 *uint256.Int // current sqrt price in X96 fixed point
Tick int // current tick
TickSpacing int // tick spacing for the pool's fee tier
LastUpdated time.Time // when this entry was written; compared against cacheTTL
}
// getPoolData retrieves pool data with caching.
//
// A still-fresh cache entry is returned directly; otherwise the pool is
// fetched, with singleflight collapsing concurrent misses for the same key
// into a single fetch, and the result is written back to the cache.
func (s *MarketScanner) getPoolData(poolAddress string) (*CachedData, error) {
	key := fmt.Sprintf("pool_%s", poolAddress)
	// Fast path: serve a cached entry that has not expired.
	s.cacheMutex.RLock()
	cached, ok := s.cache[key]
	s.cacheMutex.RUnlock()
	if ok && time.Since(cached.LastUpdated) < s.cacheTTL {
		s.logger.Debug(fmt.Sprintf("Cache hit for pool %s", poolAddress))
		return cached, nil
	}
	// Slow path: fetch once even under concurrent misses for the same key.
	result, err, _ := s.cacheGroup.Do(key, func() (interface{}, error) {
		return s.fetchPoolData(poolAddress)
	})
	if err != nil {
		return nil, err
	}
	poolData := result.(*CachedData)
	// Publish the fresh entry for subsequent readers.
	s.cacheMutex.Lock()
	s.cache[key] = poolData
	s.cacheMutex.Unlock()
	s.logger.Debug(fmt.Sprintf("Fetched and cached pool data for %s", poolAddress))
	return poolData, nil
}
// fetchPoolData fetches pool data from the blockchain.
//
// Simplified implementation: in practice this would query the chain for live
// pool state; for now it returns mock data for a USDC/WETH 0.3% pool.
func (s *MarketScanner) fetchPoolData(poolAddress string) (*CachedData, error) {
	s.logger.Debug(fmt.Sprintf("Fetching pool data for %s", poolAddress))
	pool := &CachedData{
		Address:      common.HexToAddress(poolAddress),
		Token0:       common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"), // USDC
		Token1:       common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"), // WETH
		Fee:          3000,                                 // 0.3%
		Liquidity:    uint256.NewInt(1000000000000000000),  // 1 ETH equivalent
		SqrtPriceX96: uint256.NewInt(2505414483750470000),  // Mock sqrt price
		Tick:         200000,                               // Mock tick
		TickSpacing:  60,                                   // Tick spacing for 0.3% fee
		LastUpdated:  time.Now(),
	}
	s.logger.Debug(fmt.Sprintf("Fetched pool data for %s", poolAddress))
	return pool, nil
}
// updatePoolData updates cached pool data from an event payload, merging it
// into any existing cache entry.
//
// Fix: the previous version always built a brand-new CachedData, silently
// zeroing the Fee and TickSpacing that an earlier fetchPoolData call had
// stored for the pool. Those fields are not carried on events, so they are
// now preserved from the existing entry when one is present.
func (s *MarketScanner) updatePoolData(event EventDetails) {
	cacheKey := fmt.Sprintf("pool_%s", event.PoolAddress)
	s.cacheMutex.Lock()
	defer s.cacheMutex.Unlock()
	data := &CachedData{
		Address:      common.HexToAddress(event.PoolAddress),
		Token0:       common.HexToAddress(event.Token0),
		Token1:       common.HexToAddress(event.Token1),
		Liquidity:    event.Liquidity,
		SqrtPriceX96: event.SqrtPriceX96,
		Tick:         event.Tick,
		LastUpdated:  time.Now(),
	}
	// Preserve fee-tier metadata the event does not carry.
	if prev, ok := s.cache[cacheKey]; ok {
		data.Fee = prev.Fee
		data.TickSpacing = prev.TickSpacing
	}
	s.cache[cacheKey] = data
	s.logger.Debug(fmt.Sprintf("Updated cache for pool %s", event.PoolAddress))
}
// cleanupCache removes expired cache entries on a 10-minute ticker.
//
// Fix: a plain range over the ticker channel replaces the former
// single-case for/select, which is the anti-idiom flagged by staticcheck
// (S1000) and adds nothing here.
// NOTE(review): there is no shutdown signal, so this goroutine outlives
// Stop() — acceptable for a process-lifetime scanner, but add a quit channel
// if scanners are created and torn down repeatedly.
func (s *MarketScanner) cleanupCache() {
	ticker := time.NewTicker(10 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		s.cacheMutex.Lock()
		for key, data := range s.cache {
			if time.Since(data.LastUpdated) > s.cacheTTL {
				delete(s.cache, key)
				s.logger.Debug(fmt.Sprintf("Removed expired cache entry: %s", key))
			}
		}
		s.cacheMutex.Unlock()
	}
}