feat: create v2-prep branch with comprehensive planning

Restructured project for V2 refactor:

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation

- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern)
- Multi-layer strict validation
- Multi-index pool cache
- Background validation pipeline
- Comprehensive observability

**Critical Issues Addressed:**
- Zero address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

Next Steps:
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Administrator
2025-11-10 10:14:26 +01:00
parent 1773daffe7
commit 803de231ba
411 changed files with 20390 additions and 8680 deletions

View File

@@ -0,0 +1,177 @@
package analysis
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/events"
"github.com/fraktal/mev-beta/pkg/marketdata"
scannercommon "github.com/fraktal/mev-beta/pkg/scanner/common"
"github.com/fraktal/mev-beta/pkg/scanner/market"
)
// LiquidityAnalyzer handles analysis of liquidity events (mint/burn)
// and new-pool creation events.
type LiquidityAnalyzer struct {
	logger           *logger.Logger
	marketDataLogger *marketdata.MarketDataLogger // sink for comprehensive event/pool records
}
// NewLiquidityAnalyzer creates a new liquidity analyzer.
func NewLiquidityAnalyzer(logger *logger.Logger, marketDataLogger *marketdata.MarketDataLogger) *LiquidityAnalyzer {
	analyzer := &LiquidityAnalyzer{
		logger:           logger,
		marketDataLogger: marketDataLogger,
	}
	return analyzer
}
// AnalyzeLiquidityEvent analyzes liquidity events (add/remove).
// isAdd selects between mint (add) and burn (remove) semantics.
func (l *LiquidityAnalyzer) AnalyzeLiquidityEvent(event events.Event, marketScanner *market.MarketScanner, isAdd bool) {
	action, eventType := "removing", "burn"
	if isAdd {
		action, eventType = "adding", "mint"
	}
	l.logger.Debug(fmt.Sprintf("Analyzing liquidity event (%s) in pool %s", action, event.PoolAddress))

	// Resolve the pool's factory: prefer cached pool info, otherwise fall
	// back to the protocol -> factory mapping of known DEX protocols.
	var factory common.Address
	if poolInfo, poolExists := l.marketDataLogger.GetPoolInfo(event.PoolAddress); poolExists {
		factory = poolInfo.Factory
	} else {
		factory = marketScanner.GetFactoryForProtocol(event.Protocol)
	}

	// Build the comprehensive liquidity event record for the market data logger.
	liquidityData := &marketdata.LiquidityEventData{
		TxHash:      event.TransactionHash,
		BlockNumber: event.BlockNumber,
		LogIndex:    uint(0), // Default log index (would need to be extracted from receipt)
		Timestamp:   time.Now(),
		EventType:   eventType,
		PoolAddress: event.PoolAddress,
		Factory:     factory,
		Protocol:    event.Protocol,
		Token0:      event.Token0,
		Token1:      event.Token1,
		Amount0:     event.Amount0,
		Amount1:     event.Amount1,
		Liquidity:   event.Liquidity,
		Owner:       common.Address{}, // Default owner (would need to be extracted from transaction)
		Recipient:   common.Address{}, // Default recipient (would need to be extracted from transaction)
	}

	// Attach USD valuations for the liquidity amounts.
	liquidityData.Amount0USD, liquidityData.Amount1USD, liquidityData.TotalUSD = l.calculateLiquidityUSDValues(liquidityData)

	// Persist via the market data logger with a bounded timeout; a
	// logging failure is non-fatal for event processing.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := l.marketDataLogger.LogLiquidityEvent(ctx, event, liquidityData); err != nil {
		l.logger.Debug(fmt.Sprintf("Failed to log liquidity event to market data logger: %v", err))
	}

	// Legacy database logging, then refresh the cached pool state.
	marketScanner.LogLiquidityEvent(event, eventType)
	marketScanner.UpdatePoolData(event)

	l.logger.Info(fmt.Sprintf("Liquidity %s event processed for pool %s", action, event.PoolAddress))
}
// AnalyzeNewPoolEvent analyzes new pool creation events and, when the
// address hosts what looks like a real pool contract, registers it for
// monitoring.
func (l *LiquidityAnalyzer) AnalyzeNewPoolEvent(event events.Event, marketScanner *market.MarketScanner) {
	l.logger.Info(fmt.Sprintf("New pool created: %s (protocol: %s)", event.PoolAddress, event.Protocol))
	l.logger.Debug(fmt.Sprintf("Adding new pool %s to monitoring", event.PoolAddress))

	// Fetch pool data to confirm this is a real pool before caching it.
	poolData, err := marketScanner.GetPoolData(event.PoolAddress.Hex())
	if err != nil {
		l.logger.Error(fmt.Sprintf("Failed to fetch data for new pool %s: %v", event.PoolAddress, err))
		return
	}

	// A zero address in the fetched data means no valid pool contract.
	if poolData.Address == (common.Address{}) {
		l.logger.Warn(fmt.Sprintf("Invalid pool contract at address %s", event.PoolAddress.Hex()))
		return
	}

	// Persist the pool and announce it.
	marketScanner.LogPoolData(poolData)
	l.logger.Info(fmt.Sprintf("Successfully added new pool %s to monitoring (tokens: %s-%s, fee: %d)",
		event.PoolAddress.Hex(), poolData.Token0.Hex(), poolData.Token1.Hex(), poolData.Fee))
}
// calculateLiquidityUSDValues calculates USD values for the amounts in a
// liquidity event. Unknown tokens price at 0, so their contribution to
// the total is zero; nil amounts contribute zero as well.
func (l *LiquidityAnalyzer) calculateLiquidityUSDValues(liquidityData *marketdata.LiquidityEventData) (amount0USD, amount1USD, totalUSD float64) {
	if liquidityData.Amount0 != nil {
		price := l.getTokenPriceUSD(liquidityData.Token0)
		decimals := l.getTokenDecimals(liquidityData.Token0)
		amount0USD = l.bigIntToFloat(liquidityData.Amount0, decimals) * price
	}
	if liquidityData.Amount1 != nil {
		price := l.getTokenPriceUSD(liquidityData.Token1)
		decimals := l.getTokenDecimals(liquidityData.Token1)
		amount1USD = l.bigIntToFloat(liquidityData.Amount1, decimals) * price
	}
	totalUSD = amount0USD + amount1USD
	return amount0USD, amount1USD, totalUSD
}
// getTokenPriceUSD returns the USD price of a token from the shared
// known-price table; unknown tokens resolve to 0 (a production system
// would fall back to a price oracle or DEX quote here).
func (l *LiquidityAnalyzer) getTokenPriceUSD(tokenAddr common.Address) float64 {
	price, exists := scannercommon.GetTokenPriceUSD(tokenAddr)
	if !exists {
		return 0.0
	}
	return price
}
// getTokenDecimals returns the decimal places for a token, defaulting to
// the common ERC-20 convention of 18 for unknown tokens.
func (l *LiquidityAnalyzer) getTokenDecimals(tokenAddr common.Address) uint8 {
	decimals, exists := scannercommon.GetTokenDecimals(tokenAddr)
	if !exists {
		return 18
	}
	return decimals
}
// bigIntToFloat converts a big.Int token amount to float64, scaling it
// down by 10^decimals. A nil amount converts to 0.
func (l *LiquidityAnalyzer) bigIntToFloat(amount *big.Int, decimals uint8) float64 {
	if amount == nil {
		return 0.0
	}
	scale := new(big.Float).SetInt(new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(decimals)), nil))
	value, _ := new(big.Float).Quo(new(big.Float).SetInt(amount), scale).Float64()
	return value
}

View File

@@ -0,0 +1,44 @@
// Package common provides common data and functions for the scanner.
package common
import (
"github.com/ethereum/go-ethereum/common"
)
// knownPrices maps well-known Arbitrum token addresses (lowercase hex)
// to hard-coded USD prices.
// NOTE(review): these are static snapshot values, not live quotes —
// confirm callers only use them for rough estimation.
var knownPrices = map[common.Address]float64{
	common.HexToAddress("0x82af49447d8a07e3bd95bd0d56f35241523fbab1"): 2000.0,  // WETH
	common.HexToAddress("0xaf88d065e77c8cc2239327c5edb3a432268e5831"): 1.0,     // USDC
	common.HexToAddress("0xff970a61a04b1ca14834a43f5de4533ebddb5cc8"): 1.0,     // USDC.e
	common.HexToAddress("0xfd086bc7cd5c481dcc9c85ebe478a1c0b69fcbb9"): 1.0,     // USDT
	common.HexToAddress("0x2f2a2543b76a4166549f7aab2e75bef0aefc5b0f"): 43000.0, // WBTC
	common.HexToAddress("0x912ce59144191c1204e64559fe8253a0e49e6548"): 0.75,    // ARB
	common.HexToAddress("0xfc5a1a6eb076a2c7ad06ed22c90d7e710e35ad0a"): 45.0,    // GMX
	common.HexToAddress("0xf97f4df75117a78c1a5a0dbb814af92458539fb4"): 12.0,    // LINK
	common.HexToAddress("0xfa7f8980b0f1e64a2062791cc3b0871572f1f7f0"): 8.0,     // UNI
	common.HexToAddress("0xba5ddd1f9d7f570dc94a51479a000e3bce967196"): 85.0,    // AAVE
}
// knownDecimals maps the same token set as knownPrices to their ERC-20
// decimal counts, used to scale raw big.Int amounts.
var knownDecimals = map[common.Address]uint8{
	common.HexToAddress("0x82af49447d8a07e3bd95bd0d56f35241523fbab1"): 18, // WETH
	common.HexToAddress("0xaf88d065e77c8cc2239327c5edb3a432268e5831"): 6,  // USDC
	common.HexToAddress("0xff970a61a04b1ca14834a43f5de4533ebddb5cc8"): 6,  // USDC.e
	common.HexToAddress("0xfd086bc7cd5c481dcc9c85ebe478a1c0b69fcbb9"): 6,  // USDT
	common.HexToAddress("0x2f2a2543b76a4166549f7aab2e75bef0aefc5b0f"): 8,  // WBTC
	common.HexToAddress("0x912ce59144191c1204e64559fe8253a0e49e6548"): 18, // ARB
	common.HexToAddress("0xfc5a1a6eb076a2c7ad06ed22c90d7e710e35ad0a"): 18, // GMX
	common.HexToAddress("0xf97f4df75117a78c1a5a0dbb814af92458539fb4"): 18, // LINK
	common.HexToAddress("0xfa7f8980b0f1e64a2062791cc3b0871572f1f7f0"): 18, // UNI
	common.HexToAddress("0xba5ddd1f9d7f570dc94a51479a000e3bce967196"): 18, // AAVE
}
// GetTokenPriceUSD looks up the hard-coded USD price for a token.
// The boolean result reports whether the token is in the known set.
func GetTokenPriceUSD(tokenAddr common.Address) (float64, bool) {
	if price, exists := knownPrices[tokenAddr]; exists {
		return price, true
	}
	return 0, false
}
// GetTokenDecimals looks up the decimal count for a token.
// The boolean result reports whether the token is in the known set.
func GetTokenDecimals(tokenAddr common.Address) (uint8, bool) {
	if decimals, exists := knownDecimals[tokenAddr]; exists {
		return decimals, true
	}
	return 0, false
}

View File

@@ -0,0 +1,317 @@
package scanner
import (
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/cache"
"github.com/fraktal/mev-beta/pkg/contracts"
"github.com/fraktal/mev-beta/pkg/database"
"github.com/fraktal/mev-beta/pkg/events"
"github.com/fraktal/mev-beta/pkg/marketdata"
"github.com/fraktal/mev-beta/pkg/profitcalc"
"github.com/fraktal/mev-beta/pkg/scanner/analysis"
"github.com/fraktal/mev-beta/pkg/scanner/market"
"github.com/fraktal/mev-beta/pkg/scanner/swap"
)
// Scanner is the main market scanner that handles event processing.
// It fans incoming events out to a fixed-size pool of EventWorkers and
// owns the analyzers those workers delegate to.
type Scanner struct {
	marketScanner     *market.MarketScanner       // market/pool state and opportunity tracking
	swapAnalyzer      *swap.SwapAnalyzer          // handles Swap events
	liquidityAnalyzer *analysis.LiquidityAnalyzer // handles liquidity and new-pool events
	config            *config.BotConfig
	logger            *logger.Logger
	workerPool        chan chan events.Event // pool of idle worker job channels
	workers           []*EventWorker
	wg                sync.WaitGroup  // in-flight events: Add in SubmitEvent, Done in Process
	parsingMonitor    *ParsingMonitor // Parsing performance monitor
	reserveCache      *cache.ReserveCache // ADDED: Reserve cache for event-driven invalidation
}
// EventWorker represents a worker that processes event details.
// A worker advertises availability by pushing its JobChannel into the
// shared WorkerPool; SubmitEvent pulls one channel out and sends a job.
type EventWorker struct {
	ID         int
	WorkerPool chan chan events.Event // shared pool of idle job channels
	JobChannel chan events.Event      // receives one event at a time
	QuitChan   chan bool              // signals the worker loop to exit
	scanner    *Scanner               // back-reference for analyzers, wg, logging
}
// NewScanner creates a new market scanner with concurrency support.
// It wires up the market scanner, swap/liquidity analyzers, the parsing
// monitor, and starts cfg.MaxWorkers event workers.
func NewScanner(cfg *config.BotConfig, logger *logger.Logger, contractExecutor *contracts.ContractExecutor, db *database.Database, reserveCache *cache.ReserveCache) *Scanner {
	s := &Scanner{
		config:       cfg,
		logger:       logger,
		workerPool:   make(chan chan events.Event, cfg.MaxWorkers),
		workers:      make([]*EventWorker, 0, cfg.MaxWorkers),
		reserveCache: reserveCache, // used for event-driven cache invalidation
	}

	// Market scanner first; the analyzers depend on its sub-components.
	s.marketScanner = market.NewMarketScanner(cfg, logger, contractExecutor, db)
	s.swapAnalyzer = swap.NewSwapAnalyzer(
		logger,
		s.marketScanner.GetMarketDataLogger(),
		s.marketScanner.GetProfitCalculator(),
		s.marketScanner.GetOpportunityRanker(),
	)
	s.liquidityAnalyzer = analysis.NewLiquidityAnalyzer(
		logger,
		s.marketScanner.GetMarketDataLogger(),
	)

	// Parsing performance monitor with default thresholds.
	s.parsingMonitor = NewParsingMonitor(logger, nil)

	// Spin up the worker pool.
	for i := 0; i < cfg.MaxWorkers; i++ {
		w := NewEventWorker(i, s.workerPool, s)
		s.workers = append(s.workers, w)
		w.Start()
	}
	return s
}
// NewEventWorker creates a new event worker with an unbuffered job
// channel; the worker is idle until Start is called.
func NewEventWorker(id int, workerPool chan chan events.Event, scanner *Scanner) *EventWorker {
	w := &EventWorker{
		ID:         id,
		WorkerPool: workerPool,
		JobChannel: make(chan events.Event),
		QuitChan:   make(chan bool),
		scanner:    scanner,
	}
	return w
}
// Start begins the worker's processing loop in its own goroutine.
//
// Each iteration re-registers the worker's job channel in the shared
// pool, then blocks until either a job arrives or a quit signal is
// received. NOTE(review): after a quit signal the job channel may still
// sit registered in the pool; a job dispatched to a stopped worker would
// never be processed — confirm Stop is only used during full shutdown.
func (w *EventWorker) Start() {
	go func() {
		for {
			// Register the worker in the worker pool
			w.WorkerPool <- w.JobChannel
			select {
			case job := <-w.JobChannel:
				// Process the job
				w.Process(job)
			case <-w.QuitChan:
				// Stop the worker
				return
			}
		}
	}()
}
// Stop terminates the worker asynchronously.
//
// The quit signal is sent from a separate goroutine so Stop itself never
// blocks. NOTE(review): if the worker goroutine has already exited, this
// sender goroutine blocks forever on the unbuffered QuitChan — confirm
// Stop is invoked at most once per worker.
func (w *EventWorker) Stop() {
	go func() {
		w.QuitChan <- true
	}()
}
// Process handles a single event inside the worker goroutine.
//
// Processing happens synchronously here (no nested goroutine) so the
// scanner's WaitGroup accounting cannot race with event analysis.
func (w *EventWorker) Process(event events.Event) {
	defer w.scanner.wg.Done()

	w.scanner.logger.Debug(fmt.Sprintf("Worker %d processing %s event in pool %s from protocol %s",
		w.ID, event.Type.String(), event.PoolAddress, event.Protocol))

	// Event-driven cache invalidation: any event that mutates pool state
	// must drop the cached reserves so profit math sees fresh data.
	stateChanging := event.Type == events.Swap ||
		event.Type == events.AddLiquidity ||
		event.Type == events.RemoveLiquidity
	if w.scanner.reserveCache != nil && stateChanging {
		w.scanner.reserveCache.Invalidate(event.PoolAddress)
		w.scanner.logger.Debug(fmt.Sprintf("Cache invalidated for pool %s due to %s event",
			event.PoolAddress.Hex(), event.Type.String()))
	}

	// Route the event to the analyzer that understands its type.
	switch event.Type {
	case events.Swap:
		w.scanner.swapAnalyzer.AnalyzeSwapEvent(event, w.scanner.marketScanner)
	case events.AddLiquidity:
		w.scanner.liquidityAnalyzer.AnalyzeLiquidityEvent(event, w.scanner.marketScanner, true)
	case events.RemoveLiquidity:
		w.scanner.liquidityAnalyzer.AnalyzeLiquidityEvent(event, w.scanner.marketScanner, false)
	case events.NewPool:
		w.scanner.liquidityAnalyzer.AnalyzeNewPoolEvent(event, w.scanner.marketScanner)
	default:
		w.scanner.logger.Debug(fmt.Sprintf("Worker %d received unknown event type: %d", w.ID, event.Type))
	}
}
// SubmitEvent validates an event and submits it to the worker pool.
//
// Events are rejected (and recorded as parsing failures) when the pool
// address is zero, matches one of the token addresses, or looks like a
// zero-padded artifact of a bad parse. Valid events are recorded as
// parsing successes and dispatched to the next available worker; this
// call blocks until a worker job channel is free.
//
// FIX: the ParsingEvent construction was duplicated four times with only
// Success/RejectionReason varying; it is now centralized in
// recordParsingResult so the four call sites cannot drift apart.
func (s *Scanner) SubmitEvent(event events.Event) {
	startTime := time.Now()

	// CRITICAL FIX: Validate pool address before submission.
	if event.PoolAddress == (common.Address{}) {
		s.logger.Warn(fmt.Sprintf("REJECTED: Event with zero PoolAddress rejected - TxHash: %s, Protocol: %s, Type: %v, Token0: %s, Token1: %s",
			event.TransactionHash.Hex(), event.Protocol, event.Type, event.Token0.Hex(), event.Token1.Hex()))
		s.recordParsingResult(event, startTime, false, "zero_address")
		return // Reject events with zero pool addresses
	}

	// The pool address should never equal either token address; that
	// pattern indicates the parser read the wrong log fields.
	if event.PoolAddress == event.Token0 || event.PoolAddress == event.Token1 {
		s.logger.Warn(fmt.Sprintf("REJECTED: Event with pool address matching token address - TxHash: %s, Pool: %s, Token0: %s, Token1: %s",
			event.TransactionHash.Hex(), event.PoolAddress.Hex(), event.Token0.Hex(), event.Token1.Hex()))
		s.recordParsingResult(event, startTime, false, "duplicate_address")
		return // Reject events where pool address matches token addresses
	}

	// Suspicious zero-padded addresses ("0x" followed by 18 zero hex
	// digits) are almost certainly mis-parsed values, not real contracts.
	poolHex := event.PoolAddress.Hex()
	if len(poolHex) == 42 && poolHex[:20] == "0x000000000000000000" {
		s.logger.Warn(fmt.Sprintf("REJECTED: Event with suspicious zero-padded pool address - TxHash: %s, Pool: %s",
			event.TransactionHash.Hex(), poolHex))
		s.recordParsingResult(event, startTime, false, "suspicious_address")
		return // Reject events with zero-padded addresses
	}

	// Record successful parsing.
	s.recordParsingResult(event, startTime, true, "")

	s.wg.Add(1)
	// Get an available worker job channel (blocks until one is free).
	jobChannel := <-s.workerPool
	// Send the job to the worker.
	jobChannel <- event
}

// recordParsingResult reports one parsing outcome to the parsing
// monitor. reason is empty for successful parses.
func (s *Scanner) recordParsingResult(event events.Event, startTime time.Time, success bool, reason string) {
	s.parsingMonitor.RecordParsingEvent(ParsingEvent{
		TransactionHash: event.TransactionHash,
		Protocol:        event.Protocol,
		Success:         success,
		RejectionReason: reason,
		PoolAddress:     event.PoolAddress,
		Token0:          event.Token0,
		Token1:          event.Token1,
		ParseTimeMs:     float64(time.Since(startTime).Nanoseconds()) / 1000000,
		Timestamp:       time.Now(),
	})
}
// GetMarketScanner returns the underlying market scanner so callers can
// configure it directly.
func (s *Scanner) GetMarketScanner() *market.MarketScanner {
	return s.marketScanner
}
// GetTopOpportunities returns up to limit top-ranked arbitrage
// opportunities, delegating to the market scanner.
func (s *Scanner) GetTopOpportunities(limit int) []*profitcalc.RankedOpportunity {
	return s.marketScanner.GetTopOpportunities(limit)
}
// GetExecutableOpportunities returns up to limit arbitrage opportunities
// the market scanner currently considers executable.
func (s *Scanner) GetExecutableOpportunities(limit int) []*profitcalc.RankedOpportunity {
	return s.marketScanner.GetExecutableOpportunities(limit)
}
// GetOpportunityStats returns statistics about tracked opportunities as
// a generic key/value map for dashboard/metrics consumers.
func (s *Scanner) GetOpportunityStats() map[string]interface{} {
	return s.marketScanner.GetOpportunityStats()
}
// GetMarketDataStats returns comprehensive market data statistics from
// the underlying market scanner.
func (s *Scanner) GetMarketDataStats() map[string]interface{} {
	return s.marketScanner.GetMarketDataStats()
}
// GetCachedTokenInfo returns cached information about a token; the
// boolean reports whether the token was present in the cache.
func (s *Scanner) GetCachedTokenInfo(tokenAddr common.Address) (*marketdata.TokenInfo, bool) {
	return s.marketScanner.GetCachedTokenInfo(tokenAddr)
}
// GetCachedPoolInfo returns cached information about a pool; the boolean
// reports whether the pool was present in the cache.
func (s *Scanner) GetCachedPoolInfo(poolAddr common.Address) (*marketdata.PoolInfo, bool) {
	return s.marketScanner.GetCachedPoolInfo(poolAddr)
}
// GetPoolsForTokenPair returns all cached pools trading the given token
// pair, delegating to the market scanner's cache.
func (s *Scanner) GetPoolsForTokenPair(token0, token1 common.Address) []*marketdata.PoolInfo {
	return s.marketScanner.GetPoolsForTokenPair(token0, token1)
}
// GetActiveFactories returns all DEX factories the market scanner
// currently tracks as active.
func (s *Scanner) GetActiveFactories() []*marketdata.FactoryInfo {
	return s.marketScanner.GetActiveFactories()
}
// WaitGroup returns the scanner's wait group; callers use it to block
// until all submitted events have finished processing.
func (s *Scanner) WaitGroup() *sync.WaitGroup {
	return &s.wg
}
// GetParsingStats returns comprehensive parsing performance statistics
// from the parsing monitor.
func (s *Scanner) GetParsingStats() map[string]interface{} {
	return s.parsingMonitor.GetCurrentStats()
}
// GetParsingHealthStatus returns the current parsing health status,
// wrapped under the "health_status" key for dashboard consumers.
func (s *Scanner) GetParsingHealthStatus() map[string]interface{} {
	return map[string]interface{}{
		"health_status": s.parsingMonitor.GetHealthStatus(),
	}
}
// GetParsingPerformanceMetrics returns detailed parsing performance
// metrics formatted for dashboard display.
func (s *Scanner) GetParsingPerformanceMetrics() map[string]interface{} {
	return s.parsingMonitor.GetDashboardData()
}

View File

@@ -0,0 +1,331 @@
//go:build legacy_scanner
// +build legacy_scanner
package scanner
import (
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/contracts"
"github.com/fraktal/mev-beta/pkg/database"
"github.com/fraktal/mev-beta/pkg/events"
)
// TestNewMarketScanner verifies construction wires up config, logger,
// worker pool, cache, and the configured number of workers.
func TestNewMarketScanner(t *testing.T) {
	cfg := &config.BotConfig{
		MaxWorkers: 5,
		RPCTimeout: 30,
	}
	log := logger.New("info", "text", "")

	// nil dependencies are acceptable for pure-construction testing.
	var contractExecutor *contracts.ContractExecutor
	var db *database.Database

	scanner := NewMarketScanner(cfg, log, contractExecutor, db)

	assert.NotNil(t, scanner)
	assert.Equal(t, cfg, scanner.config)
	assert.Equal(t, log, scanner.logger)
	assert.NotNil(t, scanner.workerPool)
	assert.NotNil(t, scanner.workers)
	assert.NotNil(t, scanner.cache)
	assert.NotNil(t, scanner.cacheTTL)
	assert.Equal(t, time.Duration(cfg.RPCTimeout)*time.Second, scanner.cacheTTL)
	assert.Equal(t, cfg.MaxWorkers, len(scanner.workers))
}
func TestEventTypeString(t *testing.T) {
// Test all event types
assert.Equal(t, "Unknown", events.Unknown.String())
assert.Equal(t, "Swap", events.Swap.String())
assert.Equal(t, "AddLiquidity", events.AddLiquidity.String())
assert.Equal(t, "RemoveLiquidity", events.RemoveLiquidity.String())
assert.Equal(t, "NewPool", events.NewPool.String())
}
// TestIsSignificantMovement checks the price-impact threshold comparison.
func TestIsSignificantMovement(t *testing.T) {
	cfg := &config.BotConfig{
		MinProfitThreshold: 10.0,
	}
	log := logger.New("info", "text", "")
	scanner := NewMarketScanner(cfg, log)

	// Impact above the threshold is significant.
	above := &PriceMovement{PriceImpact: 15.0}
	assert.True(t, scanner.isSignificantMovement(above, cfg.MinProfitThreshold))

	// Impact below the threshold is not.
	below := &PriceMovement{PriceImpact: 5.0}
	assert.False(t, scanner.isSignificantMovement(below, cfg.MinProfitThreshold))
}
// TestCalculatePriceMovement exercises price movement calculation from a
// swap event against cached pool state.
func TestCalculatePriceMovement(t *testing.T) {
	cfg := &config.BotConfig{}
	log := logger.New("info", "text", "")
	scanner := NewMarketScanner(cfg, log)

	event := events.Event{
		Token0:    common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
		Token1:    common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
		Amount0:   big.NewInt(1000000000),         // 1000 tokens
		Amount1:   big.NewInt(500000000000000000), // 0.5 ETH
		Tick:      200000,
		Timestamp: uint64(time.Now().Unix()),
	}
	poolData := &CachedData{
		SqrtPriceX96: uint256.NewInt(2505414483750470000),
	}

	priceMovement, err := scanner.calculatePriceMovement(event, poolData)

	assert.NoError(t, err)
	assert.NotNil(t, priceMovement)
	assert.Equal(t, event.Token0.Hex(), priceMovement.Token0)
	assert.Equal(t, event.Token1.Hex(), priceMovement.Token1)
	assert.Equal(t, event.Tick, priceMovement.TickBefore)
	// The implementation stamps time.Now(), so only presence is asserted.
	assert.NotNil(t, priceMovement.Timestamp)
	assert.NotNil(t, priceMovement.PriceBefore)
	assert.NotNil(t, priceMovement.AmountIn)
	assert.NotNil(t, priceMovement.AmountOut)
}
// TestFindArbitrageOpportunities verifies opportunity discovery returns a
// usable (possibly empty) slice without panicking. The exact number of
// opportunities depends on mock data, so only the shape is asserted.
//
// FIX: removed a duplicated assert.NotNil(t, opportunities) that checked
// the same condition twice.
func TestFindArbitrageOpportunities(t *testing.T) {
	cfg := &config.BotConfig{}
	log := logger.New("info", "text", "")
	scanner := NewMarketScanner(cfg, log)

	event := events.Event{
		PoolAddress: common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640"),
		Token0:      common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
		Token1:      common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
		Protocol:    "UniswapV3",
		Amount0:     big.NewInt(1000000000),         // 1000 tokens
		Amount1:     big.NewInt(500000000000000000), // 0.5 ETH
	}
	movement := &PriceMovement{
		Token0:      event.Token0.Hex(),
		Token1:      event.Token1.Hex(),
		Pool:        event.PoolAddress.Hex(),
		Protocol:    event.Protocol,
		PriceImpact: 5.0,
		Timestamp:   time.Now(),
		PriceBefore: big.NewFloat(2000.0), // Mock price
	}

	opportunities := scanner.findArbitrageOpportunities(event, movement)
	assert.NotNil(t, opportunities)
}
// TestGetPoolDataCacheHit verifies that a pre-seeded cache entry is
// returned directly instead of triggering a fetch.
func TestGetPoolDataCacheHit(t *testing.T) {
	cfg := &config.BotConfig{
		RPCTimeout: 30,
	}
	log := logger.New("info", "text", "")
	scanner := NewMarketScanner(cfg, log)

	poolAddress := "0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640"
	poolData := &CachedData{
		Address:      common.HexToAddress(poolAddress),
		Token0:       common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
		Token1:       common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
		Fee:          3000,
		Liquidity:    uint256.NewInt(1000000000000000000),
		SqrtPriceX96: uint256.NewInt(2505414483750470000),
		Tick:         200000,
		TickSpacing:  60,
		LastUpdated:  time.Now(),
	}

	// Seed the cache under the key format used by getPoolData.
	scanner.cacheMutex.Lock()
	scanner.cache["pool_"+poolAddress] = poolData
	scanner.cacheMutex.Unlock()

	result, err := scanner.getPoolData(poolAddress)
	assert.NoError(t, err)
	assert.Equal(t, poolData, result)
}
// TestUpdatePoolData verifies that processing an event writes the pool's
// state into the cache under the expected key.
func TestUpdatePoolData(t *testing.T) {
	cfg := &config.BotConfig{}
	log := logger.New("info", "text", "")
	scanner := NewMarketScanner(cfg, log)

	event := events.Event{
		PoolAddress:  common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640"),
		Token0:       common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
		Token1:       common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
		Liquidity:    uint256.NewInt(1000000000000000000),
		SqrtPriceX96: uint256.NewInt(2505414483750470000),
		Tick:         200000,
		Timestamp:    uint64(time.Now().Unix()),
	}

	scanner.updatePoolData(event)

	// Read back under the cache lock and compare field by field.
	scanner.cacheMutex.RLock()
	poolData, exists := scanner.cache["pool_"+event.PoolAddress.Hex()]
	scanner.cacheMutex.RUnlock()

	assert.True(t, exists)
	assert.NotNil(t, poolData)
	assert.Equal(t, event.PoolAddress, poolData.Address)
	assert.Equal(t, event.Token0, poolData.Token0)
	assert.Equal(t, event.Token1, poolData.Token1)
	assert.Equal(t, event.Liquidity, poolData.Liquidity)
	assert.Equal(t, event.SqrtPriceX96, poolData.SqrtPriceX96)
	assert.Equal(t, event.Tick, poolData.Tick)
}
// RACE CONDITION FIX TEST: submits many events concurrently and waits for
// completion; a hang here indicates the WaitGroup race the synchronous
// worker processing fix addressed.
//
// FIX: the local slice was previously named `events`, shadowing the
// imported events package so the later `events.Event{...}` composite
// literals referred to the slice and could not compile; the slice is now
// named testEvents.
func TestConcurrentWorkerProcessingRaceDetection(t *testing.T) {
	cfg := &config.BotConfig{
		MaxWorkers: 10,
		RPCTimeout: 30,
	}
	log := logger.New("info", "text", "")

	// Mock database and contracts registry.
	db, err := database.NewInMemoryDatabase()
	assert.NoError(t, err)
	contractsRegistry := &contracts.ContractsRegistry{}

	scanner := NewMarketScanner(cfg, log)
	scanner.db = db
	scanner.contracts = contractsRegistry

	// Build events targeting distinct pools to simulate concurrent load.
	testEvents := make([]events.Event, 100)
	for i := 0; i < 100; i++ {
		testEvents[i] = events.Event{
			Type:        events.Swap,
			PoolAddress: common.BigToAddress(big.NewInt(int64(i))),
			Token0:      common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
			Token1:      common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
			Liquidity:   uint256.NewInt(1000000000000000000),
			Timestamp:   uint64(time.Now().Unix()),
		}
	}

	// Submit all events concurrently.
	start := time.Now()
	for _, event := range testEvents {
		scanner.SubmitEvent(event)
	}

	// Wait for all processing to complete.
	scanner.WaitGroup().Wait()
	duration := time.Since(start)

	// Test should complete without hanging (indicates no race condition).
	assert.Less(t, duration, 10*time.Second, "Processing took too long, possible race condition")
	t.Logf("Successfully processed %d events in %v", len(testEvents), duration)
}
// RACE CONDITION FIX TEST: stress test with high concurrency — many
// workers and many events submitted as fast as possible.
//
// FIX: same as TestConcurrentWorkerProcessingRaceDetection — the local
// slice previously shadowed the imported events package, breaking the
// `events.Event{...}` literals inside the loop; renamed to testEvents.
func TestHighConcurrencyStressTest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping stress test in short mode")
	}

	cfg := &config.BotConfig{
		MaxWorkers: 50,
		RPCTimeout: 30,
	}
	log := logger.New("info", "text", "")

	// Mock database and contracts registry.
	db, err := database.NewInMemoryDatabase()
	assert.NoError(t, err)
	contractsRegistry := &contracts.ContractsRegistry{}

	scanner := NewMarketScanner(cfg, log)
	scanner.db = db
	scanner.contracts = contractsRegistry

	// Build a large batch of events against distinct pools.
	numEvents := 1000
	testEvents := make([]events.Event, numEvents)
	for i := 0; i < numEvents; i++ {
		testEvents[i] = events.Event{
			Type:        events.Swap,
			PoolAddress: common.BigToAddress(big.NewInt(int64(i))),
			Token0:      common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
			Token1:      common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
			Liquidity:   uint256.NewInt(uint64(1000000000000000000 + i)),
			Timestamp:   uint64(time.Now().Unix()),
		}
	}

	// Submit all events rapidly.
	start := time.Now()
	for _, event := range testEvents {
		scanner.SubmitEvent(event)
	}

	// Wait for all processing to complete.
	scanner.WaitGroup().Wait()
	duration := time.Since(start)

	// Test should complete without hanging or panicking.
	assert.Less(t, duration, 30*time.Second, "High concurrency processing took too long")
	t.Logf("Successfully processed %d events with %d workers in %v",
		numEvents, cfg.MaxWorkers, duration)
}

View File

@@ -0,0 +1,41 @@
package scanner
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
)
// TokenDecimalMap provides decimal information for common tokens, keyed
// by checksummed token contract address.
// NOTE(review): 0xFF970A61... is labeled USDC here but USDC.e in
// pkg/scanner/common — confirm which is intended and consider
// consolidating the two decimal tables into one source of truth.
var TokenDecimalMap = map[common.Address]int{
	common.HexToAddress("0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"): 18, // WETH
	common.HexToAddress("0xFF970A61A04b1cA14834A43f5dE4533eBDDB5CC8"): 6,  // USDC
	common.HexToAddress("0xFd086bC7CD5C481DCC9C85ebE478A1C0b69FCbb9"): 6,  // USDT
	common.HexToAddress("0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f"): 8,  // WBTC
	common.HexToAddress("0x912CE59144191C1204E64559FE8253a0e49E6548"): 18, // ARB
}
// GetTokenDecimals returns the decimal count for a token, defaulting to
// the ERC-20 convention of 18 when the token is not in the map.
func GetTokenDecimals(token common.Address) int {
	decimals, ok := TokenDecimalMap[token]
	if !ok {
		return 18
	}
	return decimals
}
// NormalizeToEther converts a raw token amount to its human-readable
// value by dividing out the token's decimals. A nil amount yields 0.
func NormalizeToEther(amount *big.Int, token common.Address) *big.Float {
	if amount == nil {
		return big.NewFloat(0)
	}
	exp := big.NewInt(int64(GetTokenDecimals(token)))
	divisor := new(big.Int).Exp(big.NewInt(10), exp, nil)
	return new(big.Float).Quo(new(big.Float).SetInt(amount), new(big.Float).SetInt(divisor))
}

View File

@@ -0,0 +1,82 @@
package market
import (
"context"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/fraktal/mev-beta/internal/logger"
)
// PoolValidator provides pool address validation before RPC queries.
// A nil client skips the on-chain bytecode check, leaving only the local
// format checks.
type PoolValidator struct {
	logger *logger.Logger
	client *ethclient.Client // optional; enables the contract-existence check
}
// NewPoolValidator creates a new pool validator. client may be nil, in
// which case only local (offline) address checks are performed.
func NewPoolValidator(logger *logger.Logger, client *ethclient.Client) *PoolValidator {
	pv := &PoolValidator{
		logger: logger,
		client: client,
	}
	return pv
}
// IsValidPoolAddress performs comprehensive validation on a pool address.
// It returns true only if the address is worth querying from RPC, plus a
// human-readable rejection reason when it is not.
//
// The check is deliberately permissive: transient RPC failures return
// true so callers can retry later rather than permanently discarding
// the pool.
func (pv *PoolValidator) IsValidPoolAddress(ctx context.Context, addr common.Address) (bool, string) {
	// Check 1: Address must not be zero
	if addr == (common.Address{}) {
		return false, "zero address"
	}
	// Check 2: Reject obviously fake address patterns (see
	// isValidEthereumAddress for the exact rules applied).
	if !isValidEthereumAddress(addr) {
		return false, "invalid address format"
	}
	// Check 3: If we have a client, verify contract exists at this address.
	// This is the primary defense against invalid pool addresses.
	if pv.client != nil {
		codeSize, err := getContractCodeSize(ctx, pv.client, addr)
		if err != nil {
			// Network errors are transient - allow retry
			pv.logger.Debug(fmt.Sprintf("Transient error checking contract for %s: %v (will retry)", addr.Hex(), err))
			return true, "" // Allow retry for transient failures
		}
		// Zero bytecode means definitely no contract
		if codeSize == 0 {
			return false, "no contract deployed"
		}
		// Contract exists - but may still be non-standard; the actual
		// pool RPC query will surface that case.
		return true, ""
	}
	// No client configured: offline checks passed.
	return true, ""
}
// getContractCodeSize returns the length of the bytecode deployed at addr
// as of the latest block. A size of 0 means no contract lives there; a
// negative size is returned alongside a non-nil (transient) error.
func getContractCodeSize(ctx context.Context, client *ethclient.Client, addr common.Address) (int, error) {
	bytecode, err := client.CodeAt(ctx, addr, nil) // nil block number = latest
	if err != nil {
		return -1, err
	}
	return len(bytecode), nil
}
// isValidEthereumAddress validates basic Ethereum address plausibility.
// It rejects the all-zeros address and the all-ones address (0xFF...FF),
// both of which are placeholder values that never correspond to a real
// contract. Hex validity and length are already guaranteed by the
// common.Address type itself.
//
// BUGFIX: the original comment promised an "all ones" check but only the
// zero-address comparison was implemented; the 0xFF...FF check is now real.
func isValidEthereumAddress(addr common.Address) bool {
	if addr == (common.Address{}) {
		return false
	}
	// Reject 0xFFFF...FF (all bytes set), an obviously fake sentinel value.
	for _, b := range addr {
		if b != 0xFF {
			return true // at least one byte differs -> plausible address
		}
	}
	return false
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,45 @@
package market
import (
"errors"
"testing"
"github.com/stretchr/testify/require"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
)
// TestNormalizeAndValidatePoolAddress exercises the scanner's pool-address
// normalization and validation path with three representative inputs: a
// known-good pool, a token address misclassified as a pool, and the zero
// address. The scanner is built without executor/database (nil extras).
func TestNormalizeAndValidatePoolAddress(t *testing.T) {
	cfg := &config.BotConfig{
		MaxWorkers: 1,
		RPCTimeout: 1,
	}
	log := logger.New("info", "text", "")
	scanner := NewMarketScanner(cfg, log, nil, nil)
	t.Run("accepts known pool", func(t *testing.T) {
		address := "0xC6962004f452bE9203591991D15f6b388e09E8D0" // known Uniswap V3 pool
		normalized, result, err := scanner.normalizeAndValidatePoolAddress(address)
		require.NoError(t, err)
		require.NotNil(t, result)
		// Normalization lowercases the hex representation.
		require.Equal(t, "0xc6962004f452be9203591991d15f6b388e09e8d0", normalized)
		require.True(t, result.IsValid)
	})
	t.Run("rejects known token misclassified as pool", func(t *testing.T) {
		address := "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1" // WETH
		_, result, err := scanner.normalizeAndValidatePoolAddress(address)
		require.Error(t, err)
		require.NotNil(t, result)
		// Rejection is surfaced via the sentinel error.
		require.True(t, errors.Is(err, ErrInvalidPoolCandidate))
	})
	t.Run("rejects corrupted address", func(t *testing.T) {
		address := "0x0000000000000000000000000000000000000000"
		_, result, err := scanner.normalizeAndValidatePoolAddress(address)
		require.Error(t, err)
		require.NotNil(t, result)
		require.True(t, errors.Is(err, ErrInvalidPoolCandidate))
	})
}

View File

@@ -0,0 +1,750 @@
package scanner
import (
"encoding/json"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/fraktal/mev-beta/internal/logger"
)
// ParsingMonitor tracks parsing success rates and performance metrics.
// Cumulative counters are lock-free atomics; the mutex guards only the
// time-based metric maps and realTimeMetrics. Because it embeds a
// sync.RWMutex, values of this type must not be copied — always pass
// *ParsingMonitor.
type ParsingMonitor struct {
	logger *logger.Logger
	mutex  sync.RWMutex
	// Parsing statistics (updated lock-free from the Record* methods).
	stats struct {
		totalTransactions   atomic.Int64 // every transaction observed
		dexTransactions     atomic.Int64 // transactions identified as DEX activity
		successfulParsing   atomic.Int64
		failedParsing       atomic.Int64
		zeroAddressRejected atomic.Int64 // rejected with reason "zero_address"
		suspiciousRejected  atomic.Int64 // rejected with reason "suspicious_address"
		duplicateRejected   atomic.Int64 // rejected with reason "duplicate_address"
		// Protocol-specific success counters.
		uniswapV3Parsed       atomic.Int64
		uniswapV2Parsed       atomic.Int64
		multicallParsed       atomic.Int64
		universalRouterParsed atomic.Int64
		// Protocol-specific error counters.
		uniswapV3Errors       atomic.Int64
		uniswapV2Errors       atomic.Int64
		multicallErrors       atomic.Int64
		universalRouterErrors atomic.Int64
	}
	// Time-based metrics, guarded by mutex.
	hourlyMetrics   map[int]*HourlyParsingMetrics   // keyed by hour-of-day (0-23)
	dailyMetrics    map[string]*DailyParsingMetrics // keyed by date "2006-01-02"
	realTimeMetrics *RealTimeParsingMetrics
	// Configuration (alert thresholds, reporting cadence).
	config *ParsingMonitorConfig
	// Start time for uptime calculation.
	startTime time.Time
}
// ParsingMonitorConfig configures the parsing monitor.
type ParsingMonitorConfig struct {
	EnableRealTimeMonitoring bool            `json:"enable_real_time_monitoring"` // start background metric/report goroutines
	MetricsRetentionHours    int             `json:"metrics_retention_hours"`     // NOTE(review): not enforced anywhere in this file
	AlertThresholds          AlertThresholds `json:"alert_thresholds"`
	ReportInterval           time.Duration   `json:"report_interval"` // cadence of the periodic log report
}

// AlertThresholds defines when to trigger parsing alerts.
type AlertThresholds struct {
	MinSuccessRatePercent     float64 `json:"min_success_rate_percent"`
	MaxZeroAddressRatePercent float64 `json:"max_zero_address_rate_percent"`
	MaxErrorRatePercent       float64 `json:"max_error_rate_percent"`   // NOTE(review): unused in visible code
	MinTransactionsPerHour    int64   `json:"min_transactions_per_hour"` // NOTE(review): unused in visible code
}
// HourlyParsingMetrics tracks metrics for a specific hour of the day.
// Buckets are keyed by hour-of-day in ParsingMonitor.hourlyMetrics.
type HourlyParsingMetrics struct {
	Hour                int     `json:"hour"` // 0-23
	Date                string  `json:"date"` // "2006-01-02"
	TotalTransactions   int64   `json:"total_transactions"`
	DexTransactions     int64   `json:"dex_transactions"`
	SuccessfulParsing   int64   `json:"successful_parsing"`
	FailedParsing       int64   `json:"failed_parsing"`
	SuccessRate         float64 `json:"success_rate"` // percent, recomputed on every event
	ZeroAddressRejected int64   `json:"zero_address_rejected"`
	SuspiciousRejected  int64   `json:"suspicious_rejected"`
	// Protocol breakdown. NOTE(review): allocated but never populated by
	// updateTimeBasedMetrics in the visible code.
	ProtocolStats map[string]ProtocolMetrics `json:"protocol_stats"`
	Timestamp     time.Time                  `json:"timestamp"` // bucket creation time
}

// DailyParsingMetrics tracks metrics for a specific calendar day.
type DailyParsingMetrics struct {
	Date                string  `json:"date"` // "2006-01-02"
	TotalTransactions   int64   `json:"total_transactions"`
	DexTransactions     int64   `json:"dex_transactions"`
	SuccessfulParsing   int64   `json:"successful_parsing"`
	FailedParsing       int64   `json:"failed_parsing"`
	SuccessRate         float64 `json:"success_rate"` // percent
	ZeroAddressRejected int64   `json:"zero_address_rejected"`
	SuspiciousRejected  int64   `json:"suspicious_rejected"`
	// Protocol breakdown. NOTE(review): allocated but never populated in
	// the visible code.
	ProtocolStats map[string]ProtocolMetrics `json:"protocol_stats"`
	// Hourly breakdown. NOTE(review): never filled in by the visible code.
	HourlyBreakdown [24]*HourlyParsingMetrics `json:"hourly_breakdown"`
	Timestamp       time.Time                 `json:"timestamp"` // bucket creation time
}
// RealTimeParsingMetrics tracks real-time parsing performance; refreshed
// every 10 seconds by the background monitoring goroutine.
type RealTimeParsingMetrics struct {
	LastUpdateTime         time.Time `json:"last_update_time"`
	ParsesPerSecond        float64   `json:"parses_per_second"` // NOTE(review): never computed in visible code
	SuccessRatePercent     float64   `json:"success_rate_percent"`
	ErrorRatePercent       float64   `json:"error_rate_percent"` // always 100 - SuccessRatePercent
	ZeroAddressRatePercent float64   `json:"zero_address_rate_percent"`
	// Recent activity (last 5 minutes).
	// NOTE(review): these three are never written in the visible code.
	RecentSuccesses     int64 `json:"recent_successes"`
	RecentFailures      int64 `json:"recent_failures"`
	RecentZeroAddresses int64 `json:"recent_zero_addresses"`
	// Protocol health, keyed by protocol name.
	ProtocolHealth map[string]ProtocolHealth `json:"protocol_health"`
}

// ProtocolMetrics tracks parsing metrics for a specific protocol.
type ProtocolMetrics struct {
	Protocol           string    `json:"protocol"`
	TotalParsed        int64     `json:"total_parsed"`
	Errors             int64     `json:"errors"`
	SuccessRate        float64   `json:"success_rate"`
	AverageParseTimeMs float64   `json:"average_parse_time_ms"`
	LastParseTime      time.Time `json:"last_parse_time"`
}

// ProtocolHealth tracks real-time health of a protocol.
type ProtocolHealth struct {
	Protocol          string    `json:"protocol"`
	Status            string    `json:"status"` // "healthy", "degraded", "critical"
	SuccessRate       float64   `json:"success_rate"`
	ErrorRate         float64   `json:"error_rate"`
	LastSuccessTime   time.Time `json:"last_success_time"`
	LastErrorTime     time.Time `json:"last_error_time"`
	ConsecutiveErrors int       `json:"consecutive_errors"`
}

// ParsingEvent represents a single parsing outcome fed to the monitor via
// RecordParsingEvent.
type ParsingEvent struct {
	TransactionHash common.Hash    `json:"transaction_hash"`
	Protocol        string         `json:"protocol"` // e.g. "UniswapV3", "Multicall"
	Success         bool           `json:"success"`
	Error           string         `json:"error,omitempty"`
	RejectionReason string         `json:"rejection_reason,omitempty"` // "zero_address", "suspicious_address", "duplicate_address"
	PoolAddress     common.Address `json:"pool_address"`
	Token0          common.Address `json:"token0"`
	Token1          common.Address `json:"token1"`
	ParseTimeMs     float64        `json:"parse_time_ms"`
	Timestamp       time.Time      `json:"timestamp"`
}
// NewParsingMonitor creates a new parsing monitor. A nil config selects
// conservative defaults (72h retention, 5-minute reports, 80% minimum
// success rate). When real-time monitoring is enabled, two background
// goroutines are started; note they have no shutdown path and live for
// the remainder of the process.
func NewParsingMonitor(logger *logger.Logger, config *ParsingMonitorConfig) *ParsingMonitor {
	if config == nil {
		config = &ParsingMonitorConfig{
			EnableRealTimeMonitoring: true,
			MetricsRetentionHours:    72, // 3 days
			AlertThresholds: AlertThresholds{
				MinSuccessRatePercent:     80.0,
				MaxZeroAddressRatePercent: 5.0,
				MaxErrorRatePercent:       15.0,
				MinTransactionsPerHour:    100,
			},
			ReportInterval: 5 * time.Minute,
		}
	}
	pm := &ParsingMonitor{
		logger:        logger,
		config:        config,
		startTime:     time.Now(),
		hourlyMetrics: map[int]*HourlyParsingMetrics{},
		dailyMetrics:  map[string]*DailyParsingMetrics{},
		realTimeMetrics: &RealTimeParsingMetrics{
			ProtocolHealth: map[string]ProtocolHealth{},
		},
	}
	// Kick off background refresh and reporting loops.
	if config.EnableRealTimeMonitoring {
		go pm.startRealTimeMonitoring()
		go pm.startPeriodicReporting()
	}
	return pm
}
// RecordParsingEvent folds a single parsing outcome into the cumulative
// counters and the hourly/daily aggregates.
func (pm *ParsingMonitor) RecordParsingEvent(event ParsingEvent) {
	pm.stats.totalTransactions.Add(1)
	// Pick the protocol-specific counter to bump (success or error side).
	var protoCounter *atomic.Int64
	if event.Success {
		pm.stats.successfulParsing.Add(1)
		pm.stats.dexTransactions.Add(1)
		switch event.Protocol {
		case "UniswapV3":
			protoCounter = &pm.stats.uniswapV3Parsed
		case "UniswapV2":
			protoCounter = &pm.stats.uniswapV2Parsed
		case "Multicall":
			protoCounter = &pm.stats.multicallParsed
		case "UniversalRouter":
			protoCounter = &pm.stats.universalRouterParsed
		}
	} else {
		pm.stats.failedParsing.Add(1)
		// Categorize the rejection reason.
		switch event.RejectionReason {
		case "zero_address":
			pm.stats.zeroAddressRejected.Add(1)
		case "suspicious_address":
			pm.stats.suspiciousRejected.Add(1)
		case "duplicate_address":
			pm.stats.duplicateRejected.Add(1)
		}
		switch event.Protocol {
		case "UniswapV3":
			protoCounter = &pm.stats.uniswapV3Errors
		case "UniswapV2":
			protoCounter = &pm.stats.uniswapV2Errors
		case "Multicall":
			protoCounter = &pm.stats.multicallErrors
		case "UniversalRouter":
			protoCounter = &pm.stats.universalRouterErrors
		}
	}
	if protoCounter != nil {
		protoCounter.Add(1)
	}
	// Fold into the hourly/daily buckets.
	pm.updateTimeBasedMetrics(event)
}
// RecordTransactionProcessed records that one raw transaction was examined,
// regardless of whether it turned out to be DEX-related.
// NOTE(review): RecordParsingEvent also bumps totalTransactions; callers
// should use one entry point or the other to avoid double counting.
func (pm *ParsingMonitor) RecordTransactionProcessed() {
	pm.stats.totalTransactions.Add(1)
}

// RecordDEXTransactionFound records that a transaction was identified as
// DEX activity.
func (pm *ParsingMonitor) RecordDEXTransactionFound() {
	pm.stats.dexTransactions.Add(1)
}
// RecordParsingSuccess records a successful parse and attributes it to the
// given protocol; unrecognized protocol names only bump the global counter.
func (pm *ParsingMonitor) RecordParsingSuccess(protocol string) {
	pm.stats.successfulParsing.Add(1)
	var counter *atomic.Int64
	switch protocol {
	case "UniswapV3":
		counter = &pm.stats.uniswapV3Parsed
	case "UniswapV2":
		counter = &pm.stats.uniswapV2Parsed
	case "Multicall":
		counter = &pm.stats.multicallParsed
	case "UniversalRouter":
		counter = &pm.stats.universalRouterParsed
	}
	if counter != nil {
		counter.Add(1)
	}
}
// RecordParsingFailure records a parsing failure, categorizing both the
// rejection reason and the protocol it was attributed to. Unrecognized
// reasons/protocols only bump the global failure counter.
func (pm *ParsingMonitor) RecordParsingFailure(protocol, reason string) {
	pm.stats.failedParsing.Add(1)
	switch reason {
	case "zero_address":
		pm.stats.zeroAddressRejected.Add(1)
	case "suspicious_address":
		pm.stats.suspiciousRejected.Add(1)
	case "duplicate_address":
		pm.stats.duplicateRejected.Add(1)
	}
	var counter *atomic.Int64
	switch protocol {
	case "UniswapV3":
		counter = &pm.stats.uniswapV3Errors
	case "UniswapV2":
		counter = &pm.stats.uniswapV2Errors
	case "Multicall":
		counter = &pm.stats.multicallErrors
	case "UniversalRouter":
		counter = &pm.stats.universalRouterErrors
	}
	if counter != nil {
		counter.Add(1)
	}
}
// GetCurrentStats returns a snapshot of the cumulative parsing statistics.
// It reads only atomic counters, so no lock is taken; individual counters
// may be mutually slightly out of sync under heavy concurrency.
func (pm *ParsingMonitor) GetCurrentStats() map[string]interface{} {
	total := pm.stats.totalTransactions.Load()
	dex := pm.stats.dexTransactions.Load()
	succeeded := pm.stats.successfulParsing.Load()
	failed := pm.stats.failedParsing.Load()

	var successRate, dexDetectionRate float64
	if total > 0 {
		if attempts := succeeded + failed; attempts > 0 {
			successRate = 100 * float64(succeeded) / float64(attempts)
		}
		dexDetectionRate = 100 * float64(dex) / float64(total)
	}

	// Small helper to keep the per-protocol sub-maps uniform.
	protocol := func(parsed, errs int64) map[string]interface{} {
		return map[string]interface{}{
			"parsed": parsed,
			"errors": errs,
		}
	}

	return map[string]interface{}{
		"total_transactions":         total,
		"dex_transactions":           dex,
		"successful_parsing":         succeeded,
		"failed_parsing":             failed,
		"success_rate_percent":       successRate,
		"dex_detection_rate_percent": dexDetectionRate,
		"zero_address_rejected":      pm.stats.zeroAddressRejected.Load(),
		"suspicious_rejected":        pm.stats.suspiciousRejected.Load(),
		"duplicate_rejected":         pm.stats.duplicateRejected.Load(),
		"uptime_hours":               time.Since(pm.startTime).Hours(),
		"protocol_stats": map[string]interface{}{
			"uniswap_v3":       protocol(pm.stats.uniswapV3Parsed.Load(), pm.stats.uniswapV3Errors.Load()),
			"uniswap_v2":       protocol(pm.stats.uniswapV2Parsed.Load(), pm.stats.uniswapV2Errors.Load()),
			"multicall":        protocol(pm.stats.multicallParsed.Load(), pm.stats.multicallErrors.Load()),
			"universal_router": protocol(pm.stats.universalRouterParsed.Load(), pm.stats.universalRouterErrors.Load()),
		},
	}
}
// updateTimeBasedMetrics folds a parsing event into the hourly and daily
// aggregates. The monitor mutex is acquired here; callers must not hold it.
//
// BUGFIX: hourlyMetrics is keyed by hour-of-day (0-23), so a bucket created
// yesterday for e.g. hour 5 was previously reused today, silently mixing two
// days' counts under one key. A bucket whose stored Date no longer matches
// today is now replaced with a fresh one.
//
// NOTE(review): config.MetricsRetentionHours is not enforced here, so
// dailyMetrics grows by one entry per day for the life of the process.
func (pm *ParsingMonitor) updateTimeBasedMetrics(event ParsingEvent) {
	now := time.Now()
	hour := now.Hour()
	date := now.Format("2006-01-02")
	pm.mutex.Lock()
	defer pm.mutex.Unlock()
	// Update hourly metrics, resetting stale buckets from previous days.
	hourlyMetric, ok := pm.hourlyMetrics[hour]
	if !ok || hourlyMetric.Date != date {
		hourlyMetric = &HourlyParsingMetrics{
			Hour:          hour,
			Date:          date,
			ProtocolStats: make(map[string]ProtocolMetrics),
			Timestamp:     now,
		}
		pm.hourlyMetrics[hour] = hourlyMetric
	}
	hourlyMetric.TotalTransactions++
	if event.Success {
		hourlyMetric.SuccessfulParsing++
		hourlyMetric.DexTransactions++
	} else {
		hourlyMetric.FailedParsing++
		if event.RejectionReason == "zero_address" {
			hourlyMetric.ZeroAddressRejected++
		} else if event.RejectionReason == "suspicious_address" {
			hourlyMetric.SuspiciousRejected++
		}
	}
	if attempts := hourlyMetric.SuccessfulParsing + hourlyMetric.FailedParsing; attempts > 0 {
		hourlyMetric.SuccessRate = float64(hourlyMetric.SuccessfulParsing) / float64(attempts) * 100
	}
	// Update daily metrics (keyed by date, so no cross-day collision).
	dailyMetric, ok := pm.dailyMetrics[date]
	if !ok {
		dailyMetric = &DailyParsingMetrics{
			Date:          date,
			ProtocolStats: make(map[string]ProtocolMetrics),
			Timestamp:     now,
		}
		pm.dailyMetrics[date] = dailyMetric
	}
	dailyMetric.TotalTransactions++
	if event.Success {
		dailyMetric.SuccessfulParsing++
		dailyMetric.DexTransactions++
	} else {
		dailyMetric.FailedParsing++
		if event.RejectionReason == "zero_address" {
			dailyMetric.ZeroAddressRejected++
		} else if event.RejectionReason == "suspicious_address" {
			dailyMetric.SuspiciousRejected++
		}
	}
	if attempts := dailyMetric.SuccessfulParsing + dailyMetric.FailedParsing; attempts > 0 {
		dailyMetric.SuccessRate = float64(dailyMetric.SuccessfulParsing) / float64(attempts) * 100
	}
}
// startRealTimeMonitoring refreshes the real-time metrics every 10 seconds.
// Runs as a background goroutine launched by NewParsingMonitor.
// NOTE(review): there is no stop channel or context, so this goroutine and
// its ticker live for the remainder of the process.
func (pm *ParsingMonitor) startRealTimeMonitoring() {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		pm.updateRealTimeMetrics()
	}
}
// updateRealTimeMetrics recomputes the rolling success/error/zero-address
// rates from the cumulative atomic counters and re-evaluates alerts.
// Called every 10 seconds while holding no lock; the write lock is taken here.
func (pm *ParsingMonitor) updateRealTimeMetrics() {
	pm.mutex.Lock()
	defer pm.mutex.Unlock()
	succeeded := pm.stats.successfulParsing.Load()
	attempts := succeeded + pm.stats.failedParsing.Load()
	if attempts > 0 {
		rate := float64(succeeded) / float64(attempts) * 100
		pm.realTimeMetrics.SuccessRatePercent = rate
		pm.realTimeMetrics.ErrorRatePercent = 100.0 - rate
		pm.realTimeMetrics.ZeroAddressRatePercent =
			float64(pm.stats.zeroAddressRejected.Load()) / float64(attempts) * 100
	}
	pm.realTimeMetrics.LastUpdateTime = time.Now()
	// Re-evaluate alert conditions against the fresh rates.
	pm.checkParsingAlerts()
}
// checkParsingAlerts inspects the current real-time rates and logs
// warnings/errors when they breach hard-coded alert bands. Alerting is
// suppressed until at least 10 parse attempts have been recorded.
// Caller must hold pm.mutex (invoked from updateRealTimeMetrics).
func (pm *ParsingMonitor) checkParsingAlerts() {
	attempts := pm.stats.successfulParsing.Load() + pm.stats.failedParsing.Load()
	if attempts < 10 {
		return // too little data for meaningful alerting
	}
	rate := pm.realTimeMetrics.SuccessRatePercent
	// Critical band: below 50% with a substantial sample.
	if rate < 50.0 && attempts > 100 {
		pm.logger.Error(fmt.Sprintf("CRITICAL PARSING ALERT: Success rate %.2f%% is critically low (total: %d)",
			rate, attempts))
	}
	// Warning band: 50-80% with a moderate sample.
	if rate >= 50.0 && rate < 80.0 && attempts > 50 {
		pm.logger.Warn(fmt.Sprintf("PARSING WARNING: Success rate %.2f%% is below normal (total: %d)",
			rate, attempts))
	}
	// Zero-address corruption signal.
	if rejected := pm.stats.zeroAddressRejected.Load(); rejected > 10 {
		pm.logger.Warn(fmt.Sprintf("PARSING CORRUPTION: %d zero address events rejected (%.2f%% of total)",
			rejected, pm.realTimeMetrics.ZeroAddressRatePercent))
	}
	// Elevated overall error rate.
	if errRate := pm.realTimeMetrics.ErrorRatePercent; errRate > 20.0 && attempts > 50 {
		pm.logger.Warn(fmt.Sprintf("HIGH ERROR RATE: %.2f%% parsing failures detected", errRate))
	}
}
// startPeriodicReporting emits a summary log line every config.ReportInterval.
// Runs as a background goroutine launched by NewParsingMonitor.
// NOTE(review): like startRealTimeMonitoring, it has no shutdown path.
func (pm *ParsingMonitor) startPeriodicReporting() {
	ticker := time.NewTicker(pm.config.ReportInterval)
	defer ticker.Stop()
	for range ticker.C {
		pm.generateAndLogReport()
	}
}
// generateAndLogReport logs a one-line performance summary built from the
// current stats snapshot, then runs the threshold-based alert checks on it.
func (pm *ParsingMonitor) generateAndLogReport() {
	snapshot := pm.GetCurrentStats()
	pm.logger.Info(fmt.Sprintf(
		"PARSING PERFORMANCE REPORT - Uptime: %.1f hours, Success Rate: %.1f%%, DEX Detection: %.1f%%, Zero Address Rejected: %d",
		snapshot["uptime_hours"].(float64),
		snapshot["success_rate_percent"].(float64),
		snapshot["dex_detection_rate_percent"].(float64),
		snapshot["zero_address_rejected"].(int64)))
	pm.checkParsingAlertsLegacy(snapshot)
}
// checkParsingAlertsLegacy checks a periodic-report snapshot against the
// configured alert thresholds and logs a warning for each breach.
//
// BUGFIX: the old code divided by total_transactions unconditionally; with
// zero transactions the zero-address rate became NaN, which compares false
// against every threshold and silently suppressed that alert (while the
// success-rate alert fired spuriously at startup). With no transactions
// recorded yet, both rates are undefined, so we now return early.
func (pm *ParsingMonitor) checkParsingAlertsLegacy(stats map[string]interface{}) {
	successRate := stats["success_rate_percent"].(float64)
	totalTx := stats["total_transactions"].(int64)
	if totalTx == 0 {
		return // no data yet; rates are undefined
	}
	zeroAddressRate := float64(stats["zero_address_rejected"].(int64)) / float64(totalTx) * 100
	if successRate < pm.config.AlertThresholds.MinSuccessRatePercent {
		pm.logger.Warn(fmt.Sprintf("PARSING ALERT: Success rate %.1f%% below threshold %.1f%%",
			successRate, pm.config.AlertThresholds.MinSuccessRatePercent))
	}
	if zeroAddressRate > pm.config.AlertThresholds.MaxZeroAddressRatePercent {
		pm.logger.Warn(fmt.Sprintf("PARSING ALERT: Zero address rate %.1f%% above threshold %.1f%%",
			zeroAddressRate, pm.config.AlertThresholds.MaxZeroAddressRatePercent))
	}
}
// ExportMetrics exports the current cumulative statistics as indented JSON.
// The payload is the same map returned by GetCurrentStats.
func (pm *ParsingMonitor) ExportMetrics() ([]byte, error) {
	stats := pm.GetCurrentStats()
	return json.MarshalIndent(stats, "", " ")
}
// GetHealthStatus grades the overall parsing health from the cumulative
// success rate: >=95 excellent, >=85 good, >=70 fair, >=50 poor, else critical.
func (pm *ParsingMonitor) GetHealthStatus() string {
	rate := pm.GetCurrentStats()["success_rate_percent"].(float64)
	grades := []struct {
		min   float64
		label string
	}{
		{95, "excellent"},
		{85, "good"},
		{70, "fair"},
		{50, "poor"},
	}
	for _, g := range grades {
		if rate >= g.min {
			return g.label
		}
	}
	return "critical"
}
// GetDashboardData returns comprehensive dashboard data for real-time
// monitoring: system health, real-time rates, per-protocol counters, error
// breakdown, and the currently firing alerts.
//
// BUGFIX: the error-breakdown percentages divided by totalTransactions
// without a zero guard, yielding NaN when no transactions had been seen —
// and encoding/json cannot marshal NaN, so any JSON export of this data
// failed. Percentages are now reported as 0 until data exists.
func (pm *ParsingMonitor) GetDashboardData() map[string]interface{} {
	pm.mutex.RLock()
	defer pm.mutex.RUnlock()
	// Current cumulative snapshot (atomic reads; safe under RLock).
	stats := pm.GetCurrentStats()
	successRate := stats["success_rate_percent"].(float64)
	totalTransactions := stats["total_transactions"].(int64)
	healthStatus := pm.GetHealthStatus()
	// percentOfTotal guards against division by zero.
	percentOfTotal := func(count int64) float64 {
		if totalTransactions == 0 {
			return 0
		}
		return float64(count) / float64(totalTransactions) * 100
	}
	// Protocol performance analysis using the available atomic counters.
	protocolPerformance := map[string]interface{}{
		"uniswap_v3": map[string]interface{}{
			"parsed_transactions": pm.stats.uniswapV3Parsed.Load(),
			"status":              "healthy", // Simplified for now
		},
		"uniswap_v2": map[string]interface{}{
			"parsed_transactions": pm.stats.uniswapV2Parsed.Load(),
			"status":              "healthy", // Simplified for now
		},
		"multicall": map[string]interface{}{
			"parsed_transactions": pm.stats.multicallParsed.Load(),
			"status":              "healthy", // Simplified for now
		},
		"universal_router": map[string]interface{}{
			"parsed_transactions": pm.stats.universalRouterParsed.Load(),
			"status":              "healthy", // Simplified for now
		},
	}
	// Error breakdown analysis.
	zeroAddressRejected := pm.stats.zeroAddressRejected.Load()
	suspiciousRejected := pm.stats.suspiciousRejected.Load()
	duplicateRejected := pm.stats.duplicateRejected.Load()
	errorBreakdown := map[string]interface{}{
		"zero_address": map[string]interface{}{
			"count":      zeroAddressRejected,
			"percentage": percentOfTotal(zeroAddressRejected),
		},
		"suspicious_address": map[string]interface{}{
			"count":      suspiciousRejected,
			"percentage": percentOfTotal(suspiciousRejected),
		},
		"duplicate_address": map[string]interface{}{
			"count":      duplicateRejected,
			"percentage": percentOfTotal(duplicateRejected),
		},
	}
	// Real-time metrics snapshot.
	realTimeMetrics := map[string]interface{}{
		"success_rate_percent": pm.realTimeMetrics.SuccessRatePercent,
		"error_rate_percent":   pm.realTimeMetrics.ErrorRatePercent,
		"zero_address_rate":    pm.realTimeMetrics.ZeroAddressRatePercent,
		"last_update_time":     pm.realTimeMetrics.LastUpdateTime,
	}
	return map[string]interface{}{
		"system_health": map[string]interface{}{
			"status":             healthStatus,
			"total_transactions": totalTransactions,
			"success_rate":       successRate,
			"uptime_minutes":     time.Since(pm.startTime).Minutes(),
		},
		"real_time_metrics":    realTimeMetrics,
		"protocol_performance": protocolPerformance,
		"error_breakdown":      errorBreakdown,
		"alerts": map[string]interface{}{
			"critical_alerts": pm.getCriticalAlerts(successRate, totalTransactions),
			"warning_alerts":  pm.getWarningAlerts(successRate, totalTransactions),
		},
		"generated_at": time.Now(),
	}
}
// getCriticalAlerts builds the list of currently firing critical alerts:
// success rate under 50% (sample > 100) and zero-address corruption rate
// over 10% (sample > 50). Returns nil when nothing fires.
func (pm *ParsingMonitor) getCriticalAlerts(successRate float64, totalTransactions int64) []map[string]interface{} {
	// Shared shape for a single alert entry.
	newAlert := func(msg, metric string, value, threshold float64) map[string]interface{} {
		return map[string]interface{}{
			"type":      "critical",
			"message":   msg,
			"metric":    metric,
			"value":     value,
			"threshold": threshold,
			"timestamp": time.Now(),
		}
	}
	var alerts []map[string]interface{}
	if successRate < 50.0 && totalTransactions > 100 {
		alerts = append(alerts, newAlert(
			fmt.Sprintf("Critical: Success rate %.2f%% is dangerously low", successRate),
			"success_rate", successRate, 50.0))
	}
	if zeroRate := pm.realTimeMetrics.ZeroAddressRatePercent; zeroRate > 10.0 && totalTransactions > 50 {
		alerts = append(alerts, newAlert(
			fmt.Sprintf("Critical: Zero address corruption rate %.2f%% is too high", zeroRate),
			"zero_address_rate", zeroRate, 10.0))
	}
	return alerts
}
// getWarningAlerts builds the list of currently firing warning alerts:
// success rate in [50, 80) with sample > 50, and error rate over 15% with
// sample > 30. Returns nil when nothing fires.
func (pm *ParsingMonitor) getWarningAlerts(successRate float64, totalTransactions int64) []map[string]interface{} {
	// Shared shape for a single alert entry.
	newAlert := func(msg, metric string, value, threshold float64) map[string]interface{} {
		return map[string]interface{}{
			"type":      "warning",
			"message":   msg,
			"metric":    metric,
			"value":     value,
			"threshold": threshold,
			"timestamp": time.Now(),
		}
	}
	var alerts []map[string]interface{}
	if successRate < 80.0 && successRate >= 50.0 && totalTransactions > 50 {
		alerts = append(alerts, newAlert(
			fmt.Sprintf("Warning: Success rate %.2f%% is below normal", successRate),
			"success_rate", successRate, 80.0))
	}
	if errRate := pm.realTimeMetrics.ErrorRatePercent; errRate > 15.0 && totalTransactions > 30 {
		alerts = append(alerts, newAlert(
			fmt.Sprintf("Warning: Error rate %.2f%% is elevated", errRate),
			"error_rate", errRate, 15.0))
	}
	return alerts
}
// GenerateHealthReport renders a human-readable health report for the
// parsing system from the dashboard snapshot.
//
// BUGFIX: GetDashboardData publishes "parsed_transactions" and "status"
// per protocol, but the old code read the nonexistent "success_rate" key,
// so every protocol line printed "%!f(<nil>)". The protocol section now
// reports the fields that actually exist.
func (pm *ParsingMonitor) GenerateHealthReport() string {
	dashboardData := pm.GetDashboardData()
	report := "=== MEV Bot Parsing Health Report ===\n"
	report += fmt.Sprintf("Generated: %v\n\n", dashboardData["generated_at"])
	// System health summary.
	systemHealth := dashboardData["system_health"].(map[string]interface{})
	report += fmt.Sprintf("SYSTEM HEALTH: %s\n", systemHealth["status"])
	report += fmt.Sprintf("Success Rate: %.2f%%\n", systemHealth["success_rate"])
	report += fmt.Sprintf("Total Transactions: %d\n", systemHealth["total_transactions"])
	report += fmt.Sprintf("Uptime: %.1f minutes\n\n", systemHealth["uptime_minutes"])
	// Currently firing alerts.
	alerts := dashboardData["alerts"].(map[string]interface{})
	criticalAlerts := alerts["critical_alerts"].([]map[string]interface{})
	warningAlerts := alerts["warning_alerts"].([]map[string]interface{})
	if len(criticalAlerts) > 0 {
		report += "CRITICAL ALERTS:\n"
		for _, alert := range criticalAlerts {
			report += fmt.Sprintf("- %s\n", alert["message"])
		}
		report += "\n"
	}
	if len(warningAlerts) > 0 {
		report += "WARNING ALERTS:\n"
		for _, alert := range warningAlerts {
			report += fmt.Sprintf("- %s\n", alert["message"])
		}
		report += "\n"
	}
	// Protocol performance (keys published by GetDashboardData).
	protocolPerf := dashboardData["protocol_performance"].(map[string]interface{})
	if len(protocolPerf) > 0 {
		report += "PROTOCOL PERFORMANCE:\n"
		for protocol, perfData := range protocolPerf {
			perf := perfData.(map[string]interface{})
			report += fmt.Sprintf("- %s: %d parsed transactions (%s)\n",
				protocol, perf["parsed_transactions"], perf["status"])
		}
		report += "\n"
	}
	// Error breakdown.
	errorBreakdown := dashboardData["error_breakdown"].(map[string]interface{})
	report += "ERROR BREAKDOWN:\n"
	for errorType, errorData := range errorBreakdown {
		data := errorData.(map[string]interface{})
		report += fmt.Sprintf("- %s: %d events (%.2f%%)\n",
			errorType, data["count"], data["percentage"])
	}
	report += "\n=== End Report ===\n"
	return report
}

View File

@@ -0,0 +1,47 @@
package scanner
import (
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/cache"
"github.com/fraktal/mev-beta/pkg/contracts"
"github.com/fraktal/mev-beta/pkg/database"
)
// NewMarketScanner provides a backwards-compatible constructor that accepts
// optional dependencies in the order (contract executor, database, reserve
// cache, ...). Missing or wrongly-typed extras fall back to nil so legacy
// tests and tooling keep compiling against the public API; advanced callers
// should use NewScanner directly for full control.
func NewMarketScanner(
	cfg *config.BotConfig,
	log *logger.Logger,
	extras ...interface{},
) *Scanner {
	var (
		executor *contracts.ContractExecutor
		db       *database.Database
		reserves *cache.ReserveCache
	)
	// Positional, type-checked extraction; a failed assertion leaves nil,
	// matching the legacy behavior. Extras beyond index 2 are accepted but
	// ignored to keep existing call sites compiling during the migration.
	for i, extra := range extras {
		switch i {
		case 0:
			executor, _ = extra.(*contracts.ContractExecutor)
		case 1:
			db, _ = extra.(*database.Database)
		case 2:
			reserves, _ = extra.(*cache.ReserveCache)
		}
	}
	return NewScanner(cfg, log, executor, db, reserves)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,254 @@
package swap
import (
"context"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/marketdata"
"github.com/fraktal/mev-beta/pkg/profitcalc"
)
// TestNewSwapAnalyzer verifies the constructor wires all four dependencies
// into the analyzer unchanged.
func TestNewSwapAnalyzer(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	assert.NotNil(t, analyzer)
	assert.Equal(t, log, analyzer.logger)
	assert.Equal(t, marketLogger, analyzer.marketDataLogger)
	assert.Equal(t, profitCalc, analyzer.profitCalculator)
	assert.Equal(t, ranker, analyzer.opportunityRanker)
}

// TestSwapAnalyzerCreation verifies all analyzer fields are non-nil after
// construction. NOTE(review): overlaps heavily with TestNewSwapAnalyzer.
func TestSwapAnalyzerCreation(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	assert.NotNil(t, analyzer.logger)
	assert.NotNil(t, analyzer.marketDataLogger)
	assert.NotNil(t, analyzer.profitCalculator)
	assert.NotNil(t, analyzer.opportunityRanker)
}
// TestAnalyzeSwapEventEmptyPoolAddress.
// NOTE(review): despite its name, this test only checks constructor wiring;
// it never calls AnalyzeSwapEvent with an empty pool address because doing
// so requires a non-nil MarketScanner. Placeholder until a scanner fake exists.
func TestAnalyzeSwapEventEmptyPoolAddress(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	// Test that analyzer was created successfully
	assert.NotNil(t, analyzer)
	assert.NotNil(t, analyzer.logger)
}

// TestAnalyzeSwapEventPoolEqualsToken.
// NOTE(review): placeholder — does not exercise the pool==token path; see
// the note on TestAnalyzeSwapEventEmptyPoolAddress.
func TestAnalyzeSwapEventPoolEqualsToken(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	// Test that analyzer was created successfully
	assert.NotNil(t, analyzer)
	assert.NotNil(t, analyzer.marketDataLogger)
}

// TestAnalyzeSwapEventSuspiciousAddress.
// NOTE(review): placeholder — does not exercise suspicious-address handling.
func TestAnalyzeSwapEventSuspiciousAddress(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	// Test that analyzer was created successfully
	assert.NotNil(t, analyzer)
	assert.NotNil(t, analyzer.profitCalculator)
}
// TestFactoryProtocolMapping checks that each known factory address resolves
// to its expected protocol name in factoryProtocolMap.
func TestFactoryProtocolMapping(t *testing.T) {
	expected := map[common.Address]string{
		common.HexToAddress("0x1F98431c8aD98523631AE4a59f267346ea31F984"): "UniswapV3",
		common.HexToAddress("0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f"): "UniswapV2",
		common.HexToAddress("0xc35DADB65012eC5796536bD9864eD8773aBc74C4"): "SushiSwap",
		common.HexToAddress("0xBA12222222228d8Ba445958a75a0704d566BF2C8"): "Balancer",
	}
	for factory, want := range expected {
		got, ok := factoryProtocolMap[factory]
		assert.True(t, ok, "Protocol for factory %s should be found", factory.Hex())
		assert.Equal(t, want, got)
	}
}
// TestProtocolDefaultFactoryMapping checks that each protocol name resolves
// to its expected default factory address in protocolDefaultFactory.
func TestProtocolDefaultFactoryMapping(t *testing.T) {
	expected := map[string]common.Address{
		"UniswapV3": common.HexToAddress("0x1F98431c8aD98523631AE4a59f267346ea31F984"),
		"UniswapV2": common.HexToAddress("0xf1D7CC64Fb4452F05c498126312eBE29f30Fbcf9"),
		"SushiSwap": common.HexToAddress("0xc35DADB65012eC5796536bD9864eD8773aBc74C4"),
		"Balancer":  common.HexToAddress("0xBA12222222228d8Ba445958a75a0704d566BF2C8"),
	}
	for protocol, want := range expected {
		got, ok := protocolDefaultFactory[protocol]
		assert.True(t, ok, "Factory for protocol %s should be found", protocol)
		assert.Equal(t, want, got)
	}
}
// TestProtocolSpecialByAddressMapping checks the special-case address to
// protocol mapping (vault/router style protocols without a factory).
func TestProtocolSpecialByAddressMapping(t *testing.T) {
	expected := map[common.Address]string{
		common.HexToAddress("0xBA12222222228d8Ba445958a75a0704d566BF2C8"): "Balancer",
		common.HexToAddress("0xF18056Bbd320E96A48e3Fbf8bC061322531aac99"): "Curve",
		common.HexToAddress("0x5F1dddbf348aC2fbe22a163e30F99F9ECE3DD50a"): "KyberElastic",
	}
	for addr, want := range expected {
		got, ok := protocolSpecialByAddress[addr]
		assert.True(t, ok, "Protocol for address %s should be found", addr.Hex())
		assert.Equal(t, want, got)
	}
}
// TestSwapAnalyzerContextCancellation.
// NOTE(review): the cancelled context is created but never passed to the
// analyzer — AnalyzeSwapEvent requires a non-nil MarketScanner, so this test
// only asserts construction. Placeholder until a scanner fake exists.
func TestSwapAnalyzerContextCancellation(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	_, cancel := context.WithCancel(context.Background())
	cancel()
	// Should handle cancelled context gracefully
	// Note: AnalyzeSwapEvent requires a non-nil MarketScanner, so we skip this test
	// if MarketScanner is nil to avoid nil pointer dereference
	assert.NotNil(t, analyzer)
}
// TestSwapAnalyzerMultipleEvents verifies that repeated construction of
// SwapAnalyzer instances from the same collaborators succeeds; each
// construction simulates an independent event-handling configuration.
func TestSwapAnalyzerMultipleEvents(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	for i := 0; i < 5; i++ {
		analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
		// Include the iteration in the failure message so a flaky
		// construction points at the exact round that broke.
		assert.NotNil(t, analyzer, "analyzer %d should be constructed", i)
	}
	// Removed the tautological assert.True(t, true): it could never fail
	// and therefore verified nothing.
}
// TestSwapAnalyzerWithValidEvent confirms that a freshly constructed
// SwapAnalyzer is non-nil and has its opportunity ranker wired in.
func TestSwapAnalyzerWithValidEvent(t *testing.T) {
	baseLog := logger.New("info", "text", "")
	mdLogger := marketdata.NewMarketDataLogger(baseLog, nil)
	calc := profitcalc.NewProfitCalculator(baseLog)
	rank := profitcalc.NewOpportunityRanker(baseLog)

	analyzer := NewSwapAnalyzer(baseLog, mdLogger, calc, rank)

	assert.NotNil(t, analyzer)
	assert.NotNil(t, analyzer.opportunityRanker)
}
// TestSwapAnalyzerLogging verifies the analyzer is constructed with a
// usable logger by exercising the Debug/Warn/Error levels directly; a nil
// or misconfigured logger would panic on these calls.
func TestSwapAnalyzerLogging(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)

	assert.NotNil(t, analyzer.logger)
	analyzer.logger.Debug("Test debug message")
	analyzer.logger.Warn("Test warning message")
	analyzer.logger.Error("Test error message")
	// Removed the tautological assert.True(t, true): reaching this point
	// without a panic is already the success condition.
}
// TestSwapAnalyzerConcurrentAnalysis verifies that SwapAnalyzer
// construction is safe to run from multiple goroutines sharing the same
// collaborators. Run with -race to catch data races in the constructor.
func TestSwapAnalyzerConcurrentAnalysis(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)

	const workers = 10
	done := make(chan bool, workers)
	for i := 0; i < workers; i++ {
		// The closure previously took an `index int` parameter it never
		// used; dropped. assert (t.Errorf) is safe from other goroutines.
		go func() {
			analyzer2 := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
			assert.NotNil(t, analyzer2)
			done <- true
		}()
	}
	// Block until every worker has reported back.
	for i := 0; i < workers; i++ {
		<-done
	}
	assert.NotNil(t, analyzer)
}
// TestSwapAnalyzerEventTimestamps verifies that SwapAnalyzer construction
// completes and that timestamps taken around it are ordered. Note this
// only asserts after >= before (which time.Now guarantees); it does not
// bound how long construction took.
func TestSwapAnalyzerEventTimestamps(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)

	before := time.Now()
	analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
	after := time.Now()

	assert.NotNil(t, analyzer)
	// Equivalent to after.After(before) || after.Equal(before), but
	// expressed directly as "not earlier".
	assert.False(t, after.Before(before))
}
// TestSwapAnalyzerEventBatchProcessing simulates batch processing by
// constructing many analyzers in sequence from shared collaborators.
func TestSwapAnalyzerEventBatchProcessing(t *testing.T) {
	log := logger.New("info", "text", "")
	marketLogger := marketdata.NewMarketDataLogger(log, nil)
	profitCalc := profitcalc.NewProfitCalculator(log)
	ranker := profitcalc.NewOpportunityRanker(log)
	for i := 0; i < 50; i++ {
		analyzer := NewSwapAnalyzer(log, marketLogger, profitCalc, ranker)
		// Include the iteration in the failure message so a failure
		// points at the exact round that broke.
		assert.NotNil(t, analyzer, "analyzer %d should be constructed", i)
	}
	// Removed the tautological assert.True(t, true): it could never fail
	// and therefore verified nothing.
}