Restructured project for V2 refactor:

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation
- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern; sketched below)
- Multi-layer strict validation
- Multi-index pool cache (sketched below)
- Background validation pipeline
- Comprehensive observability

**Critical Issues Addressed:**
- Zero-address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

**Next Steps:**
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
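As a rough illustration of the per-exchange parser factory named above (a minimal sketch; all identifiers here are hypothetical, the real interfaces are defined by the Phase 2 tasks):

```go
package parsers

import "errors"

// SwapEvent is a hypothetical normalized swap record that every
// protocol-specific parser would produce.
type SwapEvent struct {
	Pool      string
	TokenIn   string
	TokenOut  string
	AmountIn  string // decimal string; real code would likely use *big.Int
	AmountOut string
}

// Parser is the per-exchange parsing contract.
type Parser interface {
	Exchange() string
	ParseLog(raw []byte) (*SwapEvent, error)
}

// registry maps exchange names to parser constructors (the "factory").
var registry = map[string]func() Parser{}

// Register adds a constructor; each exchange package would call this in init().
func Register(name string, ctor func() Parser) {
	registry[name] = ctor
}

// New builds a parser for the named exchange, or fails loudly for unknown
// exchanges instead of guessing - in line with the strict-validation goal.
func New(name string) (Parser, error) {
	ctor, ok := registry[name]
	if !ok {
		return nil, errors.New("no parser registered for exchange: " + name)
	}
	return ctor(), nil
}
```

A DEX-specific package (say, a hypothetical uniswapv3 parser) would then self-register via `Register` in its `init` function, keeping protocol quirks out of the core pipeline.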
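Similarly, a hedged sketch of the multi-index pool cache idea (type and field names are assumptions, not the planned API):

```go
package poolcache

import "sync"

// Pool is a minimal pool record for illustration.
type Pool struct {
	Address  string
	Token0   string
	Token1   string
	Exchange string
}

// Cache keeps several indexes over the same pools so lookups by address,
// token, or exchange avoid linear scans.
type Cache struct {
	mu        sync.RWMutex
	byAddress map[string]*Pool
	byToken   map[string][]*Pool // token address -> pools containing it
	byDEX     map[string][]*Pool // exchange name -> pools on it
}

func New() *Cache {
	return &Cache{
		byAddress: make(map[string]*Pool),
		byToken:   make(map[string][]*Pool),
		byDEX:     make(map[string][]*Pool),
	}
}

// Add registers a pool under all indexes.
func (c *Cache) Add(p *Pool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.byAddress[p.Address] = p
	c.byToken[p.Token0] = append(c.byToken[p.Token0], p)
	c.byToken[p.Token1] = append(c.byToken[p.Token1], p)
	c.byDEX[p.Exchange] = append(c.byDEX[p.Exchange], p)
}

// ByToken returns every cached pool that contains the given token.
func (c *Cache) ByToken(token string) []*Pool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.byToken[token]
}
```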
package main

import (
    "context"
    // "crypto/tls" // Temporarily commented out - not used without security manager
    "fmt"
    "math/big"
    "net/url"
    "os"
    "os/signal"
    "strconv"
    "strings"
    "syscall"
    "time"

    // "github.com/ethereum/go-ethereum/common" // Not used - pool discovery disabled
    "github.com/joho/godotenv"
    "github.com/urfave/cli/v2"

    "github.com/fraktal/mev-beta/internal/config"
    "github.com/fraktal/mev-beta/internal/logger"
    "github.com/fraktal/mev-beta/internal/monitoring"

    // "github.com/fraktal/mev-beta/internal/tokens" // Not used - pool discovery disabled
    "github.com/fraktal/mev-beta/pkg/arbitrage"
    "github.com/fraktal/mev-beta/pkg/metrics"
    "github.com/fraktal/mev-beta/pkg/pools"
    "github.com/fraktal/mev-beta/pkg/security"
    pkgtokens "github.com/fraktal/mev-beta/pkg/tokens"
    "github.com/fraktal/mev-beta/pkg/transport"
)

func main() {
    app := &cli.App{
        Name:  "mev-bot",
        Usage: "An MEV bot that monitors the Arbitrum sequencer for swap opportunities",
        Commands: []*cli.Command{
            {
                Name:  "start",
                Usage: "Start the MEV bot",
                Action: func(c *cli.Context) error {
                    return startBot()
                },
            },
            {
                Name:  "scan",
                Usage: "Scan for potential arbitrage opportunities",
                Action: func(c *cli.Context) error {
                    return scanOpportunities()
                },
            },
        },
    }

    if err := app.Run(os.Args); err != nil {
        fmt.Fprintf(os.Stderr, "Error: %v\n", err)
        os.Exit(1)
    }
}
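
// Usage (illustrative):
//
//  mev-bot start - run the bot continuously until stopped with Ctrl+C
//  mev-bot scan  - run a one-time 30-second opportunity scan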

func startBot() error {
    // Load environment variables based on GO_ENV
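    // GO_ENV -> file mapping used here and in the config selection below:
    //   production  -> .env.production + config/arbitrum_production.yaml
    //   staging     -> .env.staging    + config/staging.yaml
    //   development -> .env            + config/local.yaml (else config/config.yaml)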
    envMode := strings.ToLower(os.Getenv("GO_ENV"))
    if envMode == "" {
        envMode = "development"
    }

    var envFile string
    if envMode == "development" {
        envFile = ".env"
    } else {
        envFile = fmt.Sprintf(".env.%s", envMode)
    }

    if _, err := os.Stat(envFile); err == nil {
        if err := godotenv.Load(envFile); err != nil {
            fmt.Printf("Warning: failed to load %s: %v\n", envFile, err)
        } else {
            fmt.Printf("Loaded environment variables from %s\n", envFile)
        }
    } else {
        fmt.Printf("Warning: %s not found; proceeding without mode-specific env overrides\n", envFile)
    }

    // Load configuration based on GO_ENV
    // SECURITY FIX: Respect GO_ENV to prevent accidental production config loading in development
    var configFile string
    switch envMode {
    case "production":
        configFile = "config/arbitrum_production.yaml"
        if _, err := os.Stat(configFile); err != nil {
            return fmt.Errorf("production config not found: %s (create from arbitrum_production.yaml.template)", configFile)
        }
    case "staging":
        configFile = "config/staging.yaml"
        if _, err := os.Stat(configFile); err != nil {
            return fmt.Errorf("staging config not found: %s", configFile)
        }
    default: // development
        // In development, prefer local.yaml over config.yaml
        if _, err := os.Stat("config/local.yaml"); err == nil {
            configFile = "config/local.yaml"
        } else {
            configFile = "config/config.yaml"
        }
    }

    fmt.Printf("Using configuration: %s (GO_ENV=%s)\n", configFile, envMode)
    fmt.Printf("DEBUG: [1/20] About to load config file...\n")

    cfg, err := config.Load(configFile)
    if err != nil {
        return fmt.Errorf("failed to load config: %w", err)
    }
    fmt.Printf("DEBUG: [2/20] ✅ Config loaded successfully\n")

    // Initialize logger
    fmt.Printf("DEBUG: [3/20] Initializing logger...\n")
    log := logger.New(cfg.Log.Level, cfg.Log.Format, cfg.Log.File)
    fmt.Printf("DEBUG: [4/20] ✅ Logger initialized\n")
    log.Info(fmt.Sprintf("Starting MEV bot with Enhanced Security - Config: %s", configFile))

    // Validate RPC endpoints for security
    fmt.Printf("DEBUG: [5/20] Validating RPC endpoints...\n")
    if err := validateRPCEndpoint(cfg.Arbitrum.RPCEndpoint); err != nil {
        return fmt.Errorf("RPC endpoint validation failed: %w", err)
    }
    if cfg.Arbitrum.WSEndpoint != "" {
        if err := validateRPCEndpoint(cfg.Arbitrum.WSEndpoint); err != nil {
            return fmt.Errorf("WebSocket endpoint validation failed: %w", err)
        }
    }
    fmt.Printf("DEBUG: [6/20] ✅ RPC endpoints validated\n")

    log.Debug(fmt.Sprintf("RPC Endpoint: %s", cfg.Arbitrum.RPCEndpoint))
    log.Debug(fmt.Sprintf("WS Endpoint: %s", cfg.Arbitrum.WSEndpoint))
    log.Debug(fmt.Sprintf("Chain ID: %d", cfg.Arbitrum.ChainID))

    // Initialize comprehensive security framework
    // Check if the security manager should be enabled via environment variable
    var securityManager *security.SecurityManager
    if os.Getenv("SECURITY_MANAGER_ENABLED") == "true" || envMode == "production" {
        log.Info("🔒 Initializing security manager...")
        securityKeyDir := getEnvOrDefault("MEV_BOT_KEYSTORE_PATH", "keystore")
        securityConfig := &security.SecurityConfig{
            KeyStoreDir:       securityKeyDir,
            EncryptionEnabled: true,
            TransactionRPS:    100,
            RPCRPS:            200,
            MaxBurstSize:      50,
            FailureThreshold:  5,
            RecoveryTimeout:   5 * time.Minute,
            // TLSMinVersion: tls.VersionTLS12, // TLS 1.2 minimum - commented out to avoid import
            EmergencyStopFile: "emergency.stop",
            MaxGasPrice:       "50000000000", // 50 gwei
            AlertWebhookURL:   os.Getenv("SECURITY_WEBHOOK_URL"),
            LogLevel:          cfg.Log.Level,
            RPCURL:            cfg.Arbitrum.RPCEndpoint,
        }

        var err error
        securityManager, err = security.NewSecurityManager(securityConfig)
        if err != nil {
            log.Warn(fmt.Sprintf("Failed to initialize security manager: %v (continuing without security)", err))
            securityManager = nil
        } else {
            defer func() {
                shutdownCtx, cancelShutdown := context.WithTimeout(context.Background(), 15*time.Second)
                defer cancelShutdown()
                if err := securityManager.Shutdown(shutdownCtx); err != nil {
                    log.Error("Failed to shutdown security manager", "error", err)
                }
            }()
            log.Info("✅ Security framework initialized successfully")
        }
    } else {
        log.Warn("⚠️ Security manager DISABLED (set SECURITY_MANAGER_ENABLED=true to enable)")
    }

    // Initialize metrics collector
    fmt.Printf("DEBUG: [7/20] Initializing metrics collector...\n")
    metricsCollector := metrics.NewMetricsCollector(log)
    fmt.Printf("DEBUG: [8/20] ✅ Metrics collector initialized\n")

    // Start metrics server if enabled
    var metricsServer *metrics.MetricsServer
    if os.Getenv("METRICS_ENABLED") == "true" {
        fmt.Printf("DEBUG: Starting metrics server...\n")
        metricsPort := os.Getenv("METRICS_PORT")
        if metricsPort == "" {
            metricsPort = "9090"
        }
        metricsServer = metrics.NewMetricsServer(metricsCollector, log, metricsPort)
        go func() {
            if err := metricsServer.Start(); err != nil {
                log.Error("Metrics server error", "error", err)
            }
        }()
        log.Info(fmt.Sprintf("Metrics server started on port %s", metricsPort))
    }

    // Initialize unified provider manager
    fmt.Printf("DEBUG: [9/20] Initializing provider manager...\n")
    log.Info("Initializing provider manager with separate read-only, execution, and testing pools...")

    // Use the existing providers.yaml config file for runtime.
    // SECURITY NOTE: providers.yaml should not be committed with actual credentials;
    // use providers.yaml.template as reference and create providers.yaml locally.
    providerConfigPath := "config/providers.yaml"
    if _, err := os.Stat(providerConfigPath); err != nil {
        return fmt.Errorf("providers config not found: %s (create from providers.yaml.template)", providerConfigPath)
    }

    providerManager, err := transport.NewUnifiedProviderManager(providerConfigPath)
    if err != nil {
        return fmt.Errorf("failed to initialize provider manager: %w", err)
    }
    fmt.Printf("DEBUG: [10/20] ✅ Provider manager initialized\n")
    defer func() {
        if err := providerManager.Close(); err != nil {
            log.Error("Failed to close provider manager", "error", err)
        }
    }()

    // Get execution client for transaction operations
    fmt.Printf("DEBUG: [11/20] Getting execution client...\n")
    executionClient, err := providerManager.GetExecutionHTTPClient()
    if err != nil {
        return fmt.Errorf("failed to get execution client: %w", err)
    }
    fmt.Printf("DEBUG: [12/20] ✅ Execution client obtained\n")

    // Log provider statistics
    providerStats := providerManager.GetAllStats()
    log.Info(fmt.Sprintf("Provider manager initialized with %d pool(s)", len(providerStats)-1)) // -1 for summary

    // Create key manager for secure transaction signing
    fmt.Printf("DEBUG: [13/20] Creating key manager...\n")
    encryptionKey := os.Getenv("MEV_BOT_ENCRYPTION_KEY")
    if encryptionKey == "" {
        return fmt.Errorf("MEV_BOT_ENCRYPTION_KEY environment variable is required for secure operations")
    }

    keystorePath := getEnvOrDefault("MEV_BOT_KEYSTORE_PATH", "keystore")
    if strings.TrimSpace(keystorePath) == "" {
        keystorePath = "keystore"
    }
    log.Info(fmt.Sprintf("Using keystore path: %s", keystorePath))

    keyManagerConfig := &security.KeyManagerConfig{
        KeystorePath:    keystorePath,
        EncryptionKey:   encryptionKey,
        KeyRotationDays: 30,
        MaxSigningRate:  100,
        SessionTimeout:  time.Hour,
        AuditLogPath:    getEnvOrDefault("MEV_BOT_AUDIT_LOG", "logs/audit.log"),
        BackupPath:      getEnvOrDefault("MEV_BOT_BACKUP_PATH", "backups"),
    }
    keyManager, err := security.NewKeyManager(keyManagerConfig, log)
    if err != nil {
        return fmt.Errorf("failed to create key manager: %w", err)
    }
    fmt.Printf("DEBUG: [14/20] ✅ Key manager created\n")

    // Setup graceful shutdown BEFORE creating services
    fmt.Printf("DEBUG: [15/20] Setting up context and shutdown handlers...\n")
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel() // Ensure context is canceled on function exit

    // Get read-only provider pool for RPC operations
    fmt.Printf("DEBUG: [16/20] Getting read-only provider pool...\n")
    readOnlyPool, err := providerManager.GetPoolForMode(transport.ModeReadOnly)
    if err != nil {
        return fmt.Errorf("failed to get read-only provider pool: %w", err)
    }

    // Get RPC client for pool discovery
    fmt.Printf("DEBUG: [17/20] Getting RPC client for pool discovery...\n")
    rpcClient, err := readOnlyPool.GetRPCClient(false) // Use HTTP for reliability
    if err != nil {
        return fmt.Errorf("failed to get RPC client for pool discovery: %w", err)
    }
    fmt.Printf("DEBUG: [18/20] ✅ RPC client obtained\n")

    // Initialize Pool Discovery System BEFORE arbitrage check
    fmt.Printf("DEBUG: [19/20] Initializing pool discovery system...\n")
    log.Info("Initializing pool discovery system...")
    poolDiscovery := pools.NewPoolDiscovery(rpcClient, log)
    poolCount := poolDiscovery.GetPoolCount()
    log.Info(fmt.Sprintf("✅ Loaded %d pools from cache", poolCount))
    fmt.Printf("DEBUG: [20/20] ✅ Pool discovery initialized (loaded %d pools)\n", poolCount)

    // 🚀 ACTIVE POOL DISCOVERY: DISABLED during startup to prevent a hang.
    // CRITICAL FIX: The comprehensive pool discovery loop makes 190 RPC calls,
    // and some calls to DiscoverPoolsForTokenPair() hang or time out
    // (especially the WETH/GRT pair, 0-9). This blocked bot startup for 5+
    // minutes, preventing operational use.
    //
    // SOLUTION: Skip the discovery loop during startup - we already have 314
    // pools from cache. Pool discovery can run as a background task AFTER the
    // bot starts (illustrative sketch below).
    //
    // Evidence from logs:
    // - Bot hangs at pair 0-9 (WETH/GRT) consistently
    // - Bot was working with 330 pools at 06:02 UTC (no discovery loop blocking)
    // - 314 pools already loaded from cache is sufficient for initial operation

    fmt.Printf("DEBUG: [21/25] Skipping comprehensive pool discovery (prevents startup hang)\n")
    fmt.Printf("DEBUG: [22/25] About to call first log.Info...\n")
    log.Info("⚠️ SKIPPED: Comprehensive pool discovery loop (prevents 5min startup hang)")
    fmt.Printf("DEBUG: [23/25] ✅ First log.Info completed\n")
    fmt.Printf("DEBUG: [24/25] About to call poolDiscovery.GetPoolCount()...\n")
    poolCount2 := poolDiscovery.GetPoolCount()
    fmt.Printf("DEBUG: [25/25] ✅ GetPoolCount returned: %d\n", poolCount2)
    log.Info(fmt.Sprintf("📊 Using cached pools only - %d pools loaded from data/pools.json", poolCount2))
    fmt.Printf("DEBUG: [26/30] ✅ Second log.Info completed\n")
    log.Info("💡 TIP: Run pool discovery as background task after bot starts")
    fmt.Printf("DEBUG: [27/30] ✅ Third log.Info completed\n")

    // Variables kept for future use when pool discovery is re-enabled
    _ = poolCount2 // totalPools - unused but kept for later
    _ = 0          // discoveredPools - unused
    _ = 0          // discoveredPairs - unused
    fmt.Printf("DEBUG: [28/30] ✅ Pool discovery section complete\n")

    // Pool discovery loop DISABLED - uncomment below to re-enable (causes 5min+ startup hang)
    /*
        fmt.Printf("DEBUG: [21/25] About to start pool discovery...\n")
        log.Info("🔍 Starting comprehensive pool discovery for TOP 20 tokens (190 pairs expected)...")
        fmt.Printf("DEBUG: [22/25] Pool discovery log message sent\n")
        discoveredPools := 0
        discoveredPairs := 0

        // Get all token addresses from configuration
        fmt.Printf("DEBUG: [23/25] Getting Arbitrum tokens...\n")
        arbTokens := tokens.GetArbitrumTokens()
        fmt.Printf("DEBUG: [24/25] ✅ Got Arbitrum tokens\n")

        // Build comprehensive token list - TOP 20 TOKENS
        fmt.Printf("DEBUG: [25/30] Building token list...\n")
        tokenList := []struct {
            name    string
            address common.Address
        }{
            // Tier 1 - Major Assets (10)
            {"WETH", arbTokens.WETH},
            {"USDC", arbTokens.USDC},
            {"USDT", arbTokens.USDT},
            {"ARB", arbTokens.ARB},
            {"WBTC", arbTokens.WBTC},
            {"DAI", arbTokens.DAI},
            {"LINK", arbTokens.LINK},
            {"UNI", arbTokens.UNI},
            {"GMX", arbTokens.GMX},
            {"GRT", arbTokens.GRT},

            // Tier 2 - DeFi Blue Chips (5)
            {"USDC.e", arbTokens.USDCe},
            {"PENDLE", arbTokens.PENDLE},
            {"RDNT", arbTokens.RDNT},
            {"MAGIC", arbTokens.MAGIC},
            {"GRAIL", arbTokens.GRAIL},

            // Tier 3 - Additional High Volume (5)
            {"AAVE", arbTokens.AAVE},
            {"CRV", arbTokens.CRV},
            {"BAL", arbTokens.BAL},
            {"COMP", arbTokens.COMP},
            {"MKR", arbTokens.MKR},
        }
        fmt.Printf("DEBUG: [26/30] ✅ Token list built (%d tokens)\n", len(tokenList))

        // Discover pools for all token pairs
        fmt.Printf("DEBUG: [27/30] Creating discovery context with 5min timeout...\n")
        discoveryCtx, discoveryCancel := context.WithTimeout(ctx, 5*time.Minute)
        defer discoveryCancel()
        fmt.Printf("DEBUG: [28/30] ✅ Discovery context created\n")

        fmt.Printf("DEBUG: [29/30] Starting nested loop for %d token pairs...\n", (len(tokenList)*(len(tokenList)-1))/2)
        for i := 0; i < len(tokenList); i++ {
            for j := i + 1; j < len(tokenList); j++ {
                token0 := tokenList[i]
                token1 := tokenList[j]
                fmt.Printf("DEBUG: [LOOP] Discovering pools for %s/%s (pair %d-%d)...\n", token0.name, token1.name, i, j)

                // Discover pools for this token pair
                pools, err := poolDiscovery.DiscoverPoolsForTokenPair(token0.address, token1.address)
                if err != nil {
                    log.Debug(fmt.Sprintf("No pools found for %s/%s: %v", token0.name, token1.name, err))
                    continue
                }

                if len(pools) > 0 {
                    discoveredPools += len(pools)
                    discoveredPairs++
                    log.Info(fmt.Sprintf("✅ Found %d pool(s) for %s/%s", len(pools), token0.name, token1.name))
                }

                // Check context to allow early termination if needed
                select {
                case <-discoveryCtx.Done():
                    log.Warn("Pool discovery interrupted by context cancellation")
                    goto discoveryComplete
                default:
                    // Continue discovery
                }
            }
        }

    discoveryComplete:
        totalPools := poolDiscovery.GetPoolCount()
        log.Info(fmt.Sprintf("🎉 Pool discovery complete! Monitoring %d pools across %d pairs", totalPools, discoveredPairs))
        log.Info(fmt.Sprintf("📊 Discovery summary: %d new pools discovered, %d pairs active", discoveredPools, discoveredPairs))

        // 🔧 FIX #1: Save discovered pools to disk cache
        log.Info("💾 Saving pool cache to disk...")
        poolDiscovery.SavePoolCache()
        log.Info("✅ Pool cache saved successfully to data/pools.json")
    */

    // Create arbitrage database
    fmt.Printf("DEBUG: [29/35] Creating arbitrage database...\n")
    arbitrageDB, err := arbitrage.NewSQLiteDatabase(cfg.Database.File, log)
    if err != nil {
        return fmt.Errorf("failed to create arbitrage database: %w", err)
    }
    fmt.Printf("DEBUG: [30/35] ✅ Arbitrage database created\n")
    defer func() {
        if err := arbitrageDB.Close(); err != nil {
            log.Error("Failed to close arbitrage database", "error", err)
        }
    }()

    // Check if arbitrage service is enabled
    fmt.Printf("DEBUG: [31/35] Checking if arbitrage service is enabled...\n")
    if !cfg.Arbitrage.Enabled {
        log.Info("Arbitrage service is disabled in configuration")
        return fmt.Errorf("arbitrage service disabled - enable in config to run")
    }
    fmt.Printf("DEBUG: [32/35] ✅ Arbitrage service is enabled\n")

    // Initialize Token Metadata Cache
    fmt.Printf("DEBUG: [33/35] Initializing token metadata cache...\n")
    log.Info("Initializing token metadata cache...")
    tokenCache := pkgtokens.NewMetadataCache(log)
    fmt.Printf("DEBUG: [34/35] ✅ Token metadata cache initialized\n")

    // CRITICAL FIX #4: Populate the token cache with all 20 known Arbitrum tokens.
    // This ensures the detection engine has pricing data for all major tokens;
    // previously only 6 tokens were loaded, preventing pair creation.
    fmt.Printf("DEBUG: [34.5/35] Populating token cache with 20 known tokens...\n")
    tokenCache.PopulateWithKnownTokens()
    fmt.Printf("DEBUG: [34.7/35] ✅ Token cache populated\n")

    fmt.Printf("DEBUG: [35/45] Calling tokenCache.Count()...\n")
    tokenCount := tokenCache.Count()
    fmt.Printf("DEBUG: [36/45] ✅ tokenCache.Count() returned: %d\n", tokenCount)
    log.Info(fmt.Sprintf("✅ Loaded %d tokens from cache (including 20 known Arbitrum tokens)", tokenCount))
    fmt.Printf("DEBUG: [37/45] ✅ Token count logged\n")

    // Create arbitrage service with context and pool discovery
    fmt.Printf("DEBUG: [38/45] About to log 'Creating arbitrage service'...\n")
    log.Info("Creating arbitrage service...")
    fmt.Printf("DEBUG: [39/45] About to call arbitrage.NewArbitrageServiceWithFullConfig()...\n")
    // PHASE 1: Pass the full config for L2 optimizations
    arbitrageService, err := arbitrage.NewArbitrageServiceWithFullConfig(
        ctx,
        executionClient,
        log,
        cfg,            // Full config for L2 optimizations
        &cfg.Arbitrage, // Legacy arbitrage config
        keyManager,
        arbitrageDB,
        poolDiscovery,
        tokenCache,
    )
    if err != nil {
        return fmt.Errorf("failed to create arbitrage service: %w", err)
    }
    fmt.Printf("DEBUG: [40/45] ✅ Arbitrage service created successfully\n")

    // Initialize data integrity monitoring system
    fmt.Printf("DEBUG: [41/45] Initializing integrity monitor...\n")
    log.Info("Initializing data integrity monitoring system...")
    integrityMonitor := monitoring.NewIntegrityMonitor(log)
    fmt.Printf("DEBUG: [42/45] ✅ Integrity monitor initialized\n")

    // Initialize dashboard server
    fmt.Printf("DEBUG: [43/45] Setting up dashboard server...\n")
    dashboardPort := 60376
    if portEnv := os.Getenv("DASHBOARD_PORT"); portEnv != "" {
        if port, err := strconv.Atoi(portEnv); err == nil {
            dashboardPort = port
        }
    }
    fmt.Printf("DEBUG: [44/45] Creating dashboard server on port %d...\n", dashboardPort)
    dashboardServer := monitoring.NewDashboardServer(log, integrityMonitor, integrityMonitor.GetHealthCheckRunner(), dashboardPort)
    fmt.Printf("DEBUG: [45/45] ✅ Dashboard server created\n")

    // Start dashboard server
    fmt.Printf("DEBUG: [46/50] Starting dashboard server goroutine...\n")
    go func() {
        // TEMPORARY FIX: Skip log.Info inside goroutine - may be causing deadlock
        // log.Info(fmt.Sprintf("Starting monitoring dashboard on port %d...", dashboardPort))
        fmt.Printf("DEBUG: [GOROUTINE] Starting dashboard server on port %d...\n", dashboardPort)
        if err := dashboardServer.Start(); err != nil {
            fmt.Printf("DEBUG: [GOROUTINE] Dashboard server error: %v\n", err)
            // log.Error("Dashboard server error", "error", err)
        }
    }()
    fmt.Printf("DEBUG: [47/50] ✅ Dashboard goroutine started\n")

    // Start integrity monitoring
    fmt.Printf("DEBUG: [48/50] Starting integrity monitor goroutine...\n")
    go func() {
        // TEMPORARY FIX: Skip log.Info inside goroutine - may be causing deadlock
        // log.Info("Starting integrity monitoring...")
        fmt.Printf("DEBUG: [GOROUTINE] Starting integrity monitoring...\n")
        integrityMonitor.StartHealthCheckRunner(ctx)
    }()
    fmt.Printf("DEBUG: [49/50] ✅ Integrity monitor goroutine started\n")

    fmt.Printf("DEBUG: [50/50] About to log monitoring system messages...\n")
    log.Info("Data integrity monitoring system initialized successfully")
    fmt.Printf("DEBUG: [51/55] ✅ First monitoring log completed\n")
    log.Info(fmt.Sprintf("Dashboard available at http://localhost:%d", dashboardPort))
    fmt.Printf("DEBUG: [52/55] ✅ Second monitoring log completed\n")

    fmt.Printf("DEBUG: [53/60] Setting up signal handlers...\n")
    sigChan := make(chan os.Signal, 1)
    signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

    // Handle signals in a goroutine to cancel the context immediately
    go func() {
        <-sigChan
        fmt.Printf("DEBUG: [SIGNAL] Shutdown signal received\n")
        log.Info("Shutdown signal received, canceling context...")
        cancel() // This cancels the context and stops all operations
    }()
    fmt.Printf("DEBUG: [54/60] ✅ Signal handlers ready\n")

    // Start the arbitrage service with context
    fmt.Printf("DEBUG: [55/60] About to start arbitrage service...\n")
    log.Info("Starting arbitrage service...")
    fmt.Printf("DEBUG: [56/60] ✅ Log message completed\n")
    errChan := make(chan error, 1)
    fmt.Printf("DEBUG: [57/60] Starting arbitrage service goroutine...\n")
    go func() {
        fmt.Printf("DEBUG: [GOROUTINE] Calling arbitrageService.Start()...\n")
        if err := arbitrageService.Start(); err != nil {
            errChan <- fmt.Errorf("arbitrage service error: %w", err)
        }
    }()
    defer func() {
        if err := arbitrageService.Stop(); err != nil {
            log.Error("Failed to stop arbitrage service", "error", err)
        }
    }()
    fmt.Printf("DEBUG: [58/60] ✅ Arbitrage service goroutine started\n")
    log.Info("Arbitrage service started successfully")
    fmt.Printf("DEBUG: [59/60] ✅ Arbitrage service confirmed started\n")

    log.Info("MEV bot started successfully - monitoring for arbitrage opportunities...")
    log.Info("Press Ctrl+C to stop the bot gracefully...")
    fmt.Printf("DEBUG: [60/60] ✅✅✅ BOT FULLY STARTED - Entering main loop ✅✅✅\n")

    // Wait for context cancellation or error
    select {
    case <-ctx.Done():
        log.Info("Context canceled, stopping MEV bot...")
    case err := <-errChan:
        log.Error("Service error occurred", "error", err)
        return err
    }

    // Stop monitoring services
    log.Info("Stopping monitoring services...")
    if err := dashboardServer.Stop(); err != nil {
        log.Error("Failed to stop dashboard server gracefully", "error", err)
    }
    integrityMonitor.StopHealthCheckRunner()

    // Stop metrics server if running
    if metricsServer != nil {
        if err := metricsServer.Stop(); err != nil {
            log.Error("Failed to stop metrics server gracefully", "error", err)
        }
    }

    // Get final stats
    stats := arbitrageService.GetStats()
    log.Info(fmt.Sprintf("Final Statistics - Opportunities: %d, Executions: %d, Successful: %d, Total Profit: %s ETH",
        stats.TotalOpportunitiesDetected,
        stats.TotalOpportunitiesExecuted,
        stats.TotalSuccessfulExecutions,
        formatEther(stats.TotalProfitRealized)))

    log.Info("MEV bot stopped gracefully")
    return nil
}

// formatEther formats a wei amount as an ether string.
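// Example (illustrative): formatEther(big.NewInt(1500000000000000000))
// returns "1.500000"; a nil input returns "0.000000".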
func formatEther(wei *big.Int) string {
    if wei == nil {
        return "0.000000"
    }
    eth := new(big.Float).SetInt(wei)
    eth.Quo(eth, big.NewFloat(1e18))
    return fmt.Sprintf("%.6f", eth)
}

// getEnvOrDefault returns the environment variable's value, or the default if it is unset.
func getEnvOrDefault(key, defaultValue string) string {
    if value := os.Getenv(key); value != "" {
        return value
    }
    return defaultValue
}

// validateRPCEndpoint validates an RPC endpoint URL for security.
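// Example (illustrative): "https://arb1.arbitrum.io/rpc" passes;
// "ftp://example.com" fails the scheme check; "http://localhost:8545" fails
// unless MEV_BOT_ALLOW_LOCALHOST=true.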
func validateRPCEndpoint(endpoint string) error {
    if endpoint == "" {
        return fmt.Errorf("RPC endpoint cannot be empty")
    }

    u, err := url.Parse(endpoint)
    if err != nil {
        return fmt.Errorf("invalid RPC endpoint URL: %w", err)
    }

    // Check for valid schemes
    switch u.Scheme {
    case "http", "https", "ws", "wss":
        // Valid schemes
    default:
        return fmt.Errorf("invalid RPC scheme: %s (must be http, https, ws, or wss)", u.Scheme)
    }

    // Reject localhost/loopback endpoints unless explicitly allowed
    if strings.Contains(u.Hostname(), "localhost") || strings.Contains(u.Hostname(), "127.0.0.1") {
        if os.Getenv("MEV_BOT_ALLOW_LOCALHOST") != "true" {
            return fmt.Errorf("localhost RPC endpoints are not allowed (set MEV_BOT_ALLOW_LOCALHOST=true to override)")
        }
    }

    // Validate hostname is not empty
    if u.Hostname() == "" {
        return fmt.Errorf("RPC endpoint must have a valid hostname")
    }

    return nil
}

func scanOpportunities() error {
    fmt.Println("Scanning for arbitrage opportunities...")

    // Load configuration (scan mode uses the same config selection as startBot)
    envMode := strings.ToLower(os.Getenv("GO_ENV"))
    if envMode == "" {
        envMode = "development"
    }

    var configFile string
    switch envMode {
    case "production":
        configFile = "config/arbitrum_production.yaml"
    case "staging":
        configFile = "config/staging.yaml"
    default:
        if _, err := os.Stat("config/local.yaml"); err == nil {
            configFile = "config/local.yaml"
        } else {
            configFile = "config/config.yaml"
        }
    }

    cfg, err := config.Load(configFile)
    if err != nil {
        return fmt.Errorf("failed to load config: %w", err)
    }

    // Initialize logger
    log := logger.New(cfg.Log.Level, cfg.Log.Format, cfg.Log.File)
    log.Info(fmt.Sprintf("Starting one-time arbitrage opportunity scan (config: %s)...", configFile))

    // Initialize provider manager for scanning
    providerManager, err := transport.NewUnifiedProviderManager("config/providers.yaml")
    if err != nil {
        return fmt.Errorf("failed to initialize provider manager: %w", err)
    }
    defer func() {
        if err := providerManager.Close(); err != nil {
            log.Error("Failed to close provider manager in scan mode", "error", err)
        }
    }()

    // Get read-only client for scanning (more efficient)
    client, err := providerManager.GetReadOnlyHTTPClient()
    if err != nil {
        return fmt.Errorf("failed to get read-only client: %w", err)
    }

    // Create key manager (not used for scanning, but required by the service)
    encryptionKey := os.Getenv("MEV_BOT_ENCRYPTION_KEY")
    if encryptionKey == "" {
        return fmt.Errorf("MEV_BOT_ENCRYPTION_KEY environment variable is required")
    }

    keyManagerConfig := &security.KeyManagerConfig{
        KeystorePath:    getEnvOrDefault("MEV_BOT_KEYSTORE_PATH", "keystore"),
        EncryptionKey:   encryptionKey,
        KeyRotationDays: 30,
        MaxSigningRate:  100,
        SessionTimeout:  time.Hour,
        AuditLogPath:    getEnvOrDefault("MEV_BOT_AUDIT_LOG", "logs/audit.log"),
        BackupPath:      getEnvOrDefault("MEV_BOT_BACKUP_PATH", "backups"),
    }
    keyManager, err := security.NewKeyManager(keyManagerConfig, log)
    if err != nil {
        return fmt.Errorf("failed to create key manager: %w", err)
    }

    // Create arbitrage database
    arbitrageDB, err := arbitrage.NewSQLiteDatabase(cfg.Database.File, log)
    if err != nil {
        return fmt.Errorf("failed to create arbitrage database: %w", err)
    }
    defer func() {
        if err := arbitrageDB.Close(); err != nil {
            log.Error("Failed to close arbitrage database in scan mode", "error", err)
        }
    }()

    // Get read-only provider pool for RPC operations in scan mode
    readOnlyPool, err := providerManager.GetPoolForMode(transport.ModeReadOnly)
    if err != nil {
        return fmt.Errorf("failed to get read-only provider pool in scan mode: %w", err)
    }

    // Get RPC client for pool discovery in scan mode
    rpcClient, err := readOnlyPool.GetRPCClient(false)
    if err != nil {
        return fmt.Errorf("failed to get RPC client for pool discovery in scan mode: %w", err)
    }

    // Initialize pool discovery and token cache for scan mode
    poolDiscovery := pools.NewPoolDiscovery(rpcClient, log)
    tokenCache := pkgtokens.NewMetadataCache(log)

    // Create arbitrage service with scanning enabled but execution disabled
    scanConfig := cfg.Arbitrage
    scanConfig.MaxConcurrentExecutions = 0 // Disable execution for scan mode

    arbitrageService, err := arbitrage.NewArbitrageService(
        context.Background(),
        client,
        log,
        &scanConfig,
        keyManager,
        arbitrageDB,
        poolDiscovery,
        tokenCache,
    )
    if err != nil {
        return fmt.Errorf("failed to create arbitrage service: %w", err)
    }

    // Start the service in scan mode
    if err := arbitrageService.Start(); err != nil {
        return fmt.Errorf("failed to start arbitrage service: %w", err)
    }
    defer func() {
        if err := arbitrageService.Stop(); err != nil {
            log.Error("Failed to stop arbitrage service in scan mode", "error", err)
        }
    }()

    // Create context with timeout for scanning
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    log.Info("Scanning for 30 seconds...")

    // Wait for the scan duration to elapse
    <-ctx.Done()

    // Get and display results
    stats := arbitrageService.GetStats()
    log.Info(fmt.Sprintf("Scan Results - Opportunities Detected: %d", stats.TotalOpportunitiesDetected))

    // Get recent opportunities from the database. Use a fresh context here:
    // the scan context above has already expired, so reusing it would make
    // the query fail immediately.
    historyCtx, historyCancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer historyCancel()
    history, err := arbitrageDB.GetExecutionHistory(historyCtx, 10)
    if err == nil && len(history) > 0 {
        log.Info(fmt.Sprintf("Found %d recent opportunities in database", len(history)))
    }

    fmt.Printf("Scan completed. Found %d arbitrage opportunities.\n", stats.TotalOpportunitiesDetected)
    fmt.Println("Check the database and logs for detailed results.")

    return nil
}