feat: create v2-prep branch with comprehensive planning
Restructured project for V2 refactor:

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation
- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern; see sketch below)
- Multi-layer strict validation
- Multi-index pool cache
- Background validation pipeline
- Comprehensive observability

**Critical Issues Addressed:**
- Zero address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

Next Steps:
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
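As a rough illustration of the planned per-exchange parser factory mentioned above, a minimal sketch follows. All names here are hypothetical and are not part of this commit; the idea is one parser per DEX protocol behind a shared interface, selected by event signature.

```go
// Package parsers sketches the planned factory-pattern, per-exchange parser layer.
// Hypothetical illustration only; none of these names exist in this commit.
package parsers

import "fmt"

// RawLog is a minimal stand-in for an on-chain log (topic0 identifies the event).
type RawLog struct {
    Topic0 string
    Data   []byte
}

// SwapEvent is the normalized output shared by all protocol parsers.
type SwapEvent struct {
    Protocol string
    Pool     string
    TokenIn  string
    TokenOut string
}

// SwapParser is implemented once per exchange/protocol (Uniswap V3, Camelot, ...).
type SwapParser interface {
    Protocol() string
    Parse(log RawLog) (*SwapEvent, error)
}

// Factory routes each log to the parser registered for its event signature.
type Factory struct {
    byTopic map[string]SwapParser
}

func NewFactory() *Factory { return &Factory{byTopic: make(map[string]SwapParser)} }

// Register binds a parser to the topic0 (event signature hash) it understands.
func (f *Factory) Register(topic0 string, p SwapParser) { f.byTopic[topic0] = p }

// Parse looks up the protocol-specific parser and rejects unknown events,
// which is where the planned strict-validation layer would hook in.
func (f *Factory) Parse(log RawLog) (*SwapEvent, error) {
    p, ok := f.byTopic[log.Topic0]
    if !ok {
        return nil, fmt.Errorf("no parser registered for event %s", log.Topic0)
    }
    return p.Parse(log)
}
```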
orig/internal/config/config.go (new file, 879 lines)
@@ -0,0 +1,879 @@
package config

import (
    "fmt"
    "net/url"
    "os"
    "regexp"
    "strconv"
    "strings"
    "time"

    "gopkg.in/yaml.v3"
)

// Config represents the application configuration
type Config struct {
    Arbitrum           ArbitrumConfig           `yaml:"arbitrum"`
    Bot                BotConfig                `yaml:"bot"`
    Uniswap            UniswapConfig            `yaml:"uniswap"`
    Log                LogConfig                `yaml:"log"`
    Database           DatabaseConfig           `yaml:"database"`
    Ethereum           EthereumConfig           `yaml:"ethereum"`
    Contracts          ContractsConfig          `yaml:"contracts"`
    Arbitrage          ArbitrageConfig          `yaml:"arbitrage"`
    Features           Features                 `yaml:"features"`
    ArbitrageOptimized ArbitrageOptimizedConfig `yaml:"arbitrage_optimized"`
}

// ArbitrumConfig represents the Arbitrum node configuration
type ArbitrumConfig struct {
    // Chain ID for Arbitrum (42161 for mainnet)
    ChainID int64 `yaml:"chain_id"`

    // Reading endpoints (WSS preferred for real-time monitoring)
    ReadingEndpoints []EndpointConfig `yaml:"reading_endpoints"`

    // Execution endpoints (HTTP/HTTPS or WSS for transaction submission)
    ExecutionEndpoints []EndpointConfig `yaml:"execution_endpoints"`

    // Fallback endpoints for failover scenarios
    FallbackEndpoints []EndpointConfig `yaml:"fallback_endpoints"`

    // Legacy fields for backward compatibility
    RPCEndpoint string `yaml:"rpc_endpoint,omitempty"`
    WSEndpoint  string `yaml:"ws_endpoint,omitempty"`

    // Global rate limiting configuration
    RateLimit RateLimitConfig `yaml:"rate_limit"`
}

// EndpointConfig represents an RPC endpoint configuration
type EndpointConfig struct {
    // RPC endpoint URL
    URL string `yaml:"url"`
    // Endpoint name for identification
    Name string `yaml:"name"`
    // Priority (lower number = higher priority)
    Priority int `yaml:"priority"`
    // Maximum requests per second for this endpoint
    MaxRPS int `yaml:"max_rps"`
    // Maximum concurrent connections
    MaxConcurrent int `yaml:"max_concurrent"`
    // Connection timeout in seconds
    TimeoutSeconds int `yaml:"timeout_seconds"`
    // Health check interval in seconds
    HealthCheckInterval int `yaml:"health_check_interval"`
    // Rate limiting configuration for this endpoint
    RateLimit RateLimitConfig `yaml:"rate_limit"`
}

// RateLimitConfig represents rate limiting configuration
type RateLimitConfig struct {
    // Maximum requests per second
    RequestsPerSecond int `yaml:"requests_per_second"`
    // Maximum concurrent requests
    MaxConcurrent int `yaml:"max_concurrent"`
    // Burst size for rate limiting
    Burst int `yaml:"burst"`
}

// BotConfig represents the bot configuration
type BotConfig struct {
    // Enable or disable the bot
    Enabled bool `yaml:"enabled"`
    // Polling interval in seconds
    PollingInterval int `yaml:"polling_interval"`
    // Minimum profit threshold in USD
    MinProfitThreshold float64 `yaml:"min_profit_threshold"`
    // Gas price multiplier (for faster transactions)
    GasPriceMultiplier float64 `yaml:"gas_price_multiplier"`
    // Maximum number of concurrent workers for processing
    MaxWorkers int `yaml:"max_workers"`
    // Buffer size for channels
    ChannelBufferSize int `yaml:"channel_buffer_size"`
    // Timeout for RPC calls in seconds
    RPCTimeout int `yaml:"rpc_timeout"`
}

// UniswapConfig represents the Uniswap configuration
type UniswapConfig struct {
    // Factory contract address
    FactoryAddress string `yaml:"factory_address"`
    // Position manager contract address
    PositionManagerAddress string `yaml:"position_manager_address"`
    // Supported fee tiers
    FeeTiers []int64 `yaml:"fee_tiers"`
    // Cache configuration for pool data
    Cache CacheConfig `yaml:"cache"`
}

// CacheConfig represents caching configuration
type CacheConfig struct {
    // Enable or disable caching
    Enabled bool `yaml:"enabled"`
    // Cache expiration time in seconds
    Expiration int `yaml:"expiration"`
    // Maximum cache size
    MaxSize int `yaml:"max_size"`
}

// LogConfig represents the logging configuration
type LogConfig struct {
    // Log level (debug, info, warn, error)
    Level string `yaml:"level"`
    // Log format (json, text)
    Format string `yaml:"format"`
    // Log file path (empty for stdout)
    File string `yaml:"file"`
}

// DatabaseConfig represents the database configuration
type DatabaseConfig struct {
    // Database file path
    File string `yaml:"file"`
    // Maximum number of open connections
    MaxOpenConnections int `yaml:"max_open_connections"`
    // Maximum number of idle connections
    MaxIdleConnections int `yaml:"max_idle_connections"`
}

// EthereumConfig represents the Ethereum account configuration
type EthereumConfig struct {
    // Private key for transaction signing
    PrivateKey string `yaml:"private_key"`
    // Account address
    AccountAddress string `yaml:"account_address"`
    // Gas price multiplier (for faster transactions)
    GasPriceMultiplier float64 `yaml:"gas_price_multiplier"`
}

// ContractsConfig represents the smart contract addresses
type ContractsConfig struct {
    // Arbitrage executor contract address
    ArbitrageExecutor string `yaml:"arbitrage_executor"`
    // Flash swapper contract address
    FlashSwapper string `yaml:"flash_swapper"`
    // Flash loan receiver contract address (Balancer flash loans)
    FlashLoanReceiver string `yaml:"flash_loan_receiver"`
    // Balancer Vault address for flash loans
    BalancerVault string `yaml:"balancer_vault"`
    // Data fetcher contract address for batch pool data fetching
    DataFetcher string `yaml:"data_fetcher"`
    // Authorized caller addresses
    AuthorizedCallers []string `yaml:"authorized_callers"`
    // Authorized DEX addresses
    AuthorizedDEXes []string `yaml:"authorized_dexes"`
}

// Load loads the configuration from a file
func Load(filename string) (*Config, error) {
    // Read the config file
    data, err := os.ReadFile(filename)
    if err != nil {
        return nil, fmt.Errorf("failed to read config file: %w", err)
    }

    // Expand environment variables in the raw YAML
    expandedData := expandEnvVars(string(data))

    // Parse the YAML
    var config Config
    if err := yaml.Unmarshal([]byte(expandedData), &config); err != nil {
        return nil, fmt.Errorf("failed to parse config file: %w", err)
    }

    // Override with environment variables if they exist
    config.OverrideWithEnv()

    return &config, nil
}

// expandEnvVars expands ${VAR} and $VAR patterns in the given string
func expandEnvVars(s string) string {
    // Pattern to match ${VAR} and $VAR
    envVarPattern := regexp.MustCompile(`\$\{([^}]+)\}|\$([A-Za-z_][A-Za-z0-9_]*)`)

    return envVarPattern.ReplaceAllStringFunc(s, func(match string) string {
        var varName string

        // Handle ${VAR} format
        if strings.HasPrefix(match, "${") && strings.HasSuffix(match, "}") {
            varName = match[2 : len(match)-1]
        } else if strings.HasPrefix(match, "$") {
            // Handle $VAR format
            varName = match[1:]
        }

        // Get environment variable value
        if value := os.Getenv(varName); value != "" {
            return value
        }

        // Return empty string if environment variable is not set
        // This prevents invalid YAML when variables are missing
        return ""
    })
}

// OverrideWithEnv overrides configuration with environment variables
func (c *Config) OverrideWithEnv() {
    // Override legacy RPC endpoint (backward compatibility)
    if rpcEndpoint := os.Getenv("ARBITRUM_RPC_ENDPOINT"); rpcEndpoint != "" {
        c.Arbitrum.RPCEndpoint = rpcEndpoint
        // Also add to execution endpoints if not already configured
        if len(c.Arbitrum.ExecutionEndpoints) == 0 {
            // Determine RPS based on endpoint type
            rps := 200
            if strings.HasPrefix(rpcEndpoint, "ws") {
                rps = 300
            }
            c.Arbitrum.ExecutionEndpoints = append(c.Arbitrum.ExecutionEndpoints, EndpointConfig{
                URL:                 rpcEndpoint,
                Name:                "Arbitrum Public HTTP",
                Priority:            1,
                MaxRPS:              rps,
                MaxConcurrent:       20,
                TimeoutSeconds:      30,
                HealthCheckInterval: 60,
                RateLimit: RateLimitConfig{
                    RequestsPerSecond: rps,
                    MaxConcurrent:     20,
                    Burst:             rps * 2,
                },
            })
        }
    }

    // Override legacy WebSocket endpoint (backward compatibility)
    if wsEndpoint := os.Getenv("ARBITRUM_WS_ENDPOINT"); wsEndpoint != "" {
        c.Arbitrum.WSEndpoint = wsEndpoint
        // Also add to reading endpoints if not already configured
        if len(c.Arbitrum.ReadingEndpoints) == 0 {
            c.Arbitrum.ReadingEndpoints = append(c.Arbitrum.ReadingEndpoints, EndpointConfig{
                URL:                 wsEndpoint,
                Name:                "Arbitrum Public WS",
                Priority:            1,
                MaxRPS:              300,
                MaxConcurrent:       25,
                TimeoutSeconds:      60,
                HealthCheckInterval: 30,
                RateLimit: RateLimitConfig{
                    RequestsPerSecond: 300,
                    MaxConcurrent:     25,
                    Burst:             600,
                },
            })
        }
    }

    // Override reading endpoints from environment
    if readingEndpoints := os.Getenv("ARBITRUM_READING_ENDPOINTS"); readingEndpoints != "" {
        c.Arbitrum.ReadingEndpoints = c.parseEndpointsFromEnv(readingEndpoints, "Reading")
    }

    // Override execution endpoints from environment
    if executionEndpoints := os.Getenv("ARBITRUM_EXECUTION_ENDPOINTS"); executionEndpoints != "" {
        c.Arbitrum.ExecutionEndpoints = c.parseEndpointsFromEnv(executionEndpoints, "Execution")
    }

    // Override fallback endpoints from environment (legacy support)
    if fallbackEndpoints := os.Getenv("ARBITRUM_FALLBACK_ENDPOINTS"); fallbackEndpoints != "" {
        // Add to both reading and execution if they're empty
        fallbackConfigs := c.parseEndpointsFromEnv(fallbackEndpoints, "Fallback")
        if len(c.Arbitrum.ReadingEndpoints) == 0 {
            c.Arbitrum.ReadingEndpoints = append(c.Arbitrum.ReadingEndpoints, fallbackConfigs...)
        }
        if len(c.Arbitrum.ExecutionEndpoints) == 0 {
            c.Arbitrum.ExecutionEndpoints = append(c.Arbitrum.ExecutionEndpoints, fallbackConfigs...)
        }
    }

    // Override rate limit settings
    if rps := os.Getenv("RPC_REQUESTS_PER_SECOND"); rps != "" {
        if val, err := strconv.Atoi(rps); err == nil {
            c.Arbitrum.RateLimit.RequestsPerSecond = val
        }
    }

    if maxConcurrent := os.Getenv("RPC_MAX_CONCURRENT"); maxConcurrent != "" {
        if val, err := strconv.Atoi(maxConcurrent); err == nil {
            c.Arbitrum.RateLimit.MaxConcurrent = val
        }
    }

    // Override bot settings
    if maxWorkers := os.Getenv("BOT_MAX_WORKERS"); maxWorkers != "" {
        if val, err := strconv.Atoi(maxWorkers); err == nil {
            c.Bot.MaxWorkers = val
        }
    }

    if channelBufferSize := os.Getenv("BOT_CHANNEL_BUFFER_SIZE"); channelBufferSize != "" {
        if val, err := strconv.Atoi(channelBufferSize); err == nil {
            c.Bot.ChannelBufferSize = val
        }
    }

    // Override Ethereum settings
    if privateKey := os.Getenv("ETHEREUM_PRIVATE_KEY"); privateKey != "" {
        c.Ethereum.PrivateKey = privateKey
    }

    if accountAddress := os.Getenv("ETHEREUM_ACCOUNT_ADDRESS"); accountAddress != "" {
        c.Ethereum.AccountAddress = accountAddress
    }

    if gasPriceMultiplier := os.Getenv("ETHEREUM_GAS_PRICE_MULTIPLIER"); gasPriceMultiplier != "" {
        if val, err := strconv.ParseFloat(gasPriceMultiplier, 64); err == nil {
            c.Ethereum.GasPriceMultiplier = val
        }
    }

    // Override contract addresses
    if arbitrageExecutor := os.Getenv("CONTRACT_ARBITRAGE_EXECUTOR"); arbitrageExecutor != "" {
        c.Contracts.ArbitrageExecutor = arbitrageExecutor
    }

    if flashSwapper := os.Getenv("CONTRACT_FLASH_SWAPPER"); flashSwapper != "" {
        c.Contracts.FlashSwapper = flashSwapper
    }
}

// parseEndpointsFromEnv parses comma-separated endpoint URLs from environment variable
func (c *Config) parseEndpointsFromEnv(endpointsStr, namePrefix string) []EndpointConfig {
    if endpointsStr == "" {
        return nil
    }

    urls := strings.Split(endpointsStr, ",")
    endpoints := make([]EndpointConfig, 0, len(urls))

    for i, url := range urls {
        url = strings.TrimSpace(url)
        if url == "" {
            continue
        }

        // Determine defaults based on URL scheme
        var maxRPS, maxConcurrent, timeoutSeconds, healthCheckInterval int
        if strings.HasPrefix(url, "ws") {
            // WebSocket endpoints - higher rate limits for real-time data
            maxRPS = 300
            maxConcurrent = 25
            timeoutSeconds = 60
            healthCheckInterval = 30
        } else {
            // HTTP endpoints - conservative rate limits
            maxRPS = 200
            maxConcurrent = 20
            timeoutSeconds = 30
            healthCheckInterval = 60
        }

        endpoint := EndpointConfig{
            URL:                 url,
            Name:                fmt.Sprintf("%s-%d", namePrefix, i+1),
            Priority:            i + 1, // Lower number = higher priority
            MaxRPS:              maxRPS,
            MaxConcurrent:       maxConcurrent,
            TimeoutSeconds:      timeoutSeconds,
            HealthCheckInterval: healthCheckInterval,
            RateLimit: RateLimitConfig{
                RequestsPerSecond: maxRPS,
                MaxConcurrent:     maxConcurrent,
                Burst:             maxRPS * 2, // Allow burst of 2x normal rate
            },
        }

        endpoints = append(endpoints, endpoint)
    }

    return endpoints
}

// CreateProviderConfigFile creates a temporary YAML config file for the transport system
func (c *Config) CreateProviderConfigFile(tempPath string) error {
    // Convert config to provider format
    providerConfig := c.ConvertToProviderConfig()

    // Marshal to YAML
    yamlData, err := yaml.Marshal(providerConfig)
    if err != nil {
        return fmt.Errorf("failed to marshal provider config: %w", err)
    }

    // Write to file
    if err := os.WriteFile(tempPath, yamlData, 0644); err != nil {
        return fmt.Errorf("failed to write provider config file: %w", err)
    }

    return nil
}

// createProviderConfig builds a single provider entry for the transport configuration
func (c *Config) createProviderConfig(endpoint EndpointConfig, features []string) map[string]interface{} {
    provider := map[string]interface{}{
        "name":          endpoint.Name,
        "type":          "standard",
        "http_endpoint": "",
        "ws_endpoint":   "",
        "priority":      endpoint.Priority,
        "rate_limit": map[string]interface{}{
            "requests_per_second": endpoint.RateLimit.RequestsPerSecond,
            "burst":               endpoint.RateLimit.Burst,
            "timeout":             fmt.Sprintf("%ds", endpoint.TimeoutSeconds),
            "retry_delay":         "1s",
            "max_retries":         3,
        },
        "features": features,
        "health_check": map[string]interface{}{
            "enabled":  true,
            "interval": fmt.Sprintf("%ds", endpoint.HealthCheckInterval),
            "timeout":  fmt.Sprintf("%ds", endpoint.TimeoutSeconds),
        },
    }

    // Determine endpoint type and assign to appropriate field
    if strings.HasPrefix(endpoint.URL, "ws") {
        provider["ws_endpoint"] = endpoint.URL
    } else {
        provider["http_endpoint"] = endpoint.URL
    }

    return provider
}

// ConvertToProviderConfig converts ArbitrumConfig to transport.ProvidersConfig
func (c *Config) ConvertToProviderConfig() map[string]interface{} {
    providerConfigs := make([]map[string]interface{}, 0)

    // Handle legacy configuration if new endpoints are not configured
    if len(c.Arbitrum.ReadingEndpoints) == 0 && len(c.Arbitrum.ExecutionEndpoints) == 0 {
        // Use legacy RPC and WS endpoints
        if c.Arbitrum.RPCEndpoint != "" {
            // Set default rate limits if zero
            rps := c.Arbitrum.RateLimit.RequestsPerSecond
            if rps <= 0 {
                if strings.HasPrefix(c.Arbitrum.RPCEndpoint, "ws") {
                    rps = 300 // Default for WebSocket
                } else {
                    rps = 200 // Default for HTTP
                }
            }
            burst := c.Arbitrum.RateLimit.Burst
            if burst <= 0 {
                burst = rps * 2 // Default burst is 2x RPS
            }

            provider := map[string]interface{}{
                "name":     "Legacy-RPC",
                "type":     "standard",
                "priority": 1,
                "rate_limit": map[string]interface{}{
                    "requests_per_second": rps,
                    "burst":               burst,
                    "timeout":             "30s",
                    "retry_delay":         "1s",
                    "max_retries":         3,
                },
                "features": []string{"execution", "reading"},
                "health_check": map[string]interface{}{
                    "enabled":  true,
                    "interval": "60s",
                    "timeout":  "30s",
                },
            }

            // Determine endpoint type and assign to appropriate field
            if strings.HasPrefix(c.Arbitrum.RPCEndpoint, "ws") {
                provider["http_endpoint"] = ""
                provider["ws_endpoint"] = c.Arbitrum.RPCEndpoint
            } else {
                provider["http_endpoint"] = c.Arbitrum.RPCEndpoint
                provider["ws_endpoint"] = ""
            }

            providerConfigs = append(providerConfigs, provider)
        }

        if c.Arbitrum.WSEndpoint != "" {
            // Set default rate limits if zero
            rps := c.Arbitrum.RateLimit.RequestsPerSecond
            if rps <= 0 {
                rps = 300 // Default for WebSocket
            }
            burst := c.Arbitrum.RateLimit.Burst
            if burst <= 0 {
                burst = rps * 2 // Default burst is 2x RPS
            }

            provider := map[string]interface{}{
                "name":          "Legacy-WSS",
                "type":          "standard",
                "http_endpoint": "",
                "ws_endpoint":   c.Arbitrum.WSEndpoint,
                "priority":      1,
                "rate_limit": map[string]interface{}{
                    "requests_per_second": rps,
                    "burst":               burst,
                    "timeout":             "60s",
                    "retry_delay":         "1s",
                    "max_retries":         3,
                },
                "features": []string{"reading", "real_time"},
                "health_check": map[string]interface{}{
                    "enabled":  true,
                    "interval": "30s",
                    "timeout":  "60s",
                },
            }
            providerConfigs = append(providerConfigs, provider)
        }

        // Create simple pool configuration for legacy mode
        providerPools := make(map[string]interface{})
        if len(providerConfigs) > 0 {
            providerNames := make([]string, 0)
            for _, provider := range providerConfigs {
                providerNames = append(providerNames, provider["name"].(string))
            }

            // Use same providers for both reading and execution in legacy mode
            providerPools["read_only"] = map[string]interface{}{
                "strategy":                   "priority_based",
                "max_concurrent_connections": 25,
                "health_check_interval":      "30s",
                "failover_enabled":           true,
                "providers":                  providerNames,
            }
            providerPools["execution"] = map[string]interface{}{
                "strategy":                   "priority_based",
                "max_concurrent_connections": 20,
                "health_check_interval":      "30s",
                "failover_enabled":           true,
                "providers":                  providerNames,
            }
        }

        return map[string]interface{}{
            "provider_pools": providerPools,
            "providers":      providerConfigs,
            "rotation": map[string]interface{}{
                "strategy":              "priority_based",
                "health_check_required": true,
                "fallover_enabled":      true,
                "retry_failed_after":    "5m",
            },
            "global_limits": map[string]interface{}{
                "max_concurrent_connections": 50,
                "connection_timeout":         "30s",
                "read_timeout":               "60s",
                "write_timeout":              "30s",
                "idle_timeout":               "300s",
            },
            "monitoring": map[string]interface{}{
                "enabled":                    true,
                "metrics_interval":           "60s",
                "log_slow_requests":          true,
                "slow_request_threshold":     "5s",
                "track_provider_performance": true,
            },
        }
    }

    // Convert reading endpoints
    for _, endpoint := range c.Arbitrum.ReadingEndpoints {
        providerConfigs = append(providerConfigs, c.createProviderConfig(endpoint, []string{"reading", "real_time"}))
    }

    // Convert execution endpoints
    for _, endpoint := range c.Arbitrum.ExecutionEndpoints {
        providerConfigs = append(providerConfigs, c.createProviderConfig(endpoint, []string{"execution", "transaction_submission"}))
    }

    // Build provider pool configurations
    providerPools := make(map[string]interface{})

    // Reading pool configuration
    if len(c.Arbitrum.ReadingEndpoints) > 0 {
        readingProviders := make([]string, 0)
        for _, endpoint := range c.Arbitrum.ReadingEndpoints {
            readingProviders = append(readingProviders, endpoint.Name)
        }
        providerPools["read_only"] = map[string]interface{}{
            "strategy":                   "websocket_preferred",
            "max_concurrent_connections": 25,
            "health_check_interval":      "30s",
            "failover_enabled":           true,
            "providers":                  readingProviders,
        }
    }

    // Execution pool configuration
    if len(c.Arbitrum.ExecutionEndpoints) > 0 {
        executionProviders := make([]string, 0)
        for _, endpoint := range c.Arbitrum.ExecutionEndpoints {
            executionProviders = append(executionProviders, endpoint.Name)
        }
        providerPools["execution"] = map[string]interface{}{
            "strategy":                   "reliability_first",
            "max_concurrent_connections": 20,
            "health_check_interval":      "30s",
            "failover_enabled":           true,
            "providers":                  executionProviders,
        }
    }

    // Complete configuration
    return map[string]interface{}{
        "provider_pools": providerPools,
        "providers":      providerConfigs,
        "rotation": map[string]interface{}{
            "strategy":              "priority_based",
            "health_check_required": true,
            "fallover_enabled":      true,
            "retry_failed_after":    "5m",
        },
        "global_limits": map[string]interface{}{
            "max_concurrent_connections": 50,
            "connection_timeout":         "30s",
            "read_timeout":               "60s",
            "write_timeout":              "30s",
            "idle_timeout":               "300s",
        },
        "monitoring": map[string]interface{}{
            "enabled":                    true,
            "metrics_interval":           "60s",
            "log_slow_requests":          true,
            "slow_request_threshold":     "5s",
            "track_provider_performance": true,
        },
    }
}

// ValidateEnvironmentVariables validates all required environment variables
func (c *Config) ValidateEnvironmentVariables() error {
    // Validate RPC endpoint
    if c.Arbitrum.RPCEndpoint == "" {
        return fmt.Errorf("ARBITRUM_RPC_ENDPOINT environment variable is required")
    }

    if err := validateRPCEndpoint(c.Arbitrum.RPCEndpoint); err != nil {
        return fmt.Errorf("invalid ARBITRUM_RPC_ENDPOINT: %w", err)
    }

    // Validate WebSocket endpoint if provided
    if c.Arbitrum.WSEndpoint != "" {
        if err := validateRPCEndpoint(c.Arbitrum.WSEndpoint); err != nil {
            return fmt.Errorf("invalid ARBITRUM_WS_ENDPOINT: %w", err)
        }
    }

    // Validate Ethereum private key
    if c.Ethereum.PrivateKey == "" {
        return fmt.Errorf("ETHEREUM_PRIVATE_KEY environment variable is required")
    }

    // Validate account address
    if c.Ethereum.AccountAddress == "" {
        return fmt.Errorf("ETHEREUM_ACCOUNT_ADDRESS environment variable is required")
    }

    // Validate contract addresses
    if c.Contracts.ArbitrageExecutor == "" {
        return fmt.Errorf("CONTRACT_ARBITRAGE_EXECUTOR environment variable is required")
    }

    if c.Contracts.FlashSwapper == "" {
        return fmt.Errorf("CONTRACT_FLASH_SWAPPER environment variable is required")
    }

    // Validate numeric values
    if c.Arbitrum.RateLimit.RequestsPerSecond < 0 {
        return fmt.Errorf("RPC_REQUESTS_PER_SECOND must be non-negative")
    }

    if c.Arbitrum.RateLimit.MaxConcurrent < 0 {
        return fmt.Errorf("RPC_MAX_CONCURRENT must be non-negative")
    }

    if c.Bot.MaxWorkers <= 0 {
        return fmt.Errorf("BOT_MAX_WORKERS must be positive")
    }

    if c.Bot.ChannelBufferSize < 0 {
        return fmt.Errorf("BOT_CHANNEL_BUFFER_SIZE must be non-negative")
    }

    if c.Ethereum.GasPriceMultiplier < 0 {
        return fmt.Errorf("ETHEREUM_GAS_PRICE_MULTIPLIER must be non-negative")
    }

    return nil
}

// validateRPCEndpoint validates RPC endpoint URL for security and format
func validateRPCEndpoint(endpoint string) error {
    if endpoint == "" {
        return fmt.Errorf("RPC endpoint cannot be empty")
    }

    u, err := url.Parse(endpoint)
    if err != nil {
        return fmt.Errorf("invalid RPC endpoint URL: %w", err)
    }

    // Check for valid schemes
    switch u.Scheme {
    case "http", "https", "ws", "wss":
        // Valid schemes
    default:
        return fmt.Errorf("invalid RPC scheme: %s (must be http, https, ws, or wss)", u.Scheme)
    }

    // Check for localhost/private networks in production
    if strings.Contains(u.Hostname(), "localhost") || strings.Contains(u.Hostname(), "127.0.0.1") {
        // Allow localhost only if explicitly enabled
        if os.Getenv("MEV_BOT_ALLOW_LOCALHOST") != "true" {
            return fmt.Errorf("localhost RPC endpoints not allowed in production (set MEV_BOT_ALLOW_LOCALHOST=true to override)")
        }
    }

    // Validate hostname is not empty
    if u.Hostname() == "" {
        return fmt.Errorf("RPC endpoint must have a valid hostname")
    }

    return nil
}

// ArbitrageConfig represents the arbitrage service configuration
type ArbitrageConfig struct {
    // Enable or disable arbitrage service
    Enabled bool `yaml:"enabled"`

    // Contract addresses
    ArbitrageContractAddress string `yaml:"arbitrage_contract_address"`
    FlashSwapContractAddress string `yaml:"flash_swap_contract_address"`

    // Profitability settings
    MinProfitWei           int64   `yaml:"min_profit_wei"`
    MinROIPercent          float64 `yaml:"min_roi_percent"`
    MinSignificantSwapSize int64   `yaml:"min_significant_swap_size"`
    SlippageTolerance      float64 `yaml:"slippage_tolerance"`

    // Scanning configuration
    MinScanAmountWei int64 `yaml:"min_scan_amount_wei"`
    MaxScanAmountWei int64 `yaml:"max_scan_amount_wei"`

    // Gas configuration
    MaxGasPriceWei int64 `yaml:"max_gas_price_wei"`

    // Execution limits
    MaxConcurrentExecutions  int `yaml:"max_concurrent_executions"`
    MaxOpportunitiesPerEvent int `yaml:"max_opportunities_per_event"`

    // Timing settings
    OpportunityTTL      time.Duration `yaml:"opportunity_ttl"`
    MaxPathAge          time.Duration `yaml:"max_path_age"`
    StatsUpdateInterval time.Duration `yaml:"stats_update_interval"`

    // Pool discovery configuration
    PoolDiscoveryConfig PoolDiscoveryConfig `yaml:"pool_discovery"`
}

// PoolDiscoveryConfig represents pool discovery service configuration
type PoolDiscoveryConfig struct {
    // Enable or disable pool discovery
    Enabled bool `yaml:"enabled"`

    // Block range to scan for new pools
    BlockRange uint64 `yaml:"block_range"`

    // Polling interval for new pools
    PollingInterval time.Duration `yaml:"polling_interval"`

    // DEX factory addresses to monitor
    FactoryAddresses []string `yaml:"factory_addresses"`

    // Minimum liquidity threshold for pools
    MinLiquidityWei int64 `yaml:"min_liquidity_wei"`

    // Cache configuration
    CacheSize int           `yaml:"cache_size"`
    CacheTTL  time.Duration `yaml:"cache_ttl"`
}

// Features represents Layer 2 optimization feature flags
type Features struct {
    // Phase 1: Configuration tuning
    UseArbitrumOptimizedTimeouts bool `yaml:"use_arbitrum_optimized_timeouts"`
    UseDynamicTTL                bool `yaml:"use_dynamic_ttl"`

    // Phase 2: Transaction filtering
    EnableDEXPrefilter bool `yaml:"enable_dex_prefilter"`

    // Phase 3: Sequencer optimization
    UseDirectSequencerFeed bool `yaml:"use_direct_sequencer_feed"`

    // Phase 4-5: Timeboost
    EnableTimeboost bool `yaml:"enable_timeboost"`
}

// ArbitrageOptimizedConfig represents Arbitrum-optimized arbitrage timing
type ArbitrageOptimizedConfig struct {
    // Opportunity lifecycle (tuned for 250ms blocks)
    OpportunityTTL    time.Duration `yaml:"opportunity_ttl"`
    MaxPathAge        time.Duration `yaml:"max_path_age"`
    ExecutionDeadline time.Duration `yaml:"execution_deadline"`

    // Legacy values for rollback
    LegacyOpportunityTTL time.Duration `yaml:"legacy_opportunity_ttl"`
    LegacyMaxPathAge     time.Duration `yaml:"legacy_max_path_age"`

    // Dynamic TTL settings
    DynamicTTL DynamicTTLConfig `yaml:"dynamic_ttl"`
}

// DynamicTTLConfig represents dynamic TTL calculation settings
type DynamicTTLConfig struct {
    MinTTLBlocks         int  `yaml:"min_ttl_blocks"`
    MaxTTLBlocks         int  `yaml:"max_ttl_blocks"`
    ProfitMultiplier     bool `yaml:"profit_multiplier"`
    VolatilityAdjustment bool `yaml:"volatility_adjustment"`
}

// GetOpportunityTTL returns the active opportunity TTL based on feature flags
func (c *Config) GetOpportunityTTL() time.Duration {
    if c.Features.UseArbitrumOptimizedTimeouts {
        return c.ArbitrageOptimized.OpportunityTTL
    }
    // Fallback to legacy config
    if c.Arbitrage.OpportunityTTL > 0 {
        return c.Arbitrage.OpportunityTTL
    }
    // Default fallback
    return 30 * time.Second
}

// GetMaxPathAge returns the active max path age based on feature flags
func (c *Config) GetMaxPathAge() time.Duration {
    if c.Features.UseArbitrumOptimizedTimeouts {
        return c.ArbitrageOptimized.MaxPathAge
    }
    // Fallback to legacy config
    if c.Arbitrage.MaxPathAge > 0 {
        return c.Arbitrage.MaxPathAge
    }
    // Default fallback
    return 60 * time.Second
}

// GetExecutionDeadline returns the execution deadline
func (c *Config) GetExecutionDeadline() time.Duration {
    if c.Features.UseArbitrumOptimizedTimeouts && c.ArbitrageOptimized.ExecutionDeadline > 0 {
        return c.ArbitrageOptimized.ExecutionDeadline
    }
    // Default fallback for Arbitrum (12 blocks @ 250ms)
    return 3 * time.Second
}
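For reviewers, a minimal usage sketch of the package above. The module path "mevbot/internal/config" and the config file name are assumptions, not taken from this commit; Load, ValidateEnvironmentVariables, GetOpportunityTTL, and GetExecutionDeadline are the functions shown in the file.

package main

import (
    "log"

    "mevbot/internal/config" // assumed module path; hypothetical
)

func main() {
    // Load reads the YAML, expands ${VAR} references in the raw file,
    // then applies environment-variable overrides via OverrideWithEnv.
    cfg, err := config.Load("config.yaml") // file name is an assumption
    if err != nil {
        log.Fatalf("load config: %v", err)
    }

    // Fail fast if required endpoints, keys, or contract addresses are missing.
    if err := cfg.ValidateEnvironmentVariables(); err != nil {
        log.Fatalf("invalid configuration: %v", err)
    }

    // Feature-flagged timing values fall back to legacy settings, then defaults.
    log.Printf("opportunity TTL=%s, execution deadline=%s",
        cfg.GetOpportunityTTL(), cfg.GetExecutionDeadline())
}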
orig/internal/config/config_test.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package config

import (
    "os"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestLoad(t *testing.T) {
    // Create a temporary config file for testing
    tmpFile, err := os.CreateTemp("", "config_test_*.yaml")
    require.NoError(t, err)
    defer os.Remove(tmpFile.Name())

    // Write test config content
    configContent := `
arbitrum:
  rpc_endpoint: "${ARBITRUM_RPC_ENDPOINT}"
  ws_endpoint: "${ARBITRUM_WS_ENDPOINT}"
  chain_id: 42161
  rate_limit:
    requests_per_second: 5
    max_concurrent: 3
    burst: 10

bot:
  enabled: true
  polling_interval: 3
  min_profit_threshold: 10.0
  gas_price_multiplier: 1.2
  max_workers: 3
  channel_buffer_size: 50
  rpc_timeout: 30

uniswap:
  factory_address: "0x1F98431c8aD98523631AE4a59f267346ea31F984"
  position_manager_address: "0xC36442b4a4522E871399CD717aBDD847Ab11FE88"
  fee_tiers:
    - 500
    - 3000
    - 10000
  cache:
    enabled: true
    expiration: 300
    max_size: 10000

log:
  level: "debug"
  format: "text"
  file: "logs/mev-bot.log"

database:
  file: "mev-bot.db"
  max_open_connections: 10
  max_idle_connections: 5
`
    _, err = tmpFile.Write([]byte(configContent))
    require.NoError(t, err)
    err = tmpFile.Close()
    require.NoError(t, err)

    // Set environment variables for test
    os.Setenv("ARBITRUM_RPC_ENDPOINT", "wss://arbitrum-mainnet.core.chainstack.com/53c30e7a941160679fdcc396c894fc57")
    os.Setenv("ARBITRUM_WS_ENDPOINT", "wss://arbitrum-mainnet.core.chainstack.com/53c30e7a941160679fdcc396c894fc57")
    defer func() {
        os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
        os.Unsetenv("ARBITRUM_WS_ENDPOINT")
    }()

    // Test loading the config
    cfg, err := Load(tmpFile.Name())
    require.NoError(t, err)

    // Verify the loaded config
    assert.Equal(t, "wss://arbitrum-mainnet.core.chainstack.com/53c30e7a941160679fdcc396c894fc57", cfg.Arbitrum.RPCEndpoint)
    assert.Equal(t, "wss://arbitrum-mainnet.core.chainstack.com/53c30e7a941160679fdcc396c894fc57", cfg.Arbitrum.WSEndpoint)
    assert.Equal(t, int64(42161), cfg.Arbitrum.ChainID)
    assert.Equal(t, 5, cfg.Arbitrum.RateLimit.RequestsPerSecond)
    assert.True(t, cfg.Bot.Enabled)
    assert.Equal(t, 3, cfg.Bot.PollingInterval)
    assert.Equal(t, 10.0, cfg.Bot.MinProfitThreshold)
    assert.Equal(t, "0x1F98431c8aD98523631AE4a59f267346ea31F984", cfg.Uniswap.FactoryAddress)
    assert.Len(t, cfg.Uniswap.FeeTiers, 3)
    assert.Equal(t, true, cfg.Uniswap.Cache.Enabled)
    assert.Equal(t, "debug", cfg.Log.Level)
    assert.Equal(t, "logs/mev-bot.log", cfg.Log.File)
    assert.Equal(t, "mev-bot.db", cfg.Database.File)
}

func TestLoadWithInvalidFile(t *testing.T) {
    // Test loading a non-existent config file
    _, err := Load("/non/existent/file.yaml")
    assert.Error(t, err)
}

func TestOverrideWithEnv(t *testing.T) {
    // Create a temporary config file for testing
    tmpFile, err := os.CreateTemp("", "config_test_*.yaml")
    require.NoError(t, err)
    defer os.Remove(tmpFile.Name())

    // Write test config content
    configContent := `
arbitrum:
  rpc_endpoint: "https://arb1.arbitrum.io/rpc"
  rate_limit:
    requests_per_second: 10
    max_concurrent: 5

bot:
  max_workers: 10
  channel_buffer_size: 100
`
    _, err = tmpFile.Write([]byte(configContent))
    require.NoError(t, err)
    err = tmpFile.Close()
    require.NoError(t, err)

    // Set environment variables to override config
    os.Setenv("ARBITRUM_RPC_ENDPOINT", "https://override.arbitrum.io/rpc")
    os.Setenv("RPC_REQUESTS_PER_SECOND", "20")
    os.Setenv("BOT_MAX_WORKERS", "20")
    defer func() {
        os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
        os.Unsetenv("RPC_REQUESTS_PER_SECOND")
        os.Unsetenv("BOT_MAX_WORKERS")
    }()

    // Load the config
    cfg, err := Load(tmpFile.Name())
    require.NoError(t, err)

    // Verify the overridden values
    assert.Equal(t, "https://override.arbitrum.io/rpc", cfg.Arbitrum.RPCEndpoint)
    assert.Equal(t, 20, cfg.Arbitrum.RateLimit.RequestsPerSecond)
    assert.Equal(t, 20, cfg.Bot.MaxWorkers)
}
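The comma-separated endpoint override path (ARBITRUM_READING_ENDPOINTS / ARBITRUM_EXECUTION_ENDPOINTS) is not exercised by the tests above. A possible additional test is sketched here under the assumption that it lives in the same package and reuses the testify imports already shown; the URLs are placeholders.

func TestParseEndpointsFromEnv(t *testing.T) {
    cfg := &Config{}

    // Two comma-separated URLs: one WebSocket, one HTTP (hypothetical hosts).
    endpoints := cfg.parseEndpointsFromEnv(
        "wss://example-ws.arbitrum.example, https://example-http.arbitrum.example", "Reading")

    require.Len(t, endpoints, 2)

    // WebSocket endpoints get the higher default rate limits.
    assert.Equal(t, "Reading-1", endpoints[0].Name)
    assert.Equal(t, 300, endpoints[0].MaxRPS)
    assert.Equal(t, 1, endpoints[0].Priority)

    // HTTP endpoints get the conservative defaults.
    assert.Equal(t, "Reading-2", endpoints[1].Name)
    assert.Equal(t, 200, endpoints[1].MaxRPS)
    assert.Equal(t, 2, endpoints[1].Priority)
}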