saving in place

This commit is contained in:
Krypto Kajun
2025-10-04 09:31:02 -05:00
parent 76c1b5cee1
commit f358f49aa9
295 changed files with 72071 additions and 17209 deletions

View File

@@ -26,22 +26,42 @@ type Config struct {
// ArbitrumConfig represents the Arbitrum node configuration
type ArbitrumConfig struct {
// Primary RPC endpoint
RPCEndpoint string `yaml:"rpc_endpoint"`
// WebSocket endpoint for Arbitrum node (optional)
WSEndpoint string `yaml:"ws_endpoint"`
// Chain ID for Arbitrum (42161 for mainnet)
ChainID int64 `yaml:"chain_id"`
// Rate limiting configuration for RPC endpoint
RateLimit RateLimitConfig `yaml:"rate_limit"`
// Fallback RPC endpoints
// Reading endpoints (WSS preferred for real-time monitoring)
ReadingEndpoints []EndpointConfig `yaml:"reading_endpoints"`
// Execution endpoints (HTTP/HTTPS or WSS for transaction submission)
ExecutionEndpoints []EndpointConfig `yaml:"execution_endpoints"`
// Fallback endpoints for failover scenarios
FallbackEndpoints []EndpointConfig `yaml:"fallback_endpoints"`
// Legacy fields for backward compatibility
RPCEndpoint string `yaml:"rpc_endpoint,omitempty"`
WSEndpoint string `yaml:"ws_endpoint,omitempty"`
// Global rate limiting configuration
RateLimit RateLimitConfig `yaml:"rate_limit"`
}
// EndpointConfig represents an RPC endpoint configuration, covering both
// transport parameters (URL, timeouts) and per-endpoint rate limiting.
type EndpointConfig struct {
	// RPC endpoint URL
	URL string `yaml:"url"`
	// Endpoint name for identification
	Name string `yaml:"name"`
	// Priority (lower number = higher priority)
	Priority int `yaml:"priority"`
	// Maximum requests per second for this endpoint
	MaxRPS int `yaml:"max_rps"`
	// Maximum concurrent connections
	MaxConcurrent int `yaml:"max_concurrent"`
	// Connection timeout in seconds
	TimeoutSeconds int `yaml:"timeout_seconds"`
	// Health check interval in seconds
	HealthCheckInterval int `yaml:"health_check_interval"`
	// Rate limiting configuration for this endpoint
	RateLimit RateLimitConfig `yaml:"rate_limit"`
}
@@ -190,32 +210,74 @@ func expandEnvVars(s string) string {
// OverrideWithEnv overrides configuration with environment variables
func (c *Config) OverrideWithEnv() {
// Override RPC endpoint
// Override legacy RPC endpoint (backward compatibility)
if rpcEndpoint := os.Getenv("ARBITRUM_RPC_ENDPOINT"); rpcEndpoint != "" {
c.Arbitrum.RPCEndpoint = rpcEndpoint
// Also add to execution endpoints if not already configured
if len(c.Arbitrum.ExecutionEndpoints) == 0 {
// Determine RPS based on endpoint type
rps := 200
if strings.HasPrefix(rpcEndpoint, "ws") {
rps = 300
}
c.Arbitrum.ExecutionEndpoints = append(c.Arbitrum.ExecutionEndpoints, EndpointConfig{
URL: rpcEndpoint,
Name: "Primary RPC",
Priority: 1,
MaxRPS: rps,
MaxConcurrent: 20,
TimeoutSeconds: 30,
HealthCheckInterval: 60,
RateLimit: RateLimitConfig{
RequestsPerSecond: rps,
MaxConcurrent: 20,
Burst: rps * 2,
},
})
}
}
// Override WebSocket endpoint
// Override legacy WebSocket endpoint (backward compatibility)
if wsEndpoint := os.Getenv("ARBITRUM_WS_ENDPOINT"); wsEndpoint != "" {
c.Arbitrum.WSEndpoint = wsEndpoint
// Also add to reading endpoints if not already configured
if len(c.Arbitrum.ReadingEndpoints) == 0 {
c.Arbitrum.ReadingEndpoints = append(c.Arbitrum.ReadingEndpoints, EndpointConfig{
URL: wsEndpoint,
Name: "Primary WSS",
Priority: 1,
MaxRPS: 300,
MaxConcurrent: 25,
TimeoutSeconds: 60,
HealthCheckInterval: 30,
RateLimit: RateLimitConfig{
RequestsPerSecond: 300,
MaxConcurrent: 25,
Burst: 600,
},
})
}
}
// Override fallback endpoints from environment
// Override reading endpoints from environment
if readingEndpoints := os.Getenv("ARBITRUM_READING_ENDPOINTS"); readingEndpoints != "" {
c.Arbitrum.ReadingEndpoints = c.parseEndpointsFromEnv(readingEndpoints, "Reading")
}
// Override execution endpoints from environment
if executionEndpoints := os.Getenv("ARBITRUM_EXECUTION_ENDPOINTS"); executionEndpoints != "" {
c.Arbitrum.ExecutionEndpoints = c.parseEndpointsFromEnv(executionEndpoints, "Execution")
}
// Override fallback endpoints from environment (legacy support)
if fallbackEndpoints := os.Getenv("ARBITRUM_FALLBACK_ENDPOINTS"); fallbackEndpoints != "" {
endpoints := strings.Split(fallbackEndpoints, ",")
c.Arbitrum.FallbackEndpoints = make([]EndpointConfig, 0, len(endpoints))
for _, endpoint := range endpoints {
endpoint = strings.TrimSpace(endpoint)
if endpoint != "" {
c.Arbitrum.FallbackEndpoints = append(c.Arbitrum.FallbackEndpoints, EndpointConfig{
URL: endpoint,
RateLimit: RateLimitConfig{
RequestsPerSecond: 100,
MaxConcurrent: 10,
Burst: 20,
},
})
}
// Add to both reading and execution if they're empty
fallbackConfigs := c.parseEndpointsFromEnv(fallbackEndpoints, "Fallback")
if len(c.Arbitrum.ReadingEndpoints) == 0 {
c.Arbitrum.ReadingEndpoints = append(c.Arbitrum.ReadingEndpoints, fallbackConfigs...)
}
if len(c.Arbitrum.ExecutionEndpoints) == 0 {
c.Arbitrum.ExecutionEndpoints = append(c.Arbitrum.ExecutionEndpoints, fallbackConfigs...)
}
}
@@ -270,6 +332,341 @@ func (c *Config) OverrideWithEnv() {
}
}
// parseEndpointsFromEnv parses comma-separated endpoint URLs from an
// environment variable value and returns fully-populated endpoint configs.
// Names are "<namePrefix>-<position>" and priority follows list order
// (lower number = higher priority). Empty entries are skipped but still
// consume a position, so numbering reflects the original list.
func (c *Config) parseEndpointsFromEnv(endpointsStr, namePrefix string) []EndpointConfig {
	if endpointsStr == "" {
		return nil
	}

	parts := strings.Split(endpointsStr, ",")
	configs := make([]EndpointConfig, 0, len(parts))
	for idx, raw := range parts {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}

		// Scheme-dependent defaults: WebSocket endpoints get the more
		// aggressive real-time profile, HTTP endpoints the conservative one.
		rps, concurrent, timeout, healthEvery := 200, 20, 30, 60
		if strings.HasPrefix(trimmed, "ws") {
			rps, concurrent, timeout, healthEvery = 300, 25, 60, 30
		}

		configs = append(configs, EndpointConfig{
			URL:                 trimmed,
			Name:                fmt.Sprintf("%s-%d", namePrefix, idx+1),
			Priority:            idx + 1, // lower number = higher priority
			MaxRPS:              rps,
			MaxConcurrent:       concurrent,
			TimeoutSeconds:      timeout,
			HealthCheckInterval: healthEvery,
			RateLimit: RateLimitConfig{
				RequestsPerSecond: rps,
				MaxConcurrent:     concurrent,
				Burst:             rps * 2, // Allow burst of 2x normal rate
			},
		})
	}
	return configs
}
// CreateProviderConfigFile creates a temporary YAML config file for the
// transport system at tempPath. The content is the provider-format view of
// this config (see ConvertToProviderConfig). Returns an error if the
// marshal or the file write fails.
func (c *Config) CreateProviderConfigFile(tempPath string) error {
	// Convert to provider format and serialize in one step.
	data, err := yaml.Marshal(c.ConvertToProviderConfig())
	if err != nil {
		return fmt.Errorf("failed to marshal provider config: %w", err)
	}

	// 0644: readable by the transport process, writable only by owner.
	if err := os.WriteFile(tempPath, data, 0644); err != nil {
		return fmt.Errorf("failed to write provider config file: %w", err)
	}
	return nil
}
// ConvertToProviderConfig converts ArbitrumConfig to transport.ProvidersConfig,
// returned as a generic map so it can be YAML-marshalled without importing the
// transport package. Two modes:
//   - Legacy: when neither ReadingEndpoints nor ExecutionEndpoints are set,
//     providers are synthesized from the legacy RPCEndpoint/WSEndpoint fields
//     and the function returns early.
//   - Modern: otherwise each reading/execution endpoint becomes one provider
//     entry and separate "read_only"/"execution" pools are built.
func (c *Config) ConvertToProviderConfig() map[string]interface{} {
	providerConfigs := make([]map[string]interface{}, 0)
	// Handle legacy configuration if new endpoints are not configured
	if len(c.Arbitrum.ReadingEndpoints) == 0 && len(c.Arbitrum.ExecutionEndpoints) == 0 {
		// Use legacy RPC and WS endpoints
		if c.Arbitrum.RPCEndpoint != "" {
			// Set default rate limits if zero
			rps := c.Arbitrum.RateLimit.RequestsPerSecond
			if rps <= 0 {
				if strings.HasPrefix(c.Arbitrum.RPCEndpoint, "ws") {
					rps = 300 // Default for WebSocket
				} else {
					rps = 200 // Default for HTTP
				}
			}
			burst := c.Arbitrum.RateLimit.Burst
			if burst <= 0 {
				burst = rps * 2 // Default burst is 2x RPS
			}
			provider := map[string]interface{}{
				"name":     "Legacy-RPC",
				"type":     "standard",
				"priority": 1,
				"rate_limit": map[string]interface{}{
					"requests_per_second": rps,
					"burst":               burst,
					"timeout":             "30s",
					"retry_delay":         "1s",
					"max_retries":         3,
				},
				// Legacy single endpoint serves both roles.
				"features": []string{"execution", "reading"},
				"health_check": map[string]interface{}{
					"enabled":  true,
					"interval": "60s",
					"timeout":  "30s",
				},
			}
			// Determine endpoint type and assign to appropriate field
			if strings.HasPrefix(c.Arbitrum.RPCEndpoint, "ws") {
				provider["http_endpoint"] = ""
				provider["ws_endpoint"] = c.Arbitrum.RPCEndpoint
			} else {
				provider["http_endpoint"] = c.Arbitrum.RPCEndpoint
				provider["ws_endpoint"] = ""
			}
			providerConfigs = append(providerConfigs, provider)
		}
		if c.Arbitrum.WSEndpoint != "" {
			// Set default rate limits if zero
			rps := c.Arbitrum.RateLimit.RequestsPerSecond
			if rps <= 0 {
				rps = 300 // Default for WebSocket
			}
			burst := c.Arbitrum.RateLimit.Burst
			if burst <= 0 {
				burst = rps * 2 // Default burst is 2x RPS
			}
			provider := map[string]interface{}{
				"name":          "Legacy-WSS",
				"type":          "standard",
				"http_endpoint": "",
				"ws_endpoint":   c.Arbitrum.WSEndpoint,
				"priority":      1,
				"rate_limit": map[string]interface{}{
					"requests_per_second": rps,
					"burst":               burst,
					"timeout":             "60s",
					"retry_delay":         "1s",
					"max_retries":         3,
				},
				"features": []string{"reading", "real_time"},
				"health_check": map[string]interface{}{
					"enabled":  true,
					"interval": "30s",
					"timeout":  "60s",
				},
			}
			providerConfigs = append(providerConfigs, provider)
		}
		// Create simple pool configuration for legacy mode
		providerPools := make(map[string]interface{})
		if len(providerConfigs) > 0 {
			providerNames := make([]string, 0)
			for _, provider := range providerConfigs {
				providerNames = append(providerNames, provider["name"].(string))
			}
			// Use same providers for both reading and execution in legacy mode
			providerPools["read_only"] = map[string]interface{}{
				"strategy":                   "priority_based",
				"max_concurrent_connections": 25,
				"health_check_interval":      "30s",
				"failover_enabled":           true,
				"providers":                  providerNames,
			}
			providerPools["execution"] = map[string]interface{}{
				"strategy":                   "priority_based",
				"max_concurrent_connections": 20,
				"health_check_interval":      "30s",
				"failover_enabled":           true,
				"providers":                  providerNames,
			}
		}
		// Legacy mode returns here; the endpoint-list conversion below only
		// runs when the new-style configuration is present.
		return map[string]interface{}{
			"provider_pools": providerPools,
			"providers":      providerConfigs,
			"rotation": map[string]interface{}{
				"strategy":              "priority_based",
				"health_check_required": true,
				// NOTE(review): "fallover_enabled" differs from the
				// "failover_enabled" key used in the pool maps above —
				// looks like a typo; confirm which spelling the transport
				// layer actually reads before changing it.
				"fallover_enabled":   true,
				"retry_failed_after": "5m",
			},
			"global_limits": map[string]interface{}{
				"max_concurrent_connections": 50,
				"connection_timeout":         "30s",
				"read_timeout":               "60s",
				"write_timeout":              "30s",
				"idle_timeout":               "300s",
			},
			"monitoring": map[string]interface{}{
				"enabled":                    true,
				"metrics_interval":           "60s",
				"log_slow_requests":          true,
				"slow_request_threshold":     "5s",
				"track_provider_performance": true,
			},
		}
	}
	// Convert reading endpoints
	for _, endpoint := range c.Arbitrum.ReadingEndpoints {
		provider := map[string]interface{}{
			"name":          endpoint.Name,
			"type":          "standard",
			"http_endpoint": "",
			"ws_endpoint":   "",
			"priority":      endpoint.Priority,
			"rate_limit": map[string]interface{}{
				"requests_per_second": endpoint.RateLimit.RequestsPerSecond,
				"burst":               endpoint.RateLimit.Burst,
				"timeout":             fmt.Sprintf("%ds", endpoint.TimeoutSeconds),
				"retry_delay":         "1s",
				"max_retries":         3,
			},
			"features": []string{"reading", "real_time"},
			"health_check": map[string]interface{}{
				"enabled":  true,
				"interval": fmt.Sprintf("%ds", endpoint.HealthCheckInterval),
				"timeout":  fmt.Sprintf("%ds", endpoint.TimeoutSeconds),
			},
		}
		// Determine endpoint type and assign to appropriate field
		if strings.HasPrefix(endpoint.URL, "ws") {
			provider["ws_endpoint"] = endpoint.URL
		} else {
			provider["http_endpoint"] = endpoint.URL
		}
		providerConfigs = append(providerConfigs, provider)
	}
	// Convert execution endpoints
	for _, endpoint := range c.Arbitrum.ExecutionEndpoints {
		provider := map[string]interface{}{
			"name":          endpoint.Name,
			"type":          "standard",
			"http_endpoint": "",
			"ws_endpoint":   "",
			"priority":      endpoint.Priority,
			"rate_limit": map[string]interface{}{
				"requests_per_second": endpoint.RateLimit.RequestsPerSecond,
				"burst":               endpoint.RateLimit.Burst,
				"timeout":             fmt.Sprintf("%ds", endpoint.TimeoutSeconds),
				"retry_delay":         "1s",
				"max_retries":         3,
			},
			"features": []string{"execution", "transaction_submission"},
			"health_check": map[string]interface{}{
				"enabled":  true,
				"interval": fmt.Sprintf("%ds", endpoint.HealthCheckInterval),
				"timeout":  fmt.Sprintf("%ds", endpoint.TimeoutSeconds),
			},
		}
		// Determine endpoint type and assign to appropriate field
		if strings.HasPrefix(endpoint.URL, "ws") {
			provider["ws_endpoint"] = endpoint.URL
		} else {
			provider["http_endpoint"] = endpoint.URL
		}
		providerConfigs = append(providerConfigs, provider)
	}
	// Build provider pool configurations
	providerPools := make(map[string]interface{})
	// Reading pool configuration
	if len(c.Arbitrum.ReadingEndpoints) > 0 {
		readingProviders := make([]string, 0)
		for _, endpoint := range c.Arbitrum.ReadingEndpoints {
			readingProviders = append(readingProviders, endpoint.Name)
		}
		providerPools["read_only"] = map[string]interface{}{
			"strategy":                   "websocket_preferred",
			"max_concurrent_connections": 25,
			"health_check_interval":      "30s",
			"failover_enabled":           true,
			"providers":                  readingProviders,
		}
	}
	// Execution pool configuration
	if len(c.Arbitrum.ExecutionEndpoints) > 0 {
		executionProviders := make([]string, 0)
		for _, endpoint := range c.Arbitrum.ExecutionEndpoints {
			executionProviders = append(executionProviders, endpoint.Name)
		}
		providerPools["execution"] = map[string]interface{}{
			"strategy":                   "reliability_first",
			"max_concurrent_connections": 20,
			"health_check_interval":      "30s",
			"failover_enabled":           true,
			"providers":                  executionProviders,
		}
	}
	// Complete configuration
	return map[string]interface{}{
		"provider_pools": providerPools,
		"providers":      providerConfigs,
		"rotation": map[string]interface{}{
			"strategy":              "priority_based",
			"health_check_required": true,
			// NOTE(review): "fallover_enabled" vs "failover_enabled"
			// inconsistency (same as in the legacy branch) — confirm the
			// key the transport layer reads before changing it.
			"fallover_enabled":   true,
			"retry_failed_after": "5m",
		},
		"global_limits": map[string]interface{}{
			"max_concurrent_connections": 50,
			"connection_timeout":         "30s",
			"read_timeout":               "60s",
			"write_timeout":              "30s",
			"idle_timeout":               "300s",
		},
		"monitoring": map[string]interface{}{
			"enabled":                    true,
			"metrics_interval":           "60s",
			"log_slow_requests":          true,
			"slow_request_threshold":     "5s",
			"track_provider_performance": true,
		},
	}
}
// ValidateEnvironmentVariables validates all required environment variables
func (c *Config) ValidateEnvironmentVariables() error {
// Validate RPC endpoint

View File

@@ -17,21 +17,21 @@ func TestLoad(t *testing.T) {
// Write test config content
configContent := `
arbitrum:
rpc_endpoint: "https://arb1.arbitrum.io/rpc"
ws_endpoint: ""
rpc_endpoint: "${ARBITRUM_RPC_ENDPOINT}"
ws_endpoint: "${ARBITRUM_WS_ENDPOINT}"
chain_id: 42161
rate_limit:
requests_per_second: 10
max_concurrent: 5
burst: 20
requests_per_second: 5
max_concurrent: 3
burst: 10
bot:
enabled: true
polling_interval: 1
polling_interval: 3
min_profit_threshold: 10.0
gas_price_multiplier: 1.2
max_workers: 10
channel_buffer_size: 100
max_workers: 3
channel_buffer_size: 50
rpc_timeout: 30
uniswap:
@@ -47,9 +47,9 @@ uniswap:
max_size: 10000
log:
level: "info"
level: "debug"
format: "text"
file: ""
file: "logs/mev-bot.log"
database:
file: "mev-bot.db"
@@ -61,21 +61,31 @@ database:
err = tmpFile.Close()
require.NoError(t, err)
// Set environment variables for test
os.Setenv("ARBITRUM_RPC_ENDPOINT", "wss://arbitrum-mainnet.core.chainstack.com/f69d14406bc00700da9b936504e1a870")
os.Setenv("ARBITRUM_WS_ENDPOINT", "wss://arbitrum-mainnet.core.chainstack.com/f69d14406bc00700da9b936504e1a870")
defer func() {
os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
os.Unsetenv("ARBITRUM_WS_ENDPOINT")
}()
// Test loading the config
cfg, err := Load(tmpFile.Name())
require.NoError(t, err)
// Verify the loaded config
assert.Equal(t, "https://arb1.arbitrum.io/rpc", cfg.Arbitrum.RPCEndpoint)
assert.Equal(t, "wss://arbitrum-mainnet.core.chainstack.com/f69d14406bc00700da9b936504e1a870", cfg.Arbitrum.RPCEndpoint)
assert.Equal(t, "wss://arbitrum-mainnet.core.chainstack.com/f69d14406bc00700da9b936504e1a870", cfg.Arbitrum.WSEndpoint)
assert.Equal(t, int64(42161), cfg.Arbitrum.ChainID)
assert.Equal(t, 10, cfg.Arbitrum.RateLimit.RequestsPerSecond)
assert.Equal(t, 5, cfg.Arbitrum.RateLimit.RequestsPerSecond)
assert.True(t, cfg.Bot.Enabled)
assert.Equal(t, 1, cfg.Bot.PollingInterval)
assert.Equal(t, 3, cfg.Bot.PollingInterval)
assert.Equal(t, 10.0, cfg.Bot.MinProfitThreshold)
assert.Equal(t, "0x1F98431c8aD98523631AE4a59f267346ea31F984", cfg.Uniswap.FactoryAddress)
assert.Len(t, cfg.Uniswap.FeeTiers, 3)
assert.Equal(t, true, cfg.Uniswap.Cache.Enabled)
assert.Equal(t, "info", cfg.Log.Level)
assert.Equal(t, "debug", cfg.Log.Level)
assert.Equal(t, "logs/mev-bot.log", cfg.Log.File)
assert.Equal(t, "mev-bot.db", cfg.Database.File)
}

View File

@@ -36,14 +36,17 @@ type AdaptiveEndpoint struct {
}
// EndpointMetrics tracks performance metrics for an endpoint.
// The int64 counters are updated via sync/atomic and are declared first so
// they remain 64-bit aligned (required for atomic access on 32-bit platforms).
type EndpointMetrics struct {
	TotalRequests      int64
	SuccessfulRequests int64
	FailedRequests     int64
	TotalLatency       int64 // nanoseconds
	LastRequestTime    int64 // unix timestamp

	// Non-atomic derived fields - must be protected by mu when accessed.
	mu             sync.RWMutex
	SuccessRate    float64
	AverageLatency float64 // milliseconds
}
// CircuitBreaker implements circuit breaker pattern for failed endpoints
@@ -86,8 +89,13 @@ func NewAdaptiveRateLimiter(cfg *config.ArbitrumConfig, logger *logger.Logger) *
// Create adaptive endpoint for primary endpoint
arl.addEndpoint(cfg.RPCEndpoint, cfg.RateLimit)
// Create adaptive endpoints for fallback endpoints
for _, endpoint := range cfg.FallbackEndpoints {
// Create adaptive endpoints for reading endpoints
for _, endpoint := range cfg.ReadingEndpoints {
arl.addEndpoint(endpoint.URL, endpoint.RateLimit)
}
// Create adaptive endpoints for execution endpoints
for _, endpoint := range cfg.ExecutionEndpoints {
arl.addEndpoint(endpoint.URL, endpoint.RateLimit)
}
@@ -231,6 +239,37 @@ func (arl *AdaptiveRateLimiter) getBestEndpoint() string {
return bestEndpoint
}
// updateDerivedMetrics recomputes SuccessRate and AverageLatency from the
// atomic counters. Counter values are snapshotted with atomic loads first,
// then the derived fields are written under the mutex. With zero requests
// both derived values are reset to 0.
func (em *EndpointMetrics) updateDerivedMetrics() {
	total := atomic.LoadInt64(&em.TotalRequests)
	succeeded := atomic.LoadInt64(&em.SuccessfulRequests)
	latencySum := atomic.LoadInt64(&em.TotalLatency)

	em.mu.Lock()
	defer em.mu.Unlock()

	if total == 0 {
		em.SuccessRate = 0.0
		em.AverageLatency = 0.0
		return
	}
	em.SuccessRate = float64(succeeded) / float64(total)
	// Mean latency per request, converted from nanoseconds to milliseconds.
	em.AverageLatency = float64(latencySum) / float64(total) / 1e6
}
// getCalculatedMetrics returns (SuccessRate, AverageLatency) under a read
// lock, giving callers a consistent snapshot of the derived metrics.
func (em *EndpointMetrics) getCalculatedMetrics() (float64, float64) {
	em.mu.RLock()
	rate, latency := em.SuccessRate, em.AverageLatency
	em.mu.RUnlock()
	return rate, latency
}
// calculateEndpointScore calculates a score for endpoint selection
func (arl *AdaptiveRateLimiter) calculateEndpointScore(endpoint *AdaptiveEndpoint) float64 {
// Base score on success rate (0-1)
@@ -238,13 +277,17 @@ func (arl *AdaptiveRateLimiter) calculateEndpointScore(endpoint *AdaptiveEndpoin
latencyWeight := 0.3
loadWeight := 0.1
successScore := endpoint.metrics.SuccessRate
// Update derived metrics first
endpoint.metrics.updateDerivedMetrics()
// Get calculated metrics safely
successScore, avgLatency := endpoint.metrics.getCalculatedMetrics()
// Invert latency score (lower latency = higher score)
latencyScore := 1.0
if endpoint.metrics.AverageLatency > 0 {
if avgLatency > 0 {
// Normalize latency score (assuming 1000ms is poor, 100ms is good)
latencyScore = 1.0 - (endpoint.metrics.AverageLatency / 1000.0)
latencyScore = 1.0 - (avgLatency / 1000.0)
if latencyScore < 0 {
latencyScore = 0
}

View File

@@ -36,8 +36,18 @@ func NewLimiterManager(cfg *config.ArbitrumConfig) *LimiterManager {
Config: cfg.RateLimit,
}
// Create limiters for fallback endpoints
for _, endpoint := range cfg.FallbackEndpoints {
// Create limiters for reading endpoints
for _, endpoint := range cfg.ReadingEndpoints {
limiter := createLimiter(endpoint.RateLimit)
lm.limiters[endpoint.URL] = &EndpointLimiter{
URL: endpoint.URL,
Limiter: limiter,
Config: endpoint.RateLimit,
}
}
// Create limiters for execution endpoints
for _, endpoint := range cfg.ExecutionEndpoints {
limiter := createLimiter(endpoint.RateLimit)
lm.limiters[endpoint.URL] = &EndpointLimiter{
URL: endpoint.URL,