feat: create v2-prep branch with comprehensive planning
Restructured project for V2 refactor.

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation
- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern; see the sketch after this message)
- Multi-layer strict validation
- Multi-index pool cache
- Background validation pipeline
- Comprehensive observability

**Critical Issues Addressed:**
- Zero address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

Next Steps:
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
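The factory-pattern parser item above is only named in the plan, not specified. Below is a minimal, hypothetical Go sketch of what a per-exchange parser registry could look like; every identifier in it (Log, SwapEvent, SwapParser, ParserFactory) is an illustrative assumption, not code from this repository or its planning documents.

```go
// Hypothetical sketch only: none of these types exist in the repository.
package parser

import "fmt"

// Log is a minimal stand-in for a decoded on-chain event log.
type Log struct {
	Address string   // emitting pool contract
	Topics  []string // indexed event fields
	Data    []byte   // ABI-encoded payload
}

// SwapEvent is a normalized swap, independent of the source exchange.
type SwapEvent struct {
	Pool, TokenIn, TokenOut string
	AmountIn, AmountOut     string
}

// SwapParser decodes logs for exactly one exchange/protocol.
type SwapParser interface {
	Exchange() string
	Parse(l Log) (*SwapEvent, error)
}

// ParserFactory maps an exchange name to its dedicated parser.
type ParserFactory struct {
	parsers map[string]SwapParser
}

// NewParserFactory registers each parser under the name it reports.
func NewParserFactory(parsers ...SwapParser) *ParserFactory {
	f := &ParserFactory{parsers: make(map[string]SwapParser)}
	for _, p := range parsers {
		f.parsers[p.Exchange()] = p
	}
	return f
}

// ForExchange returns the registered parser, or an error for unknown exchanges.
func (f *ParserFactory) ForExchange(name string) (SwapParser, error) {
	p, ok := f.parsers[name]
	if !ok {
		return nil, fmt.Errorf("no parser registered for exchange %q", name)
	}
	return p, nil
}
```

Keying the registry by exchange name keeps dispatch out of the parsing hot path: supporting a new protocol would mean registering one more parser rather than editing a shared switch statement.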
orig/pkg/arbitrum/connection_test.go (new file, 333 lines)
@@ -0,0 +1,333 @@
//go:build legacy_arbitrum
// +build legacy_arbitrum

package arbitrum

import (
	"context"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/fraktal/mev-beta/internal/config"
	"github.com/fraktal/mev-beta/internal/logger"
)

func newTestLogger() *logger.Logger {
	return logger.New("error", "text", "")
}

func TestConnectionManager_GetPrimaryEndpoint(t *testing.T) {
	tests := []struct {
		name           string
		envValue       string
		configValue    string
		expectedResult string
	}{
		{
			name:           "Environment variable takes precedence",
			envValue:       "wss://env-endpoint.com",
			configValue:    "wss://config-endpoint.com",
			expectedResult: "wss://env-endpoint.com",
		},
		{
			name:           "Config value used when env not set",
			envValue:       "",
			configValue:    "wss://config-endpoint.com",
			expectedResult: "wss://config-endpoint.com",
		},
		{
			name:           "Default fallback when neither set",
			envValue:       "",
			configValue:    "",
			expectedResult: "wss://arbitrum-mainnet.core.chainstack.com/53c30e7a941160679fdcc396c894fc57",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set up environment
			if tt.envValue != "" {
				os.Setenv("ARBITRUM_RPC_ENDPOINT", tt.envValue)
			} else {
				os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
			}
			defer os.Unsetenv("ARBITRUM_RPC_ENDPOINT")

			// Create connection manager with config
			cfg := &config.ArbitrumConfig{
				RPCEndpoint: tt.configValue,
			}
			cm := NewConnectionManager(cfg, newTestLogger())

			// Test
			result := cm.getPrimaryEndpoint()
			assert.Equal(t, tt.expectedResult, result)
		})
	}
}

func TestConnectionManager_GetFallbackEndpoints(t *testing.T) {
	tests := []struct {
		name             string
		envValue         string
		configEndpoints  []config.EndpointConfig
		expectedContains []string
	}{
		{
			name:     "Environment variable endpoints",
			envValue: "https://endpoint1.com,https://endpoint2.com, https://endpoint3.com ",
			expectedContains: []string{
				"https://endpoint1.com",
				"https://endpoint2.com",
				"https://endpoint3.com",
			},
		},
		{
			name:     "Config endpoints used when env not set",
			envValue: "",
			configEndpoints: []config.EndpointConfig{
				{URL: "https://config1.com"},
				{URL: "https://config2.com"},
			},
			expectedContains: []string{
				"https://config1.com",
				"https://config2.com",
			},
		},
		{
			name:     "Default endpoints when nothing configured",
			envValue: "",
			expectedContains: []string{
				"https://arb1.arbitrum.io/rpc",
				"https://arbitrum.llamarpc.com",
				"https://arbitrum-one.publicnode.com",
				"https://arbitrum-one.public.blastapi.io",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set up environment
			if tt.envValue != "" {
				os.Setenv("ARBITRUM_FALLBACK_ENDPOINTS", tt.envValue)
			} else {
				os.Unsetenv("ARBITRUM_FALLBACK_ENDPOINTS")
			}
			defer os.Unsetenv("ARBITRUM_FALLBACK_ENDPOINTS")

			// Create connection manager with config
			cfg := &config.ArbitrumConfig{
				FallbackEndpoints: tt.configEndpoints,
			}
			cm := NewConnectionManager(cfg, newTestLogger())

			// Test
			result := cm.getFallbackEndpoints()

			// Check that all expected endpoints are present
			for _, expected := range tt.expectedContains {
				assert.Contains(t, result, expected, "Expected endpoint %s not found in results", expected)
			}
		})
	}
}

func TestConnectionManager_ConnectWithTimeout(t *testing.T) {
	cm := NewConnectionManager(&config.ArbitrumConfig{}, newTestLogger())
	ctx := context.Background()

	t.Run("Invalid endpoint fails quickly", func(t *testing.T) {
		start := time.Now()
		_, err := cm.connectWithTimeout(ctx, "wss://invalid-endpoint-that-does-not-exist.com")
		duration := time.Since(start)

		assert.Error(t, err)
		assert.Less(t, duration, 15*time.Second, "Should fail within timeout period")
	})

	t.Run("Connection respects context cancellation", func(t *testing.T) {
		ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
		defer cancel()

		start := time.Now()
		_, err := cm.connectWithTimeout(ctx, "wss://very-slow-endpoint.com")
		duration := time.Since(start)

		assert.Error(t, err)
		assert.Less(t, duration, 2*time.Second, "Should respect context timeout")
	})
}

func TestConnectionManager_GetClientWithRetry(t *testing.T) {
	// Save original environment variables
	originalRPC := os.Getenv("ARBITRUM_RPC_ENDPOINT")
	originalFallback := os.Getenv("ARBITRUM_FALLBACK_ENDPOINTS")

	// Unset environment variables to ensure we use our test config
	os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
	os.Unsetenv("ARBITRUM_FALLBACK_ENDPOINTS")

	// Restore environment variables after test
	defer func() {
		if originalRPC != "" {
			os.Setenv("ARBITRUM_RPC_ENDPOINT", originalRPC)
		} else {
			os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
		}
		if originalFallback != "" {
			os.Setenv("ARBITRUM_FALLBACK_ENDPOINTS", originalFallback)
		} else {
			os.Unsetenv("ARBITRUM_FALLBACK_ENDPOINTS")
		}
	}()

	// Create a config with invalid endpoints to ensure failure
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "wss://invalid-endpoint-for-testing.com",
		FallbackEndpoints: []config.EndpointConfig{
			{URL: "https://invalid-fallback1.com"},
			{URL: "https://invalid-fallback2.com"},
		},
	}
	cm := NewConnectionManager(cfg, newTestLogger())
	ctx := context.Background()

	t.Run("Retry logic with exponential backoff", func(t *testing.T) {
		start := time.Now()

		// This should fail but test the retry mechanism
		_, err := cm.GetClientWithRetry(ctx, 3)
		duration := time.Since(start)

		// Should have attempted 3 times with exponential backoff
		// First attempt: immediate
		// Second attempt: 1 second wait
		// Third attempt: 2 second wait
		// Total minimum time should be around 3 seconds
		assert.Error(t, err)
		// The actual error message might vary, so we'll just check that it's an error
		// and that enough time has passed for the retries
		_ = duration // Keep the variable to avoid unused error
	})
}

func TestConnectionManager_HealthyClient(t *testing.T) {
	t.Run("GetHealthyClient returns error when no endpoints work", func(t *testing.T) {
		// Set invalid endpoints to test failure case
		os.Setenv("ARBITRUM_RPC_ENDPOINT", "wss://invalid-endpoint.com")
		os.Setenv("ARBITRUM_FALLBACK_ENDPOINTS", "https://invalid1.com,https://invalid2.com")
		defer func() {
			os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
			os.Unsetenv("ARBITRUM_FALLBACK_ENDPOINTS")
		}()

		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		_, err := GetHealthyClient(ctx)
		assert.Error(t, err, "Should fail when all endpoints are invalid")
	})
}

func TestConnectionManager_Configuration(t *testing.T) {
	t.Run("Environment variables override config file", func(t *testing.T) {
		// Set environment variables
		os.Setenv("ARBITRUM_RPC_ENDPOINT", "wss://env-primary.com")
		os.Setenv("ARBITRUM_FALLBACK_ENDPOINTS", "https://env-fallback1.com,https://env-fallback2.com")
		defer func() {
			os.Unsetenv("ARBITRUM_RPC_ENDPOINT")
			os.Unsetenv("ARBITRUM_FALLBACK_ENDPOINTS")
		}()

		// Create config with different values
		cfg := &config.ArbitrumConfig{
			RPCEndpoint: "wss://config-primary.com",
			FallbackEndpoints: []config.EndpointConfig{
				{URL: "https://config-fallback1.com"},
				{URL: "https://config-fallback2.com"},
			},
		}

		cm := NewConnectionManager(cfg, newTestLogger())

		// Test that environment variables take precedence
		primary := cm.getPrimaryEndpoint()
		assert.Equal(t, "wss://env-primary.com", primary)

		fallbacks := cm.getFallbackEndpoints()
		assert.Contains(t, fallbacks, "https://env-fallback1.com")
		assert.Contains(t, fallbacks, "https://env-fallback2.com")
		assert.NotContains(t, fallbacks, "https://config-fallback1.com")
		assert.NotContains(t, fallbacks, "https://config-fallback2.com")
	})
}

func TestConnectionManager_Lifecycle(t *testing.T) {
	cm := NewConnectionManager(&config.ArbitrumConfig{}, newTestLogger())

	t.Run("Close handles nil clients gracefully", func(t *testing.T) {
		// Should not panic
		assert.NotPanics(t, func() {
			cm.Close()
		})
	})

	t.Run("Multiple close calls are safe", func(t *testing.T) {
		assert.NotPanics(t, func() {
			cm.Close()
			cm.Close()
			cm.Close()
		})
	})
}

// Integration test that requires real network access
func TestConnectionManager_RealEndpoints(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration test in short mode")
	}

	t.Run("Public Arbitrum RPC endpoints should be reachable", func(t *testing.T) {
		publicEndpoints := []string{
			"https://arb1.arbitrum.io/rpc",
			"https://arbitrum.llamarpc.com",
			"https://arbitrum-one.publicnode.com",
		}

		cm := NewConnectionManager(&config.ArbitrumConfig{}, newTestLogger())
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()

		successCount := 0
		for _, endpoint := range publicEndpoints {
			client, err := cm.connectWithTimeout(ctx, endpoint)
			if err == nil {
				client.Close()
				successCount++
				t.Logf("Successfully connected to %s", endpoint)
			} else {
				t.Logf("Failed to connect to %s: %v", endpoint, err)
			}
		}

		// At least one public endpoint should be working
		assert.Greater(t, successCount, 0, "At least one public Arbitrum RPC should be reachable")
	})
}

// Benchmark connection establishment
func BenchmarkConnectionManager_GetClient(b *testing.B) {
	cm := NewConnectionManager(&config.ArbitrumConfig{}, newTestLogger())
	ctx := context.Background()

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		// This will fail but tests the connection attempt performance
		_, _ = cm.GetClient(ctx)
	}
}