feat: create v2-prep branch with comprehensive planning
Restructured project for V2 refactor:

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation
- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern)
- Multi-layer strict validation
- Multi-index pool cache
- Background validation pipeline
- Comprehensive observability

**Critical Issues Addressed:**
- Zero address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

Next Steps:
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
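The per-exchange parser factory listed under **V2 Key Improvements** is only named here, not implemented in this commit. As a rough sketch of that direction (every identifier below is a hypothetical illustration, not code from the planning documents), the shape might be:

```go
package parsers

import "fmt"

// SwapEvent is a placeholder for a normalized, validated swap record.
type SwapEvent struct {
	Pool, TokenIn, TokenOut string
}

// Parser decodes protocol-specific log data into normalized events.
type Parser interface {
	Parse(rawLog []byte) (*SwapEvent, error)
}

type uniswapV2Parser struct{}

func (p *uniswapV2Parser) Parse(rawLog []byte) (*SwapEvent, error) {
	// Protocol-specific decoding would go here.
	return nil, fmt.Errorf("not implemented")
}

type uniswapV3Parser struct{}

func (p *uniswapV3Parser) Parse(rawLog []byte) (*SwapEvent, error) {
	return nil, fmt.Errorf("not implemented")
}

// NewParser returns the protocol-specific parser for an exchange,
// so each protocol's quirks stay isolated in one implementation.
func NewParser(exchange string) (Parser, error) {
	switch exchange {
	case "uniswap-v2":
		return &uniswapV2Parser{}, nil
	case "uniswap-v3":
		return &uniswapV3Parser{}, nil
	default:
		return nil, fmt.Errorf("no parser registered for exchange: %s", exchange)
	}
}
```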
orig/internal/ratelimit/adaptive.go (new file, 494 lines)
@@ -0,0 +1,494 @@
package ratelimit

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/time/rate"

	"github.com/fraktal/mev-beta/internal/config"
	"github.com/fraktal/mev-beta/internal/logger"
)

// AdaptiveRateLimiter implements adaptive rate limiting that adjusts to endpoint capacity
type AdaptiveRateLimiter struct {
	endpoints      map[string]*AdaptiveEndpoint
	mu             sync.RWMutex
	logger         *logger.Logger
	defaultConfig  config.RateLimitConfig
	adjustInterval time.Duration
	stopChan       chan struct{}
}

// AdaptiveEndpoint represents an endpoint with adaptive rate limiting
type AdaptiveEndpoint struct {
	URL                string
	limiter            *rate.Limiter
	config             config.RateLimitConfig
	circuitBreaker     *CircuitBreaker
	metrics            *EndpointMetrics
	healthChecker      *HealthChecker
	lastAdjustment     time.Time
	consecutiveErrors  int64
	consecutiveSuccess int64
}

// EndpointMetrics tracks performance metrics for an endpoint.
// The counter fields must stay 64-bit aligned for atomic access.
type EndpointMetrics struct {
	TotalRequests      int64
	SuccessfulRequests int64
	FailedRequests     int64
	TotalLatency       int64 // nanoseconds
	LastRequestTime    int64 // unix timestamp

	// Derived, non-atomic fields - must be protected by mu when accessed
	mu             sync.RWMutex
	SuccessRate    float64
	AverageLatency float64 // milliseconds
}

// CircuitBreaker implements the circuit breaker pattern for failing endpoints
type CircuitBreaker struct {
	state        int32 // 0: Closed, 1: Open, 2: HalfOpen
	failureCount int64
	lastFailTime int64
	threshold    int64
	timeout      time.Duration // how long to wait before trying again
	testRequests int64         // number of test requests in half-open state
}

// Circuit breaker states
const (
	CircuitClosed   = 0
	CircuitOpen     = 1
	CircuitHalfOpen = 2
)

// HealthChecker monitors endpoint health
type HealthChecker struct {
	endpoint  string
	interval  time.Duration
	timeout   time.Duration
	isHealthy int64 // atomic bool
	lastCheck int64 // unix timestamp
	stopChan  chan struct{}
}

// NewAdaptiveRateLimiter creates a new adaptive rate limiter
func NewAdaptiveRateLimiter(cfg *config.ArbitrumConfig, log *logger.Logger) *AdaptiveRateLimiter {
	arl := &AdaptiveRateLimiter{
		endpoints:      make(map[string]*AdaptiveEndpoint),
		logger:         log,
		defaultConfig:  cfg.RateLimit,
		adjustInterval: 30 * time.Second,
		stopChan:       make(chan struct{}),
	}

	// Create an adaptive endpoint for the primary endpoint
	arl.addEndpoint(cfg.RPCEndpoint, cfg.RateLimit)

	// Create adaptive endpoints for reading endpoints
	for _, endpoint := range cfg.ReadingEndpoints {
		arl.addEndpoint(endpoint.URL, endpoint.RateLimit)
	}

	// Create adaptive endpoints for execution endpoints
	for _, endpoint := range cfg.ExecutionEndpoints {
		arl.addEndpoint(endpoint.URL, endpoint.RateLimit)
	}

	// Start the background adjustment routine
	go arl.adjustmentLoop()

	return arl
}

// addEndpoint adds a new adaptive endpoint
func (arl *AdaptiveRateLimiter) addEndpoint(url string, cfg config.RateLimitConfig) {
	endpoint := &AdaptiveEndpoint{
		URL:     url,
		limiter: rate.NewLimiter(rate.Limit(cfg.RequestsPerSecond), cfg.Burst),
		config:  cfg,
		circuitBreaker: &CircuitBreaker{
			threshold: 10, // break after 10 consecutive failures
			timeout:   60 * time.Second,
		},
		metrics: &EndpointMetrics{},
		healthChecker: &HealthChecker{
			endpoint:  url,
			interval:  30 * time.Second,
			timeout:   5 * time.Second,
			isHealthy: 1, // start assuming healthy
			stopChan:  make(chan struct{}),
		},
	}

	arl.mu.Lock()
	arl.endpoints[url] = endpoint
	arl.mu.Unlock()

	// Start the health checker for this endpoint
	go endpoint.healthChecker.start()

	arl.logger.Info(fmt.Sprintf("Added adaptive rate limiter for endpoint: %s", url))
}

// WaitForBestEndpoint waits for the best available endpoint
func (arl *AdaptiveRateLimiter) WaitForBestEndpoint(ctx context.Context) (string, error) {
	// Find the best available endpoint
	bestEndpoint := arl.getBestEndpoint()
	if bestEndpoint == "" {
		return "", fmt.Errorf("no healthy endpoints available")
	}

	arl.mu.RLock()
	endpoint := arl.endpoints[bestEndpoint]
	arl.mu.RUnlock()

	if endpoint == nil {
		return "", fmt.Errorf("endpoint not found: %s", bestEndpoint)
	}

	// Check the circuit breaker
	if !endpoint.circuitBreaker.canExecute() {
		return "", fmt.Errorf("circuit breaker open for endpoint: %s", bestEndpoint)
	}

	// Wait for the rate limiter
	if err := endpoint.limiter.Wait(ctx); err != nil {
		return "", err
	}

	return bestEndpoint, nil
}

// RecordResult records the result of a request for adaptive adjustment
func (arl *AdaptiveRateLimiter) RecordResult(endpointURL string, success bool, latency time.Duration) {
	arl.mu.RLock()
	endpoint, exists := arl.endpoints[endpointURL]
	arl.mu.RUnlock()

	if !exists {
		return
	}

	// Update counters atomically
	atomic.AddInt64(&endpoint.metrics.TotalRequests, 1)
	atomic.AddInt64(&endpoint.metrics.TotalLatency, latency.Nanoseconds())
	atomic.StoreInt64(&endpoint.metrics.LastRequestTime, time.Now().Unix())

	if success {
		atomic.AddInt64(&endpoint.metrics.SuccessfulRequests, 1)
		atomic.AddInt64(&endpoint.consecutiveSuccess, 1)
		atomic.StoreInt64(&endpoint.consecutiveErrors, 0)
		endpoint.circuitBreaker.recordSuccess()
	} else {
		atomic.AddInt64(&endpoint.metrics.FailedRequests, 1)
		atomic.AddInt64(&endpoint.consecutiveErrors, 1)
		atomic.StoreInt64(&endpoint.consecutiveSuccess, 0)
		endpoint.circuitBreaker.recordFailure()
	}

	// Update calculated metrics
	arl.updateCalculatedMetrics(endpoint)
}

// updateCalculatedMetrics updates derived metrics, delegating to
// EndpointMetrics.updateDerivedMetrics so the non-atomic fields are
// only written while holding the metrics mutex.
func (arl *AdaptiveRateLimiter) updateCalculatedMetrics(endpoint *AdaptiveEndpoint) {
	endpoint.metrics.updateDerivedMetrics()
}

// getBestEndpoint selects the best available endpoint based on metrics
func (arl *AdaptiveRateLimiter) getBestEndpoint() string {
	arl.mu.RLock()
	defer arl.mu.RUnlock()

	bestEndpoint := ""
	bestScore := float64(-1)

	for url, endpoint := range arl.endpoints {
		// Skip unhealthy endpoints
		if atomic.LoadInt64(&endpoint.healthChecker.isHealthy) == 0 {
			continue
		}

		// Skip if the circuit breaker is open
		if !endpoint.circuitBreaker.canExecute() {
			continue
		}

		// Score based on success rate, latency, and current load
		score := arl.calculateEndpointScore(endpoint)
		if score > bestScore {
			bestScore = score
			bestEndpoint = url
		}
	}

	return bestEndpoint
}

// updateDerivedMetrics safely updates calculated metrics with proper synchronization
func (em *EndpointMetrics) updateDerivedMetrics() {
	totalRequests := atomic.LoadInt64(&em.TotalRequests)
	successfulRequests := atomic.LoadInt64(&em.SuccessfulRequests)
	totalLatency := atomic.LoadInt64(&em.TotalLatency)

	em.mu.Lock()
	defer em.mu.Unlock()

	// Calculate success rate
	if totalRequests > 0 {
		em.SuccessRate = float64(successfulRequests) / float64(totalRequests)
	} else {
		em.SuccessRate = 0.0
	}

	// Calculate average latency in milliseconds
	if totalRequests > 0 {
		em.AverageLatency = float64(totalLatency) / float64(totalRequests) / 1e6 // ns to ms
	} else {
		em.AverageLatency = 0.0
	}
}

// getCalculatedMetrics safely returns derived metrics
func (em *EndpointMetrics) getCalculatedMetrics() (float64, float64) {
	em.mu.RLock()
	defer em.mu.RUnlock()
	return em.SuccessRate, em.AverageLatency
}

// calculateEndpointScore calculates a score for endpoint selection
func (arl *AdaptiveRateLimiter) calculateEndpointScore(endpoint *AdaptiveEndpoint) float64 {
	// Weighted blend of success rate, latency, and load
	successWeight := 0.6
	latencyWeight := 0.3
	loadWeight := 0.1

	// Refresh derived metrics, then read them under the metrics lock
	endpoint.metrics.updateDerivedMetrics()
	successScore, avgLatency := endpoint.metrics.getCalculatedMetrics()

	// Invert the latency score (lower latency = higher score)
	latencyScore := 1.0
	if avgLatency > 0 {
		// Normalize the latency score (assuming 1000ms is poor, 100ms is good)
		latencyScore = 1.0 - (avgLatency / 1000.0)
		if latencyScore < 0 {
			latencyScore = 0
		}
	}

	// Load score based on current rate limiter state
	loadScore := 1.0 // simplified - could check the current tokens in the limiter

	return successScore*successWeight + latencyScore*latencyWeight + loadScore*loadWeight
}

// adjustmentLoop runs periodic adjustments to rate limits
func (arl *AdaptiveRateLimiter) adjustmentLoop() {
	ticker := time.NewTicker(arl.adjustInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			arl.adjustRateLimits()
		case <-arl.stopChan:
			return
		}
	}
}

// adjustRateLimits adjusts rate limits based on observed performance
func (arl *AdaptiveRateLimiter) adjustRateLimits() {
	arl.mu.Lock()
	defer arl.mu.Unlock()

	for url, endpoint := range arl.endpoints {
		arl.adjustEndpointRateLimit(url, endpoint)
	}
}

// adjustEndpointRateLimit adjusts the rate limit for a specific endpoint
func (arl *AdaptiveRateLimiter) adjustEndpointRateLimit(url string, endpoint *AdaptiveEndpoint) {
	// Don't adjust too frequently
	if time.Since(endpoint.lastAdjustment) < arl.adjustInterval {
		return
	}

	// Read derived metrics under the metrics lock
	successRate, avgLatency := endpoint.metrics.getCalculatedMetrics()
	currentLimit := float64(endpoint.limiter.Limit())

	newLimit := currentLimit
	adjustmentFactor := 0.1 // 10% adjustment per step

	// Increase the rate if performing well, decrease it if degrading
	if successRate > 0.95 && avgLatency < 500 { // 95% success, < 500ms latency
		newLimit = currentLimit * (1.0 + adjustmentFactor)
	} else if successRate < 0.8 || avgLatency > 2000 { // < 80% success or > 2s latency
		newLimit = currentLimit * (1.0 - adjustmentFactor)
	}

	// Apply bounds
	minLimit := float64(arl.defaultConfig.RequestsPerSecond) * 0.1 // 10% of default minimum
	maxLimit := float64(arl.defaultConfig.RequestsPerSecond) * 3.0 // 300% of default maximum

	if newLimit < minLimit {
		newLimit = minLimit
	}
	if newLimit > maxLimit {
		newLimit = maxLimit
	}

	// Update only if the change is significant
	if abs(newLimit-currentLimit)/currentLimit > 0.05 { // 5% change threshold
		endpoint.limiter.SetLimit(rate.Limit(newLimit))
		endpoint.lastAdjustment = time.Now()

		arl.logger.Info(fmt.Sprintf("Adjusted rate limit for %s: %.2f -> %.2f (success: %.2f%%, latency: %.2fms)",
			url, currentLimit, newLimit, successRate*100, avgLatency))
	}
}

// abs returns the absolute value of a float64
func abs(x float64) float64 {
	if x < 0 {
		return -x
	}
	return x
}

// canExecute checks whether the circuit breaker allows execution
func (cb *CircuitBreaker) canExecute() bool {
	state := atomic.LoadInt32(&cb.state)
	now := time.Now().Unix()

	switch state {
	case CircuitClosed:
		return true
	case CircuitOpen:
		// Check whether the timeout has passed
		lastFail := atomic.LoadInt64(&cb.lastFailTime)
		if now-lastFail > int64(cb.timeout.Seconds()) {
			// Try to move to half-open
			if atomic.CompareAndSwapInt32(&cb.state, CircuitOpen, CircuitHalfOpen) {
				atomic.StoreInt64(&cb.testRequests, 0)
				return true
			}
		}
		return false
	case CircuitHalfOpen:
		// Allow a limited number of test requests
		testReq := atomic.LoadInt64(&cb.testRequests)
		if testReq < 3 { // allow up to 3 test requests
			atomic.AddInt64(&cb.testRequests, 1)
			return true
		}
		return false
	}
	return false
}

// recordSuccess records a successful request
func (cb *CircuitBreaker) recordSuccess() {
	state := atomic.LoadInt32(&cb.state)
	if state == CircuitHalfOpen {
		// Move back to closed after a successful test request
		atomic.StoreInt32(&cb.state, CircuitClosed)
		atomic.StoreInt64(&cb.failureCount, 0)
	}
}

// recordFailure records a failed request
func (cb *CircuitBreaker) recordFailure() {
	failures := atomic.AddInt64(&cb.failureCount, 1)
	atomic.StoreInt64(&cb.lastFailTime, time.Now().Unix())

	if failures >= cb.threshold {
		atomic.StoreInt32(&cb.state, CircuitOpen)
	}
}

// start starts the health checker
func (hc *HealthChecker) start() {
	ticker := time.NewTicker(hc.interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			hc.checkHealth()
		case <-hc.stopChan:
			return
		}
	}
}

// checkHealth performs a health check on the endpoint
func (hc *HealthChecker) checkHealth() {
	ctx, cancel := context.WithTimeout(context.Background(), hc.timeout)
	defer cancel()

	// Simple health check - in production this might make a lightweight RPC call
	healthy := hc.performHealthCheck(ctx)

	if healthy {
		atomic.StoreInt64(&hc.isHealthy, 1)
	} else {
		atomic.StoreInt64(&hc.isHealthy, 0)
	}

	atomic.StoreInt64(&hc.lastCheck, time.Now().Unix())
}

// performHealthCheck performs the actual health check
func (hc *HealthChecker) performHealthCheck(ctx context.Context) bool {
	// Simplified health check - a production version would make an actual
	// RPC call against hc.endpoint; for now, assume the endpoint is healthy
	return true
}

// Stop stops the adaptive rate limiter
func (arl *AdaptiveRateLimiter) Stop() {
	close(arl.stopChan)

	// Stop all health checkers
	arl.mu.RLock()
	for _, endpoint := range arl.endpoints {
		close(endpoint.healthChecker.stopChan)
	}
	arl.mu.RUnlock()
}

// GetMetrics returns current metrics for all endpoints
func (arl *AdaptiveRateLimiter) GetMetrics() map[string]*EndpointMetrics {
	arl.mu.RLock()
	defer arl.mu.RUnlock()

	metrics := make(map[string]*EndpointMetrics)
	for url, endpoint := range arl.endpoints {
		// Refresh calculated metrics before returning
		arl.updateCalculatedMetrics(endpoint)
		metrics[url] = endpoint.metrics
	}

	return metrics
}
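A minimal usage sketch for the adaptive limiter: pick the best endpoint, make the call, and feed the result back so the limiter can adapt. `doRPC` and `callWithAdaptiveLimit` are hypothetical, and the import paths assume the pre-move layout (these files now live under `orig/`):

```go
package main

import (
	"context"
	"time"

	"github.com/fraktal/mev-beta/internal/config"
	"github.com/fraktal/mev-beta/internal/logger"
	"github.com/fraktal/mev-beta/internal/ratelimit"
)

// doRPC is a hypothetical stand-in for the real RPC call.
func doRPC(ctx context.Context, url string) error { return nil }

func callWithAdaptiveLimit(cfg *config.ArbitrumConfig, log *logger.Logger) error {
	arl := ratelimit.NewAdaptiveRateLimiter(cfg, log)
	defer arl.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Pick the healthiest endpoint and wait for a rate-limit token.
	url, err := arl.WaitForBestEndpoint(ctx)
	if err != nil {
		return err
	}

	// Time the call and report the outcome so scoring and
	// rate adjustment reflect real endpoint behavior.
	start := time.Now()
	callErr := doRPC(ctx, url)
	arl.RecordResult(url, callErr == nil, time.Since(start))
	return callErr
}
```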
orig/internal/ratelimit/manager.go (new file, 139 lines)
@@ -0,0 +1,139 @@
package ratelimit

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/time/rate"

	"github.com/fraktal/mev-beta/internal/config"
)

// LimiterManager manages rate limiters for multiple endpoints
type LimiterManager struct {
	limiters map[string]*EndpointLimiter
	mu       sync.RWMutex
}

// EndpointLimiter represents a rate limiter for a specific endpoint
type EndpointLimiter struct {
	URL     string
	Limiter *rate.Limiter
	Config  config.RateLimitConfig
}

// NewLimiterManager creates a new LimiterManager
func NewLimiterManager(cfg *config.ArbitrumConfig) *LimiterManager {
	lm := &LimiterManager{
		limiters: make(map[string]*EndpointLimiter),
	}

	// Create a limiter for the primary endpoint
	limiter := createLimiter(cfg.RateLimit)
	lm.limiters[cfg.RPCEndpoint] = &EndpointLimiter{
		URL:     cfg.RPCEndpoint,
		Limiter: limiter,
		Config:  cfg.RateLimit,
	}

	// Create limiters for reading endpoints
	for _, endpoint := range cfg.ReadingEndpoints {
		limiter := createLimiter(endpoint.RateLimit)
		lm.limiters[endpoint.URL] = &EndpointLimiter{
			URL:     endpoint.URL,
			Limiter: limiter,
			Config:  endpoint.RateLimit,
		}
	}

	// Create limiters for execution endpoints
	for _, endpoint := range cfg.ExecutionEndpoints {
		limiter := createLimiter(endpoint.RateLimit)
		lm.limiters[endpoint.URL] = &EndpointLimiter{
			URL:     endpoint.URL,
			Limiter: limiter,
			Config:  endpoint.RateLimit,
		}
	}

	return lm
}

// createLimiter creates a rate limiter based on the configuration
func createLimiter(cfg config.RateLimitConfig) *rate.Limiter {
	// Create a rate limiter with the specified rate and burst
	r := rate.Limit(cfg.RequestsPerSecond)
	return rate.NewLimiter(r, cfg.Burst)
}

// WaitForLimit blocks until the rate limiter allows a request
func (lm *LimiterManager) WaitForLimit(ctx context.Context, endpointURL string) error {
	lm.mu.RLock()
	limiter, exists := lm.limiters[endpointURL]
	lm.mu.RUnlock()

	if !exists {
		return fmt.Errorf("no rate limiter found for endpoint: %s", endpointURL)
	}

	// Wait for permission to make a request
	return limiter.Limiter.Wait(ctx)
}

// TryWaitForLimit checks whether the rate limiter allows a request without blocking
func (lm *LimiterManager) TryWaitForLimit(ctx context.Context, endpointURL string) error {
	lm.mu.RLock()
	limiter, exists := lm.limiters[endpointURL]
	lm.mu.RUnlock()

	if !exists {
		return fmt.Errorf("no rate limiter found for endpoint: %s", endpointURL)
	}

	// Check for permission to make a request without blocking
	if !limiter.Limiter.Allow() {
		return fmt.Errorf("rate limit exceeded for endpoint: %s", endpointURL)
	}

	return nil
}

// GetLimiter returns the rate limiter for a specific endpoint
func (lm *LimiterManager) GetLimiter(endpointURL string) (*rate.Limiter, error) {
	lm.mu.RLock()
	limiter, exists := lm.limiters[endpointURL]
	lm.mu.RUnlock()

	if !exists {
		return nil, fmt.Errorf("no rate limiter found for endpoint: %s", endpointURL)
	}

	return limiter.Limiter, nil
}

// UpdateLimiter replaces the rate limiter for an endpoint
func (lm *LimiterManager) UpdateLimiter(endpointURL string, cfg config.RateLimitConfig) {
	lm.mu.Lock()
	defer lm.mu.Unlock()

	limiter := createLimiter(cfg)
	lm.limiters[endpointURL] = &EndpointLimiter{
		URL:     endpointURL,
		Limiter: limiter,
		Config:  cfg,
	}
}

// GetEndpoints returns all endpoint URLs
func (lm *LimiterManager) GetEndpoints() []string {
	lm.mu.RLock()
	defer lm.mu.RUnlock()

	endpoints := make([]string, 0, len(lm.limiters))
	for url := range lm.limiters {
		endpoints = append(endpoints, url)
	}

	return endpoints
}
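A short usage sketch contrasting the manager's blocking and non-blocking paths (`throttledCalls` is hypothetical; the actual request logic is elided):

```go
package main

import (
	"context"

	"github.com/fraktal/mev-beta/internal/config"
	"github.com/fraktal/mev-beta/internal/ratelimit"
)

func throttledCalls(cfg *config.ArbitrumConfig) error {
	lm := ratelimit.NewLimiterManager(cfg)
	ctx := context.Background()

	// Blocking path: wait until a token is available.
	if err := lm.WaitForLimit(ctx, cfg.RPCEndpoint); err != nil {
		return err
	}
	// ... make the request against cfg.RPCEndpoint ...

	// Non-blocking path: fail fast when the budget is exhausted,
	// e.g. to try another endpoint instead of queueing.
	for _, url := range lm.GetEndpoints() {
		if err := lm.TryWaitForLimit(ctx, url); err == nil {
			// ... make the request against url ...
			break
		}
	}
	return nil
}
```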
orig/internal/ratelimit/manager_test.go (new file, 243 lines)
@@ -0,0 +1,243 @@
package ratelimit

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"golang.org/x/time/rate"

	"github.com/fraktal/mev-beta/internal/config"
)

func TestNewLimiterManager(t *testing.T) {
	// Create test config
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 10,
			Burst:             20,
		},
		ReadingEndpoints: []config.EndpointConfig{
			{
				URL: "https://read.arbitrum.io/rpc",
				RateLimit: config.RateLimitConfig{
					RequestsPerSecond: 5,
					Burst:             10,
				},
			},
		},
		ExecutionEndpoints: []config.EndpointConfig{
			{
				URL: "https://exec.arbitrum.io/rpc",
				RateLimit: config.RateLimitConfig{
					RequestsPerSecond: 3,
					Burst:             6,
				},
			},
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// Verify the limiter manager was created correctly
	assert.NotNil(t, lm)
	assert.NotNil(t, lm.limiters)
	assert.Len(t, lm.limiters, 3) // Primary + reading + execution

	// Check the primary endpoint limiter
	primaryLimiter, exists := lm.limiters[cfg.RPCEndpoint]
	assert.True(t, exists)
	assert.Equal(t, cfg.RPCEndpoint, primaryLimiter.URL)
	assert.Equal(t, cfg.RateLimit, primaryLimiter.Config)
	assert.NotNil(t, primaryLimiter.Limiter)

	// Check the reading endpoint limiter
	readingLimiter, exists := lm.limiters[cfg.ReadingEndpoints[0].URL]
	assert.True(t, exists)
	assert.Equal(t, cfg.ReadingEndpoints[0].URL, readingLimiter.URL)
	assert.Equal(t, cfg.ReadingEndpoints[0].RateLimit, readingLimiter.Config)
	assert.NotNil(t, readingLimiter.Limiter)
}

func TestWaitForLimit(t *testing.T) {
	// Create test config
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 10,
			Burst:             20,
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// Waiting on an existing endpoint should succeed
	ctx := context.Background()
	err := lm.WaitForLimit(ctx, cfg.RPCEndpoint)
	assert.NoError(t, err)

	// Waiting on a non-existent endpoint should fail
	err = lm.WaitForLimit(ctx, "https://nonexistent.com")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "no rate limiter found for endpoint")
}

func TestTryWaitForLimit(t *testing.T) {
	// Create test config
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 10,
			Burst:             20,
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// Should succeed immediately since burst capacity is available
	ctx := context.Background()
	err := lm.TryWaitForLimit(ctx, cfg.RPCEndpoint)
	assert.NoError(t, err)

	// A non-existent endpoint should fail
	err = lm.TryWaitForLimit(ctx, "https://nonexistent.com")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "no rate limiter found for endpoint")
}

func TestGetLimiter(t *testing.T) {
	// Create test config
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 10,
			Burst:             20,
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// Getting the limiter for an existing endpoint should succeed
	limiter, err := lm.GetLimiter(cfg.RPCEndpoint)
	assert.NoError(t, err)
	assert.NotNil(t, limiter)
	assert.IsType(t, &rate.Limiter{}, limiter)

	// Getting the limiter for a non-existent endpoint should fail
	limiter, err = lm.GetLimiter("https://nonexistent.com")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "no rate limiter found for endpoint")
	assert.Nil(t, limiter)
}

func TestUpdateLimiter(t *testing.T) {
	// Create test config
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 10,
			Burst:             20,
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// Get the original limiter
	originalLimiter, err := lm.GetLimiter(cfg.RPCEndpoint)
	assert.NoError(t, err)
	assert.NotNil(t, originalLimiter)

	// Update the limiter
	newConfig := config.RateLimitConfig{
		RequestsPerSecond: 20,
		Burst:             40,
	}
	lm.UpdateLimiter(cfg.RPCEndpoint, newConfig)

	// Get the updated limiter
	updatedLimiter, err := lm.GetLimiter(cfg.RPCEndpoint)
	assert.NoError(t, err)
	assert.NotNil(t, updatedLimiter)

	// The limiter should be a different (new) instance
	assert.NotEqual(t, originalLimiter, updatedLimiter)

	// Check that the config was updated
	endpointLimiter := lm.limiters[cfg.RPCEndpoint]
	assert.Equal(t, newConfig, endpointLimiter.Config)
}

func TestGetEndpoints(t *testing.T) {
	// Create test config
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 10,
			Burst:             20,
		},
		ReadingEndpoints: []config.EndpointConfig{
			{
				URL: "https://fallback1.arbitrum.io/rpc",
				RateLimit: config.RateLimitConfig{
					RequestsPerSecond: 5,
					Burst:             10,
				},
			},
			{
				URL: "https://fallback2.arbitrum.io/rpc",
				RateLimit: config.RateLimitConfig{
					RequestsPerSecond: 3,
					Burst:             6,
				},
			},
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// Get endpoints
	endpoints := lm.GetEndpoints()

	// Verify results
	assert.Len(t, endpoints, 3) // Primary + 2 fallbacks
	assert.Contains(t, endpoints, cfg.RPCEndpoint)
	assert.Contains(t, endpoints, cfg.ReadingEndpoints[0].URL)
	assert.Contains(t, endpoints, cfg.ReadingEndpoints[1].URL)
}

func TestRateLimiting(t *testing.T) {
	// Create test config with a very low rate limit for testing
	cfg := &config.ArbitrumConfig{
		RPCEndpoint: "https://arb1.arbitrum.io/rpc",
		RateLimit: config.RateLimitConfig{
			RequestsPerSecond: 1, // 1 request per second
			Burst:             1, // no burst
		},
	}

	// Create limiter manager
	lm := NewLimiterManager(cfg)

	// The first request should succeed immediately
	start := time.Now()
	ctx := context.Background()
	err := lm.WaitForLimit(ctx, cfg.RPCEndpoint)
	assert.NoError(t, err)
	duration := time.Since(start)
	assert.True(t, duration < time.Millisecond*100, "First request should be fast")

	// A second request made immediately should be delayed by roughly one second
	start = time.Now()
	err = lm.WaitForLimit(ctx, cfg.RPCEndpoint)
	assert.NoError(t, err)
	duration = time.Since(start)
	assert.True(t, duration >= time.Second, "Second request should be delayed by rate limiter")
}