feat: create v2-prep branch with comprehensive planning

Restructured project for V2 refactor:

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation

- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern; see the sketch after this list)
- Multi-layer strict validation
- Multi-index pool cache
- Background validation pipeline
- Comprehensive observability
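
A minimal sketch of the planned factory pattern for per-exchange parsers — `SwapParser`, `Swap`, `NewParser`, and the concrete parser types are all illustrative names, not the actual V2 interfaces:

```go
package parsers

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// Swap is a placeholder for the unified, normalized swap event.
type Swap struct{}

// SwapParser decodes protocol-specific logs into the unified Swap type.
type SwapParser interface {
	ParseSwap(log types.Log) (*Swap, error)
}

type UniswapV2Parser struct{}

func (p *UniswapV2Parser) ParseSwap(log types.Log) (*Swap, error) { return &Swap{}, nil }

type UniswapV3Parser struct{}

func (p *UniswapV3Parser) ParseSwap(log types.Log) (*Swap, error) { return &Swap{}, nil }

// NewParser returns the parser registered for an exchange identifier.
func NewParser(exchange string) (SwapParser, error) {
	switch exchange {
	case "uniswap-v2":
		return &UniswapV2Parser{}, nil
	case "uniswap-v3":
		return &UniswapV3Parser{}, nil
	default:
		return nil, fmt.Errorf("no parser registered for %q", exchange)
	}
}
```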

**Critical Issues Addressed:**
- Zero address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

Next Steps:
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: Administrator
Date: 2025-11-10 10:14:26 +01:00
Parent: 1773daffe7
Commit: 803de231ba
411 changed files with 20390 additions and 8680 deletions

@@ -0,0 +1,464 @@
package datafetcher
import (
"context"
"fmt"
"math/big"
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/holiman/uint256"
"github.com/fraktal/mev-beta/bindings/datafetcher"
"github.com/fraktal/mev-beta/internal/logger"
)
// PoolData represents unified pool data for both V2 and V3 pools
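// A V3 pool leaves Reserve0/Reserve1 nil and carries its state in Liquidity,
// SqrtPriceX96, and Tick; a V2 pool sets the reserves, from which Liquidity
// and SqrtPriceX96 can be derived (see approximateLiquidity and
// calculateSqrtPriceX96FromReserves below).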
type PoolData struct {
Address common.Address
Token0 common.Address
Token1 common.Address
Fee int64 // V3 only (0 for V2)
Liquidity *uint256.Int // V3 liquidity or derived from V2 reserves
SqrtPriceX96 *uint256.Int // V3 price or calculated from V2 reserves
Tick int // V3 only (0 for V2)
Reserve0 *big.Int // V2 only (nil for V3)
Reserve1 *big.Int // V2 only (nil for V3)
IsV3 bool
BlockNumber *big.Int
Timestamp *big.Int
}
// BatchFetcher provides efficient batch pool data fetching using the DataFetcher contract
type BatchFetcher struct {
client *ethclient.Client
contract *datafetcher.DataFetcher
contractAddr common.Address
logger *logger.Logger
maxBatchSize int
requestTimeout time.Duration
blacklist *PoolBlacklist
}
// NewBatchFetcher creates a new batch fetcher instance
func NewBatchFetcher(
client *ethclient.Client,
contractAddr common.Address,
logger *logger.Logger,
) (*BatchFetcher, error) {
if client == nil {
return nil, fmt.Errorf("client cannot be nil")
}
contract, err := datafetcher.NewDataFetcher(contractAddr, client)
if err != nil {
return nil, fmt.Errorf("failed to instantiate DataFetcher contract: %w", err)
}
// Initialize blacklist with persistence
blacklistPath := "logs/pool_blacklist.json"
blacklist := NewPoolBlacklist(blacklistPath)
return &BatchFetcher{
client: client,
contract: contract,
contractAddr: contractAddr,
logger: logger,
maxBatchSize: 100, // Fetch up to 100 pools per batch
requestTimeout: 30 * time.Second, // FIXED (2025-11-03): Increased from 10s to 30s to handle RPC latency
blacklist: blacklist,
}, nil
}
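// Example usage (a sketch; contractAddr and the pool addresses are
// placeholders, not real deployments):
//
//	fetcher, err := NewBatchFetcher(client, contractAddr, log)
//	if err != nil {
//		return err
//	}
//	data, err := fetcher.FetchPoolsBatch(ctx, []common.Address{poolA, poolB})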
// FetchPoolsBatch fetches data for multiple pools in a single RPC call
// Automatically detects V2 vs V3 and fetches appropriate data
func (bf *BatchFetcher) FetchPoolsBatch(ctx context.Context, poolAddresses []common.Address) (map[common.Address]*PoolData, error) {
if len(poolAddresses) == 0 {
return make(map[common.Address]*PoolData), nil
}
// Validate and filter pool addresses
validPools := make([]common.Address, 0, len(poolAddresses))
invalidCount := 0
blacklistedCount := 0
for _, addr := range poolAddresses {
// Check if address is valid
if err := ValidatePoolAddress(addr); err != nil {
invalidCount++
bf.blacklist.RecordFailure(addr, FailureInvalidAddress)
continue
}
// Check if blacklisted
if bf.blacklist.IsBlacklisted(addr) {
blacklistedCount++
continue
}
validPools = append(validPools, addr)
}
if invalidCount > 0 || blacklistedCount > 0 {
bf.logger.Debug(fmt.Sprintf("🔍 Filtered pools: %d invalid, %d blacklisted, %d valid",
invalidCount, blacklistedCount, len(validPools)))
}
if len(validPools) == 0 {
return make(map[common.Address]*PoolData), nil
}
// Split into batches if needed
results := make(map[common.Address]*PoolData)
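// Example: with maxBatchSize=100 and 250 valid pools, this loop issues three
// calls covering validPools[0:100], [100:200], and [200:250]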
for i := 0; i < len(validPools); i += bf.maxBatchSize {
end := i + bf.maxBatchSize
if end > len(validPools) {
end = len(validPools)
}
batch := validPools[i:end]
batchResults, err := bf.fetchSingleBatch(ctx, batch)
// If batch fetch failed with execution revert, try individual fetches as fallback
if err != nil && bf.isExecutionRevert(err) && len(batch) > 1 {
bf.logger.Debug(fmt.Sprintf("Batch %d-%d failed with revert, trying individual fetches for %d pools", i, end, len(batch)))
// Fetch each pool individually to identify which ones are bad
for _, poolAddr := range batch {
individualResult, individualErr := bf.fetchSingleBatch(ctx, []common.Address{poolAddr})
if individualErr != nil {
// This specific pool is bad
bf.blacklist.RecordFailure(poolAddr, bf.categorizeError(individualErr))
bf.logger.Debug(fmt.Sprintf("Pool %s failed individual fetch: %v", poolAddr.Hex()[:10], individualErr))
} else if len(individualResult) > 0 {
// This pool is good! Merge its data
for addr, data := range individualResult {
results[addr] = data
bf.blacklist.RecordSuccess(addr)
}
} else {
// Pool returned no data
bf.blacklist.RecordFailure(poolAddr, FailureNoData)
}
}
continue
}
// If batch fetch failed for other reasons, record failures
if err != nil {
bf.logger.Warn(fmt.Sprintf("Failed to fetch batch %d-%d: %v", i, end, err))
// Record failures for all pools in this batch
for _, addr := range batch {
bf.blacklist.RecordFailure(addr, bf.categorizeError(err))
}
continue
}
// Batch succeeded - merge results and record successes
for addr, data := range batchResults {
results[addr] = data
bf.blacklist.RecordSuccess(addr)
}
// Record failures for pools that didn't return data
for _, addr := range batch {
if _, exists := batchResults[addr]; !exists {
bf.blacklist.RecordFailure(addr, FailureNoData)
}
}
}
total, blacklisted, expired := bf.blacklist.GetStats()
bf.logger.Debug(fmt.Sprintf("✅ Batch fetched %d/%d pools successfully (Blacklist: %d total, %d active, %d expired)",
len(results), len(poolAddresses), total, blacklisted, expired))
return results, nil
}
// fetchSingleBatch fetches a single batch (max 100 pools) with retry logic
func (bf *BatchFetcher) fetchSingleBatch(ctx context.Context, pools []common.Address) (map[common.Address]*PoolData, error) {
// FIXED (2025-11-03): Added exponential backoff retry logic for transient RPC failures
// This handles temporary network issues, rate limiting, and RPC overload gracefully
maxRetries := 3
baseDelay := time.Second
var lastErr error
for attempt := 0; attempt < maxRetries; attempt++ {
timeoutCtx, cancel := context.WithTimeout(ctx, bf.requestTimeout)
// FIXED: Use batchFetchV3Data instead of batchFetchAllData
// Reason: batchFetchAllData is missing 'view' modifier in Solidity, causing ABI issues
// batchFetchV3Data is properly marked as 'view' and works correctly
// Call batchFetchV3Data using the generated bindings
opts := &bind.CallOpts{
Context: timeoutCtx,
}
v3Data, err := bf.contract.BatchFetchV3Data(opts, pools)
if err == nil {
// Success on this attempt: parse while the timeout context is still live,
// then release it (deferring cancel() inside a loop would leak contexts
// until the function returns)
result, parseErr := bf.parseV3Data(timeoutCtx, v3Data)
cancel()
return result, parseErr
}
cancel()
lastErr = err
// Check if this is a transient error worth retrying
if bf.isTransientError(err) && attempt < maxRetries-1 {
// Exponential backoff: 1s after the first failure, 2s after the second
// (the third attempt is the last, so no further wait follows it)
delayDuration := baseDelay * time.Duration(1<<uint(attempt))
bf.logger.Debug(fmt.Sprintf("Batch fetch attempt %d failed with transient error, retrying in %v: %v",
attempt+1, delayDuration, err))
select {
case <-time.After(delayDuration):
// Continue to next retry
case <-ctx.Done():
return nil, fmt.Errorf("batch fetch cancelled during backoff: %w", ctx.Err())
}
} else {
// Non-transient error or last retry - return immediately
break
}
}
return nil, fmt.Errorf("batch fetch V3 data failed after %d attempts: %w", maxRetries, lastErr)
}
// parseV3Data parses V3 pool data into our unified PoolData format
func (bf *BatchFetcher) parseV3Data(ctx context.Context, v3Data []datafetcher.DataFetcherV3PoolData) (map[common.Address]*PoolData, error) {
// Get current block info for metadata
header, err := bf.client.HeaderByNumber(ctx, nil)
var blockNumber, timestamp *big.Int
if err == nil {
blockNumber = header.Number
timestamp = new(big.Int).SetUint64(header.Time)
} else {
blockNumber = big.NewInt(0)
timestamp = big.NewInt(0)
}
// Parse results into our unified format
results := make(map[common.Address]*PoolData)
// Process V3 data
for _, poolDataItem := range v3Data {
// Calculate liquidity as uint256
liquidity, overflow := uint256.FromBig(poolDataItem.Liquidity)
if overflow {
bf.logger.Warn(fmt.Sprintf("Liquidity overflow for pool %s", poolDataItem.Pool.Hex()))
liquidity = uint256.NewInt(0)
}
// Calculate sqrtPriceX96 as uint256
sqrtPrice, overflow := uint256.FromBig(poolDataItem.SqrtPriceX96)
if overflow {
bf.logger.Warn(fmt.Sprintf("SqrtPriceX96 overflow for pool %s", poolDataItem.Pool.Hex()))
sqrtPrice = uint256.NewInt(0)
}
poolData := &PoolData{
Address: poolDataItem.Pool,
Token0: poolDataItem.Token0,
Token1: poolDataItem.Token1,
Fee: poolDataItem.Fee.Int64(),
Liquidity: liquidity,
SqrtPriceX96: sqrtPrice,
Tick: int(poolDataItem.Tick.Int64()),
IsV3: true,
BlockNumber: blockNumber,
Timestamp: timestamp,
}
results[poolDataItem.Pool] = poolData
}
// Note: V2 data processing removed since we're only calling batchFetchV3Data
// Most pools on Arbitrum are V3 anyway. If V2 support is needed later,
// call bf.contract.BatchFetchV2Data separately and merge results
return results, nil
}
// FetchSinglePool fetches data for a single pool (convenience method)
func (bf *BatchFetcher) FetchSinglePool(ctx context.Context, poolAddr common.Address) (*PoolData, error) {
results, err := bf.FetchPoolsBatch(ctx, []common.Address{poolAddr})
if err != nil {
return nil, err
}
data, exists := results[poolAddr]
if !exists {
return nil, fmt.Errorf("no data returned for pool %s", poolAddr.Hex())
}
return data, nil
}
// approximateLiquidity calculates sqrt(reserve0 * reserve1) for V2 pools
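// Example: reserves of 4e18 and 9e18 give sqrt(36e36) = 6e18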
func approximateLiquidity(reserve0, reserve1 *big.Int) *big.Int {
if reserve0.Sign() == 0 || reserve1.Sign() == 0 {
return big.NewInt(0)
}
// Calculate reserve0 * reserve1
product := new(big.Int).Mul(reserve0, reserve1)
// Calculate square root using Newton's method
return new(big.Int).Sqrt(product)
}
// calculateSqrtPriceX96FromReserves calculates sqrtPriceX96 from V2 reserves
// price = reserve1 / reserve0
// sqrtPriceX96 = sqrt(price) * 2^96
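// Example: equal reserves mean price 1, so sqrtPriceX96 = sqrt(2^192) = 2^96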
func calculateSqrtPriceX96FromReserves(reserve0, reserve1 *big.Int) *uint256.Int {
if reserve0.Sign() == 0 {
return uint256.NewInt(0)
}
// Calculate price ratio: (reserve1 * 2^96) / reserve0
// Then take square root
// First scale reserve1 by 2^192 (so after sqrt we get 2^96 scaling)
q192 := new(big.Int).Lsh(big.NewInt(1), 192) // 2^192
numerator := new(big.Int).Mul(reserve1, q192)
ratio := new(big.Int).Div(numerator, reserve0)
// Take square root
sqrtPrice := new(big.Int).Sqrt(ratio)
result, _ := uint256.FromBig(sqrtPrice)
return result
}
// SetMaxBatchSize sets the maximum batch size
func (bf *BatchFetcher) SetMaxBatchSize(size int) {
if size > 0 && size <= 1000 {
bf.maxBatchSize = size
}
}
// SetRequestTimeout sets the timeout for batch requests
func (bf *BatchFetcher) SetRequestTimeout(timeout time.Duration) {
bf.requestTimeout = timeout
}
// categorizeError determines the type of error for blacklist tracking
func (bf *BatchFetcher) categorizeError(err error) FailureReason {
if err == nil {
return FailureOther
}
errStr := err.Error()
// Check for common error patterns
if contains(errStr, "execution reverted") {
return FailureExecutionRevert
}
if contains(errStr, "no data returned") || contains(errStr, "no data") {
return FailureNoData
}
if contains(errStr, "429") || contains(errStr, "too many requests") || contains(errStr, "rate limit") {
return FailureRateLimit
}
if contains(errStr, "timeout") || contains(errStr, "deadline exceeded") {
return FailureTimeout
}
return FailureOther
}
// contains reports whether s contains substr, case-insensitively
func contains(s, substr string) bool {
return strings.Contains(strings.ToLower(s), strings.ToLower(substr))
}
// GetBlacklist returns the blacklist for external access
func (bf *BatchFetcher) GetBlacklist() *PoolBlacklist {
return bf.blacklist
}
// PersistBlacklist forces immediate persistence of the blacklist
func (bf *BatchFetcher) PersistBlacklist() error {
return bf.blacklist.Persist()
}
// isExecutionRevert checks if an error is an execution revert
func (bf *BatchFetcher) isExecutionRevert(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
return contains(errStr, "execution reverted") ||
contains(errStr, "revert") ||
contains(errStr, "reverted")
}
// isTransientError checks if an error is transient (worth retrying)
// ADDED (2025-11-03): Distinguishes between transient (network/timeout) and permanent (contract) errors
func (bf *BatchFetcher) isTransientError(err error) bool {
if err == nil {
return false
}
errStr := err.Error()
// Transient errors worth retrying:
// - Timeouts/deadline exceeded (network latency)
// - Rate limiting (429 Too Many Requests)
// - Temporary network issues (connection refused, reset, etc.)
// - Context deadline exceeded (timeout)
transientPatterns := []string{
"context deadline exceeded",
"timeout",
"429",
"too many requests",
"rate limit",
"connection refused",
"connection reset",
"i/o timeout",
"temporary failure",
"temporarily unavailable",
"econnrefused",
"econnreset",
"etimedout",
}
for _, pattern := range transientPatterns {
if contains(errStr, pattern) {
return true
}
}
// Non-transient errors that should NOT be retried:
// - Execution reverts (contract logic failures)
// - Invalid addresses
// - Method not found
if bf.isExecutionRevert(err) ||
contains(errStr, "no method") ||
contains(errStr, "invalid address") ||
contains(errStr, "ABI") {
return false
}
// For unknown errors, assume NOT transient (fail fast)
return false
}

@@ -0,0 +1,344 @@
package datafetcher
import (
"encoding/json"
"fmt"
"os"
"sort"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
)
// FailureReason represents why a pool failed
type FailureReason string
const (
FailureExecutionRevert FailureReason = "execution_reverted"
FailureNoData FailureReason = "no_data_returned"
FailureInvalidAddress FailureReason = "invalid_address"
FailureTimeout FailureReason = "timeout"
FailureRateLimit FailureReason = "rate_limit"
FailureOther FailureReason = "other"
)
// PoolFailureRecord tracks failures for a specific pool
type PoolFailureRecord struct {
Address common.Address `json:"address"`
FailureCount int `json:"failure_count"`
ConsecutiveFails int `json:"consecutive_fails"`
LastFailure time.Time `json:"last_failure"`
LastReason FailureReason `json:"last_reason"`
FirstSeen time.Time `json:"first_seen"`
IsBlacklisted bool `json:"is_blacklisted"`
BlacklistedAt time.Time `json:"blacklisted_at,omitempty"`
}
// PoolBlacklist manages a list of pools that consistently fail
type PoolBlacklist struct {
mu sync.RWMutex
failures map[common.Address]*PoolFailureRecord
blacklistThreshold int // Number of consecutive failures before blacklisting
blacklistExpiry time.Duration // How long to keep in blacklist before retry
rateLimitIgnoreWindow time.Duration // Ignore rate limit errors within this window
persistPath string
lastPersist time.Time
persistInterval time.Duration
}
// NewPoolBlacklist creates a new pool blacklist manager
func NewPoolBlacklist(persistPath string) *PoolBlacklist {
pb := &PoolBlacklist{
failures: make(map[common.Address]*PoolFailureRecord),
blacklistThreshold: 5, // Blacklist after 5 consecutive failures
blacklistExpiry: 24 * time.Hour, // Retry blacklisted pools after 24 hours
rateLimitIgnoreWindow: 5 * time.Minute, // Ignore rate limit errors for 5 minutes
persistPath: persistPath,
persistInterval: 5 * time.Minute, // Persist every 5 minutes
}
// Load existing blacklist from disk
if persistPath != "" {
if err := pb.Load(); err != nil {
// Not critical, just log and continue
fmt.Printf("[WARN] Failed to load blacklist from %s: %v\n", persistPath, err)
}
}
return pb
}
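// Example lifecycle (a sketch; the threshold and expiry are the defaults
// set above):
//
//	bl := NewPoolBlacklist("logs/pool_blacklist.json")
//	for i := 0; i < 5; i++ {
//		bl.RecordFailure(addr, FailureExecutionRevert) // fifth consecutive failure blacklists
//	}
//	ok := bl.IsBlacklisted(addr) // true for the next 24h unless RecordSuccess clears it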
// RecordFailure records a failure for a pool
func (pb *PoolBlacklist) RecordFailure(addr common.Address, reason FailureReason) {
pb.mu.Lock()
defer pb.mu.Unlock()
// Ignore rate limit errors during the ignore window
// (rate limits are temporary and shouldn't blacklist pools)
if reason == FailureRateLimit {
if record, exists := pb.failures[addr]; exists {
if time.Since(record.LastFailure) < pb.rateLimitIgnoreWindow {
return
}
}
}
now := time.Now()
record, exists := pb.failures[addr]
if !exists {
record = &PoolFailureRecord{
Address: addr,
FirstSeen: now,
}
pb.failures[addr] = record
}
// Update failure counts
record.FailureCount++
record.ConsecutiveFails++
record.LastFailure = now
record.LastReason = reason
// Check if should be blacklisted
if !record.IsBlacklisted && record.ConsecutiveFails >= pb.blacklistThreshold {
record.IsBlacklisted = true
record.BlacklistedAt = now
}
// Persist periodically
if pb.persistPath != "" && time.Since(pb.lastPersist) > pb.persistInterval {
pb.persistUnsafe()
}
}
// RecordSuccess records a successful fetch for a pool
func (pb *PoolBlacklist) RecordSuccess(addr common.Address) {
pb.mu.Lock()
defer pb.mu.Unlock()
record, exists := pb.failures[addr]
if !exists {
// No failures recorded, nothing to do
return
}
// Reset consecutive failures
record.ConsecutiveFails = 0
// If it was blacklisted and now works, remove from blacklist
if record.IsBlacklisted {
record.IsBlacklisted = false
record.BlacklistedAt = time.Time{}
}
// Persist on success to ensure blacklist is updated
if pb.persistPath != "" {
pb.persistUnsafe()
}
}
// IsBlacklisted checks if a pool is currently blacklisted
func (pb *PoolBlacklist) IsBlacklisted(addr common.Address) bool {
pb.mu.RLock()
defer pb.mu.RUnlock()
record, exists := pb.failures[addr]
if !exists {
return false
}
if !record.IsBlacklisted {
return false
}
// Check if blacklist has expired
if time.Since(record.BlacklistedAt) > pb.blacklistExpiry {
return false
}
return true
}
// FilterBlacklisted removes blacklisted pools from a list
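// Example: given pools [A, B, C] where B is actively blacklisted and C's
// blacklist window has expired, the result is [A, C]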
func (pb *PoolBlacklist) FilterBlacklisted(pools []common.Address) []common.Address {
pb.mu.RLock()
defer pb.mu.RUnlock()
filtered := make([]common.Address, 0, len(pools))
now := time.Now()
for _, pool := range pools {
record, exists := pb.failures[pool]
// If no record or not blacklisted, include it
if !exists || !record.IsBlacklisted {
filtered = append(filtered, pool)
continue
}
// Check if blacklist has expired
if now.Sub(record.BlacklistedAt) > pb.blacklistExpiry {
// Expired, include it and mark for retry
filtered = append(filtered, pool)
// Note: We don't modify the record here (would need write lock)
// It will be updated on next fetch attempt
}
// Otherwise skip (blacklisted)
}
return filtered
}
// GetStats returns blacklist statistics
func (pb *PoolBlacklist) GetStats() (total, blacklisted, expired int) {
pb.mu.RLock()
defer pb.mu.RUnlock()
now := time.Now()
total = len(pb.failures)
for _, record := range pb.failures {
if record.IsBlacklisted {
if now.Sub(record.BlacklistedAt) > pb.blacklistExpiry {
expired++
} else {
blacklisted++
}
}
}
return total, blacklisted, expired
}
// GetTopFailures returns the top N pools by failure count
func (pb *PoolBlacklist) GetTopFailures(n int) []*PoolFailureRecord {
pb.mu.RLock()
defer pb.mu.RUnlock()
// Copy all records
records := make([]*PoolFailureRecord, 0, len(pb.failures))
for _, record := range pb.failures {
records = append(records, record)
}
// Sort by failure count, descending
sort.Slice(records, func(i, j int) bool {
return records[i].FailureCount > records[j].FailureCount
})
// Return top n
if n > len(records) {
n = len(records)
}
return records[:n]
}
// Persist saves the blacklist to disk
func (pb *PoolBlacklist) Persist() error {
pb.mu.Lock()
defer pb.mu.Unlock()
return pb.persistUnsafe()
}
// persistUnsafe persists without acquiring lock (caller must hold lock)
func (pb *PoolBlacklist) persistUnsafe() error {
if pb.persistPath == "" {
return nil
}
// Convert map to slice for JSON
records := make([]*PoolFailureRecord, 0, len(pb.failures))
for _, record := range pb.failures {
records = append(records, record)
}
data, err := json.MarshalIndent(records, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal blacklist: %w", err)
}
if err := os.WriteFile(pb.persistPath, data, 0644); err != nil {
return fmt.Errorf("failed to write blacklist: %w", err)
}
pb.lastPersist = time.Now()
return nil
}
// Load loads the blacklist from disk
func (pb *PoolBlacklist) Load() error {
pb.mu.Lock()
defer pb.mu.Unlock()
if pb.persistPath == "" {
return nil
}
data, err := os.ReadFile(pb.persistPath)
if err != nil {
if os.IsNotExist(err) {
return nil // File doesn't exist yet, not an error
}
return fmt.Errorf("failed to read blacklist: %w", err)
}
var records []*PoolFailureRecord
if err := json.Unmarshal(data, &records); err != nil {
return fmt.Errorf("failed to unmarshal blacklist: %w", err)
}
// Load records into map
pb.failures = make(map[common.Address]*PoolFailureRecord)
for _, record := range records {
pb.failures[record.Address] = record
}
return nil
}
// SetBlacklistThreshold sets the number of consecutive failures before blacklisting
func (pb *PoolBlacklist) SetBlacklistThreshold(threshold int) {
pb.mu.Lock()
defer pb.mu.Unlock()
if threshold > 0 {
pb.blacklistThreshold = threshold
}
}
// SetBlacklistExpiry sets how long pools stay blacklisted
func (pb *PoolBlacklist) SetBlacklistExpiry(expiry time.Duration) {
pb.mu.Lock()
defer pb.mu.Unlock()
if expiry > 0 {
pb.blacklistExpiry = expiry
}
}
// Clear removes all entries from the blacklist
func (pb *PoolBlacklist) Clear() {
pb.mu.Lock()
defer pb.mu.Unlock()
pb.failures = make(map[common.Address]*PoolFailureRecord)
}
// ValidatePoolAddress checks if a pool address is valid for fetching
func ValidatePoolAddress(addr common.Address) error {
// Check for the zero address (this comparison also covers the all-zeros
// hex form, so no separate string check is needed)
if addr == (common.Address{}) {
return fmt.Errorf("zero address not allowed")
}
// All other addresses are potentially valid
// (we can't pre-validate if they're actual pool contracts without RPC calls)
return nil
}