Restructured project for V2 refactor: **Structure Changes:** - Moved all V1 code to orig/ folder (preserved with git mv) - Created docs/planning/ directory - Added orig/README_V1.md explaining V1 preservation **Planning Documents:** - 00_V2_MASTER_PLAN.md: Complete architecture overview - Executive summary of critical V1 issues - High-level component architecture diagrams - 5-phase implementation roadmap - Success metrics and risk mitigation - 07_TASK_BREAKDOWN.md: Atomic task breakdown - 99+ hours of detailed tasks - Every task < 2 hours (atomic) - Clear dependencies and success criteria - Organized by implementation phase **V2 Key Improvements:** - Per-exchange parsers (factory pattern) - Multi-layer strict validation - Multi-index pool cache - Background validation pipeline - Comprehensive observability **Critical Issues Addressed:** - Zero address tokens (strict validation + cache enrichment) - Parsing accuracy (protocol-specific parsers) - No audit trail (background validation channel) - Inefficient lookups (multi-index cache) - Stats disconnection (event-driven metrics) Next Steps: 1. Review planning documents 2. Begin Phase 1: Foundation (P1-001 through P1-010) 3. Implement parsers in Phase 2 4. Build cache system in Phase 3 5. Add validation pipeline in Phase 4 6. Migrate and test in Phase 5 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
142 lines
3.3 KiB
Go
142 lines
3.3 KiB
Go
package dex
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
"github.com/ethereum/go-ethereum/ethclient"
|
|
)
|
|
|
|
// PoolCache caches pool reserves to reduce RPC calls.
//
// Access to the cache map is guarded by mu. Entries older than ttl are
// treated as stale and refetched on the next Get. The zero value is not
// usable; construct with NewPoolCache.
type PoolCache struct {
	cache    map[string]*CachedPoolData // keyed by cacheKey(protocol, poolAddress); guarded by mu
	mu       sync.RWMutex               // guards cache
	ttl      time.Duration              // max entry age before it is considered expired
	registry *Registry                  // resolves a DEXProtocol to its DEX (decoder) — see fetchAndCache
	client   *ethclient.Client          // RPC client used to fetch fresh reserves
}
|
|
|
|
// CachedPoolData represents cached pool data together with the metadata
// needed to decide expiry.
type CachedPoolData struct {
	Reserves  *PoolReserves // most recently fetched reserves for the pool
	Timestamp time.Time     // when Reserves was fetched; compared against PoolCache.ttl
	Protocol  DEXProtocol   // protocol the pool belongs to
}
|
|
|
|
// NewPoolCache creates a new pool cache
|
|
func NewPoolCache(registry *Registry, client *ethclient.Client, ttl time.Duration) *PoolCache {
|
|
return &PoolCache{
|
|
cache: make(map[string]*CachedPoolData),
|
|
ttl: ttl,
|
|
registry: registry,
|
|
client: client,
|
|
}
|
|
}
|
|
|
|
// Get retrieves pool reserves from cache or fetches if expired
|
|
func (pc *PoolCache) Get(ctx context.Context, protocol DEXProtocol, poolAddress common.Address) (*PoolReserves, error) {
|
|
key := pc.cacheKey(protocol, poolAddress)
|
|
|
|
// Try cache first
|
|
pc.mu.RLock()
|
|
cached, exists := pc.cache[key]
|
|
pc.mu.RUnlock()
|
|
|
|
if exists && time.Since(cached.Timestamp) < pc.ttl {
|
|
return cached.Reserves, nil
|
|
}
|
|
|
|
// Cache miss or expired - fetch fresh data
|
|
return pc.fetchAndCache(ctx, protocol, poolAddress, key)
|
|
}
|
|
|
|
// fetchAndCache fetches reserves and updates cache
|
|
func (pc *PoolCache) fetchAndCache(ctx context.Context, protocol DEXProtocol, poolAddress common.Address, key string) (*PoolReserves, error) {
|
|
// Get DEX info
|
|
dex, err := pc.registry.Get(protocol)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get DEX: %w", err)
|
|
}
|
|
|
|
// Fetch reserves
|
|
reserves, err := dex.Decoder.GetPoolReserves(ctx, pc.client, poolAddress)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to fetch reserves: %w", err)
|
|
}
|
|
|
|
// Update cache
|
|
pc.mu.Lock()
|
|
pc.cache[key] = &CachedPoolData{
|
|
Reserves: reserves,
|
|
Timestamp: time.Now(),
|
|
Protocol: protocol,
|
|
}
|
|
pc.mu.Unlock()
|
|
|
|
return reserves, nil
|
|
}
|
|
|
|
// Invalidate removes a pool from cache
|
|
func (pc *PoolCache) Invalidate(protocol DEXProtocol, poolAddress common.Address) {
|
|
key := pc.cacheKey(protocol, poolAddress)
|
|
pc.mu.Lock()
|
|
delete(pc.cache, key)
|
|
pc.mu.Unlock()
|
|
}
|
|
|
|
// Clear removes all cached data
|
|
func (pc *PoolCache) Clear() {
|
|
pc.mu.Lock()
|
|
pc.cache = make(map[string]*CachedPoolData)
|
|
pc.mu.Unlock()
|
|
}
|
|
|
|
// cacheKey generates a unique cache key
|
|
func (pc *PoolCache) cacheKey(protocol DEXProtocol, poolAddress common.Address) string {
|
|
return fmt.Sprintf("%d:%s", protocol, poolAddress.Hex())
|
|
}
|
|
|
|
// GetCacheSize returns the number of cached pools
|
|
func (pc *PoolCache) GetCacheSize() int {
|
|
pc.mu.RLock()
|
|
defer pc.mu.RUnlock()
|
|
return len(pc.cache)
|
|
}
|
|
|
|
// CleanExpired removes expired entries from cache
|
|
func (pc *PoolCache) CleanExpired() int {
|
|
pc.mu.Lock()
|
|
defer pc.mu.Unlock()
|
|
|
|
removed := 0
|
|
for key, cached := range pc.cache {
|
|
if time.Since(cached.Timestamp) >= pc.ttl {
|
|
delete(pc.cache, key)
|
|
removed++
|
|
}
|
|
}
|
|
return removed
|
|
}
|
|
|
|
// StartCleanupRoutine starts a background goroutine to clean expired entries
|
|
func (pc *PoolCache) StartCleanupRoutine(ctx context.Context, interval time.Duration) {
|
|
ticker := time.NewTicker(interval)
|
|
go func() {
|
|
defer ticker.Stop()
|
|
for {
|
|
select {
|
|
case <-ticker.C:
|
|
removed := pc.CleanExpired()
|
|
if removed > 0 {
|
|
// Could log here if logger is available
|
|
}
|
|
case <-ctx.Done():
|
|
return
|
|
}
|
|
}
|
|
}()
|
|
}
|