mev-beta/pkg/market/fan.go
Krypto Kajun 850223a953 fix(multicall): resolve critical multicall parsing corruption issues
- Added comprehensive bounds checking to prevent buffer overruns in multicall parsing
- Implemented graduated validation system (Strict/Moderate/Permissive) to reduce false positives
- Added LRU caching system for address validation with 10-minute TTL (see the sketch below the commit metadata)
- Enhanced ABI decoder with missing Universal Router and Arbitrum-specific DEX signatures
- Fixed duplicate function declarations and import conflicts across multiple files
- Added error recovery mechanisms with multiple fallback strategies
- Updated tests to handle new validation behavior for suspicious addresses
- Fixed parser test expectations for improved validation system
- Applied gofmt formatting fixes to ensure code style compliance
- Fixed mutex copying issues in monitoring package by introducing MetricsSnapshot
- Resolved critical security vulnerabilities in heuristic address extraction
- Progress: Updated TODO audit from 10% to 35% complete

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-10-17 00:12:55 -05:00
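
A minimal sketch of the TTL-cached address validation the commit describes, assuming hypothetical names (ValidationCache, addrCacheEntry) that are not taken from the repo; it expires entries after the TTL but omits the size-bounded LRU eviction the commit also mentions:

package cache

import (
	"sync"
	"time"
)

// addrCacheEntry pairs a validation verdict with its expiry time.
type addrCacheEntry struct {
	valid   bool
	expires time.Time
}

// ValidationCache memoizes address-validation results for a fixed TTL.
type ValidationCache struct {
	mu      sync.Mutex
	entries map[string]addrCacheEntry
	ttl     time.Duration
}

// NewValidationCache builds a cache whose entries live for ttl
// (10 minutes per the commit message).
func NewValidationCache(ttl time.Duration) *ValidationCache {
	return &ValidationCache{entries: make(map[string]addrCacheEntry), ttl: ttl}
}

// Get reports the cached verdict and whether a fresh entry existed.
func (c *ValidationCache) Get(addr string) (valid, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	e, found := c.entries[addr]
	if !found || time.Now().After(e.expires) {
		return false, false
	}
	return e.valid, true
}

// Put records a verdict, stamping it with the configured TTL.
func (c *ValidationCache) Put(addr string, valid bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[addr] = addrCacheEntry{valid: valid, expires: time.Now().Add(c.ttl)}
}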

package market

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/fraktal/mev-beta/internal/config"
	"github.com/fraktal/mev-beta/internal/logger"
	"github.com/fraktal/mev-beta/internal/ratelimit"
)

// FanManager manages fan-in/fan-out patterns for multiple data sources
type FanManager struct {
	config      *config.Config
	logger      *logger.Logger
	rateLimiter *ratelimit.LimiterManager
	bufferSize  int
	maxWorkers  int
}

// NewFanManager creates a new fan manager
func NewFanManager(cfg *config.Config, logger *logger.Logger, rateLimiter *ratelimit.LimiterManager) *FanManager {
	return &FanManager{
		config:      cfg,
		logger:      logger,
		rateLimiter: rateLimiter,
		bufferSize:  cfg.Bot.ChannelBufferSize,
		maxWorkers:  cfg.Bot.MaxWorkers,
	}
}

// FanOut distributes work across multiple workers
func (fm *FanManager) FanOut(ctx context.Context, jobs <-chan *types.Transaction, numWorkers int) <-chan *types.Transaction {
	// Create the output channel
	out := make(chan *types.Transaction, fm.bufferSize)

	// Create a wait group to wait for all workers
	var wg sync.WaitGroup

	// Start the workers
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()
			fm.worker(ctx, jobs, out, workerID)
		}(i)
	}

	// Close the output channel when all workers are done
	go func() {
		wg.Wait()
		close(out)
	}()

	return out
}
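
// Illustrative usage (not part of the original file): fan a pending-tx
// stream out to four workers and range over the merged results. pending
// and handle are assumed placeholders.
//
//	processed := fm.FanOut(ctx, pending, 4)
//	for tx := range processed {
//		handle(tx)
//	}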

// worker processes jobs from the input channel and sends results to the output channel
func (fm *FanManager) worker(ctx context.Context, jobs <-chan *types.Transaction, out chan<- *types.Transaction, workerID int) {
	for {
		select {
		case job, ok := <-jobs:
			if !ok {
				return // Channel closed
			}

			// Process the job (in this case, just pass it through)
			// In practice, you would do some processing here
			fm.logger.Debug(fmt.Sprintf("Worker %d processing transaction %s", workerID, job.Hash().Hex()))

			// Simulate some work
			time.Sleep(10 * time.Millisecond)

			// Send the result to the output channel
			select {
			case out <- job:
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

// FanIn combines multiple input channels into a single output channel
func (fm *FanManager) FanIn(ctx context.Context, inputs ...<-chan *types.Transaction) <-chan *types.Transaction {
	// Create the output channel
	out := make(chan *types.Transaction, fm.bufferSize)

	// Create a wait group to wait for all input channels
	var wg sync.WaitGroup

	// Start a goroutine for each input channel
	for i, input := range inputs {
		wg.Add(1)
		go func(inputID int, inputChan <-chan *types.Transaction) {
			defer wg.Done()
			fm.fanInWorker(ctx, inputChan, out, inputID)
		}(i, input)
	}

	// Close the output channel when all input channels are done
	go func() {
		wg.Wait()
		close(out)
	}()

	return out
}
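
// Illustrative usage (not part of the original file): merge two worker pools
// back into one stream; ordering across inputs is not preserved. poolA,
// poolB, and submit are assumed placeholders.
//
//	for tx := range fm.FanIn(ctx, poolA, poolB) {
//		submit(tx)
//	}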

// fanInWorker reads from an input channel and writes to the output channel
func (fm *FanManager) fanInWorker(ctx context.Context, input <-chan *types.Transaction, out chan<- *types.Transaction, inputID int) {
	for {
		select {
		case job, ok := <-input:
			if !ok {
				return // Channel closed
			}

			// Send the job to the output channel
			select {
			case out <- job:
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

// Multiplex distributes transactions across multiple endpoints with rate limiting
func (fm *FanManager) Multiplex(ctx context.Context, transactions <-chan *types.Transaction) []<-chan *types.Transaction {
	endpoints := fm.rateLimiter.GetEndpoints()
	outputs := make([]<-chan *types.Transaction, len(endpoints))

	// Create a channel for each endpoint
	for i, endpoint := range endpoints {
		// Create a buffered channel for this endpoint
		endpointChan := make(chan *types.Transaction, fm.bufferSize)
		outputs[i] = endpointChan

		// Start a worker for this endpoint
		go func(endpointURL string, outChan chan<- *types.Transaction) {
			defer close(outChan)
			for {
				select {
				case tx, ok := <-transactions:
					if !ok {
						return // Input channel closed
					}

					// Wait for rate limiter; on error the transaction is dropped
					if err := fm.rateLimiter.WaitForLimit(ctx, endpointURL); err != nil {
						fm.logger.Error(fmt.Sprintf("Rate limiter error for %s: %v", endpointURL, err))
						continue
					}

					// Send to endpoint-specific channel
					select {
					case outChan <- tx:
					case <-ctx.Done():
						return
					}
				case <-ctx.Done():
					return
				}
			}
		}(endpoint, endpointChan)
	}

	return outputs
}
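
// Illustrative wiring (not part of the original file): compose the pieces
// above into a pipeline. Because every endpoint goroutine reads from the
// same input channel, each transaction reaches exactly one endpoint, so
// Multiplex load-balances rather than broadcasts; a caller must drain every
// returned channel. sourceA, sourceB, and drainEndpoint are assumed
// placeholders.
//
//	merged := fm.FanIn(ctx, sourceA, sourceB)
//	for i, ch := range fm.Multiplex(ctx, merged) {
//		go drainEndpoint(i, ch)
//	}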