feat(testing): add Anvil fork local testing infrastructure

Complete local testing setup using an Anvil fork of Arbitrum mainnet:

Infrastructure:
- Docker Compose orchestration (Anvil, MEV Bot, Prometheus, Grafana)
- Anvil fork configuration with 1-second blocks
- Multi-stage Dockerfile for optimized builds
- Health checks and auto-restart policies

Configuration:
- Comprehensive .env.example with all parameters
- Prometheus metrics collection setup
- Grafana datasource provisioning
- .gitignore to prevent committing secrets

Testing Scripts:
- setup-local-fork.sh: Initialize fork and fund test wallet
- create-test-swap.sh: Generate test swaps for bot detection
- Both scripts include validation and helpful output

Integration Components:
- pkg/sequencer/reader.go: WebSocket reader for pending transactions
  - Worker pool pattern (10 workers)
  - <50ms processing target
  - Front-running capability
  - Auto-reconnection with exponential backoff

- pkg/pools/discovery.go: Pool discovery service
  - UniswapV2-style pools (SushiSwap, Camelot)
  - UniswapV3 pools (multiple fee tiers)
  - Factory contract queries
  - Liquidity filtering

Documentation:
- TESTING.md: Complete testing guide
  - Quick start instructions
  - Testing scenarios
  - Monitoring and debugging
  - Performance benchmarks
  - Troubleshooting guide

This enables safe local testing without deploying to a public testnet,
using real Arbitrum mainnet state forked locally with Anvil.
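
For orientation, a minimal Go sketch of a pre-flight check a test (or the bot's startup path) might run to confirm the local fork is reachable. It assumes Anvil's default endpoint, http://localhost:8545; adjust to whatever port the Docker Compose setup maps:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Default Anvil endpoint; the compose file may expose a different port.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatalf("dial fork: %v", err)
	}
	defer client.Close()

	chainID, err := client.ChainID(ctx)
	if err != nil {
		log.Fatalf("chain id: %v", err)
	}
	head, err := client.BlockNumber(ctx)
	if err != nil {
		log.Fatalf("block number: %v", err)
	}
	// By default an Anvil fork keeps the upstream chain ID (42161 for Arbitrum One).
	fmt.Printf("fork ready: chain=%s head=%d\n", chainID, head)
}
```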

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 65c1005d91 (parent 0a7a07c896)
Administrator, 2025-11-10 18:52:56 +01:00
11 changed files with 1902 additions and 0 deletions

pkg/pools/discovery.go (new file, 397 lines)

@@ -0,0 +1,397 @@
package pools
import (
"context"
"fmt"
"log/slog"
"math/big"
"sync"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/your-org/mev-bot/pkg/cache"
mevtypes "github.com/your-org/mev-bot/pkg/types"
)
// Known factory addresses on Arbitrum
var (
UniswapV2FactoryAddress = common.HexToAddress("0xf1D7CC64Fb4452F05c498126312eBE29f30Fbcf9") // SushiSwap
UniswapV3FactoryAddress = common.HexToAddress("0x1F98431c8aD98523631AE4a59f267346ea31F984") // Uniswap V3
CamelotFactoryAddress = common.HexToAddress("0x6EcCab422D763aC031210895C81787E87B43A652") // Camelot
CurveRegistryAddress = common.HexToAddress("0x445FE580eF8d70FF569aB36e80c647af338db351") // Curve (mainnet, example)
)
// Top traded tokens on Arbitrum
var TopTokens = []common.Address{
common.HexToAddress("0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"), // WETH
common.HexToAddress("0xFF970a61A04b1cA14834A43f5dE4533eBDDB5CC8"), // USDC
common.HexToAddress("0xFd086bC7CD5C481DCC9C85ebE478A1C0b69FCbb9"), // USDT
common.HexToAddress("0x2f2a2543B76A4166549F7aaB2e75Bef0aefC5B0f"), // WBTC
common.HexToAddress("0xDA10009cBd5D07dd0CeCc66161FC93D7c9000da1"), // DAI
common.HexToAddress("0xf97f4df75117a78c1A5a0DBb814Af92458539FB4"), // LINK
common.HexToAddress("0xFA7F8980b0f1E64A2062791cc3b0871572f1F7f0"), // UNI
}
// DiscoveryConfig contains configuration for pool discovery
type DiscoveryConfig struct {
// Connection
RPCURL string
// Discovery parameters
StartBlock uint64
MaxPools int
MinLiquidity *big.Int
BatchSize int
ConcurrentFetches int
// Token pairs to discover
TokenPairs []TokenPair
}
// TokenPair represents a pair of tokens
type TokenPair struct {
Token0 common.Address
Token1 common.Address
}
// DefaultDiscoveryConfig returns default configuration
func DefaultDiscoveryConfig() *DiscoveryConfig {
// Generate pairs from top tokens
pairs := make([]TokenPair, 0)
for i := 0; i < len(TopTokens); i++ {
for j := i + 1; j < len(TopTokens); j++ {
pairs = append(pairs, TokenPair{
Token0: TopTokens[i],
Token1: TopTokens[j],
})
}
}
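// With the 7 TopTokens above this produces n*(n-1)/2 = 21 unordered pairs.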
return &DiscoveryConfig{
RPCURL: "https://arb1.arbitrum.io/rpc",
StartBlock: 0,
MaxPools: 1000,
MinLiquidity: big.NewInt(1e18), // 1 ETH minimum
BatchSize: 100,
ConcurrentFetches: 10,
TokenPairs: pairs,
}
}
// Discovery discovers pools on Arbitrum
type Discovery struct {
config *DiscoveryConfig
client *ethclient.Client
cache *cache.PoolCache
logger *slog.Logger
mu sync.Mutex
poolsDiscovered int
}
// NewDiscovery creates a new pool discovery service
func NewDiscovery(config *DiscoveryConfig, cache *cache.PoolCache, logger *slog.Logger) (*Discovery, error) {
if config == nil {
config = DefaultDiscoveryConfig()
}
client, err := ethclient.Dial(config.RPCURL)
if err != nil {
return nil, fmt.Errorf("failed to connect to RPC: %w", err)
}
return &Discovery{
config: config,
client: client,
cache: cache,
logger: logger.With("component", "pool_discovery"),
}, nil
}
// DiscoverAll discovers all pools from known DEXes
func (d *Discovery) DiscoverAll(ctx context.Context) error {
d.logger.Info("starting pool discovery")
// Discover UniswapV2-style pools (SushiSwap, Camelot, etc.)
if err := d.discoverUniswapV2Pools(ctx); err != nil {
d.logger.Error("uniswap v2 discovery failed", "error", err)
}
// Discover UniswapV3 pools
if err := d.discoverUniswapV3Pools(ctx); err != nil {
d.logger.Error("uniswap v3 discovery failed", "error", err)
}
d.logger.Info("pool discovery complete", "pools_discovered", d.poolsDiscovered, "total_cached", d.cache.Count())
return nil
}
// discoverUniswapV2Pools discovers UniswapV2-style pools
func (d *Discovery) discoverUniswapV2Pools(ctx context.Context) error {
d.logger.Info("discovering UniswapV2-style pools")
factories := []struct {
address common.Address
protocol string
}{
{UniswapV2FactoryAddress, mevtypes.ProtocolUniswapV2},
{CamelotFactoryAddress, mevtypes.ProtocolCamelot},
}
for _, factory := range factories {
d.logger.Info("querying factory", "protocol", factory.protocol, "address", factory.address.Hex())
// Query each token pair
for _, pair := range d.config.TokenPairs {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
poolAddr, err := d.getUniswapV2Pool(ctx, factory.address, pair.Token0, pair.Token1)
if err != nil {
continue
}
if poolAddr == (common.Address{}) {
continue // Pool doesn't exist
}
// Fetch pool info
poolInfo, err := d.fetchUniswapV2PoolInfo(ctx, poolAddr, pair.Token0, pair.Token1, factory.protocol)
if err != nil {
d.logger.Debug("failed to fetch pool info", "pool", poolAddr.Hex(), "error", err)
continue
}
// Check minimum liquidity
if poolInfo.LiquidityUSD.Cmp(d.config.MinLiquidity) < 0 {
continue
}
// Add to cache
if err := d.cache.Add(poolInfo); err != nil {
d.logger.Warn("failed to add pool to cache", "pool", poolAddr.Hex(), "error", err)
continue
}
d.mu.Lock()
d.poolsDiscovered++
d.mu.Unlock()
d.logger.Debug("discovered pool",
"protocol", factory.protocol,
"pool", poolAddr.Hex(),
"token0", pair.Token0.Hex(),
"token1", pair.Token1.Hex(),
"liquidity", poolInfo.LiquidityUSD.String(),
)
}
}
return nil
}
// getUniswapV2Pool gets a UniswapV2 pool address for a token pair
func (d *Discovery) getUniswapV2Pool(ctx context.Context, factory common.Address, token0, token1 common.Address) (common.Address, error) {
// getPair(address,address) returns (address)
// This is a simplified version - in production, use generated bindings
calldata := append([]byte{0xe6, 0xa4, 0x39, 0x05}, // getPair selector
append(padLeft(token0.Bytes(), 32), padLeft(token1.Bytes(), 32)...)...)
result, err := d.client.CallContract(ctx, ethereum.CallMsg{
To: &factory,
Data: calldata,
}, nil)
if err != nil {
return common.Address{}, err
}
if len(result) == 0 {
return common.Address{}, nil
}
return common.BytesToAddress(result[12:]), nil
}
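// As an aside, the hard-coded 4-byte selectors in this file could be derived at
// start-up instead of spelled out. A small sketch, assuming go-ethereum's crypto
// package were imported:
//
//	func selector(sig string) []byte {
//	    return crypto.Keccak256([]byte(sig))[:4] // "getPair(address,address)" -> 0xe6a43905
//	}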
// fetchUniswapV2PoolInfo fetches pool information
func (d *Discovery) fetchUniswapV2PoolInfo(ctx context.Context, poolAddr, token0, token1 common.Address, protocol string) (*mevtypes.PoolInfo, error) {
// getReserves() returns (uint112,uint112,uint32)
// Simplified - in production use generated bindings
calldata := []byte{0x09, 0x02, 0xf1, 0xac} // getReserves selector
result, err := d.client.CallContract(ctx, ethereum.CallMsg{
To: &poolAddr,
Data: calldata,
}, nil)
if err != nil {
return nil, err
}
if len(result) < 64 {
return nil, fmt.Errorf("invalid reserves response")
}
reserve0 := new(big.Int).SetBytes(result[0:32])
reserve1 := new(big.Int).SetBytes(result[32:64])
// Rough liquidity placeholder: this sums raw reserves of two different tokens
// (with different decimals), so it is not a real USD figure. In production,
// price each reserve via an oracle before comparing against MinLiquidity.
liquidityUSD := new(big.Int).Add(reserve0, reserve1)
return &mevtypes.PoolInfo{
Address: poolAddr,
Protocol: protocol,
Token0: token0,
Token1: token1,
Reserve0: reserve0,
Reserve1: reserve1,
Fee: 3000, // 0.3%, in hundredths of a bip to match the V3 fee-tier convention
LiquidityUSD: liquidityUSD,
}, nil
}
// discoverUniswapV3Pools discovers UniswapV3 pools
func (d *Discovery) discoverUniswapV3Pools(ctx context.Context) error {
d.logger.Info("discovering UniswapV3 pools")
// UniswapV3 has multiple fee tiers
feeTiers := []uint32{100, 500, 3000, 10000}
for _, pair := range d.config.TokenPairs {
for _, fee := range feeTiers {
select {
case <-ctx.Done():
return ctx.Err()
default:
}
poolAddr, err := d.getUniswapV3Pool(ctx, pair.Token0, pair.Token1, fee)
if err != nil {
continue
}
if poolAddr == (common.Address{}) {
continue // Pool doesn't exist
}
// Fetch pool info
poolInfo, err := d.fetchUniswapV3PoolInfo(ctx, poolAddr, pair.Token0, pair.Token1, fee)
if err != nil {
d.logger.Debug("failed to fetch pool info", "pool", poolAddr.Hex(), "error", err)
continue
}
// Check minimum liquidity
if poolInfo.LiquidityUSD.Cmp(d.config.MinLiquidity) < 0 {
continue
}
// Add to cache
if err := d.cache.Add(poolInfo); err != nil {
d.logger.Warn("failed to add pool to cache", "pool", poolAddr.Hex(), "error", err)
continue
}
d.mu.Lock()
d.poolsDiscovered++
d.mu.Unlock()
d.logger.Debug("discovered pool",
"protocol", mevtypes.ProtocolUniswapV3,
"pool", poolAddr.Hex(),
"token0", pair.Token0.Hex(),
"token1", pair.Token1.Hex(),
"fee", fee,
"liquidity", poolInfo.LiquidityUSD.String(),
)
// Check if we've reached max pools (read the counter under the lock)
d.mu.Lock()
reached := d.poolsDiscovered >= d.config.MaxPools
d.mu.Unlock()
if reached {
return nil
}
}
}
return nil
}
// getUniswapV3Pool gets a UniswapV3 pool address
func (d *Discovery) getUniswapV3Pool(ctx context.Context, token0, token1 common.Address, fee uint32) (common.Address, error) {
// getPool(address,address,uint24) returns (address)
// Simplified - in production use generated bindings
feeBytes := padLeft(big.NewInt(int64(fee)).Bytes(), 32)
calldata := append([]byte{0x16, 0x98, 0xee, 0x82}, // getPool(address,address,uint24) selector
append(append(padLeft(token0.Bytes(), 32), padLeft(token1.Bytes(), 32)...), feeBytes...)...)
result, err := d.client.CallContract(ctx, ethereum.CallMsg{
To: &UniswapV3FactoryAddress,
Data: calldata,
}, nil)
if err != nil {
return common.Address{}, err
}
if len(result) == 0 {
return common.Address{}, nil
}
return common.BytesToAddress(result[12:]), nil
}
// fetchUniswapV3PoolInfo fetches UniswapV3 pool information
func (d *Discovery) fetchUniswapV3PoolInfo(ctx context.Context, poolAddr, token0, token1 common.Address, fee uint32) (*mevtypes.PoolInfo, error) {
// liquidity() returns (uint128)
// Simplified - in production use generated bindings
calldata := []byte{0x1a, 0x68, 0x65, 0x02} // liquidity selector
result, err := d.client.CallContract(ctx, ethereum.CallMsg{
To: &poolAddr,
Data: calldata,
}, nil)
if err != nil {
return nil, err
}
if len(result) < 16 {
return nil, fmt.Errorf("invalid liquidity response")
}
liquidity := new(big.Int).SetBytes(result[16:32])
return &mevtypes.PoolInfo{
Address: poolAddr,
Protocol: mevtypes.ProtocolUniswapV3,
Token0: token0,
Token1: token1,
Reserve0: liquidity, // Simplified
Reserve1: liquidity,
Fee: fee,
LiquidityUSD: liquidity,
}, nil
}
// padLeft pads bytes to the left with zeros
func padLeft(data []byte, length int) []byte {
if len(data) >= length {
return data
}
padded := make([]byte, length)
copy(padded[length-len(data):], data)
return padded
}
// GetStats returns discovery statistics
func (d *Discovery) GetStats() map[string]interface{} {
d.mu.Lock()
defer d.mu.Unlock()
return map[string]interface{}{
"pools_discovered": d.poolsDiscovered,
"pools_cached": d.cache.Count(),
}
}
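
For orientation, a minimal usage sketch of the discovery service pointed at the local fork. The pool-cache constructor (`cache.NewPoolCache()`) is an assumption; it is not part of this diff, so substitute whatever the cache package actually exposes:

```go
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/your-org/mev-bot/pkg/cache"
	"github.com/your-org/mev-bot/pkg/pools"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stdout, nil))

	cfg := pools.DefaultDiscoveryConfig()
	cfg.RPCURL = "http://localhost:8545" // Anvil fork instead of public mainnet RPC
	cfg.MaxPools = 200

	poolCache := cache.NewPoolCache() // assumed constructor, not shown in this commit
	disc, err := pools.NewDiscovery(cfg, poolCache, logger)
	if err != nil {
		logger.Error("init discovery", "error", err)
		os.Exit(1)
	}

	if err := disc.DiscoverAll(context.Background()); err != nil {
		logger.Error("discovery failed", "error", err)
	}
	logger.Info("discovery stats", "stats", disc.GetStats())
}
```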

pkg/sequencer/reader.go (new file, 461 lines)

@@ -0,0 +1,461 @@
package sequencer
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/gorilla/websocket"
"github.com/your-org/mev-bot/pkg/arbitrage"
"github.com/your-org/mev-bot/pkg/cache"
"github.com/your-org/mev-bot/pkg/execution"
"github.com/your-org/mev-bot/pkg/parsers"
"github.com/your-org/mev-bot/pkg/validation"
)
// ReaderConfig contains configuration for the sequencer reader
type ReaderConfig struct {
// WebSocket connection
WSURL string
ReconnectDelay time.Duration
MaxReconnectDelay time.Duration
PingInterval time.Duration
// RPC for fetching full transactions
RPCURL string
// Processing
WorkerCount int
BufferSize int
// Filtering
MinProfit *big.Int
EnableFrontRunning bool
// Performance
MaxProcessingTime time.Duration
}
// DefaultReaderConfig returns default configuration
func DefaultReaderConfig() *ReaderConfig {
return &ReaderConfig{
WSURL: "wss://arb1.arbitrum.io/ws",
ReconnectDelay: 1 * time.Second,
MaxReconnectDelay: 60 * time.Second,
PingInterval: 30 * time.Second,
RPCURL: "https://arb1.arbitrum.io/rpc",
WorkerCount: 10,
BufferSize: 1000,
MinProfit: big.NewInt(1e16), // 0.01 ETH (1e16 wei)
EnableFrontRunning: true,
MaxProcessingTime: 50 * time.Millisecond,
}
}
// Reader reads pending transactions from the Arbitrum sequencer
type Reader struct {
config *ReaderConfig
logger *slog.Logger
// Components
parsers *parsers.Factory
validator *validation.Validator
poolCache *cache.PoolCache
detector *arbitrage.Detector
executor *execution.Executor
// Connections
wsConn *websocket.Conn
rpcClient *ethclient.Client
// Channels
txHashes chan string
stopCh chan struct{}
stopOnce sync.Once
wg sync.WaitGroup
// State
mu sync.RWMutex
connected bool
lastProcessed time.Time
processedCount uint64
opportunityCount uint64
executionCount uint64
// Metrics (placeholders; updated from multiple goroutines without synchronization,
// so swap in atomics or Prometheus counters for real use)
txReceived uint64
txProcessed uint64
parseErrors uint64
validationErrors uint64
opportunitiesFound uint64
executionsAttempted uint64
avgParseLatency time.Duration
avgDetectLatency time.Duration
avgExecuteLatency time.Duration
}
// NewReader creates a new sequencer reader
func NewReader(
config *ReaderConfig,
parsers *parsers.Factory,
validator *validation.Validator,
poolCache *cache.PoolCache,
detector *arbitrage.Detector,
executor *execution.Executor,
logger *slog.Logger,
) (*Reader, error) {
if config == nil {
config = DefaultReaderConfig()
}
// Connect to RPC for fetching full transactions
rpcClient, err := ethclient.Dial(config.RPCURL)
if err != nil {
return nil, fmt.Errorf("failed to connect to RPC: %w", err)
}
return &Reader{
config: config,
logger: logger.With("component", "sequencer_reader"),
parsers: parsers,
validator: validator,
poolCache: poolCache,
detector: detector,
executor: executor,
rpcClient: rpcClient,
txHashes: make(chan string, config.BufferSize),
stopCh: make(chan struct{}),
}, nil
}
// Start starts the sequencer reader
func (r *Reader) Start(ctx context.Context) error {
r.logger.Info("starting sequencer reader")
// Start workers
for i := 0; i < r.config.WorkerCount; i++ {
r.wg.Add(1)
go r.worker(ctx, i)
}
// Start connection manager
r.wg.Add(1)
go r.maintainConnection(ctx)
// Wait for context cancellation
<-ctx.Done()
r.logger.Info("stopping sequencer reader")
r.stopOnce.Do(func() { close(r.stopCh) })
r.wg.Wait()
return ctx.Err()
}
// maintainConnection maintains the WebSocket connection with automatic reconnection
func (r *Reader) maintainConnection(ctx context.Context) {
defer r.wg.Done()
reconnectDelay := r.config.ReconnectDelay
for {
select {
case <-ctx.Done():
return
default:
}
// Connect to sequencer
conn, err := r.connect(ctx)
if err != nil {
r.logger.Error("connection failed", "error", err, "retry_in", reconnectDelay)
time.Sleep(reconnectDelay)
// Exponential backoff
reconnectDelay *= 2
if reconnectDelay > r.config.MaxReconnectDelay {
reconnectDelay = r.config.MaxReconnectDelay
}
continue
}
// Reset backoff on successful connection
reconnectDelay = r.config.ReconnectDelay
r.mu.Lock()
r.wsConn = conn
r.connected = true
r.mu.Unlock()
r.logger.Info("connected to sequencer")
// Subscribe to pending transactions
if err := r.subscribe(ctx, conn); err != nil {
r.logger.Error("subscription failed", "error", err)
conn.Close()
continue
}
// Read messages until connection fails
if err := r.readMessages(ctx, conn); err != nil {
r.logger.Error("connection lost", "error", err)
}
r.mu.Lock()
r.connected = false
r.mu.Unlock()
conn.Close()
}
}
// connect establishes a WebSocket connection
func (r *Reader) connect(ctx context.Context) (*websocket.Conn, error) {
dialer := *websocket.DefaultDialer // copy so the shared default dialer is not mutated
dialer.HandshakeTimeout = 10 * time.Second
conn, _, err := dialer.DialContext(ctx, r.config.WSURL, nil)
if err != nil {
return nil, fmt.Errorf("dial failed: %w", err)
}
// Set read/write deadlines
conn.SetReadDeadline(time.Now().Add(r.config.PingInterval * 2))
conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
return conn, nil
}
// subscribe subscribes to pending transactions
func (r *Reader) subscribe(ctx context.Context, conn *websocket.Conn) error {
// Subscribe to newPendingTransactions
sub := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"method": "eth_subscribe",
"params": []interface{}{"newPendingTransactions"},
}
if err := conn.WriteJSON(sub); err != nil {
return fmt.Errorf("subscription write failed: %w", err)
}
// Read subscription response
var resp map[string]interface{}
if err := conn.ReadJSON(&resp); err != nil {
return fmt.Errorf("subscription response failed: %w", err)
}
r.logger.Info("subscribed to pending transactions", "response", resp)
return nil
}
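// After a successful eth_subscribe, each pending-transaction notification arrives
// as a JSON-RPC message shaped roughly like this (hashes abbreviated):
//
//	{"jsonrpc":"2.0","method":"eth_subscription",
//	 "params":{"subscription":"0x9ce5...","result":"0xabc1..."}}
//
// readMessages below pulls params.result (the transaction hash) out of each
// notification and hands it to the worker pool.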
// readMessages reads messages from the WebSocket connection
func (r *Reader) readMessages(ctx context.Context, conn *websocket.Conn) error {
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-r.stopCh:
return nil
default:
}
// Set read deadline
conn.SetReadDeadline(time.Now().Add(r.config.PingInterval * 2))
var msg map[string]interface{}
if err := conn.ReadJSON(&msg); err != nil {
return fmt.Errorf("read failed: %w", err)
}
// Extract transaction hash from notification
if params, ok := msg["params"].(map[string]interface{}); ok {
if result, ok := params["result"].(string); ok {
// Send to worker pool
select {
case r.txHashes <- result:
r.txReceived++
case <-ctx.Done():
return ctx.Err()
default:
r.logger.Warn("tx buffer full, dropping tx")
}
}
}
}
}
// worker processes transaction hashes
func (r *Reader) worker(ctx context.Context, id int) {
defer r.wg.Done()
logger := r.logger.With("worker", id)
for {
select {
case <-ctx.Done():
return
case <-r.stopCh:
return
case txHash := <-r.txHashes:
if err := r.processTxHash(ctx, txHash); err != nil {
logger.Debug("processing error", "tx", txHash, "error", err)
}
}
}
}
// processTxHash processes a transaction hash
func (r *Reader) processTxHash(ctx context.Context, txHash string) error {
startTime := time.Now()
// Enforce max processing time
procCtx, cancel := context.WithTimeout(ctx, r.config.MaxProcessingTime)
defer cancel()
// Fetch full transaction
tx, isPending, err := r.rpcClient.TransactionByHash(procCtx, common.HexToHash(txHash))
if err != nil {
return fmt.Errorf("fetch tx failed: %w", err)
}
if !isPending {
return nil // Skip already mined transactions
}
parseStart := time.Now()
// Parse transaction events
events, err := r.parsers.ParseTransaction(tx)
if err != nil {
r.parseErrors++
return fmt.Errorf("parse failed: %w", err)
}
if len(events) == 0 {
return nil // No swap events
}
r.avgParseLatency = time.Since(parseStart)
// Validate events
validEvents := r.validator.FilterValid(events)
if len(validEvents) == 0 {
r.validationErrors++
return nil
}
detectStart := time.Now()
// Detect arbitrage opportunities for each swap
for _, event := range validEvents {
// Get input token from the swap
inputToken := event.GetInputToken()
// Detect opportunities starting with this token
opportunities, err := r.detector.DetectOpportunities(procCtx, inputToken)
if err != nil {
continue
}
r.avgDetectLatency = time.Since(detectStart)
// Execute profitable opportunities
for _, opp := range opportunities {
if opp.NetProfit.Cmp(r.config.MinProfit) > 0 {
r.opportunitiesFound++
r.opportunityCount++
if r.config.EnableFrontRunning {
go func(opp *arbitrage.Opportunity) {
execStart := time.Now()
r.executeFrontRun(ctx, opp, tx)
r.avgExecuteLatency = time.Since(execStart) // placeholder metric, not synchronized
}(opp)
}
}
}
}
r.txProcessed++
r.processedCount++
r.lastProcessed = time.Now()
totalLatency := time.Since(startTime)
if totalLatency > r.config.MaxProcessingTime {
r.logger.Warn("processing too slow", "latency", totalLatency, "target", r.config.MaxProcessingTime)
}
return nil
}
// executeFrontRun executes a front-running transaction
func (r *Reader) executeFrontRun(ctx context.Context, opp *arbitrage.Opportunity, targetTx *types.Transaction) {
r.executionsAttempted++
r.executionCount++
r.logger.Info("front-running opportunity",
"opportunity_id", opp.ID,
"type", opp.Type,
"profit", opp.NetProfit.String(),
"roi", fmt.Sprintf("%.2f%%", opp.ROI*100),
"target_tx", targetTx.Hash().Hex(),
)
// Execute the arbitrage
result, err := r.executor.Execute(ctx, opp)
if err != nil {
r.logger.Error("execution failed",
"opportunity_id", opp.ID,
"error", err,
)
return
}
if result.Success {
r.logger.Info("execution succeeded",
"opportunity_id", opp.ID,
"tx_hash", result.TxHash.Hex(),
"actual_profit", result.ActualProfit.String(),
"gas_cost", result.GasCost.String(),
"duration", result.Duration,
)
} else {
r.logger.Warn("execution failed",
"opportunity_id", opp.ID,
"tx_hash", result.TxHash.Hex(),
"error", result.Error,
)
}
}
// GetStats returns current statistics
func (r *Reader) GetStats() map[string]interface{} {
r.mu.RLock()
defer r.mu.RUnlock()
return map[string]interface{}{
"connected": r.connected,
"tx_received": r.txReceived,
"tx_processed": r.txProcessed,
"parse_errors": r.parseErrors,
"validation_errors": r.validationErrors,
"opportunities_found": r.opportunitiesFound,
"executions_attempted": r.executionsAttempted,
"avg_parse_latency": r.avgParseLatency.String(),
"avg_detect_latency": r.avgDetectLatency.String(),
"avg_execute_latency": r.avgExecuteLatency.String(),
"last_processed": r.lastProcessed.Format(time.RFC3339),
}
}
// Stop stops the sequencer reader
func (r *Reader) Stop() {
close(r.stopCh)
}
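
Finally, a small sketch of how a local-fork test harness might poll the reader while create-test-swap.sh generates traffic. It only relies on the GetStats method defined above and assumes a running *sequencer.Reader:

```go
package harness

import (
	"context"
	"log/slog"
	"time"

	"github.com/your-org/mev-bot/pkg/sequencer"
)

// monitorReader logs reader statistics once per second; handy while the Anvil
// fork mines 1-second blocks and test swaps are being injected.
func monitorReader(ctx context.Context, r *sequencer.Reader, logger *slog.Logger) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			stats := r.GetStats()
			logger.Info("sequencer reader stats",
				"connected", stats["connected"],
				"tx_processed", stats["tx_processed"],
				"opportunities_found", stats["opportunities_found"],
				"avg_parse_latency", stats["avg_parse_latency"],
			)
		}
	}
}
```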