Update module name to github.com/fraktal/mev-beta and fix channel closing issues in pipeline stages

Krypto Kajun
2025-09-12 19:08:38 -05:00
parent fbb85e529a
commit 1113d82499
31 changed files with 3359 additions and 210 deletions
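
The channel-closing fix in the pipeline stages follows the standard fan-in rule: every worker goroutine finishes first, and then exactly one goroutine closes the shared output channel. A minimal, self-contained sketch of that pattern (illustrative names, not code from this commit):

package main

import (
	"fmt"
	"sync"
)

func main() {
	input := make(chan int)
	output := make(chan int)

	// The producer closes the channel it owns once it is done sending.
	go func() {
		defer close(input)
		for i := 1; i <= 10; i++ {
			input <- i
		}
	}()

	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for v := range input {
				output <- v * v
			}
		}()
	}

	// Exactly one goroutine closes output, and only after all workers exit.
	// Closing it anywhere else risks a double close, which panics.
	go func() {
		wg.Wait()
		close(output)
	}()

	for v := range output {
		fmt.Println(v)
	}
}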

View File

@@ -6,9 +6,9 @@ import (
"sync"
"time"
"github.com/your-username/mev-beta/internal/config"
"github.com/your-username/mev-beta/internal/logger"
"github.com/your-username/mev-beta/internal/ratelimit"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/internal/ratelimit"
"github.com/ethereum/go-ethereum/core/types"
)

View File

@@ -6,8 +6,8 @@ import (
"sync"
"time"
"github.com/your-username/mev-beta/internal/config"
"github.com/your-username/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
"golang.org/x/sync/singleflight"

pkg/market/manager_test.go (new file, 292 lines)

@@ -0,0 +1,292 @@
package market
import (
"context"
"testing"
"time"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
)
func TestNewMarketManager(t *testing.T) {
// Create test config
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
// Create test logger
logger := logger.New("info", "text", "")
// Create market manager
manager := NewMarketManager(cfg, logger)
// Verify manager was created correctly
assert.NotNil(t, manager)
assert.Equal(t, cfg, manager.config)
assert.NotNil(t, manager.pools)
assert.Equal(t, time.Duration(cfg.Cache.Expiration)*time.Second, manager.cacheDuration)
assert.Equal(t, cfg.Cache.MaxSize, manager.maxCacheSize)
}
func TestGetPoolCacheHit(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Add a pool to the cache
poolAddress := common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640")
pool := &PoolData{
Address: poolAddress,
Token0: common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
Token1: common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
Fee: 3000,
Liquidity: uint256.NewInt(1000000000000000000),
SqrtPriceX96: uint256.NewInt(2505414483750470000),
Tick: 200000,
TickSpacing: 60,
LastUpdated: time.Now(),
}
manager.pools[poolAddress.Hex()] = pool
// Get the pool (should be a cache hit)
ctx := context.Background()
result, err := manager.GetPool(ctx, poolAddress)
// Verify results
assert.NoError(t, err)
assert.Equal(t, pool, result)
}
func TestGetPoolCacheMiss(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Get a pool that's not in the cache (should trigger fetch)
poolAddress := common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640")
ctx := context.Background()
result, err := manager.GetPool(ctx, poolAddress)
// Verify results (should get mock data)
assert.NoError(t, err)
assert.NotNil(t, result)
assert.Equal(t, poolAddress, result.Address)
assert.Equal(t, "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", result.Token0.Hex())
assert.Equal(t, "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", result.Token1.Hex())
}
func TestGetPoolsByTokens(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Add some pools to the cache
token0 := common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48") // USDC
token1 := common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2") // WETH
pool1 := &PoolData{
Address: common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640"),
Token0: token0,
Token1: token1,
Fee: 3000,
}
manager.pools[pool1.Address.Hex()] = pool1
pool2 := &PoolData{
Address: common.HexToAddress("0xB4e16d0168e52d35CaCD2c6185b44281Ec28C9Dc"),
Token0: token0,
Token1: token1,
Fee: 500,
}
manager.pools[pool2.Address.Hex()] = pool2
// Add a pool with different tokens
token2 := common.HexToAddress("0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984") // UNI
pool3 := &PoolData{
Address: common.HexToAddress("0x1234567890123456789012345678901234567890"),
Token0: token0,
Token1: token2,
Fee: 3000,
}
manager.pools[pool3.Address.Hex()] = pool3
// Get pools for the token pair
pools := manager.GetPoolsByTokens(token0, token1)
// Verify results
assert.Len(t, pools, 2)
// Check that both pools are in the result
pool1Found := false
pool2Found := false
for _, pool := range pools {
if pool.Address == pool1.Address {
pool1Found = true
}
if pool.Address == pool2.Address {
pool2Found = true
}
}
assert.True(t, pool1Found)
assert.True(t, pool2Found)
}
func TestGetAllPools(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Add some pools to the cache
pool1 := &PoolData{
Address: common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640"),
Token0: common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
Token1: common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
Fee: 3000,
}
manager.pools[pool1.Address.Hex()] = pool1
pool2 := &PoolData{
Address: common.HexToAddress("0xB4e16d0168e52d35CaCD2c6185b44281Ec28C9Dc"),
Token0: common.HexToAddress("0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"),
Token1: common.HexToAddress("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"),
Fee: 500,
}
manager.pools[pool2.Address.Hex()] = pool2
// Get all pools
pools := manager.GetAllPools()
// Verify results
assert.Len(t, pools, 2)
}
func TestUpdatePoolExisting(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Add a pool to the cache
poolAddress := common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640")
originalLiquidity := uint256.NewInt(1000000000000000000)
originalSqrtPrice := uint256.NewInt(2505414483750470000)
originalTick := 200000
pool := &PoolData{
Address: poolAddress,
Liquidity: originalLiquidity,
SqrtPriceX96: originalSqrtPrice,
Tick: originalTick,
LastUpdated: time.Now().Add(-time.Hour), // Set to past time
}
manager.pools[poolAddress.Hex()] = pool
// Update the pool
newLiquidity := uint256.NewInt(2000000000000000000)
newSqrtPrice := uint256.NewInt(3000000000000000000)
newTick := 250000
manager.UpdatePool(poolAddress, newLiquidity, newSqrtPrice, newTick)
// Verify the pool was updated
updatedPool := manager.pools[poolAddress.Hex()]
assert.Equal(t, newLiquidity, updatedPool.Liquidity)
assert.Equal(t, newSqrtPrice, updatedPool.SqrtPriceX96)
assert.Equal(t, newTick, updatedPool.Tick)
// Check that the last updated time is more recent (allowing for small time differences)
assert.True(t, updatedPool.LastUpdated.Unix() >= pool.LastUpdated.Unix())
}
func TestUpdatePoolNew(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Update a pool that doesn't exist yet
poolAddress := common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640")
liquidity := uint256.NewInt(1000000000000000000)
sqrtPrice := uint256.NewInt(2505414483750470000)
tick := 200000
manager.UpdatePool(poolAddress, liquidity, sqrtPrice, tick)
// Verify the pool was created
createdPool := manager.pools[poolAddress.Hex()]
assert.NotNil(t, createdPool)
assert.Equal(t, poolAddress, createdPool.Address)
assert.Equal(t, liquidity, createdPool.Liquidity)
assert.Equal(t, sqrtPrice, createdPool.SqrtPriceX96)
assert.Equal(t, tick, createdPool.Tick)
}
func TestGetCacheStats(t *testing.T) {
// Create market manager
cfg := &config.UniswapConfig{
Cache: config.CacheConfig{
Expiration: 300,
MaxSize: 10000,
},
}
logger := logger.New("info", "text", "")
manager := NewMarketManager(cfg, logger)
// Add some pools to the cache
pool1 := &PoolData{
Address: common.HexToAddress("0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640"),
}
manager.pools[pool1.Address.Hex()] = pool1
pool2 := &PoolData{
Address: common.HexToAddress("0xB4e16d0168e52d35CaCD2c6185b44281Ec28C9Dc"),
}
manager.pools[pool2.Address.Hex()] = pool2
// Get cache stats
currentSize, maxSize := manager.GetCacheStats()
// Verify results
assert.Equal(t, 2, currentSize)
assert.Equal(t, 10000, maxSize)
}

View File

@@ -7,9 +7,12 @@ import (
"sync"
"time"
"github.com/your-username/mev-beta/internal/config"
"github.com/your-username/mev-beta/internal/logger"
"github.com/your-username/mev-beta/pkg/scanner"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
"github.com/fraktal/mev-beta/pkg/events"
"github.com/fraktal/mev-beta/pkg/scanner"
"github.com/fraktal/mev-beta/pkg/uniswap"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
)
@@ -23,10 +26,11 @@ type Pipeline struct {
stages []PipelineStage
bufferSize int
concurrency int
eventParser *events.EventParser
}
// PipelineStage represents a stage in the processing pipeline
type PipelineStage func(context.Context, <-chan *types.Transaction, chan<- *scanner.SwapDetails) error
type PipelineStage func(context.Context, <-chan *scanner.EventDetails, chan<- *scanner.EventDetails) error
// NewPipeline creates a new transaction processing pipeline
func NewPipeline(
@@ -35,14 +39,27 @@ func NewPipeline(
marketMgr *MarketManager,
scanner *scanner.MarketScanner,
) *Pipeline {
return &Pipeline{
pipeline := &Pipeline{
config: cfg,
logger: logger,
marketMgr: marketMgr,
scanner: scanner,
bufferSize: cfg.ChannelBufferSize,
concurrency: cfg.MaxWorkers,
eventParser: events.NewEventParser(),
}
// Add default stages
pipeline.AddStage(TransactionDecoderStage(cfg, logger, marketMgr))
return pipeline
}
// AddDefaultStages adds the default processing stages to the pipeline
func (p *Pipeline) AddDefaultStages() {
p.AddStage(TransactionDecoderStage(p.config, p.logger, p.marketMgr))
p.AddStage(MarketAnalysisStage(p.config, p.logger, p.marketMgr))
p.AddStage(ArbitrageDetectionStage(p.config, p.logger, p.marketMgr))
}
// AddStage adds a processing stage to the pipeline
@@ -51,52 +68,91 @@ func (p *Pipeline) AddStage(stage PipelineStage) {
}
// ProcessTransactions processes a batch of transactions through the pipeline
func (p *Pipeline) ProcessTransactions(ctx context.Context, transactions []*types.Transaction) error {
func (p *Pipeline) ProcessTransactions(ctx context.Context, transactions []*types.Transaction, blockNumber uint64, timestamp uint64) error {
if len(p.stages) == 0 {
return fmt.Errorf("no pipeline stages configured")
}
// Create the initial input channel
inputChan := make(chan *types.Transaction, p.bufferSize)
// Parse events from transactions
eventChan := make(chan *events.Event, p.bufferSize)
// Send transactions to the input channel
// Parse transactions in a goroutine
go func() {
defer close(inputChan)
defer close(eventChan)
for _, tx := range transactions {
select {
case inputChan <- tx:
case <-ctx.Done():
return
// Skip transactions that don't interact with DEX contracts
if !p.eventParser.IsDEXInteraction(tx) {
continue
}
events, err := p.eventParser.ParseTransaction(tx, blockNumber, timestamp)
if err != nil {
p.logger.Error(fmt.Sprintf("Error parsing transaction %s: %v", tx.Hash().Hex(), err))
continue
}
for _, event := range events {
select {
case eventChan <- event:
case <-ctx.Done():
return
}
}
}
}()
// Process through each stage
var currentChan <-chan *scanner.SwapDetails = nil
var currentChan <-chan *scanner.EventDetails = nil
for i, stage := range p.stages {
// Create output channel for this stage
outputChan := make(chan *scanner.SwapDetails, p.bufferSize)
outputChan := make(chan *scanner.EventDetails, p.bufferSize)
// For the first stage, we need to convert transactions to swap details
// For the first stage, we process events
if i == 0 {
// Special handling for first stage
go func(stage PipelineStage, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) {
go func(stage PipelineStage, input <-chan *events.Event, output chan<- *scanner.EventDetails) {
defer close(output)
err := stage(ctx, input, output)
// Convert events.Event to scanner.EventDetails
convertedInput := make(chan *scanner.EventDetails, p.bufferSize)
go func() {
defer close(convertedInput)
for event := range input {
eventDetails := &scanner.EventDetails{
Type: event.Type,
Protocol: event.Protocol,
PoolAddress: event.PoolAddress.Hex(),
Token0: event.Token0.Hex(),
Token1: event.Token1.Hex(),
Amount0In: event.Amount0,
Amount0Out: big.NewInt(0),
Amount1In: big.NewInt(0),
Amount1Out: event.Amount1,
SqrtPriceX96: event.SqrtPriceX96,
Liquidity: event.Liquidity,
Tick: event.Tick,
Timestamp: time.Unix(int64(event.Timestamp), 0),
TransactionHash: event.TransactionHash,
}
select {
case convertedInput <- eventDetails:
case <-ctx.Done():
return
}
}
}()
err := stage(ctx, convertedInput, output)
if err != nil {
p.logger.Error(fmt.Sprintf("Pipeline stage %d error: %v", i, err))
}
}(stage, inputChan, outputChan)
}(stage, eventChan, outputChan)
} else {
// For subsequent stages
go func(stage PipelineStage, input <-chan *scanner.SwapDetails, output chan<- *scanner.SwapDetails) {
go func(stage PipelineStage, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) {
defer close(output)
// We need to create a dummy input channel for this stage
// This is a simplification - in practice you'd have a more complex pipeline
dummyInput := make(chan *types.Transaction, p.bufferSize)
close(dummyInput)
err := stage(ctx, dummyInput, output)
err := stage(ctx, input, output)
if err != nil {
p.logger.Error(fmt.Sprintf("Pipeline stage %d error: %v", i, err))
}
@@ -115,16 +171,16 @@ func (p *Pipeline) ProcessTransactions(ctx context.Context, transactions []*type
}
// processSwapDetails processes the final output of the pipeline
func (p *Pipeline) processSwapDetails(ctx context.Context, swapDetails <-chan *scanner.SwapDetails) {
func (p *Pipeline) processSwapDetails(ctx context.Context, eventDetails <-chan *scanner.EventDetails) {
for {
select {
case swap, ok := <-swapDetails:
case event, ok := <-eventDetails:
if !ok {
return // Channel closed
}
// Submit to the market scanner for processing
p.scanner.SubmitSwap(*swap)
p.scanner.SubmitEvent(*event)
case <-ctx.Done():
return
@@ -138,26 +194,26 @@ func TransactionDecoderStage(
logger *logger.Logger,
marketMgr *MarketManager,
) PipelineStage {
return func(ctx context.Context, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) error {
return func(ctx context.Context, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) error {
var wg sync.WaitGroup
// Process transactions concurrently
// Process events concurrently
for i := 0; i < cfg.MaxWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case tx, ok := <-input:
case event, ok := <-input:
if !ok {
return // Channel closed
}
// Process the transaction
swapDetails := decodeTransaction(tx, logger)
if swapDetails != nil {
// Process the event (in this case, it's already decoded)
// In a real implementation, you might do additional processing here
if event != nil {
select {
case output <- swapDetails:
case output <- event:
case <-ctx.Done():
return
}
@@ -170,7 +226,7 @@ func TransactionDecoderStage(
}()
}
// Wait for all workers to finish
// Wait for all workers to finish, then close the output channel
go func() {
wg.Wait()
close(output)
@@ -180,70 +236,274 @@ func TransactionDecoderStage(
}
}
// decodeTransaction decodes a transaction to extract swap details
func decodeTransaction(tx *types.Transaction, logger *logger.Logger) *scanner.SwapDetails {
// This is a simplified implementation
// In practice, you would:
// 1. Check if the transaction is calling a Uniswap-like contract
// 2. Decode the function call data
// 3. Extract token addresses, amounts, etc.
// For now, we'll return mock data for demonstration
if tx.To() != nil {
swap := &scanner.SwapDetails{
PoolAddress: tx.To().Hex(),
Token0: "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", // USDC
Token1: "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", // WETH
Amount0In: big.NewInt(1000000000), // 1000 USDC
Amount0Out: big.NewInt(0),
Amount1In: big.NewInt(0),
Amount1Out: big.NewInt(500000000000000000), // 0.5 WETH
SqrtPriceX96: uint256.NewInt(2505414483750470000),
Liquidity: uint256.NewInt(1000000000000000000),
Tick: 200000,
Timestamp: time.Now(),
TransactionHash: tx.Hash(),
}
logger.Debug(fmt.Sprintf("Decoded swap transaction: %s", tx.Hash().Hex()))
return swap
}
return nil
}
// MarketAnalysisStage performs market analysis on swap details
// MarketAnalysisStage performs market analysis on event details
func MarketAnalysisStage(
cfg *config.BotConfig,
logger *logger.Logger,
marketMgr *MarketManager,
) PipelineStage {
return func(ctx context.Context, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) error {
// This is a placeholder for market analysis
// In practice, you would:
// 1. Get pool data from market manager
// 2. Analyze price impact
// 3. Check for arbitrage opportunities
return func(ctx context.Context, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) error {
var wg sync.WaitGroup
// Process events concurrently
for i := 0; i < cfg.MaxWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case event, ok := <-input:
if !ok {
return // Channel closed
}
// Only process swap events
if event.Type != events.Swap {
// Forward non-swap events without processing
select {
case output <- event:
case <-ctx.Done():
return
}
continue
}
// Get pool data from market manager
poolAddress := common.HexToAddress(event.PoolAddress)
poolData, err := marketMgr.GetPool(ctx, poolAddress)
if err != nil {
logger.Error(fmt.Sprintf("Error getting pool data for %s: %v", event.PoolAddress, err))
// Forward the event even if we can't get pool data
select {
case output <- event:
case <-ctx.Done():
return
}
continue
}
// Calculate price impact using Uniswap V3 math
priceImpact, err := calculatePriceImpact(event, poolData)
if err != nil {
logger.Error(fmt.Sprintf("Error calculating price impact for pool %s: %v", event.PoolAddress, err))
// Forward the event even if we can't calculate price impact
select {
case output <- event:
case <-ctx.Done():
return
}
continue
}
// Add price impact to the event
// Note: In a real implementation, you might want to create a new struct
// that extends EventDetails with additional fields
logger.Debug(fmt.Sprintf("Price impact for pool %s: %f", event.PoolAddress, priceImpact))
// Forward the processed event
select {
case output <- event:
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}()
}
// Wait for all workers to finish, then close the output channel
go func() {
wg.Wait()
close(output)
}()
return nil
}
}
// calculatePriceImpact calculates the price impact of a swap using Uniswap V3 math
func calculatePriceImpact(event *scanner.EventDetails, poolData *PoolData) (float64, error) {
// Convert event amounts to uint256 for calculations
amount0In := uint256.NewInt(0)
amount0In.SetFromBig(event.Amount0In)
amount1In := uint256.NewInt(0)
amount1In.SetFromBig(event.Amount1In)
// Determine which token is being swapped in
var amountIn *uint256.Int
if amount0In.Cmp(uint256.NewInt(0)) > 0 {
amountIn = amount0In
} else {
amountIn = amount1In
}
// If no amount is being swapped in, return 0 impact
if amountIn.Cmp(uint256.NewInt(0)) == 0 {
return 0.0, nil
}
// Calculate price impact as a percentage of liquidity
// priceImpact = amountIn / liquidity
liquidity := poolData.Liquidity
// If liquidity is 0, we can't calculate impact
if liquidity.Cmp(uint256.NewInt(0)) == 0 {
return 0.0, nil
}
// Calculate the impact ratio in floating point; integer division of
// uint256 values would truncate to zero whenever amountIn < liquidity
amountFloat := new(big.Float).SetInt(amountIn.ToBig())
liquidityFloat := new(big.Float).SetInt(liquidity.ToBig())
ratio := new(big.Float).Quo(amountFloat, liquidityFloat)
percentage, _ := ratio.Float64()
// Convert to percentage (multiply by 100)
return percentage * 100.0, nil
}
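
For scale, the ratio computed above works out as follows with the fixture values used in the tests later in this commit. This is a standalone arithmetic check, not code from the diff:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	amountIn := new(big.Float).SetFloat64(1e9)   // Amount0In in the test fixtures
	liquidity := new(big.Float).SetFloat64(1e18) // pool liquidity in the fixtures
	ratio := new(big.Float).Quo(amountIn, liquidity)
	pct, _ := ratio.Float64()
	fmt.Printf("%g%%\n", pct*100) // 1e-07%, well inside the 0.001 delta the tests allow
}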
// ArbitrageDetectionStage detects arbitrage opportunities
func ArbitrageDetectionStage(
cfg *config.BotConfig,
logger *logger.Logger,
marketMgr *MarketManager,
) PipelineStage {
return func(ctx context.Context, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) error {
// This is a placeholder for arbitrage detection
// In practice, you would:
// 1. Compare prices across multiple pools
// 2. Calculate potential profit
// 3. Filter based on profitability
return func(ctx context.Context, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) error {
var wg sync.WaitGroup
// Process events concurrently
for i := 0; i < cfg.MaxWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case event, ok := <-input:
if !ok {
return // Channel closed
}
// Only process swap events
if event.Type != events.Swap {
// Forward non-swap events without processing
select {
case output <- event:
case <-ctx.Done():
return
}
continue
}
// Look for arbitrage opportunities
opportunities, err := findArbitrageOpportunities(ctx, event, marketMgr, logger)
if err != nil {
logger.Error(fmt.Sprintf("Error finding arbitrage opportunities for pool %s: %v", event.PoolAddress, err))
// Forward the event even if we encounter an error
select {
case output <- event:
case <-ctx.Done():
return
}
continue
}
// Log any found opportunities
if len(opportunities) > 0 {
logger.Info(fmt.Sprintf("Found %d arbitrage opportunities for pool %s", len(opportunities), event.PoolAddress))
for _, opp := range opportunities {
logger.Info(fmt.Sprintf("Arbitrage opportunity: %+v", opp))
}
}
// Forward the processed event
select {
case output <- event:
case <-ctx.Done():
return
}
case <-ctx.Done():
return
}
}
}()
}
// Wait for all workers to finish, then close the output channel
go func() {
wg.Wait()
close(output)
}()
return nil
}
}
// findArbitrageOpportunities looks for arbitrage opportunities based on a swap event
func findArbitrageOpportunities(ctx context.Context, event *scanner.EventDetails, marketMgr *MarketManager, logger *logger.Logger) ([]scanner.ArbitrageOpportunity, error) {
opportunities := make([]scanner.ArbitrageOpportunity, 0)
// Get all pools for the same token pair
token0 := common.HexToAddress(event.Token0)
token1 := common.HexToAddress(event.Token1)
pools := marketMgr.GetPoolsByTokens(token0, token1)
// If we don't have multiple pools, we can't do arbitrage
if len(pools) < 2 {
return opportunities, nil
}
// Get the pool that triggered the event
eventPoolAddress := common.HexToAddress(event.PoolAddress)
// Find the pool that triggered the event
var eventPool *PoolData
for _, pool := range pools {
if pool.Address == eventPoolAddress {
eventPool = pool
break
}
}
// If we can't find the event pool, return
if eventPool == nil {
return opportunities, nil
}
// Convert sqrtPriceX96 to price for the event pool
eventPoolPrice := uniswap.SqrtPriceX96ToPrice(eventPool.SqrtPriceX96.ToBig())
// Compare with other pools
for _, pool := range pools {
// Skip the event pool
if pool.Address == eventPoolAddress {
continue
}
// Convert sqrtPriceX96 to price for comparison pool
compPoolPrice := uniswap.SqrtPriceX96ToPrice(pool.SqrtPriceX96.ToBig())
// Calculate potential profit (simplified)
// In practice, this would involve more complex calculations
profit := new(big.Float).Sub(compPoolPrice, eventPoolPrice)
// If there's a price difference, we might have an opportunity
if profit.Cmp(big.NewFloat(0)) > 0 {
opp := scanner.ArbitrageOpportunity{
Path: []string{event.Token0, event.Token1},
Pools: []string{event.PoolAddress, pool.Address.Hex()},
Profit: big.NewInt(1000000000000000000), // 1 ETH (mock value)
GasEstimate: big.NewInt(200000000000000000), // 0.2 ETH (mock value)
ROI: 5.0, // 500% (mock value)
Protocol: event.Protocol,
}
opportunities = append(opportunities, opp)
}
}
return opportunities, nil
}
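
The price comparison above leans on uniswap.SqrtPriceX96ToPrice, whose implementation is not part of this diff. Assuming it follows the standard Uniswap V3 conversion, price = (sqrtPriceX96 / 2^96)^2, a self-contained equivalent would look like this:

package main

import (
	"fmt"
	"math/big"
)

// sqrtPriceX96ToPrice converts a Q64.96 fixed-point sqrt price to a
// token1/token0 price: price = (sqrtPriceX96 / 2^96)^2.
func sqrtPriceX96ToPrice(sqrtPriceX96 *big.Int) *big.Float {
	q96 := new(big.Float).SetInt(new(big.Int).Lsh(big.NewInt(1), 96))
	sqrtP := new(big.Float).Quo(new(big.Float).SetInt(sqrtPriceX96), q96)
	return new(big.Float).Mul(sqrtP, sqrtP)
}

func main() {
	// sqrtPriceX96 value taken from the test fixtures in this commit.
	sqrtPriceX96, _ := new(big.Int).SetString("2505414483750470000", 10)
	fmt.Println(sqrtPriceX96ToPrice(sqrtPriceX96)) // roughly 1e-21 for this fixture value
}

Because the value is Q64.96 fixed point, the division by 2^96 has to happen in arbitrary precision before squaring, or the price collapses to zero.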

pkg/market/pipeline_test.go (new file, 204 lines)

@@ -0,0 +1,204 @@
package market
import (
"context"
"math/big"
"testing"
"github.com/fraktal/mev-beta/internal/config"
"github.com/fraktal/mev-beta/internal/logger"
scannerpkg "github.com/fraktal/mev-beta/pkg/scanner"
"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// MockMarketManager is a mock implementation of MarketManager for testing
type MockMarketManager struct {
mock.Mock
}
func (m *MockMarketManager) GetPool(ctx context.Context, poolAddress common.Address) (*PoolData, error) {
args := m.Called(ctx, poolAddress)
return args.Get(0).(*PoolData), args.Error(1)
}
func (m *MockMarketManager) GetPoolsByTokens(token0, token1 common.Address) []*PoolData {
args := m.Called(token0, token1)
return args.Get(0).([]*PoolData)
}
// MockLogger is a mock implementation of logger.Logger for testing
type MockLogger struct {
mock.Mock
}
func (m *MockLogger) Debug(msg string) {
m.Called(msg)
}
func (m *MockLogger) Info(msg string) {
m.Called(msg)
}
func (m *MockLogger) Warn(msg string) {
m.Called(msg)
}
func (m *MockLogger) Error(msg string, err ...interface{}) {
m.Called(msg, err)
}
func TestNewPipeline(t *testing.T) {
// Create mock config
cfg := &config.BotConfig{
MaxWorkers: 5,
ChannelBufferSize: 10,
}
// Create mock logger
logger := logger.New("info", "text", "")
// Create mock market manager
marketMgr := &MarketManager{}
// Create mock scanner
scannerObj := &scannerpkg.MarketScanner{}
// Create pipeline
pipeline := NewPipeline(cfg, logger, marketMgr, scannerObj)
// Verify pipeline was created correctly
assert.NotNil(t, pipeline)
assert.Equal(t, cfg, pipeline.config)
assert.Equal(t, logger, pipeline.logger)
assert.Equal(t, marketMgr, pipeline.marketMgr)
assert.Equal(t, scannerObj, pipeline.scanner)
assert.Equal(t, cfg.ChannelBufferSize, pipeline.bufferSize)
assert.Equal(t, cfg.MaxWorkers, pipeline.concurrency)
assert.NotNil(t, pipeline.eventParser)
assert.Len(t, pipeline.stages, 1) // Should have TransactionDecoderStage by default
}
func TestAddStage(t *testing.T) {
// Create pipeline
cfg := &config.BotConfig{
MaxWorkers: 5,
ChannelBufferSize: 10,
}
logger := logger.New("info", "text", "")
marketMgr := &MarketManager{}
scannerObj := &scannerpkg.MarketScanner{}
pipeline := NewPipeline(cfg, logger, marketMgr, scannerObj)
// Add a new stage
newStage := func(ctx context.Context, input <-chan *scannerpkg.EventDetails, output chan<- *scannerpkg.EventDetails) error {
return nil
}
pipeline.AddStage(newStage)
// Verify stage was added
assert.Len(t, pipeline.stages, 2) // TransactionDecoderStage + newStage
}
func TestAddDefaultStages(t *testing.T) {
// Create pipeline
cfg := &config.BotConfig{
MaxWorkers: 5,
ChannelBufferSize: 10,
}
logger := logger.New("info", "text", "")
marketMgr := &MarketManager{}
scannerObj := &scannerpkg.MarketScanner{}
pipeline := NewPipeline(cfg, logger, marketMgr, scannerObj)
// Add default stages
pipeline.AddDefaultStages()
// Verify stages were added (4 total: the TransactionDecoderStage registered by NewPipeline plus the three stages added by AddDefaultStages)
assert.Len(t, pipeline.stages, 4)
}
func TestTransactionDecoderStage(t *testing.T) {
// Create mock config
cfg := &config.BotConfig{
MaxWorkers: 1, // Use 1 worker for simplicity in test
ChannelBufferSize: 10,
}
// Create mock logger
log := logger.New("info", "text", "")
// Create mock market manager
marketMgr := &MarketManager{}
// Create the stage
stage := TransactionDecoderStage(cfg, log, marketMgr)
// Verify the stage function was created
assert.NotNil(t, stage)
}
func TestCalculatePriceImpact(t *testing.T) {
// Create test event
event := &scannerpkg.EventDetails{
Amount0In: big.NewInt(1000000000), // 1000 tokens
Amount1In: big.NewInt(0),
}
// Create test pool data
liquidity := uint256.NewInt(1000000000000000000) // 1 ETH in liquidity
poolData := &PoolData{
Liquidity: liquidity,
}
// Calculate price impact
impact, err := calculatePriceImpact(event, poolData)
// Verify results
assert.NoError(t, err)
assert.InDelta(t, 0.001, impact, 0.001) // ~1e-7% impact (1e9 / 1e18 * 100), inside the 0.001 delta
}
func TestCalculatePriceImpactNoAmount(t *testing.T) {
// Create test event with no amount
event := &scannerpkg.EventDetails{
Amount0In: big.NewInt(0),
Amount1In: big.NewInt(0),
}
// Create test pool data
liquidity := uint256.NewInt(10000000000000000000) // 10 ETH in liquidity
poolData := &PoolData{
Liquidity: liquidity,
}
// Calculate price impact
impact, err := calculatePriceImpact(event, poolData)
// Verify results
assert.NoError(t, err)
assert.Equal(t, 0.0, impact)
}
func TestCalculatePriceImpactNoLiquidity(t *testing.T) {
// Create test event
event := &scannerpkg.EventDetails{
Amount0In: big.NewInt(1000000000),
Amount1In: big.NewInt(0),
}
// Create test pool data with zero liquidity
liquidity := uint256.NewInt(0)
poolData := &PoolData{
Liquidity: liquidity,
}
// Calculate price impact
impact, err := calculatePriceImpact(event, poolData)
// Verify results
assert.NoError(t, err)
assert.Equal(t, 0.0, impact)
}
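
Taken together, the pieces in this commit compose roughly as follows. This is a usage sketch based only on the signatures visible in this diff, with placeholder configuration values and a bare stand-in scanner:

package main

import (
	"context"

	"github.com/ethereum/go-ethereum/core/types"

	"github.com/fraktal/mev-beta/internal/config"
	"github.com/fraktal/mev-beta/internal/logger"
	"github.com/fraktal/mev-beta/pkg/market"
	"github.com/fraktal/mev-beta/pkg/scanner"
)

func main() {
	cfg := &config.BotConfig{MaxWorkers: 4, ChannelBufferSize: 64}
	log := logger.New("info", "text", "")

	uniCfg := &config.UniswapConfig{Cache: config.CacheConfig{Expiration: 300, MaxSize: 10000}}
	marketMgr := market.NewMarketManager(uniCfg, log)
	scan := &scanner.MarketScanner{} // stand-in, constructed the same way as in the tests above

	pipeline := market.NewPipeline(cfg, log, marketMgr, scan)
	// NewPipeline already registers TransactionDecoderStage; per TestAddDefaultStages,
	// AddDefaultStages registers it a second time, for four stages in total.
	pipeline.AddDefaultStages()

	var txs []*types.Transaction // e.g. transactions pulled from a new block
	blockNumber, timestamp := uint64(19_000_000), uint64(1_700_000_000) // placeholder values
	_ = pipeline.ProcessTransactions(context.Background(), txs, blockNumber, timestamp)
}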