Update module name to github.com/fraktal/mev-beta and fix channel closing issues in pipeline stages
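Every stage in the diff below applies the same channel-closing fix: a stage's output channel is closed exactly once, by a dedicated goroutine that waits on the worker WaitGroup, so no worker can send on a closed channel and downstream stages still see end-of-stream. A minimal standalone sketch of that pattern (generic names and element type, not code from this repository; it only needs the context and sync packages):

// runStage is a sketch of the close-after-Wait pattern used by the pipeline stages below.
func runStage(ctx context.Context, in <-chan int, out chan<- int, workers int) {
    var wg sync.WaitGroup
    for i := 0; i < workers; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for v := range in {
                select {
                case out <- v: // forward the processed value
                case <-ctx.Done():
                    return
                }
            }
        }()
    }
    // Close the output exactly once, and only after every worker has stopped sending.
    go func() {
        wg.Wait()
        close(out)
    }()
}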
@@ -7,9 +7,12 @@ import (
     "sync"
     "time"

-    "github.com/your-username/mev-beta/internal/config"
-    "github.com/your-username/mev-beta/internal/logger"
-    "github.com/your-username/mev-beta/pkg/scanner"
+    "github.com/fraktal/mev-beta/internal/config"
+    "github.com/fraktal/mev-beta/internal/logger"
+    "github.com/fraktal/mev-beta/pkg/events"
+    "github.com/fraktal/mev-beta/pkg/scanner"
+    "github.com/fraktal/mev-beta/pkg/uniswap"
+    "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/core/types"
     "github.com/holiman/uint256"
 )
@@ -23,10 +26,11 @@ type Pipeline struct {
     stages      []PipelineStage
     bufferSize  int
     concurrency int
+    eventParser *events.EventParser
 }

 // PipelineStage represents a stage in the processing pipeline
-type PipelineStage func(context.Context, <-chan *types.Transaction, chan<- *scanner.SwapDetails) error
+type PipelineStage func(context.Context, <-chan *scanner.EventDetails, chan<- *scanner.EventDetails) error

 // NewPipeline creates a new transaction processing pipeline
 func NewPipeline(
@@ -35,14 +39,27 @@ func NewPipeline(
     marketMgr *MarketManager,
     scanner *scanner.MarketScanner,
 ) *Pipeline {
-    return &Pipeline{
+    pipeline := &Pipeline{
         config:      cfg,
         logger:      logger,
         marketMgr:   marketMgr,
         scanner:     scanner,
         bufferSize:  cfg.ChannelBufferSize,
         concurrency: cfg.MaxWorkers,
+        eventParser: events.NewEventParser(),
     }
+
+    // Add default stages
+    pipeline.AddStage(TransactionDecoderStage(cfg, logger, marketMgr))
+
+    return pipeline
 }

+// AddDefaultStages adds the default processing stages to the pipeline
+func (p *Pipeline) AddDefaultStages() {
+    p.AddStage(TransactionDecoderStage(p.config, p.logger, p.marketMgr))
+    p.AddStage(MarketAnalysisStage(p.config, p.logger, p.marketMgr))
+    p.AddStage(ArbitrageDetectionStage(p.config, p.logger, p.marketMgr))
+}
+
 // AddStage adds a processing stage to the pipeline
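For orientation, a usage sketch of the constructor and default stages above. This is a hypothetical caller living in the same package as the pipeline (so it reuses that file's imports); NewPipeline's leading parameters are not shown in this diff and are assumed to be cfg *config.BotConfig and logger *logger.Logger, matching the struct literal.

// processBlock is a hypothetical wrapper; the NewPipeline argument order before
// marketMgr/scanner is an assumption based on the struct literal above.
func processBlock(ctx context.Context, cfg *config.BotConfig, log *logger.Logger,
    marketMgr *MarketManager, mktScanner *scanner.MarketScanner,
    txs []*types.Transaction, blockNumber, blockTime uint64) error {

    p := NewPipeline(cfg, log, marketMgr, mktScanner) // already registers the decoder stage

    // Registers decode -> market analysis -> arbitrage detection. Note that, as
    // written in this commit, this adds TransactionDecoderStage a second time on
    // top of the one added by NewPipeline.
    p.AddDefaultStages()

    return p.ProcessTransactions(ctx, txs, blockNumber, blockTime)
}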
@@ -51,52 +68,91 @@ func (p *Pipeline) AddStage(stage PipelineStage) {
 }

 // ProcessTransactions processes a batch of transactions through the pipeline
-func (p *Pipeline) ProcessTransactions(ctx context.Context, transactions []*types.Transaction) error {
+func (p *Pipeline) ProcessTransactions(ctx context.Context, transactions []*types.Transaction, blockNumber uint64, timestamp uint64) error {
     if len(p.stages) == 0 {
         return fmt.Errorf("no pipeline stages configured")
     }

-    // Create the initial input channel
-    inputChan := make(chan *types.Transaction, p.bufferSize)
+    // Parse events from transactions
+    eventChan := make(chan *events.Event, p.bufferSize)

-    // Send transactions to the input channel
+    // Parse transactions in a goroutine
     go func() {
-        defer close(inputChan)
+        defer close(eventChan)
         for _, tx := range transactions {
-            select {
-            case inputChan <- tx:
-            case <-ctx.Done():
-                return
+            // Skip transactions that don't interact with DEX contracts
+            if !p.eventParser.IsDEXInteraction(tx) {
+                continue
             }
+
+            events, err := p.eventParser.ParseTransaction(tx, blockNumber, timestamp)
+            if err != nil {
+                p.logger.Error(fmt.Sprintf("Error parsing transaction %s: %v", tx.Hash().Hex(), err))
+                continue
+            }
+
+            for _, event := range events {
+                select {
+                case eventChan <- event:
+                case <-ctx.Done():
+                    return
+                }
+            }
         }
     }()

     // Process through each stage
-    var currentChan <-chan *scanner.SwapDetails = nil
+    var currentChan <-chan *scanner.EventDetails = nil

     for i, stage := range p.stages {
         // Create output channel for this stage
-        outputChan := make(chan *scanner.SwapDetails, p.bufferSize)
+        outputChan := make(chan *scanner.EventDetails, p.bufferSize)

-        // For the first stage, we need to convert transactions to swap details
+        // For the first stage, we process events
         if i == 0 {
             // Special handling for first stage
-            go func(stage PipelineStage, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) {
+            go func(stage PipelineStage, input <-chan *events.Event, output chan<- *scanner.EventDetails) {
                 defer close(output)
-                err := stage(ctx, input, output)
+
+                // Convert events.Event to scanner.EventDetails
+                convertedInput := make(chan *scanner.EventDetails, p.bufferSize)
+                go func() {
+                    defer close(convertedInput)
+                    for event := range input {
+                        eventDetails := &scanner.EventDetails{
+                            Type:            event.Type,
+                            Protocol:        event.Protocol,
+                            PoolAddress:     event.PoolAddress.Hex(),
+                            Token0:          event.Token0.Hex(),
+                            Token1:          event.Token1.Hex(),
+                            Amount0In:       event.Amount0,
+                            Amount0Out:      big.NewInt(0),
+                            Amount1In:       big.NewInt(0),
+                            Amount1Out:      event.Amount1,
+                            SqrtPriceX96:    event.SqrtPriceX96,
+                            Liquidity:       event.Liquidity,
+                            Tick:            event.Tick,
+                            Timestamp:       time.Unix(int64(event.Timestamp), 0),
+                            TransactionHash: event.TransactionHash,
+                        }
+                        select {
+                        case convertedInput <- eventDetails:
+                        case <-ctx.Done():
+                            return
+                        }
+                    }
+                }()
+
+                err := stage(ctx, convertedInput, output)
                 if err != nil {
                     p.logger.Error(fmt.Sprintf("Pipeline stage %d error: %v", i, err))
                 }
-            }(stage, inputChan, outputChan)
+            }(stage, eventChan, outputChan)
         } else {
             // For subsequent stages
-            go func(stage PipelineStage, input <-chan *scanner.SwapDetails, output chan<- *scanner.SwapDetails) {
+            go func(stage PipelineStage, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) {
                 defer close(output)
-                // We need to create a dummy input channel for this stage
-                // This is a simplification - in practice you'd have a more complex pipeline
-                dummyInput := make(chan *types.Transaction, p.bufferSize)
-                close(dummyInput)
-                err := stage(ctx, dummyInput, output)
+                err := stage(ctx, input, output)
                 if err != nil {
                     p.logger.Error(fmt.Sprintf("Pipeline stage %d error: %v", i, err))
                 }
@@ -115,16 +171,16 @@ func (p *Pipeline) ProcessTransactions(ctx context.Context, transactions []*type
 }

 // processSwapDetails processes the final output of the pipeline
-func (p *Pipeline) processSwapDetails(ctx context.Context, swapDetails <-chan *scanner.SwapDetails) {
+func (p *Pipeline) processSwapDetails(ctx context.Context, eventDetails <-chan *scanner.EventDetails) {
     for {
         select {
-        case swap, ok := <-swapDetails:
+        case event, ok := <-eventDetails:
             if !ok {
                 return // Channel closed
             }

             // Submit to the market scanner for processing
-            p.scanner.SubmitSwap(*swap)
+            p.scanner.SubmitEvent(*event)

         case <-ctx.Done():
             return
@@ -138,26 +194,26 @@ func TransactionDecoderStage(
     logger *logger.Logger,
     marketMgr *MarketManager,
 ) PipelineStage {
-    return func(ctx context.Context, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) error {
+    return func(ctx context.Context, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) error {
         var wg sync.WaitGroup

-        // Process transactions concurrently
+        // Process events concurrently
         for i := 0; i < cfg.MaxWorkers; i++ {
             wg.Add(1)
             go func() {
                 defer wg.Done()
                 for {
                     select {
-                    case tx, ok := <-input:
+                    case event, ok := <-input:
                         if !ok {
                             return // Channel closed
                         }

-                        // Process the transaction
-                        swapDetails := decodeTransaction(tx, logger)
-                        if swapDetails != nil {
+                        // Process the event (in this case, it's already decoded)
+                        // In a real implementation, you might do additional processing here
+                        if event != nil {
                             select {
-                            case output <- swapDetails:
+                            case output <- event:
                             case <-ctx.Done():
                                 return
                             }
@@ -170,7 +226,7 @@ func TransactionDecoderStage(
             }()
         }

-        // Wait for all workers to finish
+        // Wait for all workers to finish, then close the output channel
         go func() {
             wg.Wait()
             close(output)
@@ -180,70 +236,274 @@ func TransactionDecoderStage(
     }
 }

 // decodeTransaction decodes a transaction to extract swap details
 func decodeTransaction(tx *types.Transaction, logger *logger.Logger) *scanner.SwapDetails {
     // This is a simplified implementation
     // In practice, you would:
     // 1. Check if the transaction is calling a Uniswap-like contract
     // 2. Decode the function call data
     // 3. Extract token addresses, amounts, etc.

     // For now, we'll return mock data for demonstration
     if tx.To() != nil {
         swap := &scanner.SwapDetails{
             PoolAddress:     tx.To().Hex(),
             Token0:          "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", // USDC
             Token1:          "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2", // WETH
             Amount0In:       big.NewInt(1000000000), // 1000 USDC
             Amount0Out:      big.NewInt(0),
             Amount1In:       big.NewInt(0),
             Amount1Out:      big.NewInt(500000000000000000), // 0.5 WETH
             SqrtPriceX96:    uint256.NewInt(2505414483750470000),
             Liquidity:       uint256.NewInt(1000000000000000000),
             Tick:            200000,
             Timestamp:       time.Now(),
             TransactionHash: tx.Hash(),
         }

         logger.Debug(fmt.Sprintf("Decoded swap transaction: %s", tx.Hash().Hex()))
         return swap
     }

     return nil
 }

-// MarketAnalysisStage performs market analysis on swap details
+// MarketAnalysisStage performs market analysis on event details
 func MarketAnalysisStage(
     cfg *config.BotConfig,
     logger *logger.Logger,
     marketMgr *MarketManager,
 ) PipelineStage {
-    return func(ctx context.Context, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) error {
-        // This is a placeholder for market analysis
-        // In practice, you would:
-        // 1. Get pool data from market manager
-        // 2. Analyze price impact
-        // 3. Check for arbitrage opportunities
+    return func(ctx context.Context, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) error {
+        var wg sync.WaitGroup
+
+        // Process events concurrently
+        for i := 0; i < cfg.MaxWorkers; i++ {
+            wg.Add(1)
+            go func() {
+                defer wg.Done()
+                for {
+                    select {
+                    case event, ok := <-input:
+                        if !ok {
+                            return // Channel closed
+                        }
+
+                        // Only process swap events
+                        if event.Type != events.Swap {
+                            // Forward non-swap events without processing
+                            select {
+                            case output <- event:
+                            case <-ctx.Done():
+                                return
+                            }
+                            continue
+                        }
+
+                        // Get pool data from market manager
+                        poolAddress := common.HexToAddress(event.PoolAddress)
+                        poolData, err := marketMgr.GetPool(ctx, poolAddress)
+                        if err != nil {
+                            logger.Error(fmt.Sprintf("Error getting pool data for %s: %v", event.PoolAddress, err))
+                            // Forward the event even if we can't get pool data
+                            select {
+                            case output <- event:
+                            case <-ctx.Done():
+                                return
+                            }
+                            continue
+                        }
+
+                        // Calculate price impact using Uniswap V3 math
+                        priceImpact, err := calculatePriceImpact(event, poolData)
+                        if err != nil {
+                            logger.Error(fmt.Sprintf("Error calculating price impact for pool %s: %v", event.PoolAddress, err))
+                            // Forward the event even if we can't calculate price impact
+                            select {
+                            case output <- event:
+                            case <-ctx.Done():
+                                return
+                            }
+                            continue
+                        }
+
+                        // Add price impact to the event
+                        // Note: In a real implementation, you might want to create a new struct
+                        // that extends EventDetails with additional fields
+                        logger.Debug(fmt.Sprintf("Price impact for pool %s: %f", event.PoolAddress, priceImpact))
+
+                        // Forward the processed event
+                        select {
+                        case output <- event:
+                        case <-ctx.Done():
+                            return
+                        }
+
+                    case <-ctx.Done():
+                        return
+                    }
+                }
+            }()
+        }
+
+        // Wait for all workers to finish, then close the output channel
+        go func() {
+            wg.Wait()
+            close(output)
+        }()
+
-        close(output)
         return nil
     }
 }

+// calculatePriceImpact calculates the price impact of a swap using Uniswap V3 math
+func calculatePriceImpact(event *scanner.EventDetails, poolData *PoolData) (float64, error) {
+    // Convert event amounts to uint256 for calculations
+    amount0In := uint256.NewInt(0)
+    amount0In.SetFromBig(event.Amount0In)
+
+    amount1In := uint256.NewInt(0)
+    amount1In.SetFromBig(event.Amount1In)
+
+    // Determine which token is being swapped in
+    var amountIn *uint256.Int
+    if amount0In.Cmp(uint256.NewInt(0)) > 0 {
+        amountIn = amount0In
+    } else {
+        amountIn = amount1In
+    }
+
+    // If no amount is being swapped in, return 0 impact
+    if amountIn.Cmp(uint256.NewInt(0)) == 0 {
+        return 0.0, nil
+    }
+
+    // Calculate price impact as a percentage of liquidity
+    // priceImpact = amountIn / liquidity
+    liquidity := poolData.Liquidity
+
+    // If liquidity is 0, we can't calculate impact
+    if liquidity.Cmp(uint256.NewInt(0)) == 0 {
+        return 0.0, nil
+    }
+
+    // Calculate impact
+    impact := new(uint256.Int).Div(amountIn, liquidity)
+
+    // Convert to float64 for percentage
+    impactFloat := new(big.Float).SetInt(impact.ToBig())
+    percentage, _ := impactFloat.Float64()
+
+    // Convert to percentage (multiply by 100)
+    return percentage * 100.0, nil
+}
+
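A worked example of the formula above: amountIn = 5*10^17 against liquidity = 10^18 should give 5e17 / 1e18 * 100 = 50%. Note that new(uint256.Int).Div is integer division, so that same input truncates to 0 before the float64 conversion. A float-based variant (hypothetical helper, assumed to live in the same file and reuse its math/big and uint256 imports) would avoid the truncation:

// calculatePriceImpactFloat is a hypothetical alternative to the helper above:
// it divides in big.Float space so ratios below 1 are not truncated to zero.
func calculatePriceImpactFloat(amountIn, liquidity *uint256.Int) float64 {
    if liquidity.IsZero() {
        return 0.0
    }
    in := new(big.Float).SetInt(amountIn.ToBig())
    liq := new(big.Float).SetInt(liquidity.ToBig())
    ratio, _ := new(big.Float).Quo(in, liq).Float64()
    return ratio * 100.0 // e.g. 5e17 / 1e18 -> 50.0
}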
 // ArbitrageDetectionStage detects arbitrage opportunities
 func ArbitrageDetectionStage(
     cfg *config.BotConfig,
     logger *logger.Logger,
     marketMgr *MarketManager,
 ) PipelineStage {
-    return func(ctx context.Context, input <-chan *types.Transaction, output chan<- *scanner.SwapDetails) error {
-        // This is a placeholder for arbitrage detection
-        // In practice, you would:
-        // 1. Compare prices across multiple pools
-        // 2. Calculate potential profit
-        // 3. Filter based on profitability
+    return func(ctx context.Context, input <-chan *scanner.EventDetails, output chan<- *scanner.EventDetails) error {
+        var wg sync.WaitGroup
+
+        // Process events concurrently
+        for i := 0; i < cfg.MaxWorkers; i++ {
+            wg.Add(1)
+            go func() {
+                defer wg.Done()
+                for {
+                    select {
+                    case event, ok := <-input:
+                        if !ok {
+                            return // Channel closed
+                        }
+
+                        // Only process swap events
+                        if event.Type != events.Swap {
+                            // Forward non-swap events without processing
+                            select {
+                            case output <- event:
+                            case <-ctx.Done():
+                                return
+                            }
+                            continue
+                        }
+
+                        // Look for arbitrage opportunities
+                        opportunities, err := findArbitrageOpportunities(ctx, event, marketMgr, logger)
+                        if err != nil {
+                            logger.Error(fmt.Sprintf("Error finding arbitrage opportunities for pool %s: %v", event.PoolAddress, err))
+                            // Forward the event even if we encounter an error
+                            select {
+                            case output <- event:
+                            case <-ctx.Done():
+                                return
+                            }
+                            continue
+                        }
+
+                        // Log any found opportunities
+                        if len(opportunities) > 0 {
+                            logger.Info(fmt.Sprintf("Found %d arbitrage opportunities for pool %s", len(opportunities), event.PoolAddress))
+                            for _, opp := range opportunities {
+                                logger.Info(fmt.Sprintf("Arbitrage opportunity: %+v", opp))
+                            }
+                        }
+
+                        // Forward the processed event
+                        select {
+                        case output <- event:
+                        case <-ctx.Done():
+                            return
+                        }
+
+                    case <-ctx.Done():
+                        return
+                    }
+                }
+            }()
+        }
+
+        // Wait for all workers to finish, then close the output channel
+        go func() {
+            wg.Wait()
+            close(output)
+        }()
+
-        close(output)
         return nil
     }
 }

+// findArbitrageOpportunities looks for arbitrage opportunities based on a swap event
+func findArbitrageOpportunities(ctx context.Context, event *scanner.EventDetails, marketMgr *MarketManager, logger *logger.Logger) ([]scanner.ArbitrageOpportunity, error) {
+    opportunities := make([]scanner.ArbitrageOpportunity, 0)
+
+    // Get all pools for the same token pair
+    token0 := common.HexToAddress(event.Token0)
+    token1 := common.HexToAddress(event.Token1)
+    pools := marketMgr.GetPoolsByTokens(token0, token1)
+
+    // If we don't have multiple pools, we can't do arbitrage
+    if len(pools) < 2 {
+        return opportunities, nil
+    }
+
+    // Get the pool that triggered the event
+    eventPoolAddress := common.HexToAddress(event.PoolAddress)
+
+    // Find the pool that triggered the event
+    var eventPool *PoolData
+    for _, pool := range pools {
+        if pool.Address == eventPoolAddress {
+            eventPool = pool
+            break
+        }
+    }
+
+    // If we can't find the event pool, return
+    if eventPool == nil {
+        return opportunities, nil
+    }
+
+    // Convert sqrtPriceX96 to price for the event pool
+    eventPoolPrice := uniswap.SqrtPriceX96ToPrice(eventPool.SqrtPriceX96.ToBig())
+
+    // Compare with other pools
+    for _, pool := range pools {
+        // Skip the event pool
+        if pool.Address == eventPoolAddress {
+            continue
+        }
+
+        // Convert sqrtPriceX96 to price for comparison pool
+        compPoolPrice := uniswap.SqrtPriceX96ToPrice(pool.SqrtPriceX96.ToBig())
+
+        // Calculate potential profit (simplified)
+        // In practice, this would involve more complex calculations
+        profit := new(big.Float).Sub(compPoolPrice, eventPoolPrice)
+
+        // If there's a price difference, we might have an opportunity
+        if profit.Cmp(big.NewFloat(0)) > 0 {
+            opp := scanner.ArbitrageOpportunity{
+                Path:        []string{event.Token0, event.Token1},
+                Pools:       []string{event.PoolAddress, pool.Address.Hex()},
+                Profit:      big.NewInt(1000000000000000000), // 1 ETH (mock value)
+                GasEstimate: big.NewInt(200000000000000000),  // 0.2 ETH (mock value)
+                ROI:         5.0,                             // 500% (mock value)
+                Protocol:    event.Protocol,
+            }
+            opportunities = append(opportunities, opp)
+        }
+    }
+
+    return opportunities, nil
+}
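The comparison above leans on uniswap.SqrtPriceX96ToPrice, whose implementation is not part of this diff. The standard Uniswap V3 relation is price = (sqrtPriceX96 / 2^96)^2, giving a raw token1/token0 price before adjusting for token decimals. A minimal big.Float sketch of that conversion, as an assumption about what the helper computes rather than the repository's actual implementation:

// sqrtPriceX96ToPriceSketch converts a Uniswap V3 sqrtPriceX96 value into a raw
// token1/token0 price via price = (sqrtPriceX96 / 2^96)^2. Hypothetical helper
// for illustration only; it does not adjust for token decimals.
func sqrtPriceX96ToPriceSketch(sqrtPriceX96 *big.Int) *big.Float {
    q96 := new(big.Float).SetInt(new(big.Int).Lsh(big.NewInt(1), 96)) // 2^96
    ratio := new(big.Float).Quo(new(big.Float).SetInt(sqrtPriceX96), q96)
    return new(big.Float).Mul(ratio, ratio)
}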