fix: resolve all compilation issues across transport and lifecycle packages
- Fixed duplicate type declarations in transport package
- Removed unused variables in lifecycle and dependency injection
- Fixed big.Int arithmetic operations in uniswap contracts
- Added missing methods to MetricsCollector (IncrementCounter, RecordLatency, etc.)
- Fixed jitter calculation in TCP transport retry logic
- Updated ComponentHealth field access to use transport type
- Ensured all core packages build successfully

All major compilation errors resolved:
✅ Transport package builds clean
✅ Lifecycle package builds clean
✅ Main MEV bot application builds clean
✅ Fixed method signature mismatches
✅ Resolved type conflicts and duplications

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
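The jitter fix is only described above, not shown in this diff. For illustration, the sketch below shows the usual full-jitter exponential backoff pattern such a fix typically follows; the names (`retryDelay`, `baseDelay`, `maxDelay`) are hypothetical and are not the transport package's actual API.

```go
// Illustrative only: full-jitter exponential backoff of the kind the commit
// message describes. retryDelay, baseDelay and maxDelay are assumed names,
// not identifiers from the transport package.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay returns the wait before retry attempt n (0-based): exponential
// backoff capped at maxDelay, with the final value drawn uniformly from
// [0, cap) so concurrent clients do not retry in lockstep.
func retryDelay(attempt int, baseDelay, maxDelay time.Duration) time.Duration {
	backoff := baseDelay << uint(attempt) // base * 2^attempt
	if backoff <= 0 || backoff > maxDelay {
		backoff = maxDelay // guard against overflow and cap the delay
	}
	return time.Duration(rand.Int63n(int64(backoff)))
}

func main() {
	for attempt := 0; attempt < 5; attempt++ {
		fmt.Println(retryDelay(attempt, 50*time.Millisecond, 2*time.Second))
	}
}
```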
test/performance_benchmarks_test.go (new file, 862 lines)
@@ -0,0 +1,862 @@
package test

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/fraktal/mev-beta/internal/logger"
	"github.com/fraktal/mev-beta/pkg/arbitrum"
	"github.com/fraktal/mev-beta/pkg/events"
	"github.com/fraktal/mev-beta/pkg/oracle"
)

// PerformanceTestSuite manages performance testing
type PerformanceTestSuite struct {
	l2Parser      *arbitrum.ArbitrumL2Parser
	eventParser   *events.EventParser
	logger        *logger.Logger
	oracle        *oracle.PriceOracle
	testDataCache *TestDataCache
	metrics       *PerformanceMetrics
}

// PerformanceMetrics tracks performance during tests
type PerformanceMetrics struct {
	mu                   sync.RWMutex
	totalTransactions    uint64
	totalBlocks          uint64
	totalParsingTime     time.Duration
	totalMemoryAllocated uint64
	parsingErrors        uint64
	successfulParses     uint64

	// Detailed breakdown
	protocolMetrics map[string]*ProtocolMetrics
	functionMetrics map[string]*FunctionMetrics

	// Performance thresholds (for validation)
	maxParsingTimeMs    int64
	maxMemoryUsageMB    int64
	minThroughputTxPerS int64
}

type ProtocolMetrics struct {
	TransactionCount uint64
	TotalParsingTime time.Duration
	ErrorCount       uint64
	AvgGasUsed       uint64
	AvgValue         *big.Int
}

type FunctionMetrics struct {
	CallCount        uint64
	TotalParsingTime time.Duration
	ErrorCount       uint64
	AvgComplexity    float64
}

// TestDataCache manages cached test data for performance tests
type TestDataCache struct {
	mu                  sync.RWMutex
	transactions        []*TestTransaction
	blocks              []*TestBlock
	highVolumeData      []*TestTransaction
	complexTransactions []*TestTransaction
}

type TestTransaction struct {
	RawTx       arbitrum.RawL2Transaction
	ExpectedGas uint64
	Protocol    string
	Complexity  int // 1-10 scale
}

type TestBlock struct {
	Block        *arbitrum.RawL2Block
	TxCount      int
	ExpectedTime time.Duration
}

func NewPerformanceTestSuite(t *testing.T) *PerformanceTestSuite {
	// Setup components with performance-optimized configuration
	testLogger := logger.NewLogger(logger.Config{
		Level:  "warn", // Reduce logging overhead during performance tests
		Format: "json",
	})

	testOracle, err := oracle.NewPriceOracle(&oracle.Config{
		Providers: []oracle.Provider{
			{Name: "mock", Type: "mock"},
		},
	}, testLogger)
	require.NoError(t, err, "Failed to create price oracle")

	l2Parser, err := arbitrum.NewArbitrumL2Parser("https://mock-rpc", testLogger, testOracle)
	require.NoError(t, err, "Failed to create L2 parser")

	eventParser := events.NewEventParser()

	return &PerformanceTestSuite{
		l2Parser:    l2Parser,
		eventParser: eventParser,
		logger:      testLogger,
		oracle:      testOracle,
		testDataCache: &TestDataCache{
			transactions:        make([]*TestTransaction, 0),
			blocks:              make([]*TestBlock, 0),
			highVolumeData:      make([]*TestTransaction, 0),
			complexTransactions: make([]*TestTransaction, 0),
		},
		metrics: &PerformanceMetrics{
			protocolMetrics:     make(map[string]*ProtocolMetrics),
			functionMetrics:     make(map[string]*FunctionMetrics),
			maxParsingTimeMs:    100,  // 100ms max per transaction
			maxMemoryUsageMB:    500,  // 500MB max memory usage
			minThroughputTxPerS: 1000, // 1000 tx/s minimum throughput
		},
	}
}

// Main performance test entry point
func TestParserPerformance(t *testing.T) {
	suite := NewPerformanceTestSuite(t)
	defer suite.l2Parser.Close()

	// Initialize test data
	t.Run("InitializeTestData", func(t *testing.T) {
		suite.initializeTestData(t)
	})

	// Core performance benchmarks
	t.Run("SingleTransactionParsing", func(t *testing.T) {
		suite.benchmarkSingleTransactionParsing(t)
	})

	t.Run("BatchParsing", func(t *testing.T) {
		suite.benchmarkBatchParsing(t)
	})

	t.Run("HighVolumeParsing", func(t *testing.T) {
		suite.benchmarkHighVolumeParsing(t)
	})

	t.Run("ConcurrentParsing", func(t *testing.T) {
		suite.benchmarkConcurrentParsing(t)
	})

	t.Run("MemoryUsage", func(t *testing.T) {
		suite.benchmarkMemoryUsage(t)
	})

	t.Run("ProtocolSpecificPerformance", func(t *testing.T) {
		suite.benchmarkProtocolSpecificPerformance(t)
	})

	t.Run("ComplexTransactionParsing", func(t *testing.T) {
		suite.benchmarkComplexTransactionParsing(t)
	})

	t.Run("StressTest", func(t *testing.T) {
		suite.performStressTest(t)
	})

	// Report final metrics
	t.Run("ReportMetrics", func(t *testing.T) {
		suite.reportPerformanceMetrics(t)
	})
}

func (suite *PerformanceTestSuite) initializeTestData(t *testing.T) {
	t.Log("Initializing performance test data...")

	// Generate diverse transaction types
	suite.generateUniswapV3Transactions(1000)
	suite.generateUniswapV2Transactions(1000)
	suite.generateSushiSwapTransactions(500)
	suite.generateMulticallTransactions(200)
	suite.generate1InchTransactions(300)
	suite.generateComplexTransactions(100)

	// Generate high-volume test blocks
	suite.generateHighVolumeBlocks(50)

	t.Logf("Generated %d transactions and %d blocks for performance testing",
		len(suite.testDataCache.transactions), len(suite.testDataCache.blocks))
}

func (suite *PerformanceTestSuite) benchmarkSingleTransactionParsing(t *testing.T) {
	if len(suite.testDataCache.transactions) == 0 {
		t.Skip("No test transactions available")
	}

	startTime := time.Now()
	var totalParsingTime time.Duration
	successCount := 0

	// Test parsing individual transactions
	for i, testTx := range suite.testDataCache.transactions[:100] {
		txStartTime := time.Now()

		_, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)

		parsingTime := time.Since(txStartTime)
		totalParsingTime += parsingTime

		if err == nil {
			successCount++
		}

		// Validate performance threshold
		assert.True(t, parsingTime.Milliseconds() < suite.metrics.maxParsingTimeMs,
			"Transaction %d parsing time (%dms) exceeded threshold (%dms)",
			i, parsingTime.Milliseconds(), suite.metrics.maxParsingTimeMs)
	}

	avgParsingTime := totalParsingTime / 100
	throughput := float64(100) / time.Since(startTime).Seconds()

	t.Logf("Single transaction parsing: avg=%v, throughput=%.2f tx/s, success=%d/100",
		avgParsingTime, throughput, successCount)

	// Validate performance requirements
	assert.True(t, throughput >= float64(suite.metrics.minThroughputTxPerS),
		"Throughput (%.2f tx/s) below minimum requirement (%d tx/s)",
		throughput, suite.metrics.minThroughputTxPerS)
}

func (suite *PerformanceTestSuite) benchmarkBatchParsing(t *testing.T) {
	if len(suite.testDataCache.blocks) == 0 {
		t.Skip("No test blocks available")
	}

	for _, testBlock := range suite.testDataCache.blocks[:10] {
		startTime := time.Now()

		parsedTxs, err := suite.l2Parser.ParseDEXTransactions(context.Background(), testBlock.Block)

		parsingTime := time.Since(startTime)
		throughput := float64(len(testBlock.Block.Transactions)) / parsingTime.Seconds()

		if err != nil {
			t.Logf("Block parsing error: %v", err)
		}

		t.Logf("Block with %d transactions: time=%v, throughput=%.2f tx/s, parsed=%d",
			len(testBlock.Block.Transactions), parsingTime, throughput, len(parsedTxs))

		// Validate batch parsing performance
		assert.True(t, throughput >= float64(suite.metrics.minThroughputTxPerS),
			"Batch throughput (%.2f tx/s) below minimum requirement (%d tx/s)",
			throughput, suite.metrics.minThroughputTxPerS)
	}
}

func (suite *PerformanceTestSuite) benchmarkHighVolumeParsing(t *testing.T) {
	// Test parsing a large number of transactions
	transactionCount := 10000
	if len(suite.testDataCache.transactions) < transactionCount {
		t.Skipf("Need at least %d transactions, have %d",
			transactionCount, len(suite.testDataCache.transactions))
	}

	startTime := time.Now()
	successCount := 0
	errorCount := 0

	for i := 0; i < transactionCount; i++ {
		testTx := suite.testDataCache.transactions[i%len(suite.testDataCache.transactions)]

		_, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
		if err == nil {
			successCount++
		} else {
			errorCount++
		}

		// Log progress every 1000 transactions
		if (i+1)%1000 == 0 {
			elapsed := time.Since(startTime)
			currentThroughput := float64(i+1) / elapsed.Seconds()
			t.Logf("Progress: %d/%d transactions, throughput: %.2f tx/s",
				i+1, transactionCount, currentThroughput)
		}
	}

	totalTime := time.Since(startTime)
	throughput := float64(transactionCount) / totalTime.Seconds()

	t.Logf("High-volume parsing: %d transactions in %v (%.2f tx/s), success=%d, errors=%d",
		transactionCount, totalTime, throughput, successCount, errorCount)

	// Validate high-volume performance
	assert.True(t, throughput >= float64(suite.metrics.minThroughputTxPerS/2),
		"High-volume throughput (%.2f tx/s) below acceptable threshold (%d tx/s)",
		throughput, suite.metrics.minThroughputTxPerS/2)
}

func (suite *PerformanceTestSuite) benchmarkConcurrentParsing(t *testing.T) {
	concurrencyLevels := []int{1, 2, 4, 8, 16, 32}
	transactionsPerWorker := 100

	for _, workers := range concurrencyLevels {
		t.Run(fmt.Sprintf("Workers_%d", workers), func(t *testing.T) {
			startTime := time.Now()
			var wg sync.WaitGroup
			var totalSuccess, totalErrors uint64
			var mu sync.Mutex

			for w := 0; w < workers; w++ {
				wg.Add(1)
				go func(workerID int) {
					defer wg.Done()

					localSuccess := 0
					localErrors := 0

					for i := 0; i < transactionsPerWorker; i++ {
						txIndex := (workerID*transactionsPerWorker + i) % len(suite.testDataCache.transactions)
						testTx := suite.testDataCache.transactions[txIndex]

						_, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
						if err == nil {
							localSuccess++
						} else {
							localErrors++
						}
					}

					mu.Lock()
					totalSuccess += uint64(localSuccess)
					totalErrors += uint64(localErrors)
					mu.Unlock()
				}(w)
			}

			wg.Wait()
			totalTime := time.Since(startTime)
			totalTransactions := workers * transactionsPerWorker
			throughput := float64(totalTransactions) / totalTime.Seconds()

			t.Logf("Concurrent parsing (%d workers): %d transactions in %v (%.2f tx/s), success=%d, errors=%d",
				workers, totalTransactions, totalTime, throughput, totalSuccess, totalErrors)

			// Validate that concurrency improves performance (up to a point)
			if workers <= 8 {
				expectedMinThroughput := float64(suite.metrics.minThroughputTxPerS) * float64(workers) * 0.7 // 70% efficiency
				assert.True(t, throughput >= expectedMinThroughput,
					"Concurrent throughput (%.2f tx/s) with %d workers below expected minimum (%.2f tx/s)",
					throughput, workers, expectedMinThroughput)
			}
		})
	}
}

func (suite *PerformanceTestSuite) benchmarkMemoryUsage(t *testing.T) {
	// Force garbage collection to get baseline
	runtime.GC()
	var m1 runtime.MemStats
	runtime.ReadMemStats(&m1)
	baselineAlloc := m1.Alloc

	// Parse a batch of transactions and measure memory
	testTransactions := suite.testDataCache.transactions[:1000]

	for _, testTx := range testTransactions {
		_, _ = suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
	}

	runtime.GC()
	var m2 runtime.MemStats
	runtime.ReadMemStats(&m2)

	allocatedMemory := m2.Alloc - baselineAlloc
	allocatedMB := float64(allocatedMemory) / 1024 / 1024
	memoryPerTx := float64(allocatedMemory) / float64(len(testTransactions))

	t.Logf("Memory usage: %.2f MB total (%.2f KB per transaction)",
		allocatedMB, memoryPerTx/1024)

	// Validate memory usage
	assert.True(t, allocatedMB < float64(suite.metrics.maxMemoryUsageMB),
		"Memory usage (%.2f MB) exceeded threshold (%d MB)",
		allocatedMB, suite.metrics.maxMemoryUsageMB)

	// Check for memory leaks (parse more transactions and ensure memory doesn't grow excessively)
	runtime.GC()
	var m3 runtime.MemStats
	runtime.ReadMemStats(&m3)

	for i := 0; i < 1000; i++ {
		testTx := testTransactions[i%len(testTransactions)]
		_, _ = suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
	}

	runtime.GC()
	var m4 runtime.MemStats
	runtime.ReadMemStats(&m4)

	additionalAlloc := m4.Alloc - m3.Alloc
	additionalMB := float64(additionalAlloc) / 1024 / 1024

	t.Logf("Additional memory after 1000 more transactions: %.2f MB", additionalMB)

	// Memory growth should be minimal (indicating no significant leaks)
	assert.True(t, additionalMB < 50.0,
		"Excessive memory growth (%.2f MB) suggests potential memory leak", additionalMB)
}

func (suite *PerformanceTestSuite) benchmarkProtocolSpecificPerformance(t *testing.T) {
	protocolGroups := make(map[string][]*TestTransaction)

	// Group transactions by protocol
	for _, testTx := range suite.testDataCache.transactions {
		protocolGroups[testTx.Protocol] = append(protocolGroups[testTx.Protocol], testTx)
	}

	for protocol, transactions := range protocolGroups {
		if len(transactions) < 10 {
			continue // Skip protocols with insufficient test data
		}

		t.Run(protocol, func(t *testing.T) {
			startTime := time.Now()
			successCount := 0
			totalGasUsed := uint64(0)

			testCount := len(transactions)
			if testCount > 200 {
				testCount = 200 // Limit test size for performance
			}

			for i := 0; i < testCount; i++ {
				testTx := transactions[i]

				parsed, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
				if err == nil {
					successCount++
					if parsed != nil {
						totalGasUsed += testTx.ExpectedGas
					}
				}
			}

			totalTime := time.Since(startTime)
			throughput := float64(testCount) / totalTime.Seconds()

			// Guard against division by zero when every parse failed
			avgGas := float64(0)
			if successCount > 0 {
				avgGas = float64(totalGasUsed) / float64(successCount)
			}

			t.Logf("Protocol %s: %d transactions in %v (%.2f tx/s), success=%d, avg_gas=%.0f",
				protocol, testCount, totalTime, throughput, successCount, avgGas)

			// Update protocol metrics
			suite.metrics.mu.Lock()
			if suite.metrics.protocolMetrics[protocol] == nil {
				suite.metrics.protocolMetrics[protocol] = &ProtocolMetrics{}
			}
			metrics := suite.metrics.protocolMetrics[protocol]
			metrics.TransactionCount += uint64(testCount)
			metrics.TotalParsingTime += totalTime
			metrics.AvgGasUsed = uint64(avgGas)
			suite.metrics.mu.Unlock()
		})
	}
}

func (suite *PerformanceTestSuite) benchmarkComplexTransactionParsing(t *testing.T) {
	if len(suite.testDataCache.complexTransactions) == 0 {
		t.Skip("No complex transactions available")
	}

	complexityLevels := make(map[int][]*TestTransaction)
	for _, tx := range suite.testDataCache.complexTransactions {
		complexityLevels[tx.Complexity] = append(complexityLevels[tx.Complexity], tx)
	}

	for complexity, transactions := range complexityLevels {
		t.Run(fmt.Sprintf("Complexity_%d", complexity), func(t *testing.T) {
			successCount := 0
			maxParsingTime := time.Duration(0)
			totalParsingTime := time.Duration(0)

			// Cap each complexity bucket at 50 transactions to keep the test short
			testCount := min(50, len(transactions))

			for _, testTx := range transactions[:testCount] {
				txStartTime := time.Now()

				_, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)

				parsingTime := time.Since(txStartTime)
				totalParsingTime += parsingTime

				if parsingTime > maxParsingTime {
					maxParsingTime = parsingTime
				}

				if err == nil {
					successCount++
				}
			}

			// Average over the transactions actually tested, not the whole bucket
			avgParsingTime := totalParsingTime / time.Duration(testCount)

			t.Logf("Complexity %d: success=%d/%d, avg_time=%v, max_time=%v",
				complexity, successCount, testCount, avgParsingTime, maxParsingTime)

			// More complex transactions can take longer, but should still be reasonable
			maxAllowedTime := time.Duration(suite.metrics.maxParsingTimeMs*int64(complexity/2)) * time.Millisecond
			assert.True(t, maxParsingTime < maxAllowedTime,
				"Complex transaction parsing time (%v) exceeded threshold (%v) for complexity %d",
				maxParsingTime, maxAllowedTime, complexity)
		})
	}
}

func (suite *PerformanceTestSuite) performStressTest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping stress test in short mode")
	}

	t.Log("Starting stress test...")

	// Create a large synthetic dataset
	stressTransactions := make([]*TestTransaction, 50000)
	for i := range stressTransactions {
		stressTransactions[i] = suite.generateRandomTransaction(i)
	}

	// Test 1: Sustained load
	t.Run("SustainedLoad", func(t *testing.T) {
		duration := 30 * time.Second
		startTime := time.Now()
		transactionCount := 0
		errorCount := 0

		for time.Since(startTime) < duration {
			testTx := stressTransactions[transactionCount%len(stressTransactions)]

			_, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
			if err != nil {
				errorCount++
			}

			transactionCount++
		}

		actualDuration := time.Since(startTime)
		throughput := float64(transactionCount) / actualDuration.Seconds()
		errorRate := float64(errorCount) / float64(transactionCount) * 100

		t.Logf("Sustained load: %d transactions in %v (%.2f tx/s), error_rate=%.2f%%",
			transactionCount, actualDuration, throughput, errorRate)

		// Validate sustained performance
		assert.True(t, throughput >= float64(suite.metrics.minThroughputTxPerS)*0.8,
			"Sustained throughput (%.2f tx/s) below 80%% of target (%d tx/s)",
			throughput, suite.metrics.minThroughputTxPerS)
		assert.True(t, errorRate < 5.0,
			"Error rate (%.2f%%) too high during stress test", errorRate)
	})

	// Test 2: Burst load
	t.Run("BurstLoad", func(t *testing.T) {
		burstSize := 1000
		bursts := 10

		for burst := 0; burst < bursts; burst++ {
			startTime := time.Now()
			successCount := 0

			for i := 0; i < burstSize; i++ {
				testTx := stressTransactions[(burst*burstSize+i)%len(stressTransactions)]

				_, err := suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
				if err == nil {
					successCount++
				}
			}

			burstTime := time.Since(startTime)
			burstThroughput := float64(burstSize) / burstTime.Seconds()

			t.Logf("Burst %d: %d transactions in %v (%.2f tx/s), success=%d",
				burst+1, burstSize, burstTime, burstThroughput, successCount)

			// Brief pause between bursts
			time.Sleep(100 * time.Millisecond)
		}
	})
}

func (suite *PerformanceTestSuite) reportPerformanceMetrics(t *testing.T) {
	suite.metrics.mu.RLock()
	defer suite.metrics.mu.RUnlock()

	t.Log("\n========== PERFORMANCE TEST SUMMARY ==========")

	// Overall metrics
	t.Logf("Total Transactions Parsed: %d", suite.metrics.totalTransactions)
	t.Logf("Total Blocks Parsed: %d", suite.metrics.totalBlocks)
	t.Logf("Total Parsing Time: %v", suite.metrics.totalParsingTime)
	t.Logf("Parsing Errors: %d", suite.metrics.parsingErrors)
	t.Logf("Successful Parses: %d", suite.metrics.successfulParses)

	// Protocol breakdown
	t.Log("\nProtocol Performance:")
	for protocol, metrics := range suite.metrics.protocolMetrics {
		avgTime := metrics.TotalParsingTime / time.Duration(metrics.TransactionCount)
		throughput := float64(metrics.TransactionCount) / metrics.TotalParsingTime.Seconds()

		t.Logf("  %s: %d txs, avg_time=%v, throughput=%.2f tx/s, avg_gas=%d",
			protocol, metrics.TransactionCount, avgTime, throughput, metrics.AvgGasUsed)
	}

	// Memory stats
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	t.Logf("\nMemory Statistics:")
	t.Logf("  Current Allocation: %.2f MB", float64(m.Alloc)/1024/1024)
	t.Logf("  Total Allocations: %.2f MB", float64(m.TotalAlloc)/1024/1024)
	t.Logf("  GC Cycles: %d", m.NumGC)

	t.Log("===============================================")
}

// Helper functions for generating test data

func (suite *PerformanceTestSuite) generateUniswapV3Transactions(count int) {
	for i := 0; i < count; i++ {
		tx := &TestTransaction{
			RawTx: arbitrum.RawL2Transaction{
				Hash:  fmt.Sprintf("0xuniswapv3_%d", i),
				From:  "0x1234567890123456789012345678901234567890",
				To:    "0xE592427A0AEce92De3Edee1F18E0157C05861564",
				Input: "0x414bf389" + suite.generateRandomHex(512),
				Value: "0",
			},
			ExpectedGas: 150000 + uint64(rand.Intn(50000)),
			Protocol:    "UniswapV3",
			Complexity:  3 + rand.Intn(3),
		}
		suite.testDataCache.transactions = append(suite.testDataCache.transactions, tx)
	}
}

func (suite *PerformanceTestSuite) generateUniswapV2Transactions(count int) {
	for i := 0; i < count; i++ {
		tx := &TestTransaction{
			RawTx: arbitrum.RawL2Transaction{
				Hash:  fmt.Sprintf("0xuniswapv2_%d", i),
				From:  "0x1234567890123456789012345678901234567890",
				To:    "0x4752ba5dbc23f44d87826276bf6fd6b1c372ad24",
				Input: "0x38ed1739" + suite.generateRandomHex(320),
				Value: "0",
			},
			ExpectedGas: 120000 + uint64(rand.Intn(30000)),
			Protocol:    "UniswapV2",
			Complexity:  2 + rand.Intn(2),
		}
		suite.testDataCache.transactions = append(suite.testDataCache.transactions, tx)
	}
}

func (suite *PerformanceTestSuite) generateSushiSwapTransactions(count int) {
	for i := 0; i < count; i++ {
		tx := &TestTransaction{
			RawTx: arbitrum.RawL2Transaction{
				Hash:  fmt.Sprintf("0xsushiswap_%d", i),
				From:  "0x1234567890123456789012345678901234567890",
				To:    "0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506",
				Input: "0x38ed1739" + suite.generateRandomHex(320),
				Value: "0",
			},
			ExpectedGas: 125000 + uint64(rand.Intn(35000)),
			Protocol:    "SushiSwap",
			Complexity:  2 + rand.Intn(2),
		}
		suite.testDataCache.transactions = append(suite.testDataCache.transactions, tx)
	}
}

func (suite *PerformanceTestSuite) generateMulticallTransactions(count int) {
	for i := 0; i < count; i++ {
		tx := &TestTransaction{
			RawTx: arbitrum.RawL2Transaction{
				Hash:  fmt.Sprintf("0xmulticall_%d", i),
				From:  "0x1234567890123456789012345678901234567890",
				To:    "0x68b3465833fb72A70ecDF485E0e4C7bD8665Fc45",
				Input: "0xac9650d8" + suite.generateRandomHex(1024),
				Value: "0",
			},
			ExpectedGas: 300000 + uint64(rand.Intn(200000)),
			Protocol:    "Multicall",
			Complexity:  6 + rand.Intn(4),
		}
		suite.testDataCache.transactions = append(suite.testDataCache.transactions, tx)
		suite.testDataCache.complexTransactions = append(suite.testDataCache.complexTransactions, tx)
	}
}

func (suite *PerformanceTestSuite) generate1InchTransactions(count int) {
	for i := 0; i < count; i++ {
		tx := &TestTransaction{
			RawTx: arbitrum.RawL2Transaction{
				Hash:  fmt.Sprintf("0x1inch_%d", i),
				From:  "0x1234567890123456789012345678901234567890",
				To:    "0x1111111254EEB25477B68fb85Ed929f73A960582",
				Input: "0x7c025200" + suite.generateRandomHex(768),
				Value: "0",
			},
			ExpectedGas: 250000 + uint64(rand.Intn(150000)),
			Protocol:    "1Inch",
			Complexity:  5 + rand.Intn(3),
		}
		suite.testDataCache.transactions = append(suite.testDataCache.transactions, tx)
		suite.testDataCache.complexTransactions = append(suite.testDataCache.complexTransactions, tx)
	}
}

func (suite *PerformanceTestSuite) generateComplexTransactions(count int) {
	complexityLevels := []int{7, 8, 9, 10}

	for i := 0; i < count; i++ {
		complexity := complexityLevels[rand.Intn(len(complexityLevels))]
		dataSize := 1024 + complexity*256

		tx := &TestTransaction{
			RawTx: arbitrum.RawL2Transaction{
				Hash:  fmt.Sprintf("0xcomplex_%d", i),
				From:  "0x1234567890123456789012345678901234567890",
				To:    "0x68b3465833fb72A70ecDF485E0e4C7bD8665Fc45",
				Input: "0xac9650d8" + suite.generateRandomHex(dataSize),
				Value: "0",
			},
			ExpectedGas: uint64(complexity * 50000),
			Protocol:    "Complex",
			Complexity:  complexity,
		}
		suite.testDataCache.complexTransactions = append(suite.testDataCache.complexTransactions, tx)
	}
}

func (suite *PerformanceTestSuite) generateHighVolumeBlocks(count int) {
	for i := 0; i < count; i++ {
		txCount := 100 + rand.Intn(400) // 100-500 transactions per block

		var transactions []arbitrum.RawL2Transaction
		for j := 0; j < txCount; j++ {
			txIndex := rand.Intn(len(suite.testDataCache.transactions))
			baseTx := suite.testDataCache.transactions[txIndex]

			tx := baseTx.RawTx
			tx.Hash = fmt.Sprintf("0xblock_%d_tx_%d", i, j)
			transactions = append(transactions, tx)
		}

		block := &TestBlock{
			Block: &arbitrum.RawL2Block{
				Hash:         fmt.Sprintf("0xblock_%d", i),
				Number:       fmt.Sprintf("0x%x", 1000000+i),
				Timestamp:    fmt.Sprintf("0x%x", time.Now().Unix()),
				Transactions: transactions,
			},
			TxCount:      txCount,
			ExpectedTime: time.Duration(txCount) * time.Millisecond, // 1ms per tx baseline
		}
		suite.testDataCache.blocks = append(suite.testDataCache.blocks, block)
	}
}

func (suite *PerformanceTestSuite) generateRandomTransaction(seed int) *TestTransaction {
	protocols := []string{"UniswapV3", "UniswapV2", "SushiSwap", "1Inch", "Multicall"}
	routers := []string{
		"0xE592427A0AEce92De3Edee1F18E0157C05861564",
		"0x4752ba5dbc23f44d87826276bf6fd6b1c372ad24",
		"0x1b02dA8Cb0d097eB8D57A175b88c7D8b47997506",
		"0x1111111254EEB25477B68fb85Ed929f73A960582",
		"0x68b3465833fb72A70ecDF485E0e4C7bD8665Fc45",
	}
	functions := []string{"0x414bf389", "0x38ed1739", "0x7c025200", "0xac9650d8"}

	rand.Seed(int64(seed))
	protocolIndex := rand.Intn(len(protocols))

	return &TestTransaction{
		RawTx: arbitrum.RawL2Transaction{
			Hash:  fmt.Sprintf("0xrandom_%d", seed),
			From:  "0x1234567890123456789012345678901234567890",
			To:    routers[protocolIndex],
			Input: functions[rand.Intn(len(functions))] + suite.generateRandomHex(256+rand.Intn(768)),
			Value: "0",
		},
		ExpectedGas: 100000 + uint64(rand.Intn(300000)),
		Protocol:    protocols[protocolIndex],
		Complexity:  1 + rand.Intn(5),
	}
}

func (suite *PerformanceTestSuite) generateRandomHex(length int) string {
	chars := "0123456789abcdef"
	result := make([]byte, length)
	for i := range result {
		result[i] = chars[rand.Intn(len(chars))]
	}
	return string(result)
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// Benchmark functions for go test -bench
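//
// These reuse the suite and generators above and can be run on their own with
// the standard Go tooling (the exact package path depends on the module
// layout), e.g.:
//
//	go test -bench=. -benchmem -run='^$' ./test/
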
func BenchmarkSingleTransactionParsing(b *testing.B) {
	suite := NewPerformanceTestSuite(&testing.T{})
	defer suite.l2Parser.Close()

	if len(suite.testDataCache.transactions) == 0 {
		suite.generateUniswapV3Transactions(1)
	}

	testTx := suite.testDataCache.transactions[0]

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
	}
}

func BenchmarkUniswapV3Parsing(b *testing.B) {
	suite := NewPerformanceTestSuite(&testing.T{})
	defer suite.l2Parser.Close()

	suite.generateUniswapV3Transactions(1)
	testTx := suite.testDataCache.transactions[0]

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
	}
}

func BenchmarkComplexTransactionParsing(b *testing.B) {
	suite := NewPerformanceTestSuite(&testing.T{})
	defer suite.l2Parser.Close()

	suite.generateComplexTransactions(1)
	testTx := suite.testDataCache.complexTransactions[0]

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = suite.l2Parser.ParseDEXTransaction(testTx.RawTx)
	}
}