- Added comprehensive bounds checking to prevent buffer overruns in multicall parsing
- Implemented graduated validation system (Strict/Moderate/Permissive) to reduce false positives
- Added LRU caching system for address validation with 10-minute TTL
- Enhanced ABI decoder with missing Universal Router and Arbitrum-specific DEX signatures
- Fixed duplicate function declarations and import conflicts across multiple files
- Added error recovery mechanisms with multiple fallback strategies
- Updated tests to handle new validation behavior for suspicious addresses
- Fixed parser test expectations for improved validation system
- Applied gofmt formatting fixes to ensure code style compliance
- Fixed mutex copying issues in monitoring package by introducing MetricsSnapshot
- Resolved critical security vulnerabilities in heuristic address extraction
- Progress: Updated TODO audit from 10% to 35% complete

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
70 lines
2.3 KiB
Go
70 lines
2.3 KiB
Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"flag"
|
|
"fmt"
|
|
"log"
|
|
"os"
|
|
"time"
|
|
|
|
"github.com/fraktal/mev-beta/tools/performance-audit/internal"
|
|
)
|
|
|
|
func main() {
|
|
var (
|
|
testType = flag.String("test", "all", "Test type: throughput, latency, memory, cpu, stress, all")
|
|
duration = flag.Duration("duration", 5*time.Minute, "Test duration")
|
|
outputDir = flag.String("output", "reports/performance", "Output directory")
|
|
verbose = flag.Bool("verbose", false, "Enable verbose output")
|
|
concurrent = flag.Int("concurrent", 10, "Number of concurrent workers")
|
|
loadLevel = flag.String("load", "normal", "Load level: light, normal, heavy, extreme")
|
|
profileEnabled = flag.Bool("profile", false, "Enable CPU/memory profiling")
|
|
benchmarkMode = flag.Bool("benchmark", false, "Run in benchmark mode")
|
|
stressTest = flag.Bool("stress", false, "Run stress test scenarios")
|
|
targetTPS = flag.Int("target-tps", 1000, "Target transactions per second")
|
|
maxMemoryMB = flag.Int("max-memory", 512, "Maximum memory usage in MB")
|
|
cpuThreshold = flag.Float64("cpu-threshold", 80.0, "CPU usage threshold percentage")
|
|
)
|
|
flag.Parse()
|
|
|
|
// Create output directory
|
|
if err := os.MkdirAll(*outputDir, 0755); err != nil {
|
|
log.Fatalf("Failed to create output directory: %v", err)
|
|
}
|
|
|
|
// Initialize performance auditor
|
|
auditor, err := internal.NewPerformanceAuditor(&internal.PerformanceAuditConfig{
|
|
TestType: *testType,
|
|
Duration: *duration,
|
|
OutputDir: *outputDir,
|
|
Verbose: *verbose,
|
|
Concurrent: *concurrent,
|
|
LoadLevel: *loadLevel,
|
|
ProfileEnabled: *profileEnabled,
|
|
BenchmarkMode: *benchmarkMode,
|
|
StressTest: *stressTest,
|
|
TargetTPS: *targetTPS,
|
|
MaxMemoryMB: *maxMemoryMB,
|
|
CPUThreshold: *cpuThreshold,
|
|
})
|
|
if err != nil {
|
|
log.Fatalf("Failed to initialize performance auditor: %v", err)
|
|
}
|
|
|
|
ctx := context.Background()
|
|
ctx, cancel := context.WithTimeout(ctx, *duration+time.Minute) // Add buffer
|
|
defer cancel()
|
|
|
|
fmt.Printf("Starting performance audit: %s test for %v...\n", *testType, *duration)
|
|
if err := auditor.RunPerformanceTests(ctx); err != nil {
|
|
log.Fatalf("Performance audit failed: %v", err)
|
|
}
|
|
|
|
if err := auditor.GenerateReport(); err != nil {
|
|
log.Fatalf("Report generation failed: %v", err)
|
|
}
|
|
|
|
fmt.Printf("Performance audit complete. Reports saved to: %s\n", *outputDir)
|
|
}
|