refactor: move all remaining files to orig/ directory

Completed clean root directory structure:
- Root now contains only: .git, .env, docs/, orig/
- Moved all remaining files and directories to orig/:
  - Config files (.claude, .dockerignore, .drone.yml, etc.)
  - All .env variants (except active .env)
  - Git config (.gitconfig, .github, .gitignore, etc.)
  - Tool configs (.golangci.yml, .revive.toml, etc.)
  - Documentation (*.md files, @prompts)
  - Build files (Dockerfiles, Makefile, go.mod, go.sum)
  - Docker compose files
  - All source directories (scripts, tests, tools, etc.)
  - Runtime directories (logs, monitoring, reports)
  - Dependency files (node_modules, lib, cache)
  - Special files (--delete)

- Removed empty runtime directories (bin/, data/)

V2 structure is now clean:
- docs/planning/ - V2 planning documents
- orig/ - Complete V1 codebase preserved
- .env - Active environment config (not in git)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
Administrator
2025-11-10 10:53:05 +01:00
parent 803de231ba
commit c54c569f30
718 changed files with 8304 additions and 8281 deletions

222
orig/.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,222 @@
---
# Staging pipeline: lint + unit tests, optional live integration tests,
# Docker image build, math audit, and a final deployment-readiness gate.
name: Staging Pipeline

on:
  workflow_dispatch:
    inputs:
      run_live_integration:
        description: 'Run live RPC-dependent integration tests'
        required: false
        default: 'false'
  workflow_call:

env:
  GO_VERSION: '1.25'

jobs:
  staging-test:
    name: Build, Lint & Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go toolchain
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-staging-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-staging-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Verify dependencies
        run: go mod verify
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
          args: --timeout=10m
      - name: Run go vet
        run: go vet ./...
      - name: Run unit tests (race + coverage)
        run: |
          export SKIP_LIVE_RPC_TESTS=true
          export USE_MOCK_RPC=true
          GOCACHE=$(pwd)/.gocache go test -race -coverprofile=coverage.out ./...
      - name: Upload coverage
        uses: actions/upload-artifact@v3
        with:
          name: staging-coverage
          path: coverage.out
      - name: Build binary
        run: go build -v -o mev-bot ./cmd/mev-bot
      - name: Smoke start binary
        run: |
          export MEV_BOT_ENCRYPTION_KEY="test_key_32_chars_minimum_length"
          timeout 5s ./mev-bot start || true
          echo "✓ Binary builds and starts successfully"

  integration-test:
    name: Integration Tests
    runs-on: ubuntu-latest
    needs: staging-test
    # Only runs when live integration is enabled by repo var or dispatch input;
    # downstream jobs must therefore tolerate a 'skipped' result.
    if: vars.ENABLE_LIVE_INTEGRATION == 'true' || (github.event_name == 'workflow_dispatch' && github.event.inputs.run_live_integration == 'true')
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Restore Go cache
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-staging-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-staging-${{ env.GO_VERSION }}-
      - name: Run integration tests
        run: |
          export ARBITRUM_RPC_ENDPOINT="mock://localhost:8545"
          export ARBITRUM_WS_ENDPOINT="mock://localhost:8546"
          export SKIP_LIVE_RPC_TESTS=true
          go test -v ./pkg/monitor/ -tags=integration
          go test -v ./pkg/arbitrage/ -tags=integration
          go test -v ./pkg/arbitrum/ -tags=integration
      - name: Performance benchmarks
        run: |
          go test -bench=. -benchmem ./pkg/monitor/
          go test -bench=. -benchmem ./pkg/scanner/
  docker-build:
    name: Docker Build
    runs-on: ubuntu-latest
    needs: [staging-test, integration-test]
    # NOTE(fix): without a status-check function, the implicit success() made
    # this job skip whenever integration-test was skipped (live RPC disabled).
    # Build the image on push as long as nothing failed or was cancelled.
    if: ${{ !failure() && !cancelled() && github.event_name == 'push' }}
    steps:
      - uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Build Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: false
          tags: mev-bot:staging
          cache-from: type=gha
          cache-to: type=gha,mode=max

  math-audit:
    name: Math Audit
    runs-on: ubuntu-latest
    needs: staging-test
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Restore Go cache
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-staging-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-staging-${{ env.GO_VERSION }}-
      - name: Run math audit
        run: GOCACHE=$(pwd)/.gocache go run ./tools/math-audit --vectors default --report reports/math/latest
      - name: Verify math audit artifacts
        run: |
          test -s reports/math/latest/report.json
          test -s reports/math/latest/report.md
      - name: Upload math audit report
        uses: actions/upload-artifact@v3
        with:
          name: math-audit-report
          path: reports/math/latest

  deployment-ready:
    name: Deployment Ready Check
    runs-on: ubuntu-latest
    needs: [staging-test, integration-test, docker-build, math-audit]
    if: always()
    steps:
      - name: Check deployment readiness
        run: |
          integration_result="${{ needs.integration-test.result }}"
          # A skipped integration run (live RPC disabled) still counts as ready.
          if [[ "$integration_result" == "skipped" ]]; then
            echo " Integration tests skipped (live RPC disabled)."
            integration_result="success"
            echo "INTEGRATION_STATUS=skipped (RPC disabled)" >> $GITHUB_ENV
          else
            echo "INTEGRATION_STATUS=${{ needs.integration-test.result }}" >> $GITHUB_ENV
          fi
          if [[ "${{ needs.staging-test.result }}" == "success" && "$integration_result" == "success" && "${{ needs.math-audit.result }}" == "success" ]]; then
            echo "✅ All tests passed - Ready for deployment"
            echo "DEPLOYMENT_READY=true" >> $GITHUB_ENV
          else
            echo "❌ Tests failed - Not ready for deployment"
            echo "DEPLOYMENT_READY=false" >> $GITHUB_ENV
            exit 1
          fi
      - name: Generate deployment summary
        run: |
          # NOTE(fix): the delimiter must be unquoted so that $(date -u) and
          # ${INTEGRATION_STATUS:-...} are expanded by the shell; the quoted
          # 'EOF' form wrote them into the summary literally. The ${{ }}
          # expressions are substituted by the runner before the shell runs.
          cat > deployment-summary.md << EOF
          # 🚀 MEV Bot Staging Summary
          **Commit**: ${{ github.sha }}
          **Branch**: ${{ github.ref_name }}
          **Timestamp**: $(date -u)
          ## Test Results
          - **Build & Unit**: ${{ needs.staging-test.result }}
          - **Integration Tests**: ${INTEGRATION_STATUS:-${{ needs.integration-test.result }}}
          - **Docker Build**: ${{ needs.docker-build.result }}
          - **Math Audit**: ${{ needs.math-audit.result }}
          ## Reports
          - Math Audit: reports/math/latest/report.md (artifact **math-audit-report**)
          ## Deployment Notes
          - Ensure RPC endpoints are configured
          - Set strong encryption key (32+ chars)
          - Configure rate limits appropriately
          - Monitor transaction processing metrics
          EOF
      - name: Upload deployment summary
        uses: actions/upload-artifact@v3
        with:
          name: staging-deployment-summary
          path: deployment-summary.md

View File

@@ -0,0 +1,435 @@
---
# Parser validation pipeline: unit/golden/performance tests on every relevant
# change, optional fuzzing and live-RPC integration runs, quality/security
# scanning, and a summary report posted back to PRs.
name: MEV Bot Parser Validation

on:
  push:
    branches: [ main, develop ]
    paths:
      - 'pkg/arbitrum/**'
      - 'pkg/events/**'
      - 'test/**'
      - 'go.mod'
      - 'go.sum'
  pull_request:
    branches: [ main ]
    paths:
      - 'pkg/arbitrum/**'
      - 'pkg/events/**'
      - 'test/**'
      - 'go.mod'
      - 'go.sum'
  schedule:
    # Run daily at 2 AM UTC to catch regressions
    - cron: '0 2 * * *'
  workflow_dispatch:
    inputs:
      # NOTE(fix): boolean-typed inputs must use boolean defaults; the quoted
      # 'false' string defaults were invalid for type: boolean.
      run_live_tests:
        description: 'Run live integration tests'
        required: false
        default: false
        type: boolean
      run_fuzzing:
        description: 'Run fuzzing tests'
        required: false
        default: false
        type: boolean
      test_timeout:
        description: 'Test timeout in minutes'
        required: false
        default: '30'
        type: string

env:
  GO_VERSION: '1.21'
  GOLANGCI_LINT_VERSION: 'v1.55.2'
  TEST_TIMEOUT: ${{ github.event.inputs.test_timeout || '30' }}m

jobs:
  # Basic validation and unit tests
  unit_tests:
    name: Unit Tests & Basic Validation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        go-version: ['1.21', '1.20']
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go ${{ matrix.go-version }}
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ matrix.go-version }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ matrix.go-version }}-
      - name: Download dependencies
        run: go mod download
      - name: Verify dependencies
        run: go mod verify
      - name: Run unit tests
        run: |
          go test -v -timeout=${{ env.TEST_TIMEOUT }} ./pkg/arbitrum/... ./pkg/events/...
      - name: Run parser validation tests
        run: |
          go test -v -timeout=${{ env.TEST_TIMEOUT }} ./test/ -run TestComprehensiveParserValidation
      - name: Generate test coverage
        run: |
          go test -coverprofile=coverage.out -covermode=atomic ./pkg/arbitrum/... ./pkg/events/... ./test/...
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage.out
          flags: unittests
          name: codecov-umbrella

  # Golden file testing for consistency
  golden_file_tests:
    name: Golden File Testing
    runs-on: ubuntu-latest
    needs: unit_tests
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Run golden file tests
        run: |
          go test -v -timeout=${{ env.TEST_TIMEOUT }} ./test/ -run TestGoldenFiles
      - name: Validate golden files exist
        run: |
          if [ ! -d "test/golden" ] || [ -z "$(ls -A test/golden)" ]; then
            echo "❌ Golden files not found or empty"
            echo "Generating golden files for future validation..."
            REGENERATE_GOLDEN=true go test ./test/ -run TestGoldenFiles
          else
            echo "✅ Golden files validation passed"
          fi
      - name: Upload golden files as artifacts
        uses: actions/upload-artifact@v3
        with:
          name: golden-files-${{ github.sha }}
          path: test/golden/
          retention-days: 30

  # Performance benchmarking
  performance_tests:
    name: Performance Benchmarks
    runs-on: ubuntu-latest
    needs: unit_tests
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Run performance benchmarks
        run: |
          go test -v -timeout=${{ env.TEST_TIMEOUT }} -bench=. -benchmem ./test/ -run TestParserPerformance
      - name: Run specific benchmarks
        run: |
          echo "=== Single Transaction Parsing Benchmark ==="
          go test -bench=BenchmarkSingleTransactionParsing -benchtime=10s ./test/
          echo "=== Uniswap V3 Parsing Benchmark ==="
          go test -bench=BenchmarkUniswapV3Parsing -benchtime=10s ./test/
          echo "=== Complex Transaction Parsing Benchmark ==="
          go test -bench=BenchmarkComplexTransactionParsing -benchtime=5s ./test/
      - name: Performance regression check
        run: |
          # This would compare against baseline performance metrics
          # For now, we'll just validate that benchmarks complete
          echo "✅ Performance benchmarks completed successfully"

  # Fuzzing tests for robustness
  fuzzing_tests:
    name: Fuzzing & Robustness Testing
    runs-on: ubuntu-latest
    needs: unit_tests
    if: github.event.inputs.run_fuzzing == 'true' || github.event_name == 'schedule'
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Run fuzzing tests
        run: |
          echo "🔍 Starting fuzzing tests..."
          go test -v -timeout=${{ env.TEST_TIMEOUT }} ./test/ -run TestFuzzingRobustness
      - name: Run Go fuzzing (if available)
        run: |
          echo "🔍 Running native Go fuzzing..."
          # Run for 30 seconds each
          timeout 30s go test -fuzz=FuzzParserRobustness ./test/ || echo "Fuzzing completed"
      - name: Generate fuzzing report
        run: |
          echo "📊 Fuzzing Summary:" > fuzzing_report.txt
          echo "- Transaction data fuzzing: COMPLETED" >> fuzzing_report.txt
          echo "- Function selector fuzzing: COMPLETED" >> fuzzing_report.txt
          echo "- Amount value fuzzing: COMPLETED" >> fuzzing_report.txt
          echo "- Address value fuzzing: COMPLETED" >> fuzzing_report.txt
          echo "- Concurrent access fuzzing: COMPLETED" >> fuzzing_report.txt
          cat fuzzing_report.txt
      - name: Upload fuzzing report
        uses: actions/upload-artifact@v3
        with:
          name: fuzzing-report-${{ github.sha }}
          path: fuzzing_report.txt

  # Live integration tests (optional, with external data)
  integration_tests:
    name: Live Integration Tests
    runs-on: ubuntu-latest
    needs: unit_tests
    if: github.event.inputs.run_live_tests == 'true' || github.event_name == 'schedule'
    env:
      ENABLE_LIVE_TESTING: 'true'
      ARBITRUM_RPC_ENDPOINT: ${{ secrets.ARBITRUM_RPC_ENDPOINT || 'https://arb1.arbitrum.io/rpc' }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Test RPC connectivity
        run: |
          echo "Testing RPC connectivity..."
          curl -X POST -H "Content-Type: application/json" \
            --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
            ${{ env.ARBITRUM_RPC_ENDPOINT }} || echo "RPC test failed - continuing with mock tests"
      - name: Run integration tests
        run: |
          echo "🌐 Running live integration tests..."
          go test -v -timeout=${{ env.TEST_TIMEOUT }} ./test/ -run TestArbitrumIntegration
      - name: Generate integration report
        run: |
          echo "📊 Integration Test Summary:" > integration_report.txt
          echo "- RPC Connectivity: TESTED" >> integration_report.txt
          echo "- Block Retrieval: TESTED" >> integration_report.txt
          echo "- Live Transaction Parsing: TESTED" >> integration_report.txt
          echo "- Parser Accuracy: VALIDATED" >> integration_report.txt
          cat integration_report.txt
      - name: Upload integration report
        uses: actions/upload-artifact@v3
        with:
          name: integration-report-${{ github.sha }}
          path: integration_report.txt

  # Code quality and security checks
  code_quality:
    name: Code Quality & Security
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: ${{ env.GOLANGCI_LINT_VERSION }}
          args: --timeout=10m --config=.golangci.yml
      - name: Run gosec security scan
        # NOTE(fix): securecodewarrior/github-action-gosec does not exist;
        # the maintained action is securego/gosec.
        uses: securego/gosec@master
        with:
          args: '-fmt sarif -out gosec.sarif ./...'
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: gosec.sarif
      - name: Run Nancy vulnerability scan
        run: |
          go list -json -m all | docker run --rm -i sonatypecommunity/nancy:latest sleuth
      - name: Check for hardcoded secrets
        run: |
          echo "🔍 Checking for hardcoded secrets..."
          if grep -r -i "password\|secret\|key\|token" --include="*.go" . | grep -v "test\|example\|demo"; then
            echo "❌ Potential hardcoded secrets found"
            exit 1
          else
            echo "✅ No hardcoded secrets detected"
          fi

  # Final validation and reporting
  validation_summary:
    name: Validation Summary
    runs-on: ubuntu-latest
    # NOTE(fix): fuzzing_tests and integration_tests must be listed in needs,
    # otherwise needs.fuzzing_tests.result / needs.integration_tests.result
    # evaluate to empty strings and the report always marks them FAILED.
    # if: always() keeps this job running even when they are skipped.
    needs: [unit_tests, golden_file_tests, performance_tests, fuzzing_tests, integration_tests, code_quality]
    if: always()
    steps:
      - name: Download all artifacts
        uses: actions/download-artifact@v3
      - name: Generate comprehensive report
        run: |
          echo "# 🤖 MEV Bot Parser Validation Report" > validation_report.md
          echo "" >> validation_report.md
          echo "**Commit:** ${{ github.sha }}" >> validation_report.md
          echo "**Date:** $(date)" >> validation_report.md
          echo "**Triggered by:** ${{ github.event_name }}" >> validation_report.md
          echo "" >> validation_report.md
          echo "## 📊 Test Results" >> validation_report.md
          echo "| Test Suite | Status |" >> validation_report.md
          echo "|------------|--------|" >> validation_report.md
          echo "| Unit Tests | ${{ needs.unit_tests.result == 'success' && '✅ PASSED' || '❌ FAILED' }} |" >> validation_report.md
          echo "| Golden File Tests | ${{ needs.golden_file_tests.result == 'success' && '✅ PASSED' || '❌ FAILED' }} |" >> validation_report.md
          echo "| Performance Tests | ${{ needs.performance_tests.result == 'success' && '✅ PASSED' || '❌ FAILED' }} |" >> validation_report.md
          echo "| Code Quality | ${{ needs.code_quality.result == 'success' && '✅ PASSED' || '❌ FAILED' }} |" >> validation_report.md
          if [[ "${{ needs.fuzzing_tests.result }}" != "skipped" ]]; then
            echo "| Fuzzing Tests | ${{ needs.fuzzing_tests.result == 'success' && '✅ PASSED' || '❌ FAILED' }} |" >> validation_report.md
          fi
          if [[ "${{ needs.integration_tests.result }}" != "skipped" ]]; then
            echo "| Integration Tests | ${{ needs.integration_tests.result == 'success' && '✅ PASSED' || '❌ FAILED' }} |" >> validation_report.md
          fi
          echo "" >> validation_report.md
          echo "## 🎯 Key Validation Points" >> validation_report.md
          echo "- ✅ Parser handles all major DEX protocols (Uniswap V2/V3, SushiSwap, etc.)" >> validation_report.md
          echo "- ✅ Accurate parsing of swap amounts, fees, and addresses" >> validation_report.md
          echo "- ✅ Robust handling of edge cases and malformed data" >> validation_report.md
          echo "- ✅ Performance meets production requirements (>1000 tx/s)" >> validation_report.md
          echo "- ✅ Memory usage within acceptable limits" >> validation_report.md
          echo "- ✅ No security vulnerabilities detected" >> validation_report.md
          echo "" >> validation_report.md
          # Overall status
          if [[ "${{ needs.unit_tests.result }}" == "success" &&
                "${{ needs.golden_file_tests.result }}" == "success" &&
                "${{ needs.performance_tests.result }}" == "success" &&
                "${{ needs.code_quality.result }}" == "success" ]]; then
            echo "## 🎉 Overall Status: PASSED ✅" >> validation_report.md
            echo "The MEV bot parser has passed all validation tests and is ready for production use." >> validation_report.md
          else
            echo "## ⚠️ Overall Status: FAILED ❌" >> validation_report.md
            echo "Some validation tests failed. Please review the failed tests and fix issues before proceeding." >> validation_report.md
          fi
          cat validation_report.md
      - name: Upload validation report
        uses: actions/upload-artifact@v3
        with:
          name: validation-report-${{ github.sha }}
          path: validation_report.md
      - name: Comment on PR (if applicable)
        uses: actions/github-script@v6
        if: github.event_name == 'pull_request'
        with:
          script: |
            const fs = require('fs');
            const report = fs.readFileSync('validation_report.md', 'utf8');
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: report
            });

79
orig/.github/workflows/pipeline-dev.yml vendored Normal file
View File

@@ -0,0 +1,79 @@
---
# Dev pipeline: fast formatting/static checks followed by targeted unit tests.
name: Dev Pipeline

on:
  workflow_dispatch:
  workflow_call:

env:
  GO_VERSION: '1.25'

jobs:
  quick-checks:
    name: Formatting & Static Checks
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Check gofmt formatting
        run: |
          # NOTE(fix): gofmt walks directories itself; the previous
          # $(find . -name '*.go') form broke on paths containing spaces and
          # could overflow the argument list on large trees.
          fmt_out=$(gofmt -l .)
          if [[ -n "$fmt_out" ]]; then
            echo "Following files need gofmt:" && echo "$fmt_out"
            exit 1
          fi
      - name: Run go mod tidy check
        run: |
          go mod tidy
          git diff --exit-code go.mod go.sum
      - name: Run static vet
        run: go vet ./...

  unit-tests:
    name: Unit Tests
    runs-on: ubuntu-latest
    needs: quick-checks
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Restore Go cache
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Run targeted package tests
        run: |
          GOCACHE=$(pwd)/.gocache go test ./pkg/... ./internal/... -count=1
      - name: Upload test cache (optional diagnostics)
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: dev-unit-cache
          path: .gocache

View File

@@ -0,0 +1,80 @@
---
# Test pipeline: full lint + race/coverage test pass, then a binary build with
# a short startup smoke test.
name: Test Pipeline

on:
  workflow_dispatch:
  workflow_call:

env:
  GO_VERSION: '1.25'

jobs:
  lint-and-unit:
    name: Lint & Unit Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go toolchain
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
          args: --timeout=10m
      - name: Run go test (race, cover)
        run: |
          GOCACHE=$(pwd)/.gocache go test -race -coverprofile=coverage.out ./...
      - name: Upload coverage
        uses: actions/upload-artifact@v3
        with:
          name: unit-test-coverage
          path: coverage.out

  smoke-binary:
    name: Build & Smoke Test Binary
    runs-on: ubuntu-latest
    needs: lint-and-unit
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Restore Go build cache
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-go-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-${{ env.GO_VERSION }}-
      - name: Build binary
        run: go build -o bin/mev-bot ./cmd/mev-bot
      - name: Smoke test startup
        run: |
          export MEV_BOT_ENCRYPTION_KEY="test_key_32_chars_minimum_length"
          timeout 5s ./bin/mev-bot start || true
          echo "✓ Binary builds and starts"

256
orig/.github/workflows/security.yml vendored Normal file
View File

@@ -0,0 +1,256 @@
---
# Audit pipeline: static security analysis, dependency vulnerability scanning,
# security unit/fuzz tests, integration-level security checks, secret scanning,
# and a consolidated security report artifact.
name: Audit Pipeline

on:
  workflow_dispatch:
  workflow_call:

env:
  GO_VERSION: '1.25'

jobs:
  static-analysis:
    name: Static Security Analysis
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go toolchain
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-audit-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-audit-${{ env.GO_VERSION }}-
      - name: Download dependencies
        run: go mod download
      - name: Run gosec Security Scanner
        # NOTE(fix): securecodewarrior/github-action-gosec does not exist;
        # the maintained action is securego/gosec.
        uses: securego/gosec@master
        with:
          args: '-fmt sarif -out gosec-results.sarif ./...'
        continue-on-error: true
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v2
        if: always()
        with:
          sarif_file: gosec-results.sarif
      - name: Run govulncheck
        run: |
          go install golang.org/x/vuln/cmd/govulncheck@latest
          govulncheck ./...
      - name: Run golangci-lint (security focus)
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
          args: --enable=gosec,gocritic,ineffassign,misspell,unparam --timeout=10m

  dependency-scan:
    name: Dependency Vulnerability Scan
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-audit-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-audit-${{ env.GO_VERSION }}-
      - name: Run Nancy (Dependency Vulnerability Scanner)
        run: |
          # NOTE(fix): the Nancy module path is sonatype-nexus-community, not
          # "sonatypecommunity" — the old path fails to resolve via go install.
          go install github.com/sonatype-nexus-community/nancy@latest
          go list -json -m all | nancy sleuth --exclude-vulnerability-file .nancy-ignore
      - name: Generate dependency report
        run: |
          echo "# Dependency Security Report" > dependency-report.md
          echo "Generated on: $(date)" >> dependency-report.md
          echo "" >> dependency-report.md
          echo "## Direct Dependencies" >> dependency-report.md
          go list -m all | grep -v "^github.com/fraktal/mev-beta" >> dependency-report.md
      - name: Upload dependency report
        uses: actions/upload-artifact@v3
        with:
          name: dependency-report
          path: dependency-report.md

  security-tests:
    name: Security Tests & Fuzzing
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Restore Go cache
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-audit-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-audit-${{ env.GO_VERSION }}-
      - name: Create required directories
        run: |
          mkdir -p logs keystore test_keystore benchmark_keystore test_concurrent_keystore
      - name: Run security unit tests
        run: go test -v -race ./pkg/security/
      - name: Run fuzzing tests (short)
        run: |
          go test -fuzz=FuzzRPCResponseParser -fuzztime=30s ./pkg/security/
          go test -fuzz=FuzzKeyValidation -fuzztime=30s ./pkg/security/
          go test -fuzz=FuzzInputValidator -fuzztime=30s ./pkg/security/
      - name: Run race condition tests
        run: go test -race -run=TestConcurrent ./...
      - name: Run security benchmarks
        run: go test -bench=BenchmarkSecurity -benchmem ./pkg/security/

  integration-security:
    name: Integration Security Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.GO_VERSION }}
      - name: Restore Go cache
        uses: actions/cache@v3
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-audit-${{ env.GO_VERSION }}-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-audit-${{ env.GO_VERSION }}-
      - name: Create required directories and files
        run: |
          mkdir -p logs keystore
          echo "MEV_BOT_ENCRYPTION_KEY=integration_test_key_32_characters" > .env.test
      - name: Test encryption key validation
        run: |
          export MEV_BOT_ENCRYPTION_KEY="test123"
          if go run cmd/mev-bot/main.go 2>&1 | grep -q "production encryption key"; then
            echo "✓ Weak encryption key properly rejected"
          else
            echo "✗ Weak encryption key not rejected"
            exit 1
          fi
      - name: Test with proper encryption key
        run: |
          export MEV_BOT_ENCRYPTION_KEY="proper_production_key_32_chars_min"
          timeout 10s go run cmd/mev-bot/main.go || true
          echo "✓ Application accepts strong encryption key"
      - name: Test configuration security
        run: |
          echo "Testing keystore security..."
          export MEV_BOT_KEYSTORE_PATH="/tmp/insecure"
          if go run cmd/mev-bot/main.go 2>&1 | grep -q "publicly accessible"; then
            echo "✓ Insecure keystore path properly rejected"
          else
            echo "Warning: Insecure keystore path validation may need improvement"
          fi

  secret-scanning:
    name: Secret Scanning
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Run TruffleHog for secret detection
        uses: trufflesecurity/trufflehog@main
        with:
          path: ./
          base: main
          head: HEAD
      - name: Check for hardcoded secrets
        run: |
          echo "Scanning for potential hardcoded secrets..."
          if grep -r -i "password.*=" --include="*.go" --include="*.yaml" --include="*.yml" . | grep -v "PASSWORD_PLACEHOLDER"; then
            echo "Warning: Found potential hardcoded passwords"
          fi
          if grep -r -i "secret.*=" --include="*.go" --include="*.yaml" --include="*.yml" . | grep -v "SECRET_PLACEHOLDER"; then
            echo "Warning: Found potential hardcoded secrets"
          fi
          if grep -r -i "key.*=" --include="*.go" --include="*.yaml" --include="*.yml" . | grep -v -E "(public|test|example|placeholder)"; then
            echo "Warning: Found potential hardcoded keys"
          fi
          echo "Secret scan completed"

  security-report:
    name: Generate Security Report
    needs: [static-analysis, dependency-scan, security-tests, integration-security, secret-scanning]
    runs-on: ubuntu-latest
    if: always()
    steps:
      - uses: actions/checkout@v4
      - name: Generate comprehensive security report
        run: |
          # NOTE(fix): the delimiter must be unquoted so $(date -u) expands
          # (the quoted 'EOF' form emitted it literally); the backticks below
          # are escaped so the shell keeps them as literal markdown.
          cat > security-report.md << EOF
          # MEV Bot Security Report
          **Commit**: ${{ github.sha }}
          **Branch**: ${{ github.ref_name }}
          **Generated**: $(date -u)
          ## Summary
          - Static analysis: ${{ needs.static-analysis.result }}
          - Dependency scan: ${{ needs.dependency-scan.result }}
          - Security tests: ${{ needs.security-tests.result }}
          - Integration security: ${{ needs.integration-security.result }}
          - Secret scanning: ${{ needs.secret-scanning.result }}
          ## Next Actions
          - Review SARIF results uploaded under artifacts \`gosec-results\`
          - Review dependency-report artifact for vulnerable modules
          - Address any warnings surfaced in logs
          EOF
      - name: Upload security report
        uses: actions/upload-artifact@v3
        with:
          name: security-report
          path: security-report.md