Restructured project for V2 refactor:

**Structure Changes:**
- Moved all V1 code to orig/ folder (preserved with git mv)
- Created docs/planning/ directory
- Added orig/README_V1.md explaining V1 preservation

**Planning Documents:**
- 00_V2_MASTER_PLAN.md: Complete architecture overview
  - Executive summary of critical V1 issues
  - High-level component architecture diagrams
  - 5-phase implementation roadmap
  - Success metrics and risk mitigation
- 07_TASK_BREAKDOWN.md: Atomic task breakdown
  - 99+ hours of detailed tasks
  - Every task < 2 hours (atomic)
  - Clear dependencies and success criteria
  - Organized by implementation phase

**V2 Key Improvements:**
- Per-exchange parsers (factory pattern; see the sketch below)
- Multi-layer strict validation
- Multi-index pool cache
- Background validation pipeline
- Comprehensive observability

**Critical Issues Addressed:**
- Zero address tokens (strict validation + cache enrichment)
- Parsing accuracy (protocol-specific parsers)
- No audit trail (background validation channel)
- Inefficient lookups (multi-index cache)
- Stats disconnection (event-driven metrics)

Next Steps:
1. Review planning documents
2. Begin Phase 1: Foundation (P1-001 through P1-010)
3. Implement parsers in Phase 2
4. Build cache system in Phase 3
5. Add validation pipeline in Phase 4
6. Migrate and test in Phase 5

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
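The per-exchange parsers are the core of Phase 2, so a minimal sketch of what a factory-pattern parser registry could look like in Go may help orient review. Every name here (`Parser`, `SwapEvent`, `Register`, `New`) is illustrative, not the actual V2 API:

```go
package parser

import "fmt"

// SwapEvent is a hypothetical parsed result; the real V2 type will differ.
type SwapEvent struct {
	Pool     string
	TokenIn  string
	TokenOut string
}

// Parser is implemented once per exchange/protocol.
type Parser interface {
	Parse(raw []byte) (*SwapEvent, error)
}

// factories maps a protocol name to a constructor for its parser.
var factories = map[string]func() Parser{}

// Register is called by each protocol package, typically from init().
func Register(name string, f func() Parser) {
	factories[name] = f
}

// New returns the parser registered for a protocol. Unknown protocols
// fail loudly, in line with the strict-validation goal above.
func New(name string) (Parser, error) {
	f, ok := factories[name]
	if !ok {
		return nil, fmt.Errorf("parser: no factory registered for %q", name)
	}
	return f(), nil
}
```

A protocol package would then self-register (e.g. `parser.Register("uniswap_v2", ...)`), keeping the event loop free of protocol-specific branching.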
391 lines · 11 KiB · Go
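// Tests for the security dashboard: constructor defaults, dashboard and
// metric generation, export formats, scoring, and widget configuration.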
package security

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNewSecurityDashboard(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts:    true,
		AlertBuffer:     1000,
		MaxEvents:       1000,
		CleanupInterval: time.Hour,
		MetricsInterval: 30 * time.Second,
	})

	// Test with default config
	dashboard := NewSecurityDashboard(monitor, nil)
	assert.NotNil(t, dashboard)
	assert.NotNil(t, dashboard.config)
	assert.Equal(t, 30*time.Second, dashboard.config.RefreshInterval)

	// Test with custom config
	customConfig := &DashboardConfig{
		RefreshInterval: time.Minute,
		AlertThresholds: map[string]float64{
			"test_metric": 0.5,
		},
		EnabledWidgets: []string{"overview"},
		ExportFormat:   "json",
	}

	dashboard2 := NewSecurityDashboard(monitor, customConfig)
	assert.NotNil(t, dashboard2)
	assert.Equal(t, time.Minute, dashboard2.config.RefreshInterval)
	assert.Equal(t, 0.5, dashboard2.config.AlertThresholds["test_metric"])
}

func TestGenerateDashboard(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts:    true,
		AlertBuffer:     1000,
		MaxEvents:       1000,
		CleanupInterval: time.Hour,
		MetricsInterval: 30 * time.Second,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	// Generate some test data
	monitor.RecordEvent("request", "127.0.0.1", "Test request", "info", map[string]interface{}{
		"success": true,
	})

	data, err := dashboard.GenerateDashboard()
	require.NoError(t, err)
	assert.NotNil(t, data)
	assert.NotNil(t, data.OverviewMetrics)
	assert.NotNil(t, data.ThreatAnalysis)
	assert.NotNil(t, data.PerformanceData)
	assert.NotNil(t, data.TrendAnalysis)
	assert.NotNil(t, data.SystemHealth)
}

func TestOverviewMetrics(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)
	metrics := monitor.GetMetrics()

	overview := dashboard.generateOverviewMetrics(metrics)
	assert.NotNil(t, overview)
	assert.GreaterOrEqual(t, overview.SecurityScore, 0.0)
	assert.LessOrEqual(t, overview.SecurityScore, 100.0)
	assert.Contains(t, []string{"LOW", "MEDIUM", "HIGH", "CRITICAL"}, overview.ThreatLevel)
	assert.GreaterOrEqual(t, overview.SuccessRate, 0.0)
	assert.LessOrEqual(t, overview.SuccessRate, 100.0)
}

func TestThreatAnalysis(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)
	metrics := monitor.GetMetrics()

	threatAnalysis := dashboard.generateThreatAnalysis(metrics)
	assert.NotNil(t, threatAnalysis)
	assert.GreaterOrEqual(t, threatAnalysis.DDoSRisk, 0.0)
	assert.LessOrEqual(t, threatAnalysis.DDoSRisk, 1.0)
	assert.GreaterOrEqual(t, threatAnalysis.BruteForceRisk, 0.0)
	assert.LessOrEqual(t, threatAnalysis.BruteForceRisk, 1.0)
	assert.GreaterOrEqual(t, threatAnalysis.AnomalyScore, 0.0)
	assert.LessOrEqual(t, threatAnalysis.AnomalyScore, 1.0)
	assert.NotNil(t, threatAnalysis.MitigationStatus)
	assert.NotNil(t, threatAnalysis.ThreatVectors)
}

func TestPerformanceMetrics(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)
	metrics := monitor.GetMetrics()

	performance := dashboard.generatePerformanceMetrics(metrics)
	assert.NotNil(t, performance)
	assert.Greater(t, performance.AverageValidationTime, 0.0)
	assert.Greater(t, performance.AverageEncryptionTime, 0.0)
	assert.Greater(t, performance.AverageDecryptionTime, 0.0)
	assert.GreaterOrEqual(t, performance.ErrorRate, 0.0)
	assert.LessOrEqual(t, performance.ErrorRate, 100.0)
}

func TestDashboardSystemHealth(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)
	metrics := monitor.GetMetrics()

	health := dashboard.generateSystemHealth(metrics)
	assert.NotNil(t, health)
	assert.NotNil(t, health.SecurityComponentHealth)
	assert.Contains(t, []string{"HEALTHY", "WARNING", "DEGRADED", "CRITICAL"}, health.OverallHealth)
	assert.GreaterOrEqual(t, health.HealthScore, 0.0)
	assert.LessOrEqual(t, health.HealthScore, 100.0)
}

func TestTopThreats(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)
	metrics := monitor.GetMetrics()

	topThreats := dashboard.generateTopThreats(metrics)
	assert.NotNil(t, topThreats)
	assert.LessOrEqual(t, len(topThreats), 10) // The list should be capped at a reasonable size

	for _, threat := range topThreats {
		assert.NotEmpty(t, threat.ThreatType)
		assert.GreaterOrEqual(t, threat.Count, int64(0))
		assert.Contains(t, []string{"NONE", "LOW", "MEDIUM", "HIGH", "CRITICAL"}, threat.Severity)
		assert.Contains(t, []string{"ACTIVE", "MITIGATED", "MONITORING"}, threat.Status)
	}
}

func TestTrendAnalysis(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)
	metrics := monitor.GetMetrics()

	trends := dashboard.generateTrendAnalysis(metrics)
	assert.NotNil(t, trends)
	assert.NotNil(t, trends.HourlyTrends)
	assert.NotNil(t, trends.DailyTrends)
	assert.NotNil(t, trends.Predictions)
	assert.NotNil(t, trends.GrowthRates)

	// Check hourly trends have expected structure
	if requestTrends, exists := trends.HourlyTrends["requests"]; exists {
		assert.LessOrEqual(t, len(requestTrends), 24) // Should have at most 24 hours
		for _, point := range requestTrends {
			assert.GreaterOrEqual(t, point.Value, 0.0)
			assert.False(t, point.Timestamp.IsZero())
		}
	}
}

func TestExportDashboard(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	// Test JSON export
	jsonData, err := dashboard.ExportDashboard("json")
	require.NoError(t, err)
	assert.NotEmpty(t, jsonData)

	// Verify it's valid JSON
	var parsed DashboardData
	err = json.Unmarshal(jsonData, &parsed)
	require.NoError(t, err)

	// Test CSV export
	csvData, err := dashboard.ExportDashboard("csv")
	require.NoError(t, err)
	assert.NotEmpty(t, csvData)
	assert.Contains(t, string(csvData), "Metric,Value,Timestamp")

	// Test Prometheus export: the text exposition format introduces each
	// metric with "# HELP" and "# TYPE" comment lines
	promData, err := dashboard.ExportDashboard("prometheus")
	require.NoError(t, err)
	assert.NotEmpty(t, promData)
	assert.Contains(t, string(promData), "# HELP")
	assert.Contains(t, string(promData), "# TYPE")

	// Test unsupported format
	_, err = dashboard.ExportDashboard("unsupported")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "unsupported export format")
}

func TestSecurityScoreCalculation(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	// Test with clean metrics (high score)
	cleanMetrics := &SecurityMetrics{
		TotalRequests:       1000,
		BlockedRequests:     0,
		DDoSAttempts:        0,
		BruteForceAttempts:  0,
		RateLimitViolations: 0,
	}
	score := dashboard.calculateSecurityScore(cleanMetrics)
	assert.Equal(t, 100.0, score)

	// Test with some threats (reduced score)
	threatsMetrics := &SecurityMetrics{
		TotalRequests:       1000,
		BlockedRequests:     50,
		DDoSAttempts:        10,
		BruteForceAttempts:  5,
		RateLimitViolations: 20,
	}
	score = dashboard.calculateSecurityScore(threatsMetrics)
	assert.Less(t, score, 100.0)
	assert.GreaterOrEqual(t, score, 0.0)
}

func TestThreatLevelCalculation(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	testCases := []struct {
		score    float64
		expected string
	}{
		{95.0, "LOW"},
		{85.0, "MEDIUM"},
		{60.0, "HIGH"},
		{30.0, "CRITICAL"},
	}

	for _, tc := range testCases {
		result := dashboard.calculateThreatLevel(tc.score)
		assert.Equal(t, tc.expected, result, "Score %.1f should give threat level %s", tc.score, tc.expected)
	}
}

func TestWidgetConfiguration(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	// Test with limited widgets
	config := &DashboardConfig{
		EnabledWidgets: []string{"overview", "alerts"},
	}

	dashboard := NewSecurityDashboard(monitor, config)

	assert.True(t, dashboard.isWidgetEnabled("overview"))
	assert.True(t, dashboard.isWidgetEnabled("alerts"))
	assert.False(t, dashboard.isWidgetEnabled("threats"))
	assert.False(t, dashboard.isWidgetEnabled("performance"))

	// Generate dashboard with limited widgets
	data, err := dashboard.GenerateDashboard()
	require.NoError(t, err)

	assert.NotNil(t, data.OverviewMetrics)
	assert.NotNil(t, data.SecurityAlerts)
	assert.Nil(t, data.ThreatAnalysis)  // Should be nil because "threats" widget is disabled
	assert.Nil(t, data.PerformanceData) // Should be nil because "performance" widget is disabled
}

func TestAttackPatternDetection(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	// Test with metrics showing DDoS activity
	metrics := &SecurityMetrics{
		DDoSAttempts:       25,
		BruteForceAttempts: 0,
	}

	patterns := dashboard.detectAttackPatterns(metrics)
	assert.NotEmpty(t, patterns)

	ddosPattern := patterns[0]
	assert.Equal(t, "DDoS", ddosPattern.PatternType)
	assert.Equal(t, int64(25), ddosPattern.Frequency)
	assert.Equal(t, "HIGH", ddosPattern.Severity)
	assert.GreaterOrEqual(t, ddosPattern.Confidence, 0.0)
	assert.LessOrEqual(t, ddosPattern.Confidence, 1.0)
	assert.NotEmpty(t, ddosPattern.Description)
}

func TestRiskFactorIdentification(t *testing.T) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	// Test with various risk scenarios
	riskMetrics := &SecurityMetrics{
		DDoSAttempts:        15,
		BruteForceAttempts:  8,
		RateLimitViolations: 150,
		FailedKeyAccess:     12,
	}

	factors := dashboard.identifyRiskFactors(riskMetrics)
	assert.NotEmpty(t, factors)
	assert.Contains(t, factors, "High DDoS activity")
	assert.Contains(t, factors, "Brute force attacks detected")
	assert.Contains(t, factors, "Excessive rate limit violations")
	assert.Contains(t, factors, "Multiple failed key access attempts")

	// Test with clean metrics
	cleanMetrics := &SecurityMetrics{
		DDoSAttempts:        0,
		BruteForceAttempts:  0,
		RateLimitViolations: 5,
		FailedKeyAccess:     2,
	}

	cleanFactors := dashboard.identifyRiskFactors(cleanMetrics)
	assert.Empty(t, cleanFactors)
}

func BenchmarkGenerateDashboard(b *testing.B) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := dashboard.GenerateDashboard()
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkExportJSON(b *testing.B) {
	monitor := NewSecurityMonitor(&MonitorConfig{
		EnableAlerts: true,
	})

	dashboard := NewSecurityDashboard(monitor, nil)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := dashboard.ExportDashboard("json")
		if err != nil {
			b.Fatal(err)
		}
	}
}