saving in place

This commit is contained in:
Krypto Kajun
2025-10-04 09:31:02 -05:00
parent 76c1b5cee1
commit f358f49aa9
295 changed files with 72071 additions and 17209 deletions

View File

@@ -552,12 +552,59 @@ func (bs *BenchmarkSuite) generateAnalysis() ReportAnalysis {
}
// analyzeScalability estimates how throughput scales with concurrency across
// the recorded benchmark results.
//
// It computes per-result throughput (messages received / duration), finds the
// peak, and compares the actual improvement over the first sample against
// ideal linear scaling. Scaling counts as "linear" when the measured factor
// is within 20% of ideal (>= 0.8). With fewer than two usable samples there
// is nothing to compare, so a conservative zero-scaling result is returned.
func (bs *BenchmarkSuite) analyzeScalability() ScalabilityAnalysis {
	if len(bs.results) < 2 {
		return ScalabilityAnalysis{
			LinearScaling:      false,
			ScalingFactor:      0.0,
			OptimalConcurrency: 1,
		}
	}
	// Collect (concurrency, throughput) pairs, skipping results that would
	// divide by zero or carry no concurrency information.
	var throughputData []float64
	var concurrencyData []int
	for _, result := range bs.results {
		if result.Concurrency > 0 && result.Duration > 0 {
			throughput := float64(result.MessagesReceived) / result.Duration.Seconds()
			throughputData = append(throughputData, throughput)
			concurrencyData = append(concurrencyData, result.Concurrency)
		}
	}
	if len(throughputData) < 2 {
		return ScalabilityAnalysis{
			LinearScaling:      false,
			ScalingFactor:      0.0,
			OptimalConcurrency: 1,
		}
	}
	// Locate the peak throughput and the concurrency level that produced it.
	maxThroughput := 0.0
	maxThroughputConcurrency := 1
	baseThroughput := throughputData[0]
	baseConcurrency := float64(concurrencyData[0])
	for i, throughput := range throughputData {
		if throughput > maxThroughput {
			maxThroughput = throughput
			maxThroughputConcurrency = concurrencyData[i]
		}
	}
	// Ideal linear scaling multiplies the base throughput by the concurrency
	// ratio; the scaling factor is actual/ideal.
	idealThroughput := baseThroughput * float64(maxThroughputConcurrency) / baseConcurrency
	actualScalingFactor := maxThroughput / idealThroughput
	linearScaling := actualScalingFactor >= 0.8
	// The merge left both the old placeholder fields and the computed fields
	// in this literal (duplicate keys); only the computed values remain.
	return ScalabilityAnalysis{
		LinearScaling:      linearScaling,
		ScalingFactor:      actualScalingFactor,
		OptimalConcurrency: maxThroughputConcurrency,
	}
}

View File

@@ -1,8 +1,14 @@
package transport
import (
"bytes"
"compress/gzip"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
@@ -94,7 +100,7 @@ func (fpl *FilePersistenceLayer) Store(msg *Message) error {
// Create directory if it doesn't exist
topicDir := filepath.Join(fpl.basePath, msg.Topic)
if err := os.MkdirAll(topicDir, 0755); err != nil {
if err := os.MkdirAll(topicDir, 0750); err != nil {
return fmt.Errorf("failed to create topic directory: %w", err)
}
@@ -139,7 +145,7 @@ func (fpl *FilePersistenceLayer) Store(msg *Message) error {
}
// Write to file
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600)
if err != nil {
return fmt.Errorf("failed to open file: %w", err)
}
@@ -193,10 +199,26 @@ func (fpl *FilePersistenceLayer) Delete(id string) error {
fpl.mu.Lock()
defer fpl.mu.Unlock()
// This is a simplified implementation
// In a production system, you might want to mark messages as deleted
// and compact files periodically instead of rewriting entire files
return fmt.Errorf("delete operation not yet implemented")
// Search all topic directories to find the message
topicDirs, err := fpl.getTopicDirectories()
if err != nil {
return fmt.Errorf("failed to get topic directories: %w", err)
}
for _, topicDir := range topicDirs {
files, err := fpl.getTopicFiles(topicDir)
if err != nil {
continue
}
for _, file := range files {
if err := fpl.deleteMessageFromFile(file, id); err == nil {
return nil // Successfully deleted
}
}
}
return fmt.Errorf("message not found: %s", id)
}
// List returns messages for a topic with optional limit
@@ -483,25 +505,203 @@ func (fpl *FilePersistenceLayer) isDirectoryEmpty(dir string) (bool, error) {
}
// encrypt seals data with AES-GCM using the configured key. The 12-byte
// random nonce is prepended to the ciphertext so decrypt can recover it.
// When encryption is disabled or no key is set, data is returned unchanged.
//
// The merge left the old placeholder "return data, nil" above the real
// implementation, making all of it unreachable; the stale return is removed.
func (fpl *FilePersistenceLayer) encrypt(data []byte) ([]byte, error) {
	if !fpl.encryption.Enabled || len(fpl.encryption.Key) == 0 {
		return data, nil
	}
	block, err := aes.NewCipher(fpl.encryption.Key)
	if err != nil {
		return nil, fmt.Errorf("failed to create cipher: %w", err)
	}
	// GCM standard nonce size; must be unique per message under the same key.
	nonce := make([]byte, 12)
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, fmt.Errorf("failed to generate nonce: %w", err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, fmt.Errorf("failed to create GCM: %w", err)
	}
	// Seal both encrypts and authenticates.
	ciphertext := gcm.Seal(nil, nonce, data, nil)
	// Prepend the nonce so decrypt can split it back off.
	result := make([]byte, len(nonce)+len(ciphertext))
	copy(result, nonce)
	copy(result[len(nonce):], ciphertext)
	return result, nil
}
// decrypt reverses encrypt: it splits the 12-byte nonce prefix from the
// AES-GCM payload, then decrypts and authenticates the remainder. When
// encryption is disabled or no key is set, data is returned unchanged.
//
// The merge left the old placeholder "return data, nil" above the real
// implementation, making all of it unreachable; the stale return is removed.
func (fpl *FilePersistenceLayer) decrypt(data []byte) ([]byte, error) {
	if !fpl.encryption.Enabled || len(fpl.encryption.Key) == 0 {
		return data, nil
	}
	// Need at least the nonce prefix.
	if len(data) < 12 {
		return nil, fmt.Errorf("encrypted data too short")
	}
	block, err := aes.NewCipher(fpl.encryption.Key)
	if err != nil {
		return nil, fmt.Errorf("failed to create cipher: %w", err)
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, fmt.Errorf("failed to create GCM: %w", err)
	}
	nonce := data[:12]
	ciphertext := data[12:]
	// Open verifies the GCM auth tag as well as decrypting.
	plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		return nil, fmt.Errorf("decryption failed: %w", err)
	}
	return plaintext, nil
}
// compress gzips data and returns the compressed bytes.
//
// The merge left the old placeholder "return data, nil" above the real
// implementation, making all of it unreachable; the stale return is removed.
func (fpl *FilePersistenceLayer) compress(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	gzWriter := gzip.NewWriter(&buf)
	if _, err := gzWriter.Write(data); err != nil {
		gzWriter.Close()
		return nil, fmt.Errorf("compression failed: %w", err)
	}
	// Close flushes the gzip footer; without it the stream is truncated.
	if err := gzWriter.Close(); err != nil {
		return nil, fmt.Errorf("failed to close gzip writer: %w", err)
	}
	return buf.Bytes(), nil
}
// decompress reverses compress, inflating a gzip stream back into the raw
// payload.
//
// The merge left the old placeholder "return data, nil" above the real
// implementation, making it unreachable; the stale return is removed. Also
// switches the deprecated ioutil.ReadAll to io.ReadAll ("io" is already
// imported by this file).
func (fpl *FilePersistenceLayer) decompress(data []byte) ([]byte, error) {
	gzReader, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("failed to create gzip reader: %w", err)
	}
	defer gzReader.Close()
	decompressed, err := io.ReadAll(gzReader)
	if err != nil {
		return nil, fmt.Errorf("decompression failed: %w", err)
	}
	return decompressed, nil
}
// deleteMessageFromFile removes the message with the given ID from filename.
// It returns an error when the message is not present in that file; when the
// last message is removed, the now-empty file is deleted instead of rewritten.
func (fpl *FilePersistenceLayer) deleteMessageFromFile(filename, messageID string) error {
	messages, err := fpl.readMessagesFromFile(filename)
	if err != nil {
		return fmt.Errorf("failed to read messages from file: %w", err)
	}
	// Keep everything except the target; track whether we actually saw it.
	removed := false
	remaining := make([]*Message, 0, len(messages))
	for _, m := range messages {
		if m.ID == messageID {
			removed = true
			continue
		}
		remaining = append(remaining, m)
	}
	if !removed {
		return fmt.Errorf("message not found in file")
	}
	// Nothing left: drop the file entirely rather than rewriting it empty.
	if len(remaining) == 0 {
		return os.Remove(filename)
	}
	return fpl.rewriteFileWithMessages(filename, remaining)
}
// rewriteFileWithMessages atomically replaces filename with a file containing
// only the given messages, re-applying the layer's encryption and compression
// settings. Records are written in the store format: a decimal length prefix,
// a newline, then the (possibly encrypted/compressed) JSON payload.
func (fpl *FilePersistenceLayer) rewriteFileWithMessages(filename string, messages []*Message) error {
	tempFile := filename + ".tmp"
	// 0600 matches the permissions used when appending new messages
	// (os.Create would use 0666).
	file, err := os.OpenFile(tempFile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
	if err != nil {
		return fmt.Errorf("failed to create temp file: %w", err)
	}
	// On any failure, release the handle and remove the partial temp file.
	cleanup := func() {
		file.Close()
		os.Remove(tempFile)
	}
	for _, msg := range messages {
		persistedMsg := &PersistedMessage{
			ID:       msg.ID,
			Topic:    msg.Topic,
			Message:  msg,
			Stored:   time.Now(),
			Metadata: make(map[string]interface{}),
		}
		// BUG FIX: mark the record as encrypted BEFORE serializing it. The
		// original set Encrypted after json.Marshal, so the flag was never
		// part of the persisted payload.
		if fpl.encryption.Enabled {
			persistedMsg.Encrypted = true
		}
		data, err := json.Marshal(persistedMsg)
		if err != nil {
			cleanup()
			return fmt.Errorf("failed to marshal message: %w", err)
		}
		if fpl.encryption.Enabled {
			encryptedData, err := fpl.encrypt(data)
			if err != nil {
				cleanup()
				return fmt.Errorf("encryption failed: %w", err)
			}
			data = encryptedData
		}
		if fpl.compression {
			compressedData, err := fpl.compress(data)
			if err != nil {
				cleanup()
				return fmt.Errorf("compression failed: %w", err)
			}
			data = compressedData
		}
		// Length prefix lets the reader frame each record.
		if _, err := fmt.Fprintf(file, "%d\n", len(data)); err != nil {
			cleanup()
			return fmt.Errorf("failed to write length prefix: %w", err)
		}
		if _, err := file.Write(data); err != nil {
			cleanup()
			return fmt.Errorf("failed to write data: %w", err)
		}
	}
	// Close before rename so the data is flushed and the handle released.
	if err := file.Close(); err != nil {
		os.Remove(tempFile)
		return fmt.Errorf("failed to close temp file: %w", err)
	}
	// Atomic replace of the original file.
	return os.Rename(tempFile, filename)
}
// InMemoryPersistenceLayer implements in-memory persistence for testing/development

View File

@@ -0,0 +1,509 @@
package transport
import (
"context"
"fmt"
"net/http"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
"golang.org/x/time/rate"
"gopkg.in/yaml.v3"
)
// ProviderConfig represents a single RPC provider configuration as declared
// in the YAML config file.
type ProviderConfig struct {
	Name         string            `yaml:"name"`
	Type         string            `yaml:"type"` // e.g. "anvil_fork" requires AnvilConfig (see validateProvidersConfig)
	HTTPEndpoint string            `yaml:"http_endpoint"`
	WSEndpoint   string            `yaml:"ws_endpoint"`
	Priority     int               `yaml:"priority"` // lower value = higher priority (see getPriorityProvider)
	RateLimit    RateLimitConfig   `yaml:"rate_limit"`
	Features     []string          `yaml:"features"`
	HealthCheck  HealthCheckConfig `yaml:"health_check"`
	AnvilConfig  *AnvilConfig      `yaml:"anvil_config,omitempty"` // only for Anvil fork providers
}

// AnvilConfig defines Anvil-specific configuration for fork providers.
type AnvilConfig struct {
	ForkURL         string `yaml:"fork_url"`
	ChainID         int    `yaml:"chain_id"`
	Port            int    `yaml:"port"`
	BlockTime       int    `yaml:"block_time"`
	AutoImpersonate bool   `yaml:"auto_impersonate"`
	StateInterval   int    `yaml:"state_interval"`
}

// RateLimitConfig defines per-provider rate limiting parameters. Timeout
// doubles as the provider's HTTP client timeout (see createProvider).
//
// NOTE(review): yaml.v3 does not decode strings like "30s" into
// time.Duration — confirm how the duration fields here are populated.
type RateLimitConfig struct {
	RequestsPerSecond int           `yaml:"requests_per_second"`
	Burst             int           `yaml:"burst"`
	Timeout           time.Duration `yaml:"timeout"`
	RetryDelay        time.Duration `yaml:"retry_delay"`
	MaxRetries        int           `yaml:"max_retries"`
}

// HealthCheckConfig defines health check parameters for a provider.
type HealthCheckConfig struct {
	Enabled  bool          `yaml:"enabled"`
	Interval time.Duration `yaml:"interval"`
	Timeout  time.Duration `yaml:"timeout"`
}

// RotationConfig defines how GetHealthyProvider rotates between providers.
type RotationConfig struct {
	Strategy            string        `yaml:"strategy"` // "round_robin", "weighted", or "priority_based"
	HealthCheckRequired bool          `yaml:"health_check_required"`
	FallbackEnabled     bool          `yaml:"fallback_enabled"`
	RetryFailedAfter    time.Duration `yaml:"retry_failed_after"`
}

// ProviderPoolConfig defines a named pool of providers (referenced by name
// from the Providers list).
type ProviderPoolConfig struct {
	Strategy                 string   `yaml:"strategy"`
	MaxConcurrentConnections int      `yaml:"max_concurrent_connections"`
	HealthCheckInterval      string   `yaml:"health_check_interval"`
	FailoverEnabled          bool     `yaml:"failover_enabled"`
	Providers                []string `yaml:"providers"`
}

// ProvidersConfig is the root of the provider YAML configuration file.
type ProvidersConfig struct {
	ProviderPools map[string]ProviderPoolConfig `yaml:"provider_pools"`
	Providers     []ProviderConfig              `yaml:"providers"`
	Rotation      RotationConfig                `yaml:"rotation"`
	GlobalLimits  GlobalLimits                  `yaml:"global_limits"`
	Monitoring    MonitoringConfig              `yaml:"monitoring"`
}

// GlobalLimits defines global connection limits.
type GlobalLimits struct {
	MaxConcurrentConnections int           `yaml:"max_concurrent_connections"`
	ConnectionTimeout        time.Duration `yaml:"connection_timeout"`
	ReadTimeout              time.Duration `yaml:"read_timeout"`
	WriteTimeout             time.Duration `yaml:"write_timeout"`
	IdleTimeout              time.Duration `yaml:"idle_timeout"`
}

// MonitoringConfig defines monitoring settings; when Enabled is false, no
// background health-check or metrics goroutines are started.
type MonitoringConfig struct {
	Enabled                  bool          `yaml:"enabled"`
	MetricsInterval          time.Duration `yaml:"metrics_interval"`
	LogSlowRequests          bool          `yaml:"log_slow_requests"`
	SlowRequestThreshold     time.Duration `yaml:"slow_request_threshold"`
	TrackProviderPerformance bool          `yaml:"track_provider_performance"`
}
// Provider represents an active RPC provider connection, pairing the static
// configuration with live clients and mutable health/metric state.
type Provider struct {
	Config          ProviderConfig
	HTTPClient      *ethclient.Client // nil when no HTTP endpoint is configured
	WSClient        *ethclient.Client // nil when no WS endpoint is configured or the dial failed
	RateLimiter     *rate.Limiter
	HTTPConn        *rpc.Client
	WSConn          *rpc.Client
	IsHealthy       bool      // guarded by mutex; updated by health checks
	LastHealthCheck time.Time // guarded by mutex
	RequestCount    int64
	ErrorCount      int64
	AvgResponseTime time.Duration // guarded by mutex; despite the name, holds the latest health-check latency
	mutex           sync.RWMutex  // protects the mutable fields above
}

// ProviderManager manages multiple RPC providers with rotation and failover.
type ProviderManager struct {
	providers       []*Provider
	config          ProvidersConfig
	currentProvider int // round-robin cursor; see getNextRoundRobin
	mutex           sync.RWMutex
	healthTicker    *time.Ticker
	metricsTicker   *time.Ticker
	stopChan        chan struct{} // closed by Close to stop background loops
}
// NewProviderManager creates a provider manager from the YAML configuration
// at configPath, dials all configured providers, and starts the background
// health-check/metrics goroutines (when monitoring is enabled).
//
// Providers that fail to initialize are skipped; construction fails only when
// the config is unloadable or no provider could be initialized at all.
func NewProviderManager(configPath string) (*ProviderManager, error) {
	config, err := LoadProvidersConfig(configPath)
	if err != nil {
		return nil, fmt.Errorf("failed to load provider config: %w", err)
	}
	pm := &ProviderManager{
		config:   config,
		stopChan: make(chan struct{}),
	}
	if err := pm.initializeProviders(); err != nil {
		return nil, fmt.Errorf("failed to initialize providers: %w", err)
	}
	// Kick off health checks and metrics collection.
	pm.startBackgroundTasks()
	return pm, nil
}
// LoadProvidersConfig reads, parses, and validates the YAML provider
// configuration at path. On any failure the zero-value config is returned
// together with a wrapped error.
func LoadProvidersConfig(path string) (ProvidersConfig, error) {
	var config ProvidersConfig
	data, err := os.ReadFile(path)
	if err != nil {
		return config, fmt.Errorf("failed to read config file %s: %w", path, err)
	}
	if err = yaml.Unmarshal(data, &config); err != nil {
		return config, fmt.Errorf("failed to parse YAML config: %w", err)
	}
	if err = validateConfig(&config); err != nil {
		return config, fmt.Errorf("invalid configuration: %w", err)
	}
	return config, nil
}
// validateConfig checks that at least one provider exists and that every
// provider has a name, at least one endpoint, and a positive rate limit.
func validateConfig(config *ProvidersConfig) error {
	if len(config.Providers) == 0 {
		return fmt.Errorf("no providers configured")
	}
	for i, p := range config.Providers {
		switch {
		case p.Name == "":
			return fmt.Errorf("provider %d has no name", i)
		case p.HTTPEndpoint == "" && p.WSEndpoint == "":
			return fmt.Errorf("provider %s has no endpoints", p.Name)
		case p.RateLimit.RequestsPerSecond <= 0:
			return fmt.Errorf("provider %s has invalid rate limit", p.Name)
		}
	}
	return nil
}
// initializeProviders dials every configured provider and keeps the ones
// that connect. Failing providers are skipped; it is an error only when none
// could be initialized.
func (pm *ProviderManager) initializeProviders() error {
	pm.providers = make([]*Provider, 0, len(pm.config.Providers))
	for _, cfg := range pm.config.Providers {
		p, err := createProvider(cfg)
		if err != nil {
			// Skip providers that fail to connect; the rest may still work.
			continue
		}
		pm.providers = append(pm.providers, p)
	}
	if len(pm.providers) == 0 {
		return fmt.Errorf("no providers successfully initialized")
	}
	return nil
}
// createProvider builds a Provider from its configuration: a token-bucket
// rate limiter, an HTTP connection (creation fails when a configured HTTP
// endpoint cannot be dialed), and a best-effort WebSocket connection (a
// failed WS dial only prints a warning to stdout and leaves the WS fields
// nil).
func createProvider(config ProviderConfig) (*Provider, error) {
	// Token-bucket limiter sized from the per-provider config.
	rateLimiter := rate.NewLimiter(
		rate.Limit(config.RateLimit.RequestsPerSecond),
		config.RateLimit.Burst,
	)
	provider := &Provider{
		Config:      config,
		RateLimiter: rateLimiter,
		IsHealthy:   true, // assume healthy until a health check says otherwise
	}
	if config.HTTPEndpoint != "" {
		// The rate-limit timeout doubles as the HTTP request timeout.
		httpClient := &http.Client{
			Timeout: config.RateLimit.Timeout,
		}
		rpcClient, err := rpc.DialHTTPWithClient(config.HTTPEndpoint, httpClient)
		if err != nil {
			return nil, fmt.Errorf("failed to connect to HTTP endpoint %s: %w", config.HTTPEndpoint, err)
		}
		provider.HTTPConn = rpcClient
		provider.HTTPClient = ethclient.NewClient(rpcClient)
	}
	if config.WSEndpoint != "" {
		wsClient, err := rpc.DialWebsocket(context.Background(), config.WSEndpoint, "")
		if err != nil {
			// Don't fail outright: HTTP may still work without WS.
			fmt.Printf("Warning: failed to connect to WebSocket endpoint %s: %v\n", config.WSEndpoint, err)
		} else {
			provider.WSConn = wsClient
			provider.WSClient = ethclient.NewClient(wsClient)
		}
	}
	return provider, nil
}
// GetHealthyProvider returns the next usable provider according to the
// configured rotation strategy; unknown strategies fall back to round-robin.
//
// NOTE(review): this takes only a read lock, yet the round_robin path
// mutates pm.currentProvider — see the race note on getNextRoundRobin.
func (pm *ProviderManager) GetHealthyProvider() (*Provider, error) {
	pm.mutex.RLock()
	defer pm.mutex.RUnlock()
	if len(pm.providers) == 0 {
		return nil, fmt.Errorf("no providers available")
	}
	switch pm.config.Rotation.Strategy {
	case "round_robin":
		return pm.getNextRoundRobin()
	case "weighted":
		return pm.getWeightedProvider()
	case "priority_based":
		return pm.getPriorityProvider()
	default:
		return pm.getNextRoundRobin()
	}
}
// getNextRoundRobin scans forward from the saved cursor and returns the
// first usable provider, advancing the cursor just past it.
//
// NOTE(review): the caller (GetHealthyProvider) holds only pm.mutex.RLock,
// so the write to pm.currentProvider here is a data race under concurrent
// calls — confirm, and consider an atomic cursor or upgrading to a full
// Lock in the caller.
func (pm *ProviderManager) getNextRoundRobin() (*Provider, error) {
	startIndex := pm.currentProvider
	for i := 0; i < len(pm.providers); i++ {
		index := (startIndex + i) % len(pm.providers)
		provider := pm.providers[index]
		if pm.isProviderUsable(provider) {
			pm.currentProvider = (index + 1) % len(pm.providers)
			return provider, nil
		}
	}
	return nil, fmt.Errorf("no healthy providers available")
}
// getPriorityProvider returns the usable provider with the numerically
// lowest Priority value (lower number = higher priority); ties keep the
// earliest provider in the list.
func (pm *ProviderManager) getPriorityProvider() (*Provider, error) {
	var best *Provider
	lowest := int(^uint(0) >> 1) // max int sentinel
	for _, candidate := range pm.providers {
		if !pm.isProviderUsable(candidate) {
			continue
		}
		if candidate.Config.Priority < lowest {
			best = candidate
			lowest = candidate.Config.Priority
		}
	}
	if best == nil {
		return nil, fmt.Errorf("no healthy providers available")
	}
	return best, nil
}
// getWeightedProvider is intended to weight selection by provider
// performance; until that exists it delegates to priority-based selection.
func (pm *ProviderManager) getWeightedProvider() (*Provider, error) {
	// TODO: factor in response times and success rates.
	return pm.getPriorityProvider()
}
// isProviderUsable reports whether a provider may serve a request right now:
// it must be healthy (when health checks are required) and have rate-limit
// budget available.
//
// NOTE(review): RateLimiter.Allow() consumes a token, so probing a provider
// during selection spends part of its budget even when a different provider
// is ultimately chosen — confirm this is intended.
func (pm *ProviderManager) isProviderUsable(provider *Provider) bool {
	provider.mutex.RLock()
	defer provider.mutex.RUnlock()
	if pm.config.Rotation.HealthCheckRequired && !provider.IsHealthy {
		return false
	}
	if !provider.RateLimiter.Allow() {
		return false
	}
	return true
}
// GetHTTPClient selects a healthy provider and returns its HTTP client.
// It fails when no healthy provider exists or the chosen one has no HTTP
// endpoint configured.
func (pm *ProviderManager) GetHTTPClient() (*ethclient.Client, error) {
	p, err := pm.GetHealthyProvider()
	if err != nil {
		return nil, err
	}
	if p.HTTPClient == nil {
		return nil, fmt.Errorf("provider %s has no HTTP client", p.Config.Name)
	}
	return p.HTTPClient, nil
}
// GetWSClient selects a healthy provider and returns its WebSocket client.
// It fails when no healthy provider exists or the chosen one has no working
// WebSocket connection.
func (pm *ProviderManager) GetWSClient() (*ethclient.Client, error) {
	p, err := pm.GetHealthyProvider()
	if err != nil {
		return nil, err
	}
	if p.WSClient == nil {
		return nil, fmt.Errorf("provider %s has no WebSocket client", p.Config.Name)
	}
	return p.WSClient, nil
}
// GetRPCClient returns a raw RPC client from a healthy provider. When
// preferWS is true and a WebSocket connection exists it is returned,
// otherwise the HTTP connection is used.
func (pm *ProviderManager) GetRPCClient(preferWS bool) (*rpc.Client, error) {
	p, err := pm.GetHealthyProvider()
	if err != nil {
		return nil, err
	}
	switch {
	case preferWS && p.WSConn != nil:
		return p.WSConn, nil
	case p.HTTPConn != nil:
		return p.HTTPConn, nil
	default:
		return nil, fmt.Errorf("provider %s has no available RPC client", p.Config.Name)
	}
}
// startBackgroundTasks launches the health-check and metrics goroutines when
// monitoring is enabled. Both goroutines run until stopChan is closed.
func (pm *ProviderManager) startBackgroundTasks() {
	if !pm.config.Monitoring.Enabled {
		return
	}
	pm.healthTicker = time.NewTicker(time.Minute) // default 1 minute cadence
	go pm.healthCheckLoop()
	// time.NewTicker panics on a non-positive duration; fall back to a sane
	// default when MetricsInterval is left unset in the config.
	interval := pm.config.Monitoring.MetricsInterval
	if interval <= 0 {
		interval = time.Minute
	}
	pm.metricsTicker = time.NewTicker(interval)
	go pm.metricsLoop()
}
// healthCheckLoop runs provider health checks on every healthTicker tick and
// exits when stopChan is closed.
func (pm *ProviderManager) healthCheckLoop() {
	for {
		select {
		case <-pm.stopChan:
			return
		case <-pm.healthTicker.C:
			pm.performHealthChecks()
		}
	}
}
// metricsLoop collects provider metrics on every metricsTicker tick and
// exits when stopChan is closed.
func (pm *ProviderManager) metricsLoop() {
	for {
		select {
		case <-pm.stopChan:
			return
		case <-pm.metricsTicker.C:
			pm.collectMetrics()
		}
	}
}
// performHealthChecks launches one goroutine per provider to run its health
// check concurrently. Fan-out is bounded by the provider count and each
// goroutine by the provider's health-check timeout.
func (pm *ProviderManager) performHealthChecks() {
	for _, provider := range pm.providers {
		go pm.checkProviderHealth(provider)
	}
}
// checkProviderHealth probes a single provider by requesting the latest
// block number, preferring the HTTP client and falling back to WebSocket.
// The outcome is recorded under the provider's lock.
//
// NOTE(review): when a provider has neither client, err stays nil and the
// provider is marked healthy — confirm that is intended.
func (pm *ProviderManager) checkProviderHealth(provider *Provider) {
	if !provider.Config.HealthCheck.Enabled {
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), provider.Config.HealthCheck.Timeout)
	defer cancel()
	start := time.Now()
	var err error
	// BlockNumber doubles as a cheap liveness probe.
	if provider.HTTPClient != nil {
		_, err = provider.HTTPClient.BlockNumber(ctx)
	} else if provider.WSClient != nil {
		_, err = provider.WSClient.BlockNumber(ctx)
	}
	provider.mutex.Lock()
	provider.LastHealthCheck = time.Now()
	provider.IsHealthy = (err == nil)
	if err == nil {
		// Despite the field name, this stores the latest probe latency,
		// not a running average.
		provider.AvgResponseTime = time.Since(start)
	}
	provider.mutex.Unlock()
}
// collectMetrics is currently a stub; metric collection and reporting are
// not implemented yet.
func (pm *ProviderManager) collectMetrics() {
	// Implementation would collect and report metrics.
	// For now, just log basic stats.
}
// Close stops the background goroutines and closes every provider's RPC
// connections. It always returns nil.
//
// NOTE(review): Close is not idempotent — a second call panics on
// close(pm.stopChan). Guard with sync.Once if double-close is possible.
func (pm *ProviderManager) Close() error {
	close(pm.stopChan)
	if pm.healthTicker != nil {
		pm.healthTicker.Stop()
	}
	if pm.metricsTicker != nil {
		pm.metricsTicker.Stop()
	}
	// Close all open RPC connections.
	for _, provider := range pm.providers {
		if provider.HTTPConn != nil {
			provider.HTTPConn.Close()
		}
		if provider.WSConn != nil {
			provider.WSConn.Close()
		}
	}
	return nil
}
// GetProviderStats returns a snapshot of per-provider statistics keyed by
// provider name. Each provider's fields are read under its own lock.
func (pm *ProviderManager) GetProviderStats() map[string]interface{} {
	pm.mutex.RLock()
	defer pm.mutex.RUnlock()
	stats := make(map[string]interface{}, len(pm.providers))
	for _, p := range pm.providers {
		p.mutex.RLock()
		entry := map[string]interface{}{
			"name":              p.Config.Name,
			"healthy":           p.IsHealthy,
			"last_health_check": p.LastHealthCheck,
			"request_count":     p.RequestCount,
			"error_count":       p.ErrorCount,
			"avg_response_time": p.AvgResponseTime,
		}
		p.mutex.RUnlock()
		stats[p.Config.Name] = entry
	}
	return stats
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,13 +1,38 @@
package transport
import (
"crypto/rand"
"fmt"
"math/rand"
"math/big"
"sort"
"sync"
"time"
)
// cryptoRandInt returns a uniformly distributed random integer in [0, n)
// drawn from crypto/rand. n must be positive.
func cryptoRandInt(n int) (int, error) {
	if n <= 0 {
		return 0, fmt.Errorf("n must be positive")
	}
	v, err := rand.Int(rand.Reader, big.NewInt(int64(n)))
	if err != nil {
		return 0, err
	}
	return int(v.Int64()), nil
}
// cryptoRandFloat returns a uniformly distributed float64 in [0.0, 1.0)
// using crypto/rand, with 53 bits of precision.
func cryptoRandFloat() (float64, error) {
	// Draw an integer in [0, 2^53) and scale; 2^53 is the largest power of
	// two below which every integer is exactly representable as a float64.
	limit := new(big.Int).Lsh(big.NewInt(1), 53)
	v, err := rand.Int(rand.Reader, limit)
	if err != nil {
		return 0, err
	}
	return float64(v.Int64()) / float64(limit.Int64()), nil
}
// MessageRouter handles intelligent message routing and transport selection
type MessageRouter struct {
rules []RoutingRule
@@ -261,12 +286,22 @@ func (lb *WeightedLoadBalancer) SelectTransport(transports []TransportType, msg
}
if totalWeight == 0 {
// Fall back to random selection
return transports[rand.Intn(len(transports))]
// Fall back to random selection using crypto/rand
idx, err := cryptoRandInt(len(transports))
if err != nil {
// Fallback to first transport if crypto/rand fails
return transports[0]
}
return transports[idx]
}
// Weighted random selection
target := rand.Float64() * totalWeight
targetF, err := cryptoRandFloat()
if err != nil {
// Fallback to first transport if crypto/rand fails
return transports[0]
}
target := targetF * totalWeight
current := 0.0
for _, transport := range transports {

View File

@@ -0,0 +1,316 @@
package transport
import (
"fmt"
"io/ioutil"
"github.com/ethereum/go-ethereum/ethclient"
"gopkg.in/yaml.v3"
)
// UnifiedProviderManager fronts all provider pools (read-only, execution,
// testing). Pools absent from the configuration remain nil, and every
// accessor checks for that before use.
type UnifiedProviderManager struct {
	ReadOnlyPool    *ReadOnlyProviderPool
	ExecutionPool   *ExecutionProviderPool
	TestingPool     *TestingProviderPool
	config          ProvidersConfig
	providerConfigs map[string]ProviderConfig // providers indexed by Name
}

// OperationMode defines the type of operation being performed and selects
// which pool serves it (see GetPoolForMode).
type OperationMode int

const (
	ModeReadOnly  OperationMode = iota // served by ReadOnlyPool
	ModeExecution                      // served by ExecutionPool
	ModeTesting                        // served by TestingPool
)
// NewUnifiedProviderManager loads the YAML configuration at configPath and
// builds a manager with whichever pools (read-only, execution, testing) the
// configuration defines.
func NewUnifiedProviderManager(configPath string) (*UnifiedProviderManager, error) {
	config, err := LoadProvidersConfig(configPath)
	if err != nil {
		return nil, fmt.Errorf("failed to load provider config: %w", err)
	}
	// Index providers by name so pools can resolve their member lists.
	byName := make(map[string]ProviderConfig, len(config.Providers))
	for _, p := range config.Providers {
		byName[p.Name] = p
	}
	manager := &UnifiedProviderManager{
		config:          config,
		providerConfigs: byName,
	}
	if err := manager.initializePools(); err != nil {
		return nil, fmt.Errorf("failed to initialize provider pools: %w", err)
	}
	return manager, nil
}
// initializePools builds each provider pool ("read_only", "execution",
// "testing") that appears in the configuration; absent pools stay nil.
func (upm *UnifiedProviderManager) initializePools() error {
	if cfg, ok := upm.config.ProviderPools["read_only"]; ok {
		pool, err := NewReadOnlyProviderPool(cfg, upm.providerConfigs)
		if err != nil {
			return fmt.Errorf("failed to initialize read-only pool: %w", err)
		}
		upm.ReadOnlyPool = pool
	}
	if cfg, ok := upm.config.ProviderPools["execution"]; ok {
		pool, err := NewExecutionProviderPool(cfg, upm.providerConfigs)
		if err != nil {
			return fmt.Errorf("failed to initialize execution pool: %w", err)
		}
		upm.ExecutionPool = pool
	}
	if cfg, ok := upm.config.ProviderPools["testing"]; ok {
		pool, err := NewTestingProviderPool(cfg, upm.providerConfigs)
		if err != nil {
			return fmt.Errorf("failed to initialize testing pool: %w", err)
		}
		upm.TestingPool = pool
	}
	return nil
}
// GetPoolForMode maps an operation mode to its initialized pool, or fails
// when that pool was not configured or the mode is unrecognized.
func (upm *UnifiedProviderManager) GetPoolForMode(mode OperationMode) (ProviderPool, error) {
	switch mode {
	case ModeReadOnly:
		if upm.ReadOnlyPool == nil {
			return nil, fmt.Errorf("read-only pool not initialized")
		}
		return upm.ReadOnlyPool, nil
	case ModeExecution:
		if upm.ExecutionPool == nil {
			return nil, fmt.Errorf("execution pool not initialized")
		}
		return upm.ExecutionPool, nil
	case ModeTesting:
		if upm.TestingPool == nil {
			return nil, fmt.Errorf("testing pool not initialized")
		}
		return upm.TestingPool, nil
	}
	return nil, fmt.Errorf("unknown operation mode: %d", mode)
}
// GetReadOnlyHTTPClient returns an HTTP client from the read-only pool, or
// an error when that pool is not initialized.
func (upm *UnifiedProviderManager) GetReadOnlyHTTPClient() (*ethclient.Client, error) {
	if upm.ReadOnlyPool == nil {
		return nil, fmt.Errorf("read-only pool not initialized")
	}
	return upm.ReadOnlyPool.GetHTTPClient()
}

// GetReadOnlyWSClient returns a WebSocket client from the read-only pool for
// real-time data, or an error when that pool is not initialized.
func (upm *UnifiedProviderManager) GetReadOnlyWSClient() (*ethclient.Client, error) {
	if upm.ReadOnlyPool == nil {
		return nil, fmt.Errorf("read-only pool not initialized")
	}
	return upm.ReadOnlyPool.GetWSClient()
}

// GetExecutionHTTPClient returns an HTTP client from the execution pool, or
// an error when that pool is not initialized.
func (upm *UnifiedProviderManager) GetExecutionHTTPClient() (*ethclient.Client, error) {
	if upm.ExecutionPool == nil {
		return nil, fmt.Errorf("execution pool not initialized")
	}
	return upm.ExecutionPool.GetHTTPClient()
}

// GetTestingHTTPClient returns an HTTP client from the testing pool
// (preferably an Anvil fork), or an error when that pool is not initialized.
func (upm *UnifiedProviderManager) GetTestingHTTPClient() (*ethclient.Client, error) {
	if upm.TestingPool == nil {
		return nil, fmt.Errorf("testing pool not initialized")
	}
	return upm.TestingPool.GetHTTPClient()
}
// GetAllStats returns per-pool statistics plus a "summary" entry listing the
// number of initialized pools and their names.
func (upm *UnifiedProviderManager) GetAllStats() map[string]interface{} {
	stats := make(map[string]interface{})
	if upm.ReadOnlyPool != nil {
		stats["read_only"] = upm.ReadOnlyPool.GetStats()
	}
	if upm.ExecutionPool != nil {
		stats["execution"] = upm.ExecutionPool.GetStats()
	}
	if upm.TestingPool != nil {
		stats["testing"] = upm.TestingPool.GetStats()
	}
	// Collect pool names before adding the summary entry itself.
	names := []string{}
	for name := range stats {
		names = append(names, name)
	}
	stats["summary"] = map[string]interface{}{
		"total_pools":       len(names),
		"pools_initialized": names,
	}
	return stats
}
// CreateTestingSnapshot takes a state snapshot in the testing environment
// and returns its identifier. Fails when the testing pool is not
// initialized.
func (upm *UnifiedProviderManager) CreateTestingSnapshot() (string, error) {
	if upm.TestingPool == nil {
		return "", fmt.Errorf("testing pool not initialized")
	}
	return upm.TestingPool.CreateSnapshot()
}

// RevertTestingSnapshot rolls the testing environment back to a previously
// created snapshot. Fails when the testing pool is not initialized.
func (upm *UnifiedProviderManager) RevertTestingSnapshot(snapshotID string) error {
	if upm.TestingPool == nil {
		return fmt.Errorf("testing pool not initialized")
	}
	return upm.TestingPool.RevertToSnapshot(snapshotID)
}
// Close shuts down every initialized pool, collecting failures so one bad
// pool does not prevent the others from closing.
func (upm *UnifiedProviderManager) Close() error {
	// Renamed from "errors" to avoid shadowing the standard library package.
	var errs []error
	if upm.ReadOnlyPool != nil {
		if err := upm.ReadOnlyPool.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close read-only pool: %w", err))
		}
	}
	if upm.ExecutionPool != nil {
		if err := upm.ExecutionPool.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close execution pool: %w", err))
		}
	}
	if upm.TestingPool != nil {
		if err := upm.TestingPool.Close(); err != nil {
			errs = append(errs, fmt.Errorf("failed to close testing pool: %w", err))
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("errors closing provider pools: %v", errs)
	}
	return nil
}
// LoadProvidersConfigFromFile reads, parses, and validates a YAML provider
// configuration file.
//
// NOTE(review): near-duplicate of LoadProvidersConfig, which uses
// os.ReadFile and the looser validateConfig; this variant uses the
// deprecated ioutil.ReadFile and the stricter validateProvidersConfig.
// Consider consolidating the two.
func LoadProvidersConfigFromFile(path string) (ProvidersConfig, error) {
	var config ProvidersConfig
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return config, fmt.Errorf("failed to read config file %s: %w", path, err)
	}
	if err := yaml.Unmarshal(data, &config); err != nil {
		return config, fmt.Errorf("failed to parse YAML config: %w", err)
	}
	if err := validateProvidersConfig(&config); err != nil {
		return config, fmt.Errorf("invalid configuration: %w", err)
	}
	return config, nil
}
// validateProvidersConfig validates the provider configuration: at least one
// provider must exist, every pool must be non-empty and reference only known
// providers, and each provider needs a name, an endpoint, a positive rate
// limit, and (for anvil_fork providers) an anvil_config.
func validateProvidersConfig(config *ProvidersConfig) error {
	if len(config.Providers) == 0 {
		return fmt.Errorf("no providers configured")
	}
	// Build the provider-name set once; the original rebuilt it inside the
	// pool loop, doing O(pools x providers) work for no reason.
	providerNames := make(map[string]bool, len(config.Providers))
	for _, provider := range config.Providers {
		providerNames[provider.Name] = true
	}
	for poolName, poolConfig := range config.ProviderPools {
		if len(poolConfig.Providers) == 0 {
			return fmt.Errorf("provider pool '%s' has no providers", poolName)
		}
		// Every pool member must be a declared provider.
		for _, providerName := range poolConfig.Providers {
			if !providerNames[providerName] {
				return fmt.Errorf("provider pool '%s' references unknown provider '%s'", poolName, providerName)
			}
		}
	}
	for i, provider := range config.Providers {
		if provider.Name == "" {
			return fmt.Errorf("provider %d has no name", i)
		}
		if provider.HTTPEndpoint == "" && provider.WSEndpoint == "" {
			return fmt.Errorf("provider %s has no endpoints", provider.Name)
		}
		if provider.RateLimit.RequestsPerSecond <= 0 {
			return fmt.Errorf("provider %s has invalid rate limit", provider.Name)
		}
		// Anvil fork providers require their fork-specific settings.
		if provider.Type == "anvil_fork" && provider.AnvilConfig == nil {
			return fmt.Errorf("provider %s is anvil_fork type but has no anvil_config", provider.Name)
		}
	}
	return nil
}
// GetProviderByName returns the configuration of the named provider; the
// boolean reports whether it exists.
func (upm *UnifiedProviderManager) GetProviderByName(name string) (ProviderConfig, bool) {
	config, exists := upm.providerConfigs[name]
	return config, exists
}
// GetProvidersByType returns every configured provider whose Type equals
// providerType. The result is nil when nothing matches.
func (upm *UnifiedProviderManager) GetProvidersByType(providerType string) []ProviderConfig {
	var matched []ProviderConfig
	for _, p := range upm.config.Providers {
		if p.Type == providerType {
			matched = append(matched, p)
		}
	}
	return matched
}
// GetProvidersByFeature returns every configured provider that lists the
// given feature in its Features slice. The result is nil when nothing
// matches.
func (upm *UnifiedProviderManager) GetProvidersByFeature(feature string) []ProviderConfig {
	var matched []ProviderConfig
	for _, p := range upm.config.Providers {
		for _, f := range p.Features {
			if f == feature {
				matched = append(matched, p)
				break
			}
		}
	}
	return matched
}

View File

@@ -228,11 +228,12 @@ func (wt *WebSocketTransport) startServer() error {
addr := fmt.Sprintf("%s:%d", wt.address, wt.port)
wt.server = &http.Server{
Addr: addr,
Handler: mux,
ReadTimeout: 60 * time.Second,
WriteTimeout: 60 * time.Second,
IdleTimeout: 120 * time.Second,
Addr: addr,
Handler: mux,
ReadHeaderTimeout: 5 * time.Second, // Prevent Slowloris attacks
ReadTimeout: 60 * time.Second,
WriteTimeout: 60 * time.Second,
IdleTimeout: 120 * time.Second,
}
wt.connected = true