loadmonitor.go raw

   1  // Package loadmonitor defines the interface for database load monitoring.
   2  // This allows different database backends to provide their own load metrics
   3  // while the rate limiter remains database-agnostic.
   4  package loadmonitor
   5  
   6  import "time"
   7  
// Metrics contains a point-in-time snapshot of load metrics from a database
// backend. Load values (WriteLoad, ReadLoad) are normalized to 0.0-1.0 where
// 0 means no load and 1 means at capacity; MemoryPressure uses the same scale
// but may exceed 1.0 when the memory target has been overrun.
type Metrics struct {
	// MemoryPressure indicates memory usage relative to a target limit (0.0-1.0+).
	// Values above 1.0 indicate the target has been exceeded
	// (see Monitor.SetMemoryTarget).
	MemoryPressure float64

	// WriteLoad indicates the write-side load level (0.0-1.0).
	// For Badger: L0 tables and compaction score
	// For Neo4j: active write transactions
	WriteLoad float64

	// ReadLoad indicates the read-side load level (0.0-1.0).
	// For Badger: cache hit ratio (inverted)
	// For Neo4j: active read transactions
	ReadLoad float64

	// QueryLatency is the recent average query latency
	// (averaged from samples fed via Monitor.RecordQueryLatency).
	QueryLatency time.Duration

	// WriteLatency is the recent average write latency
	// (averaged from samples fed via Monitor.RecordWriteLatency).
	WriteLatency time.Duration

	// Timestamp is when these metrics were collected.
	Timestamp time.Time

	// InEmergencyMode indicates that memory pressure is critical
	// and aggressive throttling should be applied.
	InEmergencyMode bool

	// CompactionPending indicates that the database needs compaction
	// and writes should be throttled to allow it to catch up.
	CompactionPending bool

	// PhysicalMemoryMB is the actual physical memory (RSS - shared) in MB.
	PhysicalMemoryMB uint64
}
  45  
// Monitor defines the interface for database load monitoring.
// Implementations are database-specific (Badger, Neo4j, etc.) and are
// consumed by the rate limiter, which stays database-agnostic.
type Monitor interface {
	// GetMetrics returns the current load metrics snapshot.
	// This should be efficient as it may be called frequently
	// (e.g. on every rate-limiting decision).
	GetMetrics() Metrics

	// RecordQueryLatency records a query latency sample for averaging
	// into Metrics.QueryLatency.
	RecordQueryLatency(latency time.Duration)

	// RecordWriteLatency records a write latency sample for averaging
	// into Metrics.WriteLatency.
	RecordWriteLatency(latency time.Duration)

	// SetMemoryTarget sets the target memory limit in bytes.
	// Metrics.MemoryPressure is calculated relative to this target.
	SetMemoryTarget(bytes uint64)

	// Start begins background metric collection.
	// Returns a channel that will be closed when the monitor is stopped,
	// which callers can use to wait for shutdown.
	Start() <-chan struct{}

	// Stop halts background metric collection.
	Stop()
}
  70  
// CompactableMonitor extends Monitor with compaction-triggering capability.
// Implemented by database backends that support manual compaction (e.g., Badger).
// Callers can feature-test with a type assertion:
//
//	if cm, ok := m.(CompactableMonitor); ok { ... }
type CompactableMonitor interface {
	Monitor

	// TriggerCompaction initiates a database compaction operation.
	// This may take significant time; callers should run this in a goroutine.
	// Returns an error if compaction fails or is not supported.
	TriggerCompaction() error

	// IsCompacting returns true if a compaction is currently in progress.
	IsCompacting() bool
}
  84  
// EmergencyModeMonitor extends Monitor with emergency mode detection.
// Implemented by monitors that can detect critical memory pressure and
// surface it via Metrics.InEmergencyMode.
type EmergencyModeMonitor interface {
	Monitor

	// SetEmergencyThreshold sets the memory threshold (as a fraction of the
	// memory target, e.g. 1.5 = 150% of target) above which emergency mode
	// is triggered.
	SetEmergencyThreshold(threshold float64)

	// GetEmergencyThreshold returns the current emergency threshold.
	GetEmergencyThreshold() float64

	// ForceEmergencyMode manually triggers emergency mode for the given
	// duration, regardless of the measured memory pressure.
	ForceEmergencyMode(duration time.Duration)
}
 100