Files
gemini-balancer/internal/service/db_log_writer_service.go
2025-11-22 14:20:05 +08:00

137 lines
3.4 KiB
Go

// Filename: internal/service/db_log_writer_service.go
package service
import (
"context"
"encoding/json"
"gemini-balancer/internal/models"
"gemini-balancer/internal/settings"
"gemini-balancer/internal/store"
"sync"
"time"
"github.com/sirupsen/logrus"
"gorm.io/gorm"
)
// DBLogWriterService consumes request-finished events from the store's
// pub/sub channel and persists them to the database in batches.
type DBLogWriterService struct {
	db              *gorm.DB                // destination for batched log inserts
	store           store.Store             // event source (pub/sub subscription)
	logger          *logrus.Entry           // component-scoped logger
	logBuffer       chan *models.RequestLog // bounded hand-off between listener and writer
	stopChan        chan struct{}           // closed by Stop to signal shutdown
	wg              sync.WaitGroup          // tracks the two background goroutines
	SettingsManager *settings.SettingsManager
}
// NewDBLogWriterService builds a log writer backed by the given database,
// store, and settings manager. The log channel capacity comes from
// LogBufferCapacity, falling back to 1000 when unset or non-positive.
func NewDBLogWriterService(db *gorm.DB, s store.Store, settings *settings.SettingsManager, logger *logrus.Logger) *DBLogWriterService {
	capacity := settings.GetSettings().LogBufferCapacity
	if capacity <= 0 {
		capacity = 1000 // default buffer size when the setting is absent
	}
	svc := &DBLogWriterService{
		db:              db,
		store:           s,
		SettingsManager: settings,
		logger:          logger.WithField("component", "DBLogWriter📝"),
		logBuffer:       make(chan *models.RequestLog, capacity),
		stopChan:        make(chan struct{}),
	}
	return svc
}
// Start launches the event-listener and DB-writer goroutines, registering
// both with the WaitGroup that Stop waits on.
func (s *DBLogWriterService) Start() {
	loops := []func(){s.eventListenerLoop, s.dbWriterLoop}
	s.wg.Add(len(loops))
	for _, loop := range loops {
		go loop()
	}
	s.logger.Info("DBLogWriterService started.")
}
// Stop signals both background goroutines to shut down and blocks until
// they have drained their remaining work and returned.
func (s *DBLogWriterService) Stop() {
	s.logger.Info("DBLogWriterService stopping...")
	// Closing stopChan makes the listener close logBuffer, which in turn
	// lets the writer flush its final batch and exit.
	close(s.stopChan)
	s.wg.Wait()
	s.logger.Info("DBLogWriterService stopped.")
}
// eventListenerLoop subscribes to the request-finished topic and forwards
// each decoded RequestLog into logBuffer until stopChan is closed. As the
// sole sender, it is responsible for closing logBuffer on every exit path
// so dbWriterLoop can drain and terminate.
func (s *DBLogWriterService) eventListenerLoop() {
	defer s.wg.Done()
	ctx := context.Background()
	sub, err := s.store.Subscribe(ctx, models.TopicRequestFinished)
	if err != nil {
		// BUGFIX: Fatalf called os.Exit, which skips all defers (including
		// wg.Done) and kills the whole process on a subscribe failure. Log
		// the error instead, and close logBuffer so dbWriterLoop is not left
		// blocked forever (which would hang Stop() on wg.Wait()).
		s.logger.Errorf("Failed to subscribe to topic %s: %v", models.TopicRequestFinished, err)
		close(s.logBuffer)
		return
	}
	defer sub.Close()
	s.logger.Info("Subscribed to request events for database logging.")
	for {
		select {
		case msg, ok := <-sub.Channel():
			if !ok {
				// BUGFIX: without this check, a closed subscription channel
				// yields zero-value messages forever, busy-looping on
				// unmarshal errors. Shut down the pipeline cleanly instead.
				s.logger.Warn("Subscription channel closed; event listener loop stopping.")
				close(s.logBuffer)
				return
			}
			var event models.RequestFinishedEvent
			if err := json.Unmarshal(msg.Payload, &event); err != nil {
				s.logger.Errorf("Failed to unmarshal event for logging: %v", err)
				continue
			}
			// Non-blocking send: drop the log rather than stall the listener
			// when the writer cannot keep up.
			select {
			case s.logBuffer <- &event.RequestLog:
			default:
				s.logger.Warn("Log buffer is full. A log message might be dropped.")
			}
		case <-s.stopChan:
			s.logger.Info("Event listener loop stopping.")
			// Only the sender closes: this signals dbWriterLoop to flush its
			// final batch and exit.
			close(s.logBuffer)
			return
		}
	}
}
// dbWriterLoop drains logBuffer and persists entries in batches. A batch is
// flushed when it reaches the configured size, when the flush interval
// elapses, and one final time when logBuffer is closed by the listener.
func (s *DBLogWriterService) dbWriterLoop() {
	defer s.wg.Done()

	cfg := s.SettingsManager.GetSettings()

	size := cfg.LogFlushBatchSize
	if size <= 0 {
		size = 100 // default batch size when the setting is absent
	}
	interval := time.Duration(cfg.LogFlushIntervalSeconds) * time.Second
	if interval <= 0 {
		interval = 5 * time.Second // default flush interval
	}

	pending := make([]*models.RequestLog, 0, size)
	// flush persists the accumulated batch (if any) and starts a fresh one.
	// A new slice is allocated rather than reslicing, so the flushed batch's
	// backing array is never mutated while the DB layer may still hold it.
	flush := func() {
		if len(pending) == 0 {
			return
		}
		s.flushBatch(pending)
		pending = make([]*models.RequestLog, 0, size)
	}

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case entry, open := <-s.logBuffer:
			if !open {
				// Channel closed by the listener: persist the leftovers.
				flush()
				s.logger.Info("DB writer loop finished.")
				return
			}
			pending = append(pending, entry)
			if len(pending) >= size {
				flush()
			}
		case <-ticker.C:
			flush()
		}
	}
}
// flushBatch writes the collected logs to the database in one batched
// insert and logs the outcome. An empty batch is a no-op.
func (s *DBLogWriterService) flushBatch(batch []*models.RequestLog) {
	// Robustness: callers currently guard against empty batches, but a
	// zero-row insert would be pointless and would emit a misleading
	// "flushed 0 logs" message — guard here as well.
	if len(batch) == 0 {
		return
	}
	if err := s.db.CreateInBatches(batch, len(batch)).Error; err != nil {
		s.logger.WithField("batch_size", len(batch)).WithError(err).Error("Failed to flush log batch to database.")
	} else {
		s.logger.Infof("Successfully flushed %d logs to database.", len(batch))
	}
}