package service

import (
	"context"
	"log/slog"
	"time"

	"github.com/ez-api/ez-api/internal/model"
	"gorm.io/gorm"
)

// LogWriter batches log records to reduce IO overhead.
type LogWriter struct {
	ch            chan model.LogRecord
	batchSize     int
	flushInterval time.Duration
	db            *gorm.DB
}

func NewLogWriter(db *gorm.DB, queueCapacity, batchSize int, flushInterval time.Duration) *LogWriter {
	if batchSize <= 0 {
		batchSize = 10
	}
	if queueCapacity <= 0 {
		queueCapacity = 1000
	}
	if flushInterval <= 0 {
		flushInterval = time.Second
	}
	return &LogWriter{
		ch:            make(chan model.LogRecord, queueCapacity),
		batchSize:     batchSize,
		flushInterval: flushInterval,
		db:            db,
	}
}

// Start begins the background writer goroutine. It should be called once at startup.
func (w *LogWriter) Start(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(w.flushInterval)
		defer ticker.Stop()

		buf := make([]model.LogRecord, 0, w.batchSize)
		flush := func() {
			if len(buf) == 0 {
				return
			}
			if err := w.db.Create(&buf).Error; err != nil {
				slog.Default().Error("log batch insert failed", "err", err)
			}
			buf = buf[:0]
		}

		for {
			select {
			case <-ctx.Done():
				flush()
				return
			case rec := <-w.ch:
				buf = append(buf, rec)
				if len(buf) >= w.batchSize {
					flush()
				}
			case <-ticker.C:
				flush()
			}
		}
	}()
}

// Write queues a log record; it drops the record silently if the buffer is full
// so that the hot path never blocks.
func (w *LogWriter) Write(rec model.LogRecord) {
	select {
	case w.ch <- rec:
	default:
		// drop to avoid blocking hot path
	}
}
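
// Usage sketch (illustrative only; the sqlite driver, queue sizes, and the way
// LogRecord is populated below are assumptions, not part of this package):
//
//	db, err := gorm.Open(sqlite.Open("app.db"), &gorm.Config{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//
//	writer := service.NewLogWriter(db, 1000, 50, time.Second)
//	writer.Start(ctx) // start exactly one background flusher
//
//	// Hot path: non-blocking; the record is dropped if the queue is full.
//	writer.Write(model.LogRecord{ /* populate fields from internal/model */ })
//
// Cancelling ctx flushes whatever is buffered in memory, but records still
// sitting in the channel at shutdown are not drained.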