feat(api): add dashboard summary and system realtime endpoints

Add new admin API endpoints for dashboard metrics and system-wide
realtime statistics:

- Add /admin/dashboard/summary endpoint with aggregated metrics
  covering requests, tokens, latency, masters, keys, and provider
  keys, with optional time-period filtering
- Add /admin/realtime endpoint for system-level realtime stats
  aggregated across all masters
- Add status filter parameter to ListAPIKeys endpoint
- Add hour grouping option to log stats aggregation
- Update OpenAPI documentation with new endpoints and schemas
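
As a rough illustration, the handler added in this commit could be wired into
the admin routes as sketched below. Only the constructor and the route path
come from the diff; the package name, the internal/api import path, the
registration function, and the omission of auth middleware are assumptions
made for the sketch.

package server

import (
	"github.com/ez-api/ez-api/internal/api"
	"github.com/ez-api/ez-api/internal/service"
	"github.com/gin-gonic/gin"
	"gorm.io/gorm"
)

// registerDashboardRoutes mounts the summary endpoint from this commit under
// /admin. The admin auth middleware implied by the AdminAuth security
// annotation is omitted here and would normally be attached to the group.
func registerDashboardRoutes(r *gin.Engine, db, logDB *gorm.DB, stats *service.StatsService, lp *service.LogPartitioner) {
	h := api.NewDashboardHandler(db, logDB, stats, lp)
	admin := r.Group("/admin")
	admin.GET("/dashboard/summary", h.GetSummary)
}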
Author: zenfun
Date:   2025-12-31 13:17:23 +08:00
Parent: 1a2cc5b798
Commit: 53c18c3867
9 changed files with 1644 additions and 21 deletions


@@ -0,0 +1,271 @@
package api

import (
	"net/http"
	"time"

	"github.com/ez-api/ez-api/internal/model"
	"github.com/ez-api/ez-api/internal/service"
	"github.com/gin-gonic/gin"
	"gorm.io/gorm"
)

// DashboardHandler handles dashboard-related API endpoints
type DashboardHandler struct {
	db             *gorm.DB
	logDB          *gorm.DB
	statsService   *service.StatsService
	logPartitioner *service.LogPartitioner
}

// NewDashboardHandler creates a new DashboardHandler
func NewDashboardHandler(db *gorm.DB, logDB *gorm.DB, statsService *service.StatsService, logPartitioner *service.LogPartitioner) *DashboardHandler {
	if logDB == nil {
		logDB = db
	}
	return &DashboardHandler{
		db:             db,
		logDB:          logDB,
		statsService:   statsService,
		logPartitioner: logPartitioner,
	}
}
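
// logDBConn returns the dedicated request-log connection when one is
// configured, falling back to the primary DB otherwise.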
func (h *DashboardHandler) logDBConn() *gorm.DB {
	if h == nil || h.logDB == nil {
		return h.db
	}
	return h.logDB
}

func (h *DashboardHandler) logBaseQuery() *gorm.DB {
	return logBaseQuery(h.logDBConn(), h.logPartitioner)
}

// RequestStats contains request-related statistics
type RequestStats struct {
	Total     int64   `json:"total"`
	Success   int64   `json:"success"`
	Failed    int64   `json:"failed"`
	ErrorRate float64 `json:"error_rate"`
}

// TokenStats contains token usage statistics
type TokenStats struct {
	Total  int64 `json:"total"`
	Input  int64 `json:"input"`
	Output int64 `json:"output"`
}

// LatencyStats contains latency statistics
type LatencyStats struct {
	AvgMs float64 `json:"avg_ms"`
}

// CountStats contains simple count statistics
type CountStats struct {
	Total  int64 `json:"total"`
	Active int64 `json:"active"`
}

// ProviderKeyStats contains provider key statistics
type ProviderKeyStats struct {
	Total        int64 `json:"total"`
	Active       int64 `json:"active"`
	Suspended    int64 `json:"suspended"`
	AutoDisabled int64 `json:"auto_disabled"`
}

// TopModelStat contains model usage statistics
type TopModelStat struct {
	Model    string `json:"model"`
	Requests int64  `json:"requests"`
	Tokens   int64  `json:"tokens"`
}

// DashboardSummaryResponse is the response for dashboard summary endpoint
type DashboardSummaryResponse struct {
	Period       string           `json:"period,omitempty"`
	Requests     RequestStats     `json:"requests"`
	Tokens       TokenStats       `json:"tokens"`
	Latency      LatencyStats     `json:"latency"`
	Masters      CountStats       `json:"masters"`
	Keys         CountStats       `json:"keys"`
	ProviderKeys ProviderKeyStats `json:"provider_keys"`
	TopModels    []TopModelStat   `json:"top_models"`
	UpdatedAt    int64            `json:"updated_at"`
}

// GetSummary godoc
// @Summary Dashboard summary
// @Description Returns aggregated metrics for dashboard display including requests, tokens, latency, masters, keys, and provider keys statistics
// @Tags admin
// @Produce json
// @Security AdminAuth
// @Param period query string false "time period: today, week, month, all"
// @Param since query int false "unix seconds"
// @Param until query int false "unix seconds"
// @Success 200 {object} DashboardSummaryResponse
// @Failure 400 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /admin/dashboard/summary [get]
func (h *DashboardHandler) GetSummary(c *gin.Context) {
	rng, err := parseStatsRange(c)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	// Build log query with time range
	logQuery := h.logBaseQuery()
	logQuery = applyStatsRange(logQuery, rng)
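
	// Each aggregation below starts from a fresh gorm Session so the shared
	// time-range conditions on logQuery are reused without the individual
	// queries polluting one another's clause state.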

	// 1. Request statistics
	var totalRequests int64
	if err := logQuery.Session(&gorm.Session{}).Count(&totalRequests).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count requests", "details": err.Error()})
		return
	}

	type statusCount struct {
		StatusCode int
		Cnt        int64
	}
	var statusCounts []statusCount
	if err := logQuery.Session(&gorm.Session{}).
		Select("status_code, COUNT(*) as cnt").
		Group("status_code").
		Scan(&statusCounts).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count by status", "details": err.Error()})
		return
	}
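
	// Treat 2xx and 3xx responses as successful; anything else counts
	// toward the failure total.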
	var successCount, failedCount int64
	for _, sc := range statusCounts {
		if sc.StatusCode >= 200 && sc.StatusCode < 400 {
			successCount += sc.Cnt
		} else {
			failedCount += sc.Cnt
		}
	}

	errorRate := 0.0
	if totalRequests > 0 {
		errorRate = float64(failedCount) / float64(totalRequests)
	}

	// 2. Token statistics
	type tokenSums struct {
		TokensIn   int64
		TokensOut  int64
		AvgLatency float64
	}
	var ts tokenSums
	if err := logQuery.Session(&gorm.Session{}).
		Select("COALESCE(SUM(tokens_in),0) as tokens_in, COALESCE(SUM(tokens_out),0) as tokens_out, COALESCE(AVG(latency_ms),0) as avg_latency").
		Scan(&ts).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to aggregate tokens", "details": err.Error()})
		return
	}
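
	// The entity counts below read from the primary DB (h.db) rather than the
	// request-log DB, since masters, keys, and provider keys live there.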

	// 3. Master statistics
	var totalMasters, activeMasters int64
	if err := h.db.Model(&model.Master{}).Count(&totalMasters).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count masters", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.Master{}).Where("status = ?", "active").Count(&activeMasters).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count active masters", "details": err.Error()})
		return
	}

	// 4. Key (child token) statistics
	var totalKeys, activeKeys int64
	if err := h.db.Model(&model.Key{}).Count(&totalKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.Key{}).Where("status = ?", "active").Count(&activeKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count active keys", "details": err.Error()})
		return
	}

	// 5. Provider key statistics
	var totalProviderKeys, activeProviderKeys, suspendedProviderKeys, autoDisabledProviderKeys int64
	if err := h.db.Model(&model.APIKey{}).Count(&totalProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count provider keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.APIKey{}).Where("status = ?", "active").Count(&activeProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count active provider keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.APIKey{}).Where("status = ?", "suspended").Count(&suspendedProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count suspended provider keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.APIKey{}).Where("status = ?", "auto_disabled").Count(&autoDisabledProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count auto_disabled provider keys", "details": err.Error()})
		return
	}

	// 6. Top models (limit to 10)
	type modelStat struct {
		ModelName string
		Cnt       int64
		Tokens    int64
	}
	var topModels []modelStat
	if err := logQuery.Session(&gorm.Session{}).
		Select("model_name, COUNT(*) as cnt, COALESCE(SUM(tokens_in + tokens_out),0) as tokens").
		Group("model_name").
		Order("cnt DESC").
		Limit(10).
		Scan(&topModels).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get top models", "details": err.Error()})
		return
	}
	topModelStats := make([]TopModelStat, 0, len(topModels))
	for _, m := range topModels {
		topModelStats = append(topModelStats, TopModelStat{
			Model:    m.ModelName,
			Requests: m.Cnt,
			Tokens:   m.Tokens,
		})
	}

	c.JSON(http.StatusOK, DashboardSummaryResponse{
		Period: rng.Period,
		Requests: RequestStats{
			Total:     totalRequests,
			Success:   successCount,
			Failed:    failedCount,
			ErrorRate: errorRate,
		},
		Tokens: TokenStats{
			Total:  ts.TokensIn + ts.TokensOut,
			Input:  ts.TokensIn,
			Output: ts.TokensOut,
		},
		Latency: LatencyStats{
			AvgMs: ts.AvgLatency,
		},
		Masters: CountStats{
			Total:  totalMasters,
			Active: activeMasters,
		},
		Keys: CountStats{
			Total:  totalKeys,
			Active: activeKeys,
		},
		ProviderKeys: ProviderKeyStats{
			Total:        totalProviderKeys,
			Active:       activeProviderKeys,
			Suspended:    suspendedProviderKeys,
			AutoDisabled: autoDisabledProviderKeys,
		},
		TopModels: topModelStats,
		UpdatedAt: time.Now().UTC().Unix(),
	})
}
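
For illustration, a minimal client sketch that calls the new endpoint and
decodes part of the response. Only the path, the period query parameter, and
the JSON field names come from the handler above; the base URL and the bearer
token header are placeholders for whatever the deployment actually uses.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// DashboardSummary mirrors the subset of DashboardSummaryResponse fields used
// below; see the handler file for the full struct.
type DashboardSummary struct {
	Period   string `json:"period,omitempty"`
	Requests struct {
		Total     int64   `json:"total"`
		ErrorRate float64 `json:"error_rate"`
	} `json:"requests"`
	UpdatedAt int64 `json:"updated_at"`
}

func main() {
	// Base URL and admin credential are placeholders.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/admin/dashboard/summary?period=today", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <admin-token>") // placeholder credential

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s DashboardSummary
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("period=%s total=%d error_rate=%.2f%%\n", s.Period, s.Requests.Total, s.Requests.ErrorRate*100)
}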