package api

import (
	"math"
	"net/http"
	"time"

	"github.com/ez-api/ez-api/internal/model"
	"github.com/ez-api/ez-api/internal/service"
	"github.com/gin-gonic/gin"
	"gorm.io/gorm"
)

// DashboardHandler handles dashboard-related API endpoints
type DashboardHandler struct {
	db             *gorm.DB
	logDB          *gorm.DB
	statsService   *service.StatsService
	logPartitioner *service.LogPartitioner
}

// NewDashboardHandler creates a new DashboardHandler
func NewDashboardHandler(db *gorm.DB, logDB *gorm.DB, statsService *service.StatsService, logPartitioner *service.LogPartitioner) *DashboardHandler {
	if logDB == nil {
		logDB = db
	}
	return &DashboardHandler{
		db:             db,
		logDB:          logDB,
		statsService:   statsService,
		logPartitioner: logPartitioner,
	}
}

// logDBConn returns the database used for log records, falling back to the
// primary db when no dedicated log DB is configured.
func (h *DashboardHandler) logDBConn() *gorm.DB {
	if h == nil {
		return nil
	}
	if h.logDB == nil {
		return h.db
	}
	return h.logDB
}

func (h *DashboardHandler) logBaseQuery() *gorm.DB {
	return logBaseQuery(h.logDBConn(), h.logPartitioner)
}

// TrendInfo contains trend calculation results
type TrendInfo struct {
	Delta     *float64 `json:"delta,omitempty"`     // Percentage change from previous period (nil if no baseline)
	Direction string   `json:"direction,omitempty"` // "up", "down", "stable", or "new" (no baseline)
}

// CalculateTrend calculates the trend between current and previous values.
// Returns TrendInfo with delta percentage and direction.
func CalculateTrend(current, previous int64) TrendInfo {
	if previous == 0 {
		if current == 0 {
			return TrendInfo{Direction: "stable"}
		}
		return TrendInfo{Direction: "new"}
	}
	delta := float64(current-previous) / float64(previous) * 100
	// Round to 1 decimal place
	delta = math.Round(delta*10) / 10
	direction := "stable"
	if delta > 0.5 {
		direction = "up"
	} else if delta < -0.5 {
		direction = "down"
	}
	return TrendInfo{Delta: &delta, Direction: direction}
}

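// Worked examples for CalculateTrend (illustrative values, not taken from real data):
//
//	CalculateTrend(150, 100) // Delta=50.0, Direction="up"     (+50% vs. baseline)
//	CalculateTrend(100, 100) // Delta=0.0,  Direction="stable"  (within the ±0.5% band)
//	CalculateTrend(42, 0)    // Delta=nil,  Direction="new"     (no baseline to compare against)
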
// CalculateTrendFloat calculates the trend between current and previous float values.
func CalculateTrendFloat(current, previous float64) TrendInfo {
	if previous == 0 {
		if current == 0 {
			return TrendInfo{Direction: "stable"}
		}
		return TrendInfo{Direction: "new"}
	}
	delta := (current - previous) / previous * 100
	delta = math.Round(delta*10) / 10
	direction := "stable"
	if delta > 0.5 {
		direction = "up"
	} else if delta < -0.5 {
		direction = "down"
	}
	return TrendInfo{Delta: &delta, Direction: direction}
}

// CalculateTrendFloatWithBaseline calculates trend when baseline existence is explicit.
func CalculateTrendFloatWithBaseline(current, previous float64, hasBaseline bool) TrendInfo {
	if !hasBaseline {
		if current == 0 {
			return TrendInfo{Direction: "stable"}
		}
		return TrendInfo{Direction: "new"}
	}
	if previous == 0 {
		if current == 0 {
			return TrendInfo{Direction: "stable"}
		}
		return TrendInfo{Direction: "up"}
	}
	delta := (current - previous) / previous * 100
	delta = math.Round(delta*10) / 10
	direction := "stable"
	if delta > 0.5 {
		direction = "up"
	} else if delta < -0.5 {
		direction = "down"
	}
	return TrendInfo{Delta: &delta, Direction: direction}
}

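// The baseline-aware variant distinguishes "the previous period existed but had a zero value"
// from "there was no previous period at all". Illustrative comparison:
//
//	CalculateTrendFloat(0.05, 0)                    // Direction="new" (cannot tell the two cases apart)
//	CalculateTrendFloatWithBaseline(0.05, 0, true)  // Direction="up"  (baseline traffic existed, value rose from 0)
//	CalculateTrendFloatWithBaseline(0.05, 0, false) // Direction="new" (no baseline traffic at all)
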
// RequestStats contains request-related statistics
type RequestStats struct {
	Total     int64   `json:"total"`
	Success   int64   `json:"success"`
	Failed    int64   `json:"failed"`
	ErrorRate float64 `json:"error_rate"`
}

// TokenStats contains token usage statistics
type TokenStats struct {
	Total  int64 `json:"total"`
	Input  int64 `json:"input"`
	Output int64 `json:"output"`
}

// LatencyStats contains latency statistics
type LatencyStats struct {
	AvgMs float64 `json:"avg_ms"`
}

// CountStats contains simple count statistics
type CountStats struct {
	Total  int64 `json:"total"`
	Active int64 `json:"active"`
}

// ProviderKeyStats contains provider key statistics
type ProviderKeyStats struct {
	Total        int64 `json:"total"`
	Active       int64 `json:"active"`
	Suspended    int64 `json:"suspended"`
	AutoDisabled int64 `json:"auto_disabled"`
}

// TopModelStat contains model usage statistics
type TopModelStat struct {
	Model    string `json:"model"`
	Requests int64  `json:"requests"`
	Tokens   int64  `json:"tokens"`
}

// DashboardTrends contains trend data for dashboard metrics
type DashboardTrends struct {
	Requests  TrendInfo `json:"requests"`
	Tokens    TrendInfo `json:"tokens"`
	ErrorRate TrendInfo `json:"error_rate"`
	Latency   TrendInfo `json:"latency"`
}

// DashboardSummaryResponse is the response for dashboard summary endpoint
type DashboardSummaryResponse struct {
	Period       string           `json:"period,omitempty"`
	Requests     RequestStats     `json:"requests"`
	Tokens       TokenStats       `json:"tokens"`
	Latency      LatencyStats     `json:"latency"`
	Masters      CountStats       `json:"masters"`
	Keys         CountStats       `json:"keys"`
	ProviderKeys ProviderKeyStats `json:"provider_keys"`
	TopModels    []TopModelStat   `json:"top_models"`
	Trends       *DashboardTrends `json:"trends,omitempty"` // Only present when include_trends=true
	UpdatedAt    int64            `json:"updated_at"`
}

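// Illustrative serialized shape of DashboardSummaryResponse (field names follow the struct
// tags above; the values are made up):
//
//	{
//	  "period": "today",
//	  "requests": {"total": 1200, "success": 1180, "failed": 20, "error_rate": 0.0167},
//	  "tokens": {"total": 350000, "input": 200000, "output": 150000},
//	  "latency": {"avg_ms": 812.4},
//	  "masters": {"total": 3, "active": 3},
//	  "keys": {"total": 25, "active": 21},
//	  "provider_keys": {"total": 8, "active": 6, "suspended": 1, "auto_disabled": 1},
//	  "top_models": [{"model": "example-model", "requests": 640, "tokens": 210000}],
//	  "trends": {"requests": {"delta": 12.5, "direction": "up"}, "tokens": {"delta": 3.1, "direction": "up"},
//	             "error_rate": {"delta": -0.4, "direction": "stable"}, "latency": {"delta": 0.2, "direction": "stable"}},
//	  "updated_at": 1767225600
//	}
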
// GetSummary godoc
// @Summary Dashboard summary
// @Description Returns aggregated metrics for dashboard display including requests, tokens, latency, masters, keys, and provider keys statistics
// @Tags admin
// @Produce json
// @Security AdminAuth
// @Param period query string false "time period: today, week, month, last7d, last30d, all"
// @Param since query int false "unix seconds"
// @Param until query int false "unix seconds"
// @Param include_trends query bool false "include trend data comparing to previous period"
// @Success 200 {object} ResponseEnvelope{data=DashboardSummaryResponse}
// @Failure 400 {object} ResponseEnvelope{data=MapData}
// @Failure 500 {object} ResponseEnvelope{data=MapData}
// @Router /admin/dashboard/summary [get]
func (h *DashboardHandler) GetSummary(c *gin.Context) {
	rng, err := parseStatsRange(c)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	// Parse include_trends parameter
	includeTrends := c.Query("include_trends") == "true"

	// Build log query with time range
	logQuery := h.logBaseQuery()
	logQuery = applyStatsRange(logQuery, rng)

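	// Each aggregation below reuses logQuery through Session(&gorm.Session{}), which starts a
	// fresh GORM session per query so the shared time-range conditions are kept while the
	// individual Count/Select/Group clauses do not accumulate onto one another.
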
	// 1. Request statistics
	var totalRequests int64
	if err := logQuery.Session(&gorm.Session{}).Count(&totalRequests).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count requests", "details": err.Error()})
		return
	}

	type statusCount struct {
		StatusCode int
		Cnt        int64
	}
	var statusCounts []statusCount
	if err := logQuery.Session(&gorm.Session{}).
		Select("status_code, COUNT(*) as cnt").
		Group("status_code").
		Scan(&statusCounts).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count by status", "details": err.Error()})
		return
	}

	var successCount, failedCount int64
	for _, sc := range statusCounts {
		if sc.StatusCode >= 200 && sc.StatusCode < 400 {
			successCount += sc.Cnt
		} else {
			failedCount += sc.Cnt
		}
	}

	errorRate := 0.0
	if totalRequests > 0 {
		errorRate = float64(failedCount) / float64(totalRequests)
	}

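	// Note: errorRate is a fraction in [0, 1], not a percentage; e.g. 20 failed out of 1200
	// total requests yields an error_rate of roughly 0.0167 in the response.
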
	// 2. Token statistics
	type tokenSums struct {
		TokensIn   int64
		TokensOut  int64
		AvgLatency float64
	}
	var ts tokenSums
	if err := logQuery.Session(&gorm.Session{}).
		Select("COALESCE(SUM(tokens_in),0) as tokens_in, COALESCE(SUM(tokens_out),0) as tokens_out, COALESCE(AVG(latency_ms),0) as avg_latency").
		Scan(&ts).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to aggregate tokens", "details": err.Error()})
		return
	}

	// 3. Master statistics
	var totalMasters, activeMasters int64
	if err := h.db.Model(&model.Master{}).Count(&totalMasters).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count masters", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.Master{}).Where("status = ?", "active").Count(&activeMasters).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count active masters", "details": err.Error()})
		return
	}

	// 4. Key (child token) statistics
	var totalKeys, activeKeys int64
	if err := h.db.Model(&model.Key{}).Count(&totalKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.Key{}).Where("status = ?", "active").Count(&activeKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count active keys", "details": err.Error()})
		return
	}

	// 5. Provider key statistics
	var totalProviderKeys, activeProviderKeys, suspendedProviderKeys, autoDisabledProviderKeys int64
	if err := h.db.Model(&model.APIKey{}).Count(&totalProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count provider keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.APIKey{}).Where("status = ?", "active").Count(&activeProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count active provider keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.APIKey{}).Where("status = ?", "suspended").Count(&suspendedProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count suspended provider keys", "details": err.Error()})
		return
	}
	if err := h.db.Model(&model.APIKey{}).Where("status = ?", "auto_disabled").Count(&autoDisabledProviderKeys).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to count auto_disabled provider keys", "details": err.Error()})
		return
	}

	// 6. Top models (limit to 10)
	type modelStat struct {
		ModelName string
		Cnt       int64
		Tokens    int64
	}
	var topModels []modelStat
	if err := logQuery.Session(&gorm.Session{}).
		Select("model_name, COUNT(*) as cnt, COALESCE(SUM(tokens_in + tokens_out),0) as tokens").
		Group("model_name").
		Order("cnt DESC").
		Limit(10).
		Scan(&topModels).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get top models", "details": err.Error()})
		return
	}

	topModelStats := make([]TopModelStat, 0, len(topModels))
	for _, m := range topModels {
		topModelStats = append(topModelStats, TopModelStat{
			Model:    m.ModelName,
			Requests: m.Cnt,
			Tokens:   m.Tokens,
		})
	}

	// Calculate trends if requested
	var trends *DashboardTrends
	if includeTrends && rng.Period != "" && rng.Period != "all" {
		prevStart, prevEnd := previousPeriodWindow(rng.Period)
		if !prevStart.IsZero() && !prevEnd.IsZero() {
			prevStats, err := h.aggregateFromLogRecords(prevStart, prevEnd)
			if err == nil {
				hasBaseline := prevStats.Requests > 0
				prevErrorRate := 0.0
				if hasBaseline {
					prevErrorRate = float64(prevStats.Failed) / float64(prevStats.Requests)
				}
				prevAvgLatency := 0.0
				if hasBaseline {
					prevAvgLatency = float64(prevStats.LatencySumMs) / float64(prevStats.Requests)
				}

				trends = &DashboardTrends{
					Requests:  CalculateTrend(totalRequests, prevStats.Requests),
					Tokens:    CalculateTrend(ts.TokensIn+ts.TokensOut, prevStats.TokensIn+prevStats.TokensOut),
					ErrorRate: CalculateTrendFloatWithBaseline(errorRate, prevErrorRate, hasBaseline),
					Latency:   CalculateTrendFloatWithBaseline(ts.AvgLatency, prevAvgLatency, hasBaseline),
				}
			}
		}
	}

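	// If no comparison window exists or the baseline query failed, trends stays nil and the
	// "trends" field is omitted from the response (it is tagged with omitempty).
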
	c.JSON(http.StatusOK, DashboardSummaryResponse{
		Period: rng.Period,
		Requests: RequestStats{
			Total:     totalRequests,
			Success:   successCount,
			Failed:    failedCount,
			ErrorRate: errorRate,
		},
		Tokens: TokenStats{
			Total:  ts.TokensIn + ts.TokensOut,
			Input:  ts.TokensIn,
			Output: ts.TokensOut,
		},
		Latency: LatencyStats{
			AvgMs: ts.AvgLatency,
		},
		Masters: CountStats{
			Total:  totalMasters,
			Active: activeMasters,
		},
		Keys: CountStats{
			Total:  totalKeys,
			Active: activeKeys,
		},
		ProviderKeys: ProviderKeyStats{
			Total:        totalProviderKeys,
			Active:       activeProviderKeys,
			Suspended:    suspendedProviderKeys,
			AutoDisabled: autoDisabledProviderKeys,
		},
		TopModels: topModelStats,
		Trends:    trends,
		UpdatedAt: time.Now().UTC().Unix(),
	})
}

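// Illustrative request against the route declared above (assumes the handler is mounted at
// /admin/dashboard/summary and that AdminAuth is supplied as a bearer token; adjust host,
// port, and auth to match the actual server setup):
//
//	curl -H "Authorization: Bearer $ADMIN_TOKEN" \
//	  "http://localhost:8080/admin/dashboard/summary?period=today&include_trends=true"
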
// aggregatedStats holds aggregated statistics from daily_stats or log_records
type aggregatedStats struct {
	Requests     int64
	Success      int64
	Failed       int64
	TokensIn     int64
	TokensOut    int64
	LatencySumMs int64
}

// previousPeriodWindow calculates the comparison window for trend calculation
func previousPeriodWindow(period string) (start, end time.Time) {
	now := time.Now().UTC()
	startOfDay := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)

	switch period {
	case "today":
		// Compare to yesterday
		yesterday := startOfDay.AddDate(0, 0, -1)
		return yesterday, startOfDay
	case "last7d":
		// Compare: last 7 days vs 14-7 days ago
		return now.AddDate(0, 0, -14), now.AddDate(0, 0, -7)
	case "last30d":
		// Compare: last 30 days vs 60-30 days ago
		return now.AddDate(0, 0, -60), now.AddDate(0, 0, -30)
	case "week":
		// Compare to previous calendar week
		weekday := int(startOfDay.Weekday())
		if weekday == 0 {
			weekday = 7
		}
		currentWeekStart := startOfDay.AddDate(0, 0, -(weekday - 1))
		prevWeekStart := currentWeekStart.AddDate(0, 0, -7)
		return prevWeekStart, currentWeekStart
	case "month":
		// Compare to previous calendar month
		currentMonthStart := time.Date(now.Year(), now.Month(), 1, 0, 0, 0, 0, time.UTC)
		prevMonthStart := currentMonthStart.AddDate(0, -1, 0)
		return prevMonthStart, currentMonthStart
	default:
		return time.Time{}, time.Time{}
	}
}

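// Worked examples for previousPeriodWindow (illustrative; the returned pair is consumed as a
// half-open interval [start, end) by aggregateFromLogRecords below). With now = 2026-01-13T17:00Z,
// a Tuesday:
//
//	"today" -> [2026-01-12T00:00Z, 2026-01-13T00:00Z)  // all of yesterday
//	"week"  -> [2026-01-05T00:00Z, 2026-01-12T00:00Z)  // previous Monday-to-Monday calendar week
//	"month" -> [2025-12-01T00:00Z, 2026-01-01T00:00Z)  // previous calendar month
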
// aggregateFromLogRecords queries log_records directly for the given time range
func (h *DashboardHandler) aggregateFromLogRecords(start, end time.Time) (aggregatedStats, error) {
	var stats struct {
		Requests     int64
		Success      int64
		Failed       int64
		TokensIn     int64
		TokensOut    int64
		LatencySumMs int64
	}

	err := h.logBaseQuery().
		Select(`
			COUNT(*) as requests,
			SUM(CASE WHEN status_code >= 200 AND status_code < 400 THEN 1 ELSE 0 END) as success,
			SUM(CASE WHEN status_code >= 400 OR status_code = 0 THEN 1 ELSE 0 END) as failed,
			COALESCE(SUM(tokens_in), 0) as tokens_in,
			COALESCE(SUM(tokens_out), 0) as tokens_out,
			COALESCE(SUM(latency_ms), 0) as latency_sum_ms
		`).
		Where("created_at >= ? AND created_at < ?", start, end).
		Scan(&stats).Error

	return aggregatedStats{
		Requests:     stats.Requests,
		Success:      stats.Success,
		Failed:       stats.Failed,
		TokensIn:     stats.TokensIn,
		TokensOut:    stats.TokensOut,
		LatencySumMs: stats.LatencySumMs,
	}, err
}