feat(api): add admin traffic chart statistics endpoint
Add new endpoint GET /admin/logs/stats/traffic-chart to provide aggregated traffic metrics grouped by time and model. Features include:

- Time granularity selection (hour/minute)
- Top-N model breakdown with "other" aggregation
- Metrics for request counts and token usage

Includes generated Swagger documentation.
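For orientation, here is a minimal client sketch showing one way the new endpoint might be consumed. It is illustrative only: the base URL, the Authorization header, and the query values are assumptions rather than part of this commit, and the local trafficChartResponse struct just mirrors a subset of the TrafficChartResponse type added below.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// trafficChartResponse mirrors a subset of the TrafficChartResponse type added in this commit.
type trafficChartResponse struct {
	Granularity string   `json:"granularity"`
	Since       int64    `json:"since"`
	Until       int64    `json:"until"`
	Models      []string `json:"models"`
	Buckets     []struct {
		Time  string `json:"time"`
		Total struct {
			Count     int64 `json:"count"`
			TokensIn  int64 `json:"tokens_in"`
			TokensOut int64 `json:"tokens_out"`
		} `json:"total"`
	} `json:"buckets"`
}

func main() {
	// Hypothetical host and admin credential; the real deployment's auth scheme may differ.
	endpoint := "http://localhost:8080/admin/logs/stats/traffic-chart?granularity=hour&top_n=5"
	req, err := http.NewRequest(http.MethodGet, endpoint, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <admin-token>") // placeholder credential

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out trafficChartResponse
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	// Print one line per time bucket with its aggregate totals.
	for _, b := range out.Buckets {
		fmt.Printf("%s requests=%d tokens_in=%d tokens_out=%d\n",
			b.Time, b.Total.Count, b.Total.TokensIn, b.Total.TokensOut)
	}
}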
@@ -609,6 +609,223 @@ func (h *Handler) logStatsByMinute(c *gin.Context, q *gorm.DB, sinceTime, untilT
	c.JSON(http.StatusOK, GroupedStatsResponse{Items: items})
}

// TrafficMetrics contains the metrics for a model in a bucket
type TrafficMetrics struct {
	Count     int64 `json:"count"`
	TokensIn  int64 `json:"tokens_in"`
	TokensOut int64 `json:"tokens_out"`
}

// TrafficBucket represents one time bucket with model breakdown
type TrafficBucket struct {
	Time      string                    `json:"time"`
	Timestamp int64                     `json:"timestamp"`
	Breakdown map[string]TrafficMetrics `json:"breakdown"`
	Total     TrafficMetrics            `json:"total"`
}

// TrafficChartResponse is the response for traffic chart API
type TrafficChartResponse struct {
	Granularity string          `json:"granularity"`
	Since       int64           `json:"since"`
	Until       int64           `json:"until"`
	Models      []string        `json:"models"`
	Buckets     []TrafficBucket `json:"buckets"`
}
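
// Example response body (illustrative values only; the model names and numbers
// below are hypothetical, not taken from a real deployment):
//
//	{
//	  "granularity": "hour",
//	  "since": 1736640000,
//	  "until": 1736726400,
//	  "models": ["gpt-4o", "claude-3", "other"],
//	  "buckets": [
//	    {
//	      "time": "2025-01-12T00:00:00Z",
//	      "timestamp": 1736640000,
//	      "breakdown": {
//	        "gpt-4o": {"count": 12, "tokens_in": 3400, "tokens_out": 980},
//	        "other": {"count": 3, "tokens_in": 520, "tokens_out": 140}
//	      },
//	      "total": {"count": 15, "tokens_in": 3920, "tokens_out": 1120}
//	    }
//	  ]
//	}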

const (
	defaultTrafficTopN = 5
	maxTrafficTopN     = 20
)

// GetTrafficChart godoc
// @Summary Traffic chart data (admin)
// @Description Get time × model aggregated data for stacked traffic charts. Returns time buckets with per-model breakdown.
// @Tags admin
// @Produce json
// @Security AdminAuth
// @Param granularity query string false "Time granularity: hour (default) or minute" Enums(hour, minute)
// @Param since query int false "Start time (unix seconds), defaults to 24h ago"
// @Param until query int false "End time (unix seconds), defaults to now"
// @Param top_n query int false "Number of top models to return (1-20), defaults to 5"
// @Success 200 {object} TrafficChartResponse
// @Failure 400 {object} gin.H
// @Failure 500 {object} gin.H
// @Router /admin/logs/stats/traffic-chart [get]
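// Example request (hypothetical values, for illustration only):
//
//	GET /admin/logs/stats/traffic-chart?granularity=minute&since=1736640000&until=1736661600&top_n=5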
func (h *Handler) GetTrafficChart(c *gin.Context) {
	// Parse granularity
	granularity := strings.TrimSpace(c.Query("granularity"))
	if granularity == "" {
		granularity = "hour"
	}
	if granularity != "hour" && granularity != "minute" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "granularity must be 'hour' or 'minute'"})
		return
	}

	// Parse time range
	now := time.Now().UTC()
	var sinceTime, untilTime time.Time

	if t, ok := parseUnixSeconds(c.Query("since")); ok {
		sinceTime = t
	} else {
		sinceTime = now.Add(-24 * time.Hour)
	}

	if t, ok := parseUnixSeconds(c.Query("until")); ok {
		untilTime = t
	} else {
		untilTime = now
	}

	// Validate time range for minute granularity
	if granularity == "minute" {
		if c.Query("since") == "" || c.Query("until") == "" {
			c.JSON(http.StatusBadRequest, gin.H{"error": "minute-level aggregation requires both 'since' and 'until' parameters"})
			return
		}
		duration := untilTime.Sub(sinceTime)
		if duration > maxMinuteRangeDuration {
			c.JSON(http.StatusBadRequest, gin.H{
				"error":        "time range too large for minute granularity",
				"max_hours":    6,
				"actual_hours": duration.Hours(),
			})
			return
		}
		if duration < 0 {
			c.JSON(http.StatusBadRequest, gin.H{"error": "'until' must be after 'since'"})
			return
		}
	}

	// Parse top_n
	topN := defaultTrafficTopN
	if raw := strings.TrimSpace(c.Query("top_n")); raw != "" {
		if v, err := strconv.Atoi(raw); err == nil && v > 0 {
			topN = v
		}
	}
	if topN > maxTrafficTopN {
		c.JSON(http.StatusBadRequest, gin.H{"error": "top_n cannot exceed 20"})
		return
	}

	// Build query
	q := h.logBaseQuery().
		Where("created_at >= ?", sinceTime).
		Where("created_at <= ?", untilTime)

	// Select with time truncation based on granularity
	var truncFunc string
	if granularity == "minute" {
		truncFunc = "DATE_TRUNC('minute', created_at)"
	} else {
		truncFunc = "DATE_TRUNC('hour', created_at)"
	}

	type bucketModelStats struct {
		Bucket    time.Time
		ModelName string
		Cnt       int64
		TokensIn  int64
		TokensOut int64
	}
	var rows []bucketModelStats

	if err := q.Select(truncFunc+" as bucket, model_name, COUNT(*) as cnt, COALESCE(SUM(tokens_in),0) as tokens_in, COALESCE(SUM(tokens_out),0) as tokens_out").
		Group("bucket, model_name").
		Order("bucket ASC, cnt DESC").
		Scan(&rows).Error; err != nil {
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to aggregate traffic data", "details": err.Error()})
		return
	}

	// Calculate global model counts for top_n selection
	modelCounts := make(map[string]int64)
	for _, r := range rows {
		modelCounts[r.ModelName] += r.Cnt
	}

	// Get top N models
	type modelCount struct {
		name  string
		count int64
	}
	var modelList []modelCount
	for name, cnt := range modelCounts {
		modelList = append(modelList, modelCount{name, cnt})
	}
	// Sort by count descending
	for i := 0; i < len(modelList)-1; i++ {
		for j := i + 1; j < len(modelList); j++ {
			if modelList[j].count > modelList[i].count {
				modelList[i], modelList[j] = modelList[j], modelList[i]
			}
		}
	}

	topModels := make(map[string]bool)
	var modelNames []string
	for i := 0; i < len(modelList) && i < topN; i++ {
		topModels[modelList[i].name] = true
		modelNames = append(modelNames, modelList[i].name)
	}
	hasOther := len(modelList) > topN
	if hasOther {
		modelNames = append(modelNames, "other")
	}

	// Build buckets with breakdown
	bucketMap := make(map[int64]*TrafficBucket)
	var bucketOrder []int64

	for _, r := range rows {
		ts := r.Bucket.Unix()
		bucket, exists := bucketMap[ts]
		if !exists {
			bucket = &TrafficBucket{
				Time:      r.Bucket.UTC().Format(time.RFC3339),
				Timestamp: ts,
				Breakdown: make(map[string]TrafficMetrics),
			}
			bucketMap[ts] = bucket
			bucketOrder = append(bucketOrder, ts)
		}

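		// Models outside the global top N are folded into the synthetic "other" series.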
		modelKey := r.ModelName
		if !topModels[modelKey] {
			modelKey = "other"
		}

		existing := bucket.Breakdown[modelKey]
		bucket.Breakdown[modelKey] = TrafficMetrics{
			Count:     existing.Count + r.Cnt,
			TokensIn:  existing.TokensIn + r.TokensIn,
			TokensOut: existing.TokensOut + r.TokensOut,
		}

		bucket.Total.Count += r.Cnt
		bucket.Total.TokensIn += r.TokensIn
		bucket.Total.TokensOut += r.TokensOut
	}

	// Build response buckets in order
	buckets := make([]TrafficBucket, 0, len(bucketOrder))
	for _, ts := range bucketOrder {
		buckets = append(buckets, *bucketMap[ts])
	}

	c.JSON(http.StatusOK, TrafficChartResponse{
		Granularity: granularity,
		Since:       sinceTime.Unix(),
		Until:       untilTime.Unix(),
		Models:      modelNames,
		Buckets:     buckets,
	})
}

// ListSelfLogs godoc
// @Summary List logs (master)
// @Description List request logs for the authenticated master