mirror of
https://github.com/EZ-Api/ez-api.git
synced 2026-01-13 17:47:51 +00:00
refactor(api): update traffic chart response structure
Change the traffic chart API response from bucket-based to series-based to better support frontend visualization libraries. The new format provides a shared X-axis and aligned data arrays for each model series.

- Replace `buckets` with `x` and `series` in the response
- Implement data alignment and zero-filling for time slots
- Update Swagger documentation, including pending definitions

BREAKING CHANGE: The `GET /admin/logs/stats/traffic-chart` response schema has changed. The `buckets` and `models` fields are removed.
This commit is contained in:
@@ -609,34 +609,35 @@ func (h *Handler) logStatsByMinute(c *gin.Context, q *gorm.DB, sinceTime, untilT
|
||||
c.JSON(http.StatusOK, GroupedStatsResponse{Items: items})
|
||||
}
|
||||
|
||||
// TrafficMetrics contains the metrics for a model in a bucket
|
||||
type TrafficMetrics struct {
|
||||
Count int64 `json:"count"`
|
||||
TokensIn int64 `json:"tokens_in"`
|
||||
TokensOut int64 `json:"tokens_out"`
|
||||
// TrafficSeries contains the metrics for a model aligned to the shared time axis.
// Data, TokensIn and TokensOut are parallel arrays: index i holds the value for
// the i-th slot of TrafficChartAxis; slots with no traffic are zero-filled.
type TrafficSeries struct {
	// Name is the model name (e.g. "gpt-4") or "other" for aggregated remaining models.
	Name string `json:"name"`
	// Data is the request count per time slot.
	Data []int64 `json:"data"`
	// TokensIn is the total input tokens per time slot.
	TokensIn []int64 `json:"tokens_in"`
	// TokensOut is the total output tokens per time slot.
	TokensOut []int64 `json:"tokens_out"`
}
|
||||
|
||||
// ModelMetricsMap is a map from model name to TrafficMetrics.
// Keys are model names (e.g. "gpt-4", "claude-3-opus") or "other" for aggregated remaining models.
// Example: {"gpt-4": {"count": 10, "tokens_in": 1000, "tokens_out": 500}, "other": {"count": 3, "tokens_in": 200, "tokens_out": 100}}
// NOTE(review): only referenced by the legacy bucket-based breakdown — confirm it is still needed after the series refactor.
type ModelMetricsMap map[string]TrafficMetrics
|
||||
// TrafficTotals contains aggregated totals aligned to the shared time axis.
// Each array is parallel to TrafficChartAxis: entry i sums all models' values
// for the i-th time slot.
type TrafficTotals struct {
	// Data is the total request count per time slot.
	Data []int64 `json:"data"`
	// TokensIn is the total input tokens per time slot.
	TokensIn []int64 `json:"tokens_in"`
	// TokensOut is the total output tokens per time slot.
	TokensOut []int64 `json:"tokens_out"`
}
|
||||
|
||||
// TrafficBucket represents one time bucket with model breakdown
|
||||
type TrafficBucket struct {
|
||||
Time string `json:"time"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
// Breakdown is a map from model name (e.g. "gpt-4", "claude-3-opus", "other") to its metrics
|
||||
Breakdown ModelMetricsMap `json:"breakdown"`
|
||||
Total TrafficMetrics `json:"total"`
|
||||
// TrafficChartAxis defines the shared time axis for chart data.
// Labels, Timestamps and the arrays inside Totals are parallel: index i
// describes the same time slot in each of them.
type TrafficChartAxis struct {
	// Labels holds one RFC3339 UTC label per time slot.
	Labels []string `json:"labels"`
	// Timestamps holds the matching Unix timestamps (seconds) per slot.
	Timestamps []int64 `json:"timestamps"`
	// Totals holds per-slot aggregates summed over all models.
	Totals TrafficTotals `json:"totals"`
}
|
||||
|
||||
// TrafficChartResponse is the response for traffic chart API
|
||||
type TrafficChartResponse struct {
|
||||
Granularity string `json:"granularity"`
|
||||
Since int64 `json:"since"`
|
||||
Until int64 `json:"until"`
|
||||
Models []string `json:"models"`
|
||||
Buckets []TrafficBucket `json:"buckets"`
|
||||
Granularity string `json:"granularity"`
|
||||
Since int64 `json:"since"`
|
||||
Until int64 `json:"until"`
|
||||
X TrafficChartAxis `json:"x"`
|
||||
Series []TrafficSeries `json:"series"`
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -644,9 +645,121 @@ const (
|
||||
maxTrafficTopN = 20
|
||||
)
|
||||
|
||||
// trafficBucketRow is one scanned row of the grouped traffic query:
// aggregates for a single (time bucket, model) pair.
type trafficBucketRow struct {
	// Bucket is the truncated start time of the bucket (DATE_TRUNC result).
	Bucket time.Time
	// ModelName is the model name as recorded in the logs.
	ModelName string
	// Cnt is the request count for this model in this bucket.
	Cnt int64
	// TokensIn is the summed input token count.
	TokensIn int64
	// TokensOut is the summed output token count.
	TokensOut int64
}
|
||||
|
||||
func buildTrafficChartSeriesResponse(rows []trafficBucketRow, topN int, granularity string, sinceTime, untilTime time.Time) TrafficChartResponse {
|
||||
bucketLabels := make(map[int64]string)
|
||||
bucketOrder := make([]int64, 0)
|
||||
for _, r := range rows {
|
||||
ts := r.Bucket.Unix()
|
||||
if _, exists := bucketLabels[ts]; !exists {
|
||||
bucketLabels[ts] = r.Bucket.UTC().Format(time.RFC3339)
|
||||
bucketOrder = append(bucketOrder, ts)
|
||||
}
|
||||
}
|
||||
|
||||
modelCounts := make(map[string]int64)
|
||||
for _, r := range rows {
|
||||
modelCounts[r.ModelName] += r.Cnt
|
||||
}
|
||||
|
||||
type modelCount struct {
|
||||
name string
|
||||
count int64
|
||||
}
|
||||
modelList := make([]modelCount, 0, len(modelCounts))
|
||||
for name, cnt := range modelCounts {
|
||||
modelList = append(modelList, modelCount{name, cnt})
|
||||
}
|
||||
for i := 0; i < len(modelList)-1; i++ {
|
||||
for j := i + 1; j < len(modelList); j++ {
|
||||
if modelList[j].count > modelList[i].count {
|
||||
modelList[i], modelList[j] = modelList[j], modelList[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
topModels := make(map[string]bool, topN)
|
||||
seriesNames := make([]string, 0, topN+1)
|
||||
for i := 0; i < len(modelList) && i < topN; i++ {
|
||||
topModels[modelList[i].name] = true
|
||||
seriesNames = append(seriesNames, modelList[i].name)
|
||||
}
|
||||
if len(modelList) > topN {
|
||||
seriesNames = append(seriesNames, "other")
|
||||
}
|
||||
|
||||
bucketIndex := make(map[int64]int, len(bucketOrder))
|
||||
labels := make([]string, len(bucketOrder))
|
||||
timestamps := make([]int64, len(bucketOrder))
|
||||
for i, ts := range bucketOrder {
|
||||
bucketIndex[ts] = i
|
||||
labels[i] = bucketLabels[ts]
|
||||
timestamps[i] = ts
|
||||
}
|
||||
|
||||
series := make([]TrafficSeries, len(seriesNames))
|
||||
seriesIndex := make(map[string]int, len(seriesNames))
|
||||
for i, name := range seriesNames {
|
||||
series[i] = TrafficSeries{
|
||||
Name: name,
|
||||
Data: make([]int64, len(bucketOrder)),
|
||||
TokensIn: make([]int64, len(bucketOrder)),
|
||||
TokensOut: make([]int64, len(bucketOrder)),
|
||||
}
|
||||
seriesIndex[name] = i
|
||||
}
|
||||
|
||||
totals := TrafficTotals{
|
||||
Data: make([]int64, len(bucketOrder)),
|
||||
TokensIn: make([]int64, len(bucketOrder)),
|
||||
TokensOut: make([]int64, len(bucketOrder)),
|
||||
}
|
||||
|
||||
for _, r := range rows {
|
||||
ts := r.Bucket.Unix()
|
||||
idx, ok := bucketIndex[ts]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
modelKey := r.ModelName
|
||||
if !topModels[modelKey] {
|
||||
modelKey = "other"
|
||||
}
|
||||
if seriesIdx, exists := seriesIndex[modelKey]; exists {
|
||||
series[seriesIdx].Data[idx] += r.Cnt
|
||||
series[seriesIdx].TokensIn[idx] += r.TokensIn
|
||||
series[seriesIdx].TokensOut[idx] += r.TokensOut
|
||||
}
|
||||
|
||||
totals.Data[idx] += r.Cnt
|
||||
totals.TokensIn[idx] += r.TokensIn
|
||||
totals.TokensOut[idx] += r.TokensOut
|
||||
}
|
||||
|
||||
return TrafficChartResponse{
|
||||
Granularity: granularity,
|
||||
Since: sinceTime.Unix(),
|
||||
Until: untilTime.Unix(),
|
||||
X: TrafficChartAxis{
|
||||
Labels: labels,
|
||||
Timestamps: timestamps,
|
||||
Totals: totals,
|
||||
},
|
||||
Series: series,
|
||||
}
|
||||
}
|
||||
|
||||
// GetTrafficChart godoc
|
||||
// @Summary Traffic chart data (admin)
|
||||
// @Description Get time × model aggregated data for stacked traffic charts. Returns a shared time axis under `x` and per-model series arrays aligned to that axis. Models outside top_n are aggregated under the series name "other".
|
||||
// @Tags admin
|
||||
// @Produce json
|
||||
// @Security AdminAuth
|
||||
@@ -731,14 +844,7 @@ func (h *Handler) GetTrafficChart(c *gin.Context) {
|
||||
truncFunc = "DATE_TRUNC('hour', created_at)"
|
||||
}
|
||||
|
||||
type bucketModelStats struct {
|
||||
Bucket time.Time
|
||||
ModelName string
|
||||
Cnt int64
|
||||
TokensIn int64
|
||||
TokensOut int64
|
||||
}
|
||||
var rows []bucketModelStats
|
||||
var rows []trafficBucketRow
|
||||
|
||||
if err := q.Select(truncFunc + " as bucket, model_name, COUNT(*) as cnt, COALESCE(SUM(tokens_in),0) as tokens_in, COALESCE(SUM(tokens_out),0) as tokens_out").
|
||||
Group("bucket, model_name").
|
||||
@@ -748,88 +854,7 @@ func (h *Handler) GetTrafficChart(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate global model counts for top_n selection
|
||||
modelCounts := make(map[string]int64)
|
||||
for _, r := range rows {
|
||||
modelCounts[r.ModelName] += r.Cnt
|
||||
}
|
||||
|
||||
// Get top N models
|
||||
type modelCount struct {
|
||||
name string
|
||||
count int64
|
||||
}
|
||||
var modelList []modelCount
|
||||
for name, cnt := range modelCounts {
|
||||
modelList = append(modelList, modelCount{name, cnt})
|
||||
}
|
||||
// Sort by count descending
|
||||
for i := 0; i < len(modelList)-1; i++ {
|
||||
for j := i + 1; j < len(modelList); j++ {
|
||||
if modelList[j].count > modelList[i].count {
|
||||
modelList[i], modelList[j] = modelList[j], modelList[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
topModels := make(map[string]bool)
|
||||
var modelNames []string
|
||||
for i := 0; i < len(modelList) && i < topN; i++ {
|
||||
topModels[modelList[i].name] = true
|
||||
modelNames = append(modelNames, modelList[i].name)
|
||||
}
|
||||
hasOther := len(modelList) > topN
|
||||
if hasOther {
|
||||
modelNames = append(modelNames, "other")
|
||||
}
|
||||
|
||||
// Build buckets with breakdown
|
||||
bucketMap := make(map[int64]*TrafficBucket)
|
||||
var bucketOrder []int64
|
||||
|
||||
for _, r := range rows {
|
||||
ts := r.Bucket.Unix()
|
||||
bucket, exists := bucketMap[ts]
|
||||
if !exists {
|
||||
bucket = &TrafficBucket{
|
||||
Time: r.Bucket.UTC().Format(time.RFC3339),
|
||||
Timestamp: ts,
|
||||
Breakdown: make(map[string]TrafficMetrics),
|
||||
}
|
||||
bucketMap[ts] = bucket
|
||||
bucketOrder = append(bucketOrder, ts)
|
||||
}
|
||||
|
||||
modelKey := r.ModelName
|
||||
if !topModels[modelKey] {
|
||||
modelKey = "other"
|
||||
}
|
||||
|
||||
existing := bucket.Breakdown[modelKey]
|
||||
bucket.Breakdown[modelKey] = TrafficMetrics{
|
||||
Count: existing.Count + r.Cnt,
|
||||
TokensIn: existing.TokensIn + r.TokensIn,
|
||||
TokensOut: existing.TokensOut + r.TokensOut,
|
||||
}
|
||||
|
||||
bucket.Total.Count += r.Cnt
|
||||
bucket.Total.TokensIn += r.TokensIn
|
||||
bucket.Total.TokensOut += r.TokensOut
|
||||
}
|
||||
|
||||
// Build response buckets in order
|
||||
buckets := make([]TrafficBucket, 0, len(bucketOrder))
|
||||
for _, ts := range bucketOrder {
|
||||
buckets = append(buckets, *bucketMap[ts])
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, TrafficChartResponse{
|
||||
Granularity: granularity,
|
||||
Since: sinceTime.Unix(),
|
||||
Until: untilTime.Unix(),
|
||||
Models: modelNames,
|
||||
Buckets: buckets,
|
||||
})
|
||||
c.JSON(http.StatusOK, buildTrafficChartSeriesResponse(rows, topN, granularity, sinceTime, untilTime))
|
||||
}
|
||||
|
||||
// ListSelfLogs godoc
|
||||
|
||||
@@ -355,6 +355,83 @@ func TestLogStats_DefaultBehavior(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildTrafficChartSeriesResponse verifies the series-based aggregation:
// axis construction, top-N selection with the "other" fallback, zero-filling
// of slots with no data, and per-slot totals.
func TestBuildTrafficChartSeriesResponse(t *testing.T) {
	// Two consecutive hourly buckets.
	bucket1 := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC)
	bucket2 := time.Date(2025, 1, 1, 1, 0, 0, 0, time.UTC)

	// Model totals across buckets: a=7, b=3, c=8. With topN=2 the named
	// series are "a" and "c"; "b" is folded into "other".
	rows := []trafficBucketRow{
		{Bucket: bucket1, ModelName: "a", Cnt: 5, TokensIn: 10, TokensOut: 20},
		{Bucket: bucket1, ModelName: "b", Cnt: 3, TokensIn: 6, TokensOut: 12},
		{Bucket: bucket2, ModelName: "a", Cnt: 2, TokensIn: 4, TokensOut: 8},
		{Bucket: bucket2, ModelName: "c", Cnt: 8, TokensIn: 16, TokensOut: 32},
	}

	resp := buildTrafficChartSeriesResponse(rows, 2, "hour", bucket1, bucket2)

	// Shared axis: both buckets present, in first-seen (chronological) order.
	if len(resp.X.Labels) != 2 || len(resp.X.Timestamps) != 2 {
		t.Fatalf("expected 2 time buckets, got labels=%d timestamps=%d", len(resp.X.Labels), len(resp.X.Timestamps))
	}
	if resp.X.Labels[0] != bucket1.Format(time.RFC3339) || resp.X.Labels[1] != bucket2.Format(time.RFC3339) {
		t.Fatalf("unexpected labels: %+v", resp.X.Labels)
	}
	if resp.X.Timestamps[0] != bucket1.Unix() || resp.X.Timestamps[1] != bucket2.Unix() {
		t.Fatalf("unexpected timestamps: %+v", resp.X.Timestamps)
	}
	// Per-slot totals: slot0 = 5+3, slot1 = 2+8 (and the token equivalents).
	if resp.X.Totals.Data[0] != 8 || resp.X.Totals.Data[1] != 10 {
		t.Fatalf("unexpected totals data: %+v", resp.X.Totals.Data)
	}
	if resp.X.Totals.TokensIn[0] != 16 || resp.X.Totals.TokensIn[1] != 20 {
		t.Fatalf("unexpected totals tokens_in: %+v", resp.X.Totals.TokensIn)
	}
	if resp.X.Totals.TokensOut[0] != 32 || resp.X.Totals.TokensOut[1] != 40 {
		t.Fatalf("unexpected totals tokens_out: %+v", resp.X.Totals.TokensOut)
	}

	// Index the series by name for easier assertions.
	seriesByName := make(map[string]TrafficSeries, len(resp.Series))
	for _, s := range resp.Series {
		seriesByName[s.Name] = s
	}

	for _, name := range []string{"a", "c", "other"} {
		if _, ok := seriesByName[name]; !ok {
			t.Fatalf("missing series %q", name)
		}
	}

	// "a" has data in both slots.
	aSeries := seriesByName["a"]
	if aSeries.Data[0] != 5 || aSeries.Data[1] != 2 {
		t.Fatalf("unexpected series a data: %+v", aSeries.Data)
	}
	if aSeries.TokensIn[0] != 10 || aSeries.TokensIn[1] != 4 {
		t.Fatalf("unexpected series a tokens_in: %+v", aSeries.TokensIn)
	}
	if aSeries.TokensOut[0] != 20 || aSeries.TokensOut[1] != 8 {
		t.Fatalf("unexpected series a tokens_out: %+v", aSeries.TokensOut)
	}

	// "c" has no traffic in slot 0 — it must be zero-filled there.
	cSeries := seriesByName["c"]
	if cSeries.Data[0] != 0 || cSeries.Data[1] != 8 {
		t.Fatalf("unexpected series c data: %+v", cSeries.Data)
	}
	if cSeries.TokensIn[0] != 0 || cSeries.TokensIn[1] != 16 {
		t.Fatalf("unexpected series c tokens_in: %+v", cSeries.TokensIn)
	}
	if cSeries.TokensOut[0] != 0 || cSeries.TokensOut[1] != 32 {
		t.Fatalf("unexpected series c tokens_out: %+v", cSeries.TokensOut)
	}

	// "other" carries model b's slot-0 traffic and is zero in slot 1.
	otherSeries := seriesByName["other"]
	if otherSeries.Data[0] != 3 || otherSeries.Data[1] != 0 {
		t.Fatalf("unexpected series other data: %+v", otherSeries.Data)
	}
	if otherSeries.TokensIn[0] != 6 || otherSeries.TokensIn[1] != 0 {
		t.Fatalf("unexpected series other tokens_in: %+v", otherSeries.TokensIn)
	}
	if otherSeries.TokensOut[0] != 12 || otherSeries.TokensOut[1] != 0 {
		t.Fatalf("unexpected series other tokens_out: %+v", otherSeries.TokensOut)
	}
}
|
||||
|
||||
func TestTrafficChart_TopNOtherAggregation(t *testing.T) {
|
||||
// Skip test when running with SQLite (no DATE_TRUNC support)
|
||||
// This test requires PostgreSQL for time truncation functions
|
||||
|
||||
Reference in New Issue
Block a user