feat(auth): enhance security with token hashing and sync integration

- Add token hash fields to Master and Key models for indexed lookups (digest helper sketched below)
- Implement SyncService integration in admin and master handlers
- Update master key validation with backward-compatible digest lookup
- Hash child keys in the database and store token digests for Redis sync
- Add master metadata sync to Redis for balancer validation
- Ensure backward compatibility with legacy rows during migration
Author: zenfun
Date: 2025-12-05 00:17:22 +08:00
Parent: 8645b22b83
Commit: 25e5e105b3
7 changed files with 123 additions and 41 deletions
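Both the digest lookups and the auth:token:* Redis keys in the diffs below rely on util.HashToken, which is not touched by this change set. A minimal sketch of what that helper is assumed to look like, given the 64-character digest columns: a SHA-256 hex digest used for indexing, with bcrypt still guarding the stored secret.

// internal/util/hash.go (assumed shape; this file is not part of the diff)
package util

import (
	"crypto/sha256"
	"encoding/hex"
)

// HashToken returns a deterministic SHA-256 hex digest of a raw token.
// The 64-character digest is what the unique master_key_digest and
// token_hash columns index and what the auth:token:<digest> Redis keys
// are built from; the bcrypt hash stored alongside it is what actually
// proves possession of the raw secret.
func HashToken(token string) string {
	sum := sha256.Sum256([]byte(token))
	return hex.EncodeToString(sum[:])
}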

View File

@@ -77,8 +77,8 @@ func main() {
healthService := service.NewHealthCheckService(db, rdb)
handler := api.NewHandler(db, syncService, logWriter)
adminHandler := api.NewAdminHandler(masterService)
masterHandler := api.NewMasterHandler(masterService)
adminHandler := api.NewAdminHandler(masterService, syncService)
masterHandler := api.NewMasterHandler(masterService, syncService)
// 4.1 Prime Redis snapshots so DP can start with data
if err := syncService.SyncAll(db); err != nil {

View File

@@ -9,10 +9,11 @@ import (
type AdminHandler struct {
masterService *service.MasterService
syncService *service.SyncService
}
func NewAdminHandler(masterService *service.MasterService) *AdminHandler {
return &AdminHandler{masterService: masterService}
func NewAdminHandler(masterService *service.MasterService, syncService *service.SyncService) *AdminHandler {
return &AdminHandler{masterService: masterService, syncService: syncService}
}
type CreateMasterRequest struct {
@@ -43,6 +44,11 @@ func (h *AdminHandler) CreateMaster(c *gin.Context) {
return
}
if err := h.syncService.SyncMaster(master); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to sync master key", "details": err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{
"id": master.ID,
"name": master.Name,

View File

@@ -11,10 +11,11 @@ import (
type MasterHandler struct {
masterService *service.MasterService
syncService *service.SyncService
}
func NewMasterHandler(masterService *service.MasterService) *MasterHandler {
return &MasterHandler{masterService: masterService}
func NewMasterHandler(masterService *service.MasterService, syncService *service.SyncService) *MasterHandler {
return &MasterHandler{masterService: masterService, syncService: syncService}
}
type IssueChildKeyRequest struct {
@@ -55,6 +56,11 @@ func (h *MasterHandler) IssueChildKey(c *gin.Context) {
return
}
if err := h.syncService.SyncKey(key); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to sync child key", "details": err.Error()})
return
}
c.JSON(http.StatusCreated, gin.H{
"id": key.ID,
"key_secret": rawChildKey,

View File

@@ -10,9 +10,10 @@ import (
type Master struct {
gorm.Model
Name string `gorm:"size:255" json:"name"`
MasterKey string `gorm:"size:255;uniqueIndex" json:"-"` // Hashed master key
Group string `gorm:"size:100;default:'default'" json:"group"`
Epoch int64 `gorm:"default:1" json:"epoch"`
MasterKey string `gorm:"size:255" json:"-"` // bcrypt hash of master key
MasterKeyDigest string `gorm:"size:64;uniqueIndex" json:"-"` // sha256 digest for lookup
Group string `gorm:"size:100;default:'default'" json:"group"` // routing group
Epoch int64 `gorm:"default:1" json:"epoch"` // used for revocation/rotation
Status string `gorm:"size:50;default:'active'" json:"status"` // active, suspended
MaxChildKeys int `gorm:"default:5" json:"max_child_keys"`
GlobalQPS int `gorm:"default:3" json:"global_qps"`
@@ -22,10 +23,11 @@ type Master struct {
type Key struct {
gorm.Model
MasterID uint `gorm:"not null;index" json:"master_id"`
KeySecret string `gorm:"size:255;uniqueIndex" json:"key_secret"`
Group string `gorm:"size:100;default:'default'" json:"group"`
KeySecret string `gorm:"size:255;column:key_secret" json:"-"` // bcrypt hash of child key
TokenHash string `gorm:"size:64;uniqueIndex" json:"token_hash"` // sha256 digest of child key
Group string `gorm:"size:100;default:'default'" json:"group"` // routing group
Scopes string `gorm:"size:1024" json:"scopes"` // Comma-separated scopes
IssuedAtEpoch int64 `gorm:"not null" json:"issued_at_epoch"`
IssuedAtEpoch int64 `gorm:"not null" json:"issued_at_epoch"` // copy of master epoch at issuance
Status string `gorm:"size:50;default:'active'" json:"status"` // active, suspended
}
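The new MasterKeyDigest and TokenHash columns are declared only through GORM tags, so the schema change itself is implied rather than shown. A minimal sketch of the migration step, assuming the service applies GORM's AutoMigrate at startup (the call site and package placement are assumptions, not part of this diff); legacy rows keep empty digests until the lookup path backfills them.

// Assumed startup migration; GORM adds master_key_digest and token_hash
// along with their unique indexes. Package placement is hypothetical.
package migrate

import (
	"gorm.io/gorm"

	"github.com/ez-api/ez-api/internal/model"
)

func MigrateAuthModels(db *gorm.DB) error {
	return db.AutoMigrate(&model.Master{}, &model.Key{})
}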

View File

@@ -5,8 +5,10 @@ import (
"encoding/hex"
"errors"
"fmt"
"strings"
"github.com/ez-api/ez-api/internal/model"
"github.com/ez-api/ez-api/internal/util"
"golang.org/x/crypto/bcrypt"
"gorm.io/gorm"
)
@@ -30,9 +32,12 @@ func (s *MasterService) CreateMaster(name, group string, maxChildKeys, globalQPS
return nil, "", fmt.Errorf("failed to hash master key: %w", err)
}
masterKeyDigest := util.HashToken(rawMasterKey)
master := &model.Master{
Name: name,
MasterKey: string(hashedMasterKey),
MasterKeyDigest: masterKeyDigest,
Group: group,
MaxChildKeys: maxChildKeys,
GlobalQPS: globalQPS,
@@ -48,20 +53,42 @@ func (s *MasterService) CreateMaster(name, group string, maxChildKeys, globalQPS
}
func (s *MasterService) ValidateMasterKey(masterKey string) (*model.Master, error) {
// This is inefficient. We should query by a hash or an indexed field.
// For now, we iterate. In a real system, this needs optimization.
var masters []model.Master
if err := s.db.Find(&masters).Error; err != nil {
digest := util.HashToken(masterKey)
var master model.Master
if err := s.db.Where("master_key_digest = ?", digest).First(&master).Error; err != nil {
if !errors.Is(err, gorm.ErrRecordNotFound) {
return nil, err
}
for _, master := range masters {
if bcrypt.CompareHashAndPassword([]byte(master.MasterKey), []byte(masterKey)) == nil {
return &master, nil
// Backward compatibility: look for legacy rows without a digest.
var masters []model.Master
if err := s.db.Where("master_key_digest = '' OR master_key_digest IS NULL").Find(&masters).Error; err != nil {
return nil, err
}
for _, m := range masters {
if bcrypt.CompareHashAndPassword([]byte(m.MasterKey), []byte(masterKey)) == nil {
master = m
// Opportunistically backfill digest for next time.
if strings.TrimSpace(m.MasterKeyDigest) == "" {
_ = s.db.Model(&m).Update("master_key_digest", digest).Error
}
goto verified
}
}
return nil, errors.New("invalid master key")
}
if bcrypt.CompareHashAndPassword([]byte(master.MasterKey), []byte(masterKey)) != nil {
return nil, errors.New("invalid master key")
}
verified:
if master.Status != "active" {
return nil, fmt.Errorf("master is not active")
}
return &master, nil
}
func (s *MasterService) IssueChildKey(masterID uint, group string, scopes string) (*model.Key, string, error) {
@@ -81,9 +108,17 @@ func (s *MasterService) IssueChildKey(masterID uint, group string, scopes string
return nil, "", fmt.Errorf("failed to generate child key: %w", err)
}
tokenHash := util.HashToken(rawChildKey)
hashedChildKey, err := bcrypt.GenerateFromPassword([]byte(rawChildKey), bcrypt.DefaultCost)
if err != nil {
return nil, "", fmt.Errorf("failed to hash child key: %w", err)
}
key := &model.Key{
MasterID: masterID,
KeySecret: rawChildKey, // In a real system, this should also be hashed
KeySecret: string(hashedChildKey),
TokenHash: tokenHash,
Group: group,
Scopes: scopes,
IssuedAtEpoch: master.Epoch,

View File

@@ -23,7 +23,13 @@ func NewSyncService(rdb *redis.Client) *SyncService {
// SyncKey writes a single key into Redis without rebuilding the entire snapshot.
func (s *SyncService) SyncKey(key *model.Key) error {
ctx := context.Background()
tokenHash := util.HashToken(key.KeySecret)
tokenHash := key.TokenHash
if strings.TrimSpace(tokenHash) == "" {
tokenHash = util.HashToken(key.KeySecret) // backward compatibility
}
if strings.TrimSpace(tokenHash) == "" {
return fmt.Errorf("token hash missing for key %d", key.ID)
}
fields := map[string]interface{}{
"master_id": key.MasterID,
@@ -38,6 +44,20 @@ func (s *SyncService) SyncKey(key *model.Key) error {
return nil
}
// SyncMaster writes master metadata into Redis for the balancer to use during validation.
func (s *SyncService) SyncMaster(master *model.Master) error {
ctx := context.Background()
key := fmt.Sprintf("auth:master:%d", master.ID)
if err := s.rdb.HSet(ctx, key, map[string]interface{}{
"epoch": master.Epoch,
"status": master.Status,
"global_qps": master.GlobalQPS,
}).Err(); err != nil {
return fmt.Errorf("write master metadata: %w", err)
}
return nil
}
// SyncProvider writes a single provider into Redis hash storage and updates routing tables.
func (s *SyncService) SyncProvider(provider *model.Provider) error {
ctx := context.Background()
@@ -187,7 +207,13 @@ func (s *SyncService) SyncAll(db *gorm.DB) error {
}
for _, k := range keys {
tokenHash := util.HashToken(k.KeySecret)
tokenHash := strings.TrimSpace(k.TokenHash)
if tokenHash == "" {
tokenHash = util.HashToken(k.KeySecret) // fallback for legacy rows
}
if tokenHash == "" {
return fmt.Errorf("token hash missing for key %d", k.ID)
}
pipe.HSet(ctx, fmt.Sprintf("auth:token:%s", tokenHash), map[string]interface{}{
"master_id": k.MasterID,
"issued_at_epoch": k.IssuedAtEpoch,

View File

@@ -49,11 +49,18 @@ func (s *TokenService) ValidateToken(ctx context.Context, token string) (*TokenI
// 2. Get master metadata from Redis
masterKey := fmt.Sprintf("auth:master:%d", masterID)
masterEpochStr, err := s.rdb.HGet(ctx, masterKey, "epoch").Result()
masterData, err := s.rdb.HGetAll(ctx, masterKey).Result()
if err != nil {
return nil, fmt.Errorf("failed to get master epoch: %w", err)
return nil, fmt.Errorf("failed to get master metadata: %w", err)
}
masterEpoch, _ := strconv.ParseInt(masterEpochStr, 10, 64)
if len(masterData) == 0 {
return nil, errors.New("master metadata not found")
}
masterStatus := masterData["status"]
if masterStatus != "" && masterStatus != "active" {
return nil, errors.New("master is not active")
}
masterEpoch, _ := strconv.ParseInt(masterData["epoch"], 10, 64)
// 3. Core Epoch Validation
if issuedAtEpoch < masterEpoch {
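The epoch comparison this hunk ends on is what makes the new SyncMaster write useful for revocation: bumping a master's epoch and re-syncing it invalidates every child key issued under the old epoch without touching the key rows. A minimal sketch of that flow; RotateMasterEpoch is a hypothetical helper, and only the epoch bump plus the SyncMaster call are implied by this commit.

// Hypothetical control-plane helper illustrating epoch-based revocation.
func (s *MasterService) RotateMasterEpoch(masterID uint, sync *SyncService) error {
	var master model.Master
	if err := s.db.First(&master, masterID).Error; err != nil {
		return err
	}
	// Child keys copy the master epoch at issuance, so raising it makes
	// every previously issued key fail the issuedAtEpoch < masterEpoch check.
	master.Epoch++
	if err := s.db.Model(&master).Update("epoch", master.Epoch).Error; err != nil {
		return err
	}
	// Push the new epoch to Redis so the data plane rejects old keys immediately.
	return sync.SyncMaster(&master)
}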