Mirror of https://github.com/EZ-Api/ez-api.git, synced 2026-01-13 17:47:51 +00:00
feat(auth): enhance security with token hashing and sync integration
- Add token hash fields to Master and Key models for indexed lookups
- Implement SyncService integration in admin and master handlers
- Update master key validation with backward-compatible digest lookup
- Hash child keys in database and store token digests for Redis sync
- Add master metadata sync to Redis for balancer validation
- Ensure backward compatibility with legacy rows during migration
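The diff below pairs every bcrypt-hashed secret with a deterministic digest produced by util.HashToken. bcrypt output embeds a random salt, so it cannot be recomputed from a presented token for an indexed database query or a Redis key; a plain cryptographic digest can. util.HashToken itself is not part of this excerpt; a minimal sketch of such a helper, assuming a SHA-256 hex digest (the actual implementation in internal/util may differ):

package util

import (
    "crypto/sha256"
    "encoding/hex"
)

// HashToken returns a deterministic, non-reversible digest of a token.
// Unlike bcrypt, the same input always yields the same output, which makes
// the digest usable as an indexed column and as a Redis hash key, while the
// bcrypt hash remains the authoritative verifier.
func HashToken(token string) string {
    if token == "" {
        return ""
    }
    sum := sha256.Sum256([]byte(token))
    return hex.EncodeToString(sum[:])
}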
@@ -5,8 +5,10 @@ import (
     "encoding/hex"
     "errors"
     "fmt"
+    "strings"

     "github.com/ez-api/ez-api/internal/model"
+    "github.com/ez-api/ez-api/internal/util"
     "golang.org/x/crypto/bcrypt"
     "gorm.io/gorm"
 )
@@ -30,14 +32,17 @@ func (s *MasterService) CreateMaster(name, group string, maxChildKeys, globalQPS
         return nil, "", fmt.Errorf("failed to hash master key: %w", err)
     }

+    masterKeyDigest := util.HashToken(rawMasterKey)
+
     master := &model.Master{
-        Name:         name,
-        MasterKey:    string(hashedMasterKey),
-        Group:        group,
-        MaxChildKeys: maxChildKeys,
-        GlobalQPS:    globalQPS,
-        Status:       "active",
-        Epoch:        1,
+        Name:            name,
+        MasterKey:       string(hashedMasterKey),
+        MasterKeyDigest: masterKeyDigest,
+        Group:           group,
+        MaxChildKeys:    maxChildKeys,
+        GlobalQPS:       globalQPS,
+        Status:          "active",
+        Epoch:           1,
     }

     if err := s.db.Create(master).Error; err != nil {
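The corresponding model change is not shown in this excerpt. Based on the fields referenced above, the Master model presumably gains a MasterKeyDigest column roughly like the following; only the field names come from the diff, while the gorm tags, embedding, and field types are assumptions:

package model

import "gorm.io/gorm"

// Master, as presumably extended by this commit.
type Master struct {
    gorm.Model
    Name            string
    MasterKey       string // bcrypt hash, verified with bcrypt.CompareHashAndPassword
    MasterKeyDigest string `gorm:"size:64;uniqueIndex"` // deterministic digest for indexed lookups
    Group           string
    MaxChildKeys    int
    GlobalQPS       int
    Status          string
    Epoch           int64
}

An index on master_key_digest is what allows ValidateMasterKey, in the next hunk, to replace the full-table bcrypt scan with a single lookup.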
@@ -48,20 +53,42 @@ func (s *MasterService) CreateMaster(name, group string, maxChildKeys, globalQPS
 }

 func (s *MasterService) ValidateMasterKey(masterKey string) (*model.Master, error) {
-    // This is inefficient. We should query by a hash or an indexed field.
-    // For now, we iterate. In a real system, this needs optimization.
-    var masters []model.Master
-    if err := s.db.Find(&masters).Error; err != nil {
-        return nil, err
-    }
+    digest := util.HashToken(masterKey)

-    for _, master := range masters {
-        if bcrypt.CompareHashAndPassword([]byte(master.MasterKey), []byte(masterKey)) == nil {
-            return &master, nil
+    var master model.Master
+    if err := s.db.Where("master_key_digest = ?", digest).First(&master).Error; err != nil {
+        if !errors.Is(err, gorm.ErrRecordNotFound) {
+            return nil, err
+        }
+
+        // Backward compatibility: look for legacy rows without digest.
+        var masters []model.Master
+        if err := s.db.Where("master_key_digest = '' OR master_key_digest IS NULL").Find(&masters).Error; err != nil {
+            return nil, err
+        }
+        for _, m := range masters {
+            if bcrypt.CompareHashAndPassword([]byte(m.MasterKey), []byte(masterKey)) == nil {
+                master = m
+                // Opportunistically backfill digest for next time.
+                if strings.TrimSpace(m.MasterKeyDigest) == "" {
+                    _ = s.db.Model(&m).Update("master_key_digest", digest).Error
+                }
+                goto verified
+            }
+        }
+        return nil, errors.New("invalid master key")
     }

-    return nil, errors.New("invalid master key")
+    if bcrypt.CompareHashAndPassword([]byte(master.MasterKey), []byte(masterKey)) != nil {
+        return nil, errors.New("invalid master key")
+    }
+
+verified:
     if master.Status != "active" {
         return nil, fmt.Errorf("master is not active")
     }

     return &master, nil
 }

 func (s *MasterService) IssueChildKey(masterID uint, group string, scopes string) (*model.Key, string, error) {
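A hypothetical test sketch (not part of the commit) for the legacy path: a row created before the migration has no digest, so validation should fall back to the bcrypt scan and backfill master_key_digest. The NewMasterService constructor, the in-memory sqlite driver, and the package placement are assumptions:

package service

import (
    "testing"

    "github.com/ez-api/ez-api/internal/model"
    "golang.org/x/crypto/bcrypt"
    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
)

func TestValidateMasterKeyLegacyFallback(t *testing.T) {
    db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
    if err != nil {
        t.Fatal(err)
    }
    if err := db.AutoMigrate(&model.Master{}); err != nil {
        t.Fatal(err)
    }

    // Simulate a pre-migration row: bcrypt hash present, digest column empty.
    hashed, _ := bcrypt.GenerateFromPassword([]byte("mk_legacy_secret"), bcrypt.DefaultCost)
    legacy := &model.Master{Name: "legacy", MasterKey: string(hashed), Status: "active", Epoch: 1}
    if err := db.Create(legacy).Error; err != nil {
        t.Fatal(err)
    }

    svc := NewMasterService(db) // assumed constructor
    got, err := svc.ValidateMasterKey("mk_legacy_secret")
    if err != nil {
        t.Fatalf("legacy master should still validate: %v", err)
    }

    // The digest should have been backfilled on the stored row.
    var reloaded model.Master
    if err := db.First(&reloaded, got.ID).Error; err != nil {
        t.Fatal(err)
    }
    if reloaded.MasterKeyDigest == "" {
        t.Error("expected MasterKeyDigest to be backfilled after validation")
    }
}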
@@ -81,9 +108,17 @@ func (s *MasterService) IssueChildKey(masterID uint, group string, scopes string
         return nil, "", fmt.Errorf("failed to generate child key: %w", err)
     }

+    tokenHash := util.HashToken(rawChildKey)
+
+    hashedChildKey, err := bcrypt.GenerateFromPassword([]byte(rawChildKey), bcrypt.DefaultCost)
+    if err != nil {
+        return nil, "", fmt.Errorf("failed to hash child key: %w", err)
+    }
+
     key := &model.Key{
         MasterID:      masterID,
-        KeySecret:     rawChildKey, // In a real system, this should also be hashed
+        KeySecret:     string(hashedChildKey),
+        TokenHash:     tokenHash,
         Group:         group,
         Scopes:        scopes,
         IssuedAtEpoch: master.Epoch,
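As with Master, the Key model change is not shown here. From the fields used above it presumably looks roughly like this (same caveats: only the field names are from the diff, tags and types are assumptions). Because KeySecret now stores a bcrypt hash, the raw child key is only available in the string returned by IssueChildKey at creation time.

// In internal/model, alongside Master above.
type Key struct {
    gorm.Model
    MasterID      uint
    KeySecret     string // bcrypt hash of the child key (no longer the raw secret)
    TokenHash     string `gorm:"size:64;index"` // deterministic digest, also used as the Redis token key
    Group         string
    Scopes        string
    IssuedAtEpoch int64
}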
@@ -23,7 +23,13 @@ func NewSyncService(rdb *redis.Client) *SyncService {
 // SyncKey writes a single key into Redis without rebuilding the entire snapshot.
 func (s *SyncService) SyncKey(key *model.Key) error {
     ctx := context.Background()
-    tokenHash := util.HashToken(key.KeySecret)
+    tokenHash := key.TokenHash
+    if strings.TrimSpace(tokenHash) == "" {
+        tokenHash = util.HashToken(key.KeySecret) // backward compatibility
+    }
+    if strings.TrimSpace(tokenHash) == "" {
+        return fmt.Errorf("token hash missing for key %d", key.ID)
+    }

     fields := map[string]interface{}{
         "master_id": key.MasterID,
@@ -38,6 +44,20 @@ func (s *SyncService) SyncKey(key *model.Key) error {
     return nil
 }

+// SyncMaster writes master metadata into Redis used by the balancer for validation.
+func (s *SyncService) SyncMaster(master *model.Master) error {
+    ctx := context.Background()
+    key := fmt.Sprintf("auth:master:%d", master.ID)
+    if err := s.rdb.HSet(ctx, key, map[string]interface{}{
+        "epoch":      master.Epoch,
+        "status":     master.Status,
+        "global_qps": master.GlobalQPS,
+    }).Err(); err != nil {
+        return fmt.Errorf("write master metadata: %w", err)
+    }
+    return nil
+}
+
 // SyncProvider writes a single provider into Redis hash storage and updates routing tables.
 func (s *SyncService) SyncProvider(provider *model.Provider) error {
     ctx := context.Background()
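The commit message mentions SyncService integration in the admin and master handlers, but no handler code appears in this excerpt. A sketch of the presumed wiring, pushing state to Redis right after the database write so the balancer never needs to touch the DB; the helper names and error handling are assumptions, and the CreateMaster signature is completed from the truncated hunk header above:

// Hypothetical glue code; only CreateMaster, IssueChildKey, SyncMaster and
// SyncKey are taken from the diff.
func createMasterAndSync(masters *MasterService, sync *SyncService,
    name, group string, maxChildKeys, globalQPS int) (*model.Master, string, error) {

    master, rawMasterKey, err := masters.CreateMaster(name, group, maxChildKeys, globalQPS)
    if err != nil {
        return nil, "", err
    }
    if err := sync.SyncMaster(master); err != nil {
        return nil, "", fmt.Errorf("master %d created but redis sync failed: %w", master.ID, err)
    }
    return master, rawMasterKey, nil
}

func issueChildKeyAndSync(masters *MasterService, sync *SyncService,
    masterID uint, group, scopes string) (*model.Key, string, error) {

    key, rawChildKey, err := masters.IssueChildKey(masterID, group, scopes)
    if err != nil {
        return nil, "", err
    }
    if err := sync.SyncKey(key); err != nil {
        return nil, "", fmt.Errorf("key %d issued but redis sync failed: %w", key.ID, err)
    }
    return key, rawChildKey, nil
}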
@@ -187,7 +207,13 @@ func (s *SyncService) SyncAll(db *gorm.DB) error {
     }

     for _, k := range keys {
-        tokenHash := util.HashToken(k.KeySecret)
+        tokenHash := strings.TrimSpace(k.TokenHash)
+        if tokenHash == "" {
+            tokenHash = util.HashToken(k.KeySecret) // fallback for legacy rows
+        }
+        if tokenHash == "" {
+            return fmt.Errorf("token hash missing for key %d", k.ID)
+        }
         pipe.HSet(ctx, fmt.Sprintf("auth:token:%s", tokenHash), map[string]interface{}{
             "master_id":       k.MasterID,
             "issued_at_epoch": k.IssuedAtEpoch,
@@ -49,11 +49,18 @@ func (s *TokenService) ValidateToken(ctx context.Context, token string) (*TokenI

     // 2. Get master metadata from Redis
     masterKey := fmt.Sprintf("auth:master:%d", masterID)
-    masterEpochStr, err := s.rdb.HGet(ctx, masterKey, "epoch").Result()
+    masterData, err := s.rdb.HGetAll(ctx, masterKey).Result()
     if err != nil {
-        return nil, fmt.Errorf("failed to get master epoch: %w", err)
+        return nil, fmt.Errorf("failed to get master metadata: %w", err)
     }
-    masterEpoch, _ := strconv.ParseInt(masterEpochStr, 10, 64)
+    if len(masterData) == 0 {
+        return nil, errors.New("master metadata not found")
+    }
+    masterStatus := masterData["status"]
+    if masterStatus != "" && masterStatus != "active" {
+        return nil, errors.New("master is not active")
+    }
+    masterEpoch, _ := strconv.ParseInt(masterData["epoch"], 10, 64)

     // 3. Core Epoch Validation
     if issuedAtEpoch < masterEpoch {