Compare commits

...

2 Commits

Author SHA1 Message Date
79427c9f4c 0430
All checks were successful
Build docker and publish / build (20.15.1) (push) Successful in 5m47s
2026-04-29 12:49:45 -07:00
bcefb274ab perf(server): cache speed limit calculations
All checks were successful
Build docker and publish / build (20.15.1) (push) Successful in 5m37s
2026-04-29 01:37:59 -07:00
6 changed files with 240 additions and 21 deletions

View File

@ -49,12 +49,12 @@ jobs:
if [ "${{ github.ref_name }}" = "main" ]; then
echo "DOCKER_TAG_SUFFIX=latest" >> $GITHUB_ENV
echo "CONTAINER_NAME=ppanel-server" >> $GITHUB_ENV
echo "DEPLOY_PATH=/root/bindbox" >> $GITHUB_ENV
echo "DEPLOY_PATH=/root/hifast" >> $GITHUB_ENV
echo "为 main 分支设置生产环境变量"
elif [ "${{ github.ref_name }}" = "internal" ]; then
echo "DOCKER_TAG_SUFFIX=internal" >> $GITHUB_ENV
echo "CONTAINER_NAME=ppanel-server-internal" >> $GITHUB_ENV
echo "DEPLOY_PATH=/root/bindbox" >> $GITHUB_ENV
echo "DEPLOY_PATH=/root/hifast" >> $GITHUB_ENV
echo "为 internal 分支设置开发环境变量"
else
echo "DOCKER_TAG_SUFFIX=${{ github.ref_name }}" >> $GITHUB_ENV

View File

@ -0,0 +1,152 @@
# MySQL 8.0 master/replica compose for two separate servers.
#
# Master server:
# COMPOSE_PROFILES=master docker compose -f config/docker-compose.mysql-replication.yml up -d
#
# Replica server:
# MASTER_HOST=<master_public_or_private_ip> COMPOSE_PROFILES=replica docker compose -f config/docker-compose.mysql-replication.yml up -d
#
# Required env on both servers:
# MYSQL_ROOT_PASSWORD=<strong-root-password>
# MYSQL_REPLICATION_PASSWORD=<strong-replication-password>
#
# Optional env:
# MYSQL_DATABASE=ppanel
# MYSQL_REPLICATION_USER=repl
# MYSQL_MASTER_PORT=3306
# MYSQL_REPLICA_PORT=3306
# MYSQL_SERVER_ID=1 # master default
# MYSQL_REPLICA_ID=2 # replica default
#
# If the master already has data, import a GTID-aware dump into the replica
# before starting replication. Fresh empty deployments can start master first,
# then replica, then point the application at the master.
services:
mysql-master:
image: mysql:8.0
container_name: ppanel-mysql-master
profiles:
- master
restart: always
ports:
- "${MYSQL_MASTER_PORT:-3306}:3306"
environment:
MYSQL_ROOT_PASSWORD: "${MYSQL_ROOT_PASSWORD:?please set MYSQL_ROOT_PASSWORD}"
MYSQL_DATABASE: "${MYSQL_DATABASE:-ppanel}"
MYSQL_REPLICATION_USER: "${MYSQL_REPLICATION_USER:-repl}"
MYSQL_REPLICATION_PASSWORD: "${MYSQL_REPLICATION_PASSWORD:?please set MYSQL_REPLICATION_PASSWORD}"
TZ: Asia/Shanghai
command:
- --default-authentication-plugin=mysql_native_password
- --server-id=${MYSQL_SERVER_ID:-1}
- --log-bin=mysql-bin
- --binlog-format=ROW
- --gtid-mode=ON
- --enforce-gtid-consistency=ON
- --log-replica-updates=ON
- --binlog-expire-logs-seconds=604800
- --max_connections=1000
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_unicode_ci
volumes:
- mysql_master_data:/var/lib/mysql
configs:
- source: mysql_master_init
target: /docker-entrypoint-initdb.d/01-create-replication-user.sh
mode: 0755
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -uroot -p$${MYSQL_ROOT_PASSWORD}"]
interval: 10s
timeout: 5s
retries: 10
logging:
driver: json-file
options:
max-size: 10m
max-file: "3"
mysql-replica:
image: mysql:8.0
container_name: ppanel-mysql-replica
profiles:
- replica
restart: always
ports:
- "${MYSQL_REPLICA_PORT:-3306}:3306"
environment:
MYSQL_ROOT_PASSWORD: "${MYSQL_ROOT_PASSWORD:?please set MYSQL_ROOT_PASSWORD}"
MYSQL_DATABASE: "${MYSQL_DATABASE:-ppanel}"
TZ: Asia/Shanghai
command:
- --default-authentication-plugin=mysql_native_password
- --server-id=${MYSQL_REPLICA_ID:-2}
- --relay-log=mysql-relay-bin
- --read-only=ON
- --super-read-only=ON
- --gtid-mode=ON
- --enforce-gtid-consistency=ON
- --log-replica-updates=ON
- --binlog-format=ROW
- --max_connections=1000
- --character-set-server=utf8mb4
- --collation-server=utf8mb4_unicode_ci
volumes:
- mysql_replica_data:/var/lib/mysql
healthcheck:
test: ["CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -uroot -p$${MYSQL_ROOT_PASSWORD}"]
interval: 10s
timeout: 5s
retries: 10
logging:
driver: json-file
options:
max-size: 10m
max-file: "3"
mysql-replica-init:
image: mysql:8.0
container_name: ppanel-mysql-replica-init
profiles:
- replica
restart: "no"
depends_on:
mysql-replica:
condition: service_healthy
environment:
MYSQL_ROOT_PASSWORD: "${MYSQL_ROOT_PASSWORD:?please set MYSQL_ROOT_PASSWORD}"
MYSQL_REPLICATION_USER: "${MYSQL_REPLICATION_USER:-repl}"
MYSQL_REPLICATION_PASSWORD: "${MYSQL_REPLICATION_PASSWORD:?please set MYSQL_REPLICATION_PASSWORD}"
MASTER_HOST: "${MASTER_HOST:?please set MASTER_HOST to the master server ip or hostname}"
MASTER_PORT: "${MASTER_PORT:-3306}"
entrypoint:
- /bin/sh
- -ec
- |
mysql -hmysql-replica -uroot -p"$${MYSQL_ROOT_PASSWORD}" <<SQL
STOP REPLICA;
CHANGE REPLICATION SOURCE TO
SOURCE_HOST='$${MASTER_HOST}',
SOURCE_PORT=$${MASTER_PORT},
SOURCE_USER='$${MYSQL_REPLICATION_USER}',
SOURCE_PASSWORD='$${MYSQL_REPLICATION_PASSWORD}',
SOURCE_AUTO_POSITION=1,
GET_SOURCE_PUBLIC_KEY=1;
START REPLICA;
SQL
configs:
mysql_master_init:
content: |
#!/bin/sh
set -eu
mysql -uroot -p"$${MYSQL_ROOT_PASSWORD}" <<SQL
CREATE USER IF NOT EXISTS '$${MYSQL_REPLICATION_USER}'@'%' IDENTIFIED WITH mysql_native_password BY '$${MYSQL_REPLICATION_PASSWORD}';
GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO '$${MYSQL_REPLICATION_USER}'@'%';
FLUSH PRIVILEGES;
SQL
volumes:
mysql_master_data:
mysql_replica_data:

View File

@ -27,7 +27,7 @@ services:
volumes:
- ./configs:/app/etc
- ./logs:/app/logs
- ./cache:/app/cache # GeoLite2-City.mmdb IP 地理位置数据库
- ./cache:/app/cache # GeoLite2-City.mmdb IP 地理位置数据库
environment:
- TZ=Asia/Shanghai
network_mode: host
@ -57,7 +57,7 @@ services:
container_name: ppanel-mysql
restart: always
ports:
- "3306:3306" # 仅宿主机可访问ppanel-server(host网络)通过127.0.0.1连接
- "3306:3306" # 注意:未加 127.0.0.1 前缀,绑定所有网卡,外部可访问;仅需宿主机访问时请改为 "127.0.0.1:3306:3306"(ppanel-server host 网络通过 127.0.0.1 连接)
environment:
MYSQL_ROOT_PASSWORD: "${MYSQL_ROOT_PASSWORD:?请在 .env 文件中设置 MYSQL_ROOT_PASSWORD}"
MYSQL_DATABASE: "ppanel"
@ -80,7 +80,7 @@ services:
networks:
- ppanel_net
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-uroot", "-p${MYSQL_ROOT_PASSWORD}"]
test: [ "CMD", "mysqladmin", "ping", "-h", "localhost", "-uroot", "-p${MYSQL_ROOT_PASSWORD}" ]
interval: 10s
timeout: 5s
retries: 5
@ -98,11 +98,12 @@ services:
container_name: ppanel-redis
restart: always
ports:
- "127.0.0.1:6379:6379" # 仅宿主机可访问ppanel-server(host网络)通过127.0.0.1连接
- "127.0.0.1:6379:6379" # 仅宿主机可访问ppanel-server(host网络)通过127.0.0.1连接
command:
- redis-server
- --tcp-backlog 65535
- --maxmemory-policy allkeys-lru
- --requirepass hifast67yj # NOTE(review): 密码硬编码在仓库中存在泄露风险,建议改为 --requirepass ${REDIS_PASSWORD:?} 并放入 .env
volumes:
- redis_data:/data
ulimits:
@ -113,7 +114,7 @@ services:
networks:
- ppanel_net
healthcheck:
test: ["CMD", "redis-cli", "ping"]
test: [ "CMD", "redis-cli", "ping" ]
interval: 10s
timeout: 5s
retries: 5
@ -138,7 +139,7 @@ services:
- ./tempo/tempo-config.yaml:/etc/tempo.yaml
- ./tempo_data:/var/tempo
ports:
- "127.0.0.1:4317:4317" # OTLP gRPCppanel-server(host网络)通过127.0.0.1:4317发送trace
- "127.0.0.1:4317:4317" # OTLP gRPCppanel-server(host网络)通过127.0.0.1:4317发送trace
networks:
- ppanel_net
logging:
@ -201,7 +202,7 @@ services:
container_name: ppanel-grafana
restart: always
ports:
- "127.0.0.1:3333:3000" # 仅本机可访问,需 SSH 隧道或 Nginx 反代
- "3333:3000" # 绑定所有网卡,外部可直接访问;如需仅本机访问(配合 SSH 隧道或 Nginx 反代)请使用 "127.0.0.1:3333:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:?请在 .env 文件中设置 GRAFANA_PASSWORD}
- GF_USERS_ALLOW_SIGN_UP=false
@ -229,7 +230,7 @@ services:
container_name: ppanel-prometheus
restart: always
ports:
- "127.0.0.1:9090:9090" # 仅本机可访问
- "127.0.0.1:9090:9090" # 仅本机可访问
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus

View File

@ -137,7 +137,7 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR
return nil, err
}
}
if len(subs) == 0 {
return &types.GetServerUserListResponse{
Users: []types.ServerUser{
@ -194,7 +194,7 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR
val, _ := json.Marshal(resp)
etag := tool.GenerateETag(val)
l.ctx.Header("ETag", etag)
err = l.svcCtx.Redis.Set(l.ctx, cacheKey, string(val), -1).Err()
err = l.svcCtx.Redis.Set(l.ctx, cacheKey, string(val), l.serverUserListCacheTTL()).Err()
if err != nil {
l.Errorw("[ServerUserListCacheKey] redis set error", logger.Field("error", err.Error()))
}
@ -205,6 +205,18 @@ func (l *GetServerUserListLogic) GetServerUserList(req *types.GetServerUserListR
return resp, nil
}
// serverUserListCacheTTL derives the Redis TTL for the cached server user
// list from the configured node pull interval: twice the interval, clamped
// to a floor of one minute so a tiny or unset interval cannot thrash Redis.
func (l *GetServerUserListLogic) serverUserListCacheTTL() time.Duration {
	const defaultPullSeconds = 60

	seconds := l.svcCtx.Config.Node.NodePullInterval
	if seconds <= 0 {
		// Unset or invalid config falls back to the 60s default interval.
		seconds = defaultPullSeconds
	}
	if ttl := time.Duration(seconds*2) * time.Second; ttl >= time.Minute {
		return ttl
	}
	return time.Minute
}
func (l *GetServerUserListLogic) shouldIncludeServerUser(userSub *user.Subscribe, serverNodeGroupIds []int64) bool {
if userSub == nil {
return false
@ -295,6 +307,15 @@ func (l *GetServerUserListLogic) canUseExpiredNodeGroup(userSub *user.Subscribe,
// calculateEffectiveSpeedLimit 计算用户的实际限速值(考虑按量限速规则)
func (l *GetServerUserListLogic) calculateEffectiveSpeedLimit(sub *subscribe.Subscribe, userSub *user.Subscribe) int64 {
result := speedlimit.Calculate(l.ctx.Request.Context(), l.svcCtx.DB, userSub.UserId, userSub.Id, sub.SpeedLimit, sub.TrafficLimit)
result := speedlimit.CalculateWithCache(
l.ctx.Request.Context(),
l.svcCtx.Redis,
l.svcCtx.DB,
userSub.UserId,
userSub.Id,
sub.SpeedLimit,
sub.TrafficLimit,
30*time.Second,
)
return result.EffectiveSpeed
}

View File

@ -2,10 +2,13 @@ package speedlimit
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"time"
"github.com/redis/go-redis/v9"
"gorm.io/gorm"
)
@ -19,13 +22,44 @@ type TrafficLimitRule struct {
// ThrottleResult contains the computed speed limit status for a user subscription.
type ThrottleResult struct {
BaseSpeed int64 `json:"base_speed"` // Plan base speed limit (Mbps, 0=unlimited)
EffectiveSpeed int64 `json:"effective_speed"` // Current effective speed limit (Mbps)
IsThrottled bool `json:"is_throttled"` // Whether the user is currently throttled
ThrottleRule string `json:"throttle_rule"` // Description of the matched rule (empty if not throttled)
UsedTrafficGB float64 `json:"used_traffic_gb"` // Traffic used in the matched rule's window (GB)
ThrottleStart int64 `json:"throttle_start"` // Window start Unix timestamp (seconds), 0 if not throttled
ThrottleEnd int64 `json:"throttle_end"` // Window end Unix timestamp (seconds), 0 if not throttled
BaseSpeed int64 `json:"base_speed"` // Plan base speed limit (Mbps, 0=unlimited)
EffectiveSpeed int64 `json:"effective_speed"` // Current effective speed limit (Mbps)
IsThrottled bool `json:"is_throttled"` // Whether the user is currently throttled
ThrottleRule string `json:"throttle_rule"` // Description of the matched rule (empty if not throttled)
UsedTrafficGB float64 `json:"used_traffic_gb"` // Traffic used in the matched rule's window (GB)
ThrottleStart int64 `json:"throttle_start"` // Window start Unix timestamp (seconds), 0 if not throttled
ThrottleEnd int64 `json:"throttle_end"` // Window end Unix timestamp (seconds), 0 if not throttled
}
// CalculateWithCache computes the effective speed limit, memoizing the result
// in Redis for ttl. It is intended for hot read paths such as node user-list
// pulls, where many nodes ask for the same subscription limits in a short
// period. Caching is skipped (direct Calculate) when the client is nil, the
// ttl is non-positive, or there are no traffic-limit rules to evaluate.
func CalculateWithCache(ctx context.Context, cache *redis.Client, db *gorm.DB, userId, subscribeId, baseSpeedLimit int64, trafficLimitJSON string, ttl time.Duration) *ThrottleResult {
	cacheable := cache != nil && ttl > 0 && trafficLimitJSON != ""
	if !cacheable {
		return Calculate(ctx, db, userId, subscribeId, baseSpeedLimit, trafficLimitJSON)
	}

	key := cacheKey(userId, subscribeId, baseSpeedLimit, trafficLimitJSON)

	// Fast path: serve a previously computed result if it deserializes cleanly.
	if raw, err := cache.Get(ctx, key).Result(); err == nil && raw != "" {
		var hit ThrottleResult
		if json.Unmarshal([]byte(raw), &hit) == nil {
			return &hit
		}
	}

	// Slow path: recompute from the database, then best-effort refresh the
	// cache. A cache write failure is deliberately non-fatal.
	fresh := Calculate(ctx, db, userId, subscribeId, baseSpeedLimit, trafficLimitJSON)
	if payload, err := json.Marshal(fresh); err == nil {
		_ = cache.Set(ctx, key, string(payload), ttl).Err()
	}
	return fresh
}
// ClearCache drops the memoized speed-limit result for a user subscription,
// forcing the next CalculateWithCache call to recompute from the database.
// It is a no-op (nil error) when caching is disabled or the subscription has
// no traffic-limit rules.
func ClearCache(ctx context.Context, cache *redis.Client, userId, subscribeId, baseSpeedLimit int64, trafficLimitJSON string) error {
	if cache == nil || trafficLimitJSON == "" {
		return nil
	}
	key := cacheKey(userId, subscribeId, baseSpeedLimit, trafficLimitJSON)
	return cache.Del(ctx, key).Err()
}
// Calculate computes the effective speed limit for a user subscription,
@ -107,3 +141,8 @@ func Calculate(ctx context.Context, db *gorm.DB, userId, subscribeId, baseSpeedL
return result
}
// cacheKey builds the Redis key for one speed-limit calculation. The
// traffic-limit JSON is folded into a short SHA-256 digest prefix so that
// editing the rules naturally invalidates stale entries while keeping the
// key length bounded.
func cacheKey(userId, subscribeId, baseSpeedLimit int64, trafficLimitJSON string) string {
	digest := sha256.Sum256([]byte(trafficLimitJSON))
	ruleHash := hex.EncodeToString(digest[:8])
	return fmt.Sprintf("speedlimit:%d:%d:%d:%s", userId, subscribeId, baseSpeedLimit, ruleHash)
}

View File

@ -131,9 +131,15 @@ func (l *TrafficStatisticsLogic) ProcessTask(ctx context.Context, task *asynq.Ta
// 写完流量后检查是否触发按量限速,若触发则清除节点缓存使限速立即生效
if planSub, planErr := l.svc.SubscribeModel.FindOne(ctx, sub.SubscribeId); planErr == nil &&
(planSub.SpeedLimit > 0 || planSub.TrafficLimit != "") {
planSub.TrafficLimit != "" {
throttle := speedlimit.Calculate(ctx, l.svc.DB, sub.UserId, sub.Id, planSub.SpeedLimit, planSub.TrafficLimit)
if throttle.IsThrottled {
if delErr := speedlimit.ClearCache(ctx, l.svc.Redis, sub.UserId, sub.Id, planSub.SpeedLimit, planSub.TrafficLimit); delErr != nil {
logger.WithContext(ctx).Error("[TrafficStatistics] Clear speed limit cache failed",
logger.Field("subscribeId", sub.Id),
logger.Field("error", delErr.Error()),
)
}
cacheKey := fmt.Sprintf("%s%d", node.ServerUserListCacheKey, payload.ServerId)
if delErr := l.svc.Redis.Del(ctx, cacheKey).Err(); delErr != nil {
logger.WithContext(ctx).Error("[TrafficStatistics] Clear server user cache failed",