95 Commits

Author SHA1 Message Date
yun 7ab9ac48c2 Redis库升级到V9 2025-10-04 22:00:08 +08:00
yun 793a8da1af 调整一下任务的关闭 2025-10-04 21:33:57 +08:00
yun 62e0d03c9e 上报执行情况 2025-10-04 20:48:08 +08:00
yun 14eb90bf7d 修改集群模式使用封装的方法 2025-10-04 20:44:16 +08:00
yun 81ce4f67d3 添加停止需要释放的资源 2025-10-04 19:00:44 +08:00
yun 737eef2157 优化时间的筛选 2025-10-04 18:51:22 +08:00
yun 2d6e77352f 本地定时任务 2025-10-02 17:54:38 +08:00
yun 304d27e0ac 去掉一个没有用到的库 2025-10-02 16:44:02 +08:00
yun fffe60f975 修改测试文件 2025-09-28 22:59:38 +08:00
yun 62bdf4fcd2 优化部分错误 2025-09-28 22:57:15 +08:00
yun e14305f66c 优化本地运行的定时任务日志打印 2025-09-28 19:49:23 +08:00
Yun 16a392a266 更新 2025-09-24 21:07:06 +08:00
Yun 970a1ca33c 添加全局锁 2025-09-24 17:53:28 +08:00
Yun 84569dc290 添加注释 2025-09-24 17:29:11 +08:00
Yun 85d041753e once使用封装的leader和heartbeat 2025-09-24 17:26:33 +08:00
Yun 0c4e92f164 修改判断的条件 2025-09-24 15:06:33 +08:00
Yun 0be8fd0cdc 调整部分options 2025-09-24 14:50:30 +08:00
Yun 062a39b209 添加一下.gitignore 2025-09-23 11:20:38 +08:00
Yun c50530d4bb 上报添加traceid 2025-09-22 13:51:28 +08:00
Yun e4c453baca 引入一下日志库 2025-09-19 18:52:33 +08:00
Yun 4db3cf81b7 每次执行任务需要上报&调整任务日志打印 2025-09-19 18:43:21 +08:00
Yun f79be3955f 添加心跳相关内容 2025-09-18 21:18:35 +08:00
Yun 549eee700e 时间间隔的需要整块,避免走偏 2025-09-18 16:23:49 +08:00
Yun db1735de30 集群需要判断能否执行reader 2025-09-18 16:23:04 +08:00
Yun a0ec69b7a2 优化策略 2025-09-18 15:34:01 +08:00
Yun 44eeb8468d 优化集群相关代码 2025-09-18 09:56:24 +08:00
Yun 0cff7af265 next_time已经测试完成 2025-09-17 17:09:20 +08:00
yun 28359fbf23 修改部分备注 2025-09-14 23:47:40 +08:00
yun e62cacf1df 更新 2025-09-14 22:44:11 +08:00
yun 464b467868 优化了本地定时器+下次的判断 2025-09-14 19:05:10 +08:00
Yun c351cb084f 优化调整单次任务的执行 2025-08-28 17:45:17 +08:00
Yun 503cabdcbf 优化策略的使用 2025-08-27 15:52:09 +08:00
Yun d39a8b14ee 更新 2025-08-27 14:21:20 +08:00
Yun ddea445188 单次执行的任务类型定义方法 2025-08-27 14:13:17 +08:00
Yun ddc5cd8cb1 Merge branch 'dev' of github.com:yuninks/timerx into dev 2025-08-23 19:00:24 +08:00
Yun bb5b01071a 去掉单例模式,改为每次初始化均生成一个新的实例 2025-08-23 18:55:59 +08:00
yun 2fa0430403 优化优先级策略在各集群中的使用 2025-07-25 22:58:45 +08:00
Yun 2a7092ab0a 调整测试代码 2025-07-24 17:21:11 +08:00
Yun 037d8cf107 调整日志和封装优先级代码 2025-07-24 17:13:17 +08:00
Yun ecaac58926 集群定时器添加优先级 2025-06-11 15:12:08 +08:00
Yun 3716f97eaf 添加一些TODO 2025-05-30 14:16:59 +08:00
Yun c14d65c46a 处理没有traceid的BUG 2025-04-02 20:26:27 +08:00
Yun b5e3b6088b 优化日志 2024-10-15 18:15:09 +08:00
Yun ca0d5a1b99 修复一个单次的BUG 2024-10-11 16:17:22 +08:00
Yun 1ac53f7688 修改附加内容的字段为字节 2024-10-10 10:05:28 +08:00
Yun 4ee3c5e1c2 添加只执行一次的 2024-10-09 19:37:13 +08:00
Yun fa8e3737fa 执行一次的调试 2024-10-09 17:03:54 +08:00
yun cf3e751afe 部分代码尚未完全 2024-07-02 21:18:16 +08:00
Yun 79fda8c78c 添加超时时间的限制 2024-06-22 15:34:49 +08:00
Yun 5ca5b31efb 创建任务不需要验证全局锁 2024-06-19 16:31:51 +08:00
Yun 25b5008af9 修改测试脚本 2024-05-31 13:55:05 +08:00
Yun 3719c417fb 优化传参 2024-05-31 13:05:51 +08:00
Yun ec41fd80a8 修改方法名 2024-05-31 09:52:10 +08:00
Yun c61b82587b 优化关于时区的问题 2024-05-30 11:02:44 +08:00
Yun 6df89da568 修改一些日志提示 2024-05-28 17:36:18 +08:00
Yun 2cc97438b4 回调的ctx添加trace_id 2024-05-28 17:28:20 +08:00
Yun e070933e41 Merge branch 'dev' 2024-05-28 15:03:17 +08:00
Yun bdd0ee714b 优化追加次数 2024-05-28 15:02:57 +08:00
Yun 7863062ad9 Merge commit 'b98a4211162a6e455abbebd29c299d482071e221' 2024-05-28 13:55:34 +08:00
Yun b98a421116 处理消息重入 2024-05-27 20:28:11 +08:00
yun 1382830432 优化once 2024-05-25 20:42:32 +08:00
yun f9c86cb16a 关于Context的优化 2024-05-24 09:55:34 +08:00
yun c9a77c6f38 优化redis的使用 2024-05-23 23:24:18 +08:00
yun 4d769cf997 优化&测试 2024-05-22 15:02:39 +08:00
yun 7912bbc56c 大版本更新 2024-05-20 09:35:12 +08:00
yun 0921514339 添加一些思考的描述 2024-05-18 21:54:26 +08:00
yun ea2ad5b189 添加一些test 2024-05-18 21:54:07 +08:00
yun f46e8e220a 一些优化 2024-05-08 13:01:55 +08:00
yun c3121f425e 修改go.mod为github 2024-05-07 20:26:13 +08:00
yun 170275be3b 优化日志 2024-04-06 19:09:58 +08:00
yun 88056ee8e9 修改mod 2024-04-04 21:57:26 +08:00
yun 4d07ce2c09 优化集群定时器的逻辑 2024-04-04 10:58:57 +08:00
yun 43d2798b41 优化定时器的表述 2023-12-27 17:19:52 +08:00
yun 1beafa934c 优化全局单次定时器的冲突问题 2023-12-27 17:11:33 +08:00
yun 362d1f455a 稳定版本 2023-11-27 22:37:33 +08:00
yun fbb74cdd6d 优化部分逻辑 2023-11-13 23:49:42 +08:00
yun 319d6b6db1 修改公共部分 2023-11-11 17:30:54 +08:00
Administrator 3cc3f0400b 更新.gitlab-ci.yml文件 2023-11-11 09:17:40 +00:00
Administrator 5793afbab7 更新.gitlab-ci.yml文件 2023-11-11 09:13:59 +00:00
Administrator 30093d0717 更新.gitlab-ci.yml文件 2023-11-11 09:10:31 +00:00
Administrator a4a0d86d74 更新.gitlab-ci.yml文件 2023-11-11 09:07:37 +00:00
Administrator 80bd6b4327 更新.gitlab-ci.yml文件 2023-11-11 09:03:01 +00:00
Administrator b9444c8bb6 更新.gitlab-ci.yml文件 2023-11-11 08:58:53 +00:00
Administrator 0dbd1ee9c2 更新.gitlab-ci.yml文件 2023-11-11 08:54:10 +00:00
Administrator aabce29211 更新.gitlab-ci.yml文件 2023-11-11 08:52:18 +00:00
Administrator 7475f9cd3b 更新.gitlab-ci.yml文件 2023-11-11 08:50:10 +00:00
Administrator fabf7f65d7 更新.gitlab-ci.yml文件 2023-11-11 08:47:42 +00:00
Administrator e6915aa766 更新.gitlab-ci.yml文件 2023-11-11 08:40:26 +00:00
Administrator de4a9c8f31 更新.gitlab-ci.yml文件 2023-11-11 08:36:44 +00:00
Administrator 356f843747 更新.gitlab-ci.yml文件 2023-11-11 08:34:32 +00:00
Administrator 52ed316cd1 更新.gitlab-ci.yml文件 2023-11-11 08:28:53 +00:00
Administrator 778bf75650 更新.gitlab-ci.yml文件 2023-11-11 08:16:43 +00:00
Administrator a87fee1f38 更新.gitlab-ci.yml文件 2023-11-11 08:15:06 +00:00
Administrator 1a70738e6d 更新.gitlab-ci.yml文件 2023-11-11 07:02:29 +00:00
yun c929d1a57d 提交 2023-11-10 23:43:32 +08:00
32 changed files with 4867 additions and 910 deletions
+5
View File
@@ -0,0 +1,5 @@
log
cache
test.txt
+24 -25
View File
@@ -1,26 +1,25 @@
# You can override the included template(s) by including variable overrides
# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings
# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings
# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings
# Note that environment variables can be set in several places
# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
stages:
- build
- test
- deploy
- review
- dast
- staging
- canary
- production
- incremental rollout 10%
- incremental rollout 25%
- incremental rollout 50%
- incremental rollout 100%
- performance
- cleanup
sast:
stage: test
include:
- template: Auto-DevOps.gitlab-ci.yml
- deploy
before_script:
- 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
- mkdir -p ~/.ssh
- chmod 700 ~/.ssh
- '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config'
deploy_job:
stage: deploy
script:
- git remote remove github || true
- git remote add github git@github.com:yun-ink/timerx.git
- git remote -v
- git checkout master
- git fsck --full
- git prune
- git gc --prune=now --aggressive
- git push -u github master -f
only:
- master
- tags
+501 -202
View File
@@ -1,269 +1,568 @@
package timer
package timerx
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"runtime/debug"
"strconv"
"sync"
"time"
"code.yun.ink/open/timer/lockx"
"github.com/go-redis/redis/v8"
"github.com/redis/go-redis/v9"
"github.com/google/uuid"
"github.com/yuninks/cachex"
"github.com/yuninks/lockx"
"github.com/yuninks/timerx/heartbeat"
"github.com/yuninks/timerx/leader"
"github.com/yuninks/timerx/logger"
"github.com/yuninks/timerx/priority"
)
// 单例模式
var clusterOnceLimit sync.Once
// 功能描述
// 已注册的任务列表
var clusterWorkerList sync.Map
// 这是基于Redis的定时任务调度器,能够有效的在服务集群里面调度任务,避免了单点压力过高或单点故障问题
// 由于所有的服务代码是一致的,也就是一个定时任务将在所有的服务都有注册,具体调度到哪个服务运行看调度结果
type cluster struct {
ctx context.Context
redis *redis.Client
lockKey string // 全局计算的key
nextKey string // 下一次执行的key
zsetKey string // 有序集合的key
listKey string // 可执行的任务列表的key
type Cluster struct {
ctx context.Context // context
cancel context.CancelFunc // 取消函数
redis redis.UniversalClient // redis
timeout time.Duration // job执行超时时间
logger logger.Logger // 日志
keyPrefix string // key前缀
location *time.Location // 根据时区计算的时间
lockKey string // 全局计算的key
zsetKey string // 有序集合的key
listKey string // 可执行的任务列表的key
setKey string // 重入集合的key
executeInfoKey string // 执行情况的key
wg sync.WaitGroup // 等待组
workerList sync.Map // 注册的任务列表
stopChan chan struct{} //
instanceId string // 实例ID
priority *priority.Priority // 全局优先级
priorityKey string // 全局优先级的key
usePriority bool // 是否使用优先级
leader *leader.Leader // Leader
heartbeat *heartbeat.HeartBeat // 心跳
cache *cachex.Cache // 本地缓存
}
var clu *cluster = nil
// 初始化定时器
// 全局只需要初始化一次
func InitCluster(ctx context.Context, red redis.UniversalClient, keyPrefix string, opts ...Option) (*Cluster, error) {
func InitCluster(ctx context.Context, red *redis.Client) *cluster {
clusterOnceLimit.Do(func() {
clu = &cluster{
ctx: ctx,
redis: red,
lockKey: "timer:cluster_globalLockKey",
nextKey: "timer:cluster_nextKey",
zsetKey: "timer:cluster_zsetKey",
listKey: "timer:cluster_listKey",
}
// 监听任务
go clu.watch()
timer := time.NewTicker(time.Millisecond * 200)
go func(ctx context.Context, red *redis.Client) {
Loop:
for {
select {
case <-timer.C:
clu.getTask()
clu.getNextTime()
case <-ctx.Done():
break Loop
}
}
}(ctx, red)
})
return clu
}
func (c *cluster) AddTimer(ctx context.Context, uniqueKey string, spaceTime time.Duration, callback callback, extend ExtendParams) error {
_, ok := clusterWorkerList.Load(uniqueKey)
if ok {
return errors.New("key已存在")
if red == nil {
return nil, errors.New("redis is nil")
}
if spaceTime != spaceTime.Abs() {
return errors.New("时间间隔不能为负数")
}
op := newOptions(opts...)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
lock := lockx.NewGlobalLock(ctx, c.redis, uniqueKey)
tB := lock.Try(10)
if !tB {
return errors.New("添加失败")
U, _ := uuid.NewV7()
clu := &Cluster{
ctx: ctx,
cancel: cancel,
redis: red,
cache: cachex.NewCache(),
timeout: op.timeout,
logger: op.logger,
keyPrefix: keyPrefix,
location: op.location,
lockKey: "timer:cluster_globalLockKey" + keyPrefix, // 定时器的全局锁
zsetKey: "timer:cluster_zsetKey" + keyPrefix, // 有序集合
listKey: "timer:cluster_listKey" + keyPrefix, // 列表
setKey: "timer:cluster_setKey" + keyPrefix, // 重入集合
priorityKey: "timer:cluster_priorityKey" + keyPrefix, // 全局优先级的key
executeInfoKey: "timer:cluster_executeInfoKey" + keyPrefix, // 执行情况的key 有序集合
usePriority: op.usePriority,
stopChan: make(chan struct{}),
instanceId: U.String(),
}
defer lock.Unlock()
nowTime := time.Now()
// 初始化优先级
if clu.usePriority {
pri, err := priority.InitPriority(
ctx,
red,
clu.priorityKey,
op.priorityVal,
priority.WithLogger(clu.logger),
priority.WithInstanceId(clu.instanceId),
priority.WithSource("cluster"),
)
if err != nil {
clu.logger.Errorf(ctx, "InitPriority err:%v", err)
return nil, err
}
clu.priority = pri
}
// 初始化leader
le, err := leader.InitLeader(
ctx,
clu.redis,
keyPrefix,
leader.WithLogger(clu.logger),
leader.WithPriority(clu.priority),
leader.WithInstanceId(clu.instanceId),
leader.WithSource("cluster"),
)
if err != nil {
clu.logger.Infof(ctx, "InitLeader err:%v", err)
return nil, err
}
clu.leader = le
// 初始化心跳
heart, err := heartbeat.InitHeartBeat(
ctx,
clu.redis,
clu.keyPrefix,
heartbeat.WithInstanceId(clu.instanceId),
heartbeat.WithLeader(clu.leader),
heartbeat.WithLogger(clu.logger),
heartbeat.WithPriority(clu.priority),
heartbeat.WithSource("once"),
)
if err != nil {
clu.logger.Errorf(ctx, "InitHeartBeat err:%v", err)
return nil, err
}
clu.heartbeat = heart
// 启动守护进程
clu.startDaemon()
clu.logger.Infof(ctx, "InitCluster success keyPrefix:%s instanceId:%s", clu.keyPrefix, clu.instanceId)
return clu, nil
}
// Stop 停止集群定时器
func (l *Cluster) Stop() {
close(l.stopChan)
if l.usePriority && l.priority != nil {
l.priority.Close()
}
if l.leader != nil {
l.leader.Close()
}
if l.heartbeat != nil {
l.heartbeat.Close()
}
if l.cancel != nil {
l.cancel()
}
l.wg.Wait()
}
// 守护任务
func (l *Cluster) startDaemon() {
// 任务调度
l.wg.Add(1)
go l.scheduleTasks()
// 任务执行
l.wg.Add(1)
go l.executeTasks()
l.wg.Add(1)
go l.cleanExecuteInfoLoop()
}
func (l *Cluster) cleanExecuteInfoLoop() {
l.wg.Done()
ticker := time.NewTicker(time.Minute * 5)
defer ticker.Stop()
for {
select {
case <-l.stopChan:
return
case <-l.ctx.Done():
return
case <-ticker.C:
if l.leader.IsLeader() {
l.cleanExecuteInfo()
}
}
}
}
// 清除过期任务
func (l *Cluster) cleanExecuteInfo() error {
// 移除执行信息
l.redis.ZRemRangeByScore(l.ctx, l.executeInfoKey, "0", strconv.FormatInt(time.Now().Add(-15*time.Minute).UnixMilli(), 10)).Err()
return nil
}
// scheduleTasks 调度任务(只有leader执行)
func (c *Cluster) scheduleTasks() {
defer c.wg.Done()
ticker := time.NewTicker(200 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if !c.leader.IsLeader() {
continue
}
if c.usePriority && !c.priority.IsLatest(c.ctx) {
continue
}
c.calculateNextTimes()
c.moveReadyTasks()
case <-c.stopChan:
return
case <-c.ctx.Done():
return
}
}
}
// 每月执行一次
// @param ctx 上下文
// @param taskId 任务ID
// @param day 每月的几号
// @param hour 小时
// @param minute 分钟
// @param second 秒
// @param callback 回调函数
// @param extendData 扩展数据
// @return error
func (c *Cluster) EveryMonth(ctx context.Context, taskId string, day int, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
// nowTime := time.Now().In(c.location)
jobData := JobData{
JobType: JobTypeEveryMonth,
// CreateTime: nowTime,
Day: day,
Hour: hour,
Minute: minute,
Second: second,
}
return c.addJob(ctx, taskId, jobData, callback, extendData)
}
// 每周执行一次
// @param ctx context.Context 上下文
// @param taskId string 任务ID
// @param week time.Weekday 周
// @param hour int 小时
// @param minute int 分钟
// @param second int 秒
func (c *Cluster) EveryWeek(ctx context.Context, taskId string, week time.Weekday, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
// nowTime := time.Now().In(c.location)
jobData := JobData{
JobType: JobTypeEveryWeek,
// CreateTime: nowTime,
Weekday: week,
Hour: hour,
Minute: minute,
Second: second,
}
return c.addJob(ctx, taskId, jobData, callback, extendData)
}
// 每天执行一次
func (c *Cluster) EveryDay(ctx context.Context, taskId string, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
// nowTime := time.Now().In(c.location)
jobData := JobData{
JobType: JobTypeEveryDay,
// CreateTime: nowTime,
Hour: hour,
Minute: minute,
Second: second,
}
return c.addJob(ctx, taskId, jobData, callback, extendData)
}
// 每小时执行一次
func (c *Cluster) EveryHour(ctx context.Context, taskId string, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
// nowTime := time.Now().In(c.location)
jobData := JobData{
JobType: JobTypeEveryHour,
// CreateTime: nowTime,
Minute: minute,
Second: second,
}
return c.addJob(ctx, taskId, jobData, callback, extendData)
}
// 每分钟执行一次
func (c *Cluster) EveryMinute(ctx context.Context, taskId string, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
// nowTime := time.Now().In(c.location)
jobData := JobData{
JobType: JobTypeEveryMinute,
// CreateTime: nowTime,
Second: second,
}
return c.addJob(ctx, taskId, jobData, callback, extendData)
}
// 特定时间间隔
func (c *Cluster) EverySpace(ctx context.Context, taskId string, spaceTime time.Duration, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
nowTime := time.Now().In(c.location)
if spaceTime < 0 {
c.logger.Errorf(ctx, "间隔时间不能小于0")
return errors.New("间隔时间不能小于0")
}
// 获取当天的零点时间
zeroTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), 0, 0, 0, 0, nowTime.Location())
jobData := JobData{
JobType: JobTypeInterval,
BaseTime: zeroTime, // 默认当天的零点
IntervalTime: spaceTime,
}
return c.addJob(ctx, taskId, jobData, callback, extendData)
}
// 统一添加任务
// @param ctx context.Context 上下文
// @param taskId string 任务ID
// @param jobData *JobData 任务数据
// @param callback callback 回调函数
// @param extendData interface{} 扩展数据
// @return error
func (l *Cluster) addJob(ctx context.Context, taskId string, jobData JobData, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
// 判断是否重复
_, ok := l.workerList.Load(taskId)
if ok {
l.logger.Errorf(ctx, "Cluster addJob taskId exits:%s", taskId)
return ErrTaskIdExists
}
// 校验时间是否合法
_, err := GetNextTime(time.Now().In(l.location), jobData)
if err != nil {
l.logger.Errorf(ctx, "Cluster addJob GetNextTime err:%s", err.Error())
return err
}
t := timerStr{
BeginTime: nowTime,
NextTime: nowTime,
SpaceTime: spaceTime,
Callback: callback,
Extend: extend,
UniqueKey: uniqueKey,
Callback: callback,
ExtendData: extendData,
TaskId: taskId,
JobData: &jobData,
}
clusterWorkerList.Store(uniqueKey, t)
l.workerList.Store(taskId, t)
cacheStr, _ := c.redis.Get(ctx, c.nextKey).Result()
execTime := make(map[string]time.Time)
json.Unmarshal([]byte(cacheStr), &execTime)
l.logger.Infof(ctx, "Cluster addJob taskId:%s", taskId)
p := c.redis.Pipeline()
p.ZAdd(ctx, c.zsetKey, &redis.Z{
Score: float64(nextTime.UnixMilli()),
Member: uniqueKey,
})
execTime[uniqueKey] = nextTime
n, _ := json.Marshal(execTime)
// fmt.Println("execTime:", execTime, string(n))
p.Set(ctx, c.nextKey, string(n), 0)
_, err := p.Exec(ctx)
// fmt.Println("添加", err)
return err
return nil
}
// 计算下一次执行的时间
func (c *cluster) getNextTime() {
func (l *Cluster) calculateNextTimes() {
// log.Println("begin computer")
ctx, cancel := context.WithCancel(c.ctx)
defer cancel()
pipe := l.redis.Pipeline()
lock := lockx.NewGlobalLock(ctx, c.redis, c.lockKey)
// 获取锁
lockBool := lock.Lock()
if !lockBool {
// log.Println("timer:获取锁失败")
return
}
defer lock.Unlock()
// 计算下一次时间
// 读取执行的缓存
cacheStr, _ := c.redis.Get(ctx, c.nextKey).Result()
execTime := make(map[string]time.Time)
json.Unmarshal([]byte(cacheStr), &execTime)
p := c.redis.Pipeline()
nowTime := time.Now()
clusterWorkerList.Range(func(key, value interface{}) bool {
// 根据内部注册的任务列表计算下一次执行的时间
l.workerList.Range(func(key, value interface{}) bool {
val := value.(timerStr)
beforeTime := execTime[val.UniqueKey]
if beforeTime.After(nowTime) {
nextTime, err := GetNextTime(time.Now().In(l.location), *val.JobData)
if err != nil {
l.logger.Errorf(l.ctx, "Cluster calculateNextTimes GetNextTime err:%s %s", val.TaskId, err.Error())
return true
}
nextTime := getNextExecTime(beforeTime, val.SpaceTime)
execTime[val.UniqueKey] = nextTime
p.ZAdd(ctx, c.zsetKey, &redis.Z{
Score: float64(nextTime.UnixMilli()),
Member: val.UniqueKey,
})
// log.Println("computeTime add", c.zsetKey, val.UniqueKey, nextTime.UnixMilli())
// l.logger.Infof(l.ctx, "Cluster calculateNextTimes GetNextTime nextTime:%s %s", val.TaskId, nextTime.Format(time.RFC3339))
// 使用Lua脚本原子性添加任务
script := `
local zsetKey = KEYS[1]
local lockKey = KEYS[2]
local score = ARGV[1]
local taskID = ARGV[2]
local expireTime = ARGV[3]
-- 检查是否已存在
local existing = redis.call('zscore', zsetKey, taskID)
if existing and tonumber(existing) <= tonumber(score) then
return 0
end
-- 设置NX锁避免重复计算
local lockAcquired = redis.call('set', lockKey, 1, 'NX', 'EX', expireTime)
if not lockAcquired then
return 0
end
redis.call('zadd', zsetKey, score, taskID)
return 1
`
lockKey := fmt.Sprintf("%s_%s_%d", l.keyPrefix, val.TaskId, nextTime.UnixMilli())
_, err = pipe.Eval(l.ctx, script, []string{l.zsetKey, lockKey},
nextTime.UnixMilli(), val.TaskId, 60).Result()
if err != nil {
l.logger.Errorf(l.ctx, "Failed to schedule task: %v", err)
}
return true
})
// 更新缓存
b, _ := json.Marshal(execTime)
p.Set(ctx, c.nextKey, string(b), 0)
_, err := p.Exec(ctx)
_ = err
_, err := pipe.Exec(l.ctx)
if err != nil {
l.logger.Errorf(l.ctx, "Cluster Failed to schedule task: %v", err)
}
}
// 递归遍历获取执行时间
func getNextExecTime(beforeTime time.Time, spaceTime time.Duration) time.Time {
nowTime := time.Now()
if beforeTime.After(nowTime) {
return beforeTime
}
nextTime := beforeTime.Add(spaceTime)
if nextTime.Before(nowTime) {
nextTime = getNextExecTime(nextTime, spaceTime)
}
return nextTime
}
// 获取任务
func (c *cluster) getTask() {
// 定时去Redis获取任务
// zb := redis.ZRangeBy{
// Min: "0",
// Max: fmt.Sprintf("%+v", time.Now().UnixMilli()),
// }
// taskList, _ := c.redis.ZRangeByScore(c.ctx, c.zsetKey, &zb).Result()
// if len(taskList) == 0 {
// return
// }
// p := c.redis.Pipeline()
// for _, val := range taskList {
// // 添加到可执行队列
// p.LPush(c.ctx, c.listKey, val)
// // 删除有序集合
// p.ZRem(c.ctx, c.zsetKey, val)
// }
// _, err := p.Exec(c.ctx)
// // fmt.Println(err)
// _ = err
// moveReadyTasks 移动就绪任务到执行列表
func (c *Cluster) moveReadyTasks() {
script := `
local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])
for i,v in ipairs(token) do
redis.call('zrem',KEYS[1],v)
redis.call('lpush',KEYS[2],v)
local zsetKey = KEYS[1]
local listKey = KEYS[2]
local maxTime = ARGV[1]
local limit = ARGV[2]
local tasks = redis.call('zrangebyscore', zsetKey, 0, maxTime, 'LIMIT', 0, limit)
for i, taskID in ipairs(tasks) do
redis.call('zrem', zsetKey, taskID)
redis.call('lpush', listKey, taskID)
end
return "OK"
return #tasks
`
c.redis.Eval(c.ctx, script, []string{c.zsetKey, c.listKey}, 0, time.Now().UnixMilli()).Result()
result, err := c.redis.Eval(c.ctx, script, []string{c.zsetKey, c.listKey},
time.Now().UnixMilli(), 100).Result()
if err != nil && err != redis.Nil {
c.logger.Errorf(c.ctx, "Failed to move ready tasks: %v", err)
return
}
if count, ok := result.(int64); ok && count > 0 {
c.logger.Infof(c.ctx, "Cluster moveReadyTasks Moved %d tasks to ready list", count)
}
}
// executeTasks 执行任务
func (c *Cluster) executeTasks() {
defer c.wg.Done()
for {
select {
case <-c.stopChan:
return
case <-c.ctx.Done():
return
default:
if c.usePriority && !c.priority.IsLatest(c.ctx) {
time.Sleep(5 * time.Second)
continue
}
taskID, err := c.redis.BLPop(c.ctx, 10*time.Second, c.listKey).Result()
if err != nil {
if err != redis.Nil {
c.logger.Errorf(c.ctx, "Failed to pop task: %v", err)
// Redis 异常,休眠一会儿
time.Sleep(5 * time.Second)
}
continue
}
if len(taskID) < 2 {
continue
}
go c.processTask(taskID[1])
}
}
}
// 监听任务
func (c *cluster) watch() {
// 执行任务
for {
keys, err := c.redis.BLPop(c.ctx, time.Second*10, c.listKey).Result()
if err != nil {
fmt.Println("watch err:", err)
continue
}
go doTask(c.ctx, c.redis, keys[1])
}
type ReJobData struct {
TaskId string
Times int
}
// 执行任务
func doTask(ctx context.Context, red *redis.Client, taskId string) {
ctx, cancel := context.WithCancel(ctx)
func (l *Cluster) processTask(taskId string) {
begin := time.Now()
ctx, cancel := context.WithTimeout(l.ctx, l.timeout)
defer cancel()
defer func() {
if err := recover(); err != nil {
fmt.Println("timer:定时器出错", err)
log.Println("errStack", string(debug.Stack()))
}
}()
u, _ := uuid.NewV7()
val, ok := clusterWorkerList.Load(taskId)
ctx = context.WithValue(ctx, "trace_id", u.String())
l.logger.Infof(ctx, "doTask timer begin taskId:%s", taskId)
// 上报执行情况
executeVal := fmt.Sprintf("tid:%s|insId:%s|uuid:%s|time:%s", taskId, l.instanceId, u.String(), begin.Format(time.RFC3339Nano))
l.redis.ZAdd(ctx, l.executeInfoKey, redis.Z{
Score: float64(begin.UnixMilli()),
Member: executeVal,
})
val, ok := l.workerList.Load(taskId)
if !ok {
fmt.Println("doTask timer:任务不存在", taskId)
l.logger.Errorf(ctx, "doTask timer:任务不存在:%s", taskId)
return
}
t, ok := val.(timerStr)
if !ok {
l.logger.Errorf(ctx, "doTask timer:任务不存在:%s", taskId)
return
}
t := val.(timerStr)
// 这里加一个全局锁
lock := lockx.NewGlobalLock(ctx, red, taskId)
tB := lock.Lock()
if !tB {
fmt.Println("doTask timer:获取锁失败", taskId)
lock, err := lockx.NewGlobalLock(ctx, l.redis, taskId)
if err != nil {
l.logger.Errorf(ctx, "doTask timer:获取锁失败:%s", taskId)
return
}
if b, err := lock.Lock(); !b {
l.logger.Errorf(ctx, "doTask timer:获取锁失败:%s %+v", taskId, err)
return
}
defer lock.Unlock()
ctx = context.WithValue(ctx, extendParamKey, t.Extend)
defer func() {
if err := recover(); err != nil {
l.logger.Errorf(ctx, "doTask timer:回调任务panic err:%+v stack:%s", err, string(debug.Stack()))
}
l.logger.Infof(ctx, "doTask timer:执行任务耗时:%s %dms", taskId, time.Since(begin).Milliseconds())
}()
// 执行任务
t.Callback(ctx)
if err := t.Callback(ctx, t.ExtendData); err != nil {
l.logger.Errorf(ctx, "doTask timer:执行任务失败:%s %+v", taskId, err)
return
}
}
+184
View File
@@ -0,0 +1,184 @@
package timerx_test
import (
"context"
"fmt"
"testing"
"time"
"github.com/redis/go-redis/v9"
"github.com/yuninks/timerx"
)
func redisInit() *redis.Client {
return redis.NewClient(&redis.Options{
Addr: "localhost:6379",
Password: "123456",
DB: 0,
})
}
func TestCluster_AddEveryMonth(t *testing.T) {
ctx := context.Background()
redis := redisInit()
defer redis.Close()
cluster, err := timerx.InitCluster(ctx, redis, "test")
if err != nil {
t.Errorf("InitCluster failed, err: %v", err)
return
}
defer cluster.Stop()
taskId := "testTask"
hour := 2
minute := 3
second := 4
callback := func(ctx context.Context, data interface{}) error {
// do something
fmt.Println("Task executed:", data)
return nil
}
extendData := "testData"
err = cluster.EveryMonth(ctx, taskId, 1, hour, minute, second, callback, extendData)
if err != nil {
t.Errorf("AddEveryMonth failed, err: %v", err)
}
time.Sleep(time.Second * 10)
// TODO: verify the job is added to the cluster and can be executed at the specified time
}
func TestCluster_AddEveryWeek(t *testing.T) {
ctx := context.Background()
redis := redisInit()
defer redis.Close()
cluster, _ := timerx.InitCluster(ctx, redis, "test")
taskId := "testTask"
week := time.Sunday
hour := 2
minute := 3
second := 4
callback := func(ctx context.Context, data interface{}) error {
// do something
fmt.Println("Task executed:", data)
return nil
}
extendData := "testData"
err := cluster.EveryWeek(ctx, taskId, week, hour, minute, second, callback, extendData)
if err != nil {
t.Errorf("AddEveryWeek failed, err: %v", err)
}
// TODO: verify the job is added to the cluster and can be executed at the specified time
}
func TestCluster_AddEveryDay(t *testing.T) {
ctx := context.Background()
redis := redisInit()
defer redis.Close()
cluster, _ := timerx.InitCluster(ctx, redis, "test")
taskId := "testTask"
hour := 2
minute := 3
second := 4
callback := func(ctx context.Context, data interface{}) error {
// do something
fmt.Println("Task executed:", data)
return nil
}
extendData := "testData"
err := cluster.EveryDay(ctx, taskId, hour, minute, second, callback, extendData)
if err != nil {
t.Errorf("AddEveryDay failed, err: %v", err)
}
// TODO: verify the job is added to the cluster and can be executed at the specified time
}
func TestCluster_AddEveryHour(t *testing.T) {
ctx := context.Background()
redis := redisInit()
defer redis.Close()
cluster, _ := timerx.InitCluster(ctx, redis, "test")
taskId := "testTask"
minute := 3
second := 4
callback := func(ctx context.Context, data interface{}) error {
// do something
fmt.Println("Task executed:", data)
return nil
}
extendData := "testData"
err := cluster.EveryHour(ctx, taskId, minute, second, callback, extendData)
if err != nil {
t.Errorf("AddEveryHour failed, err: %v", err)
}
// TODO: verify the job is added to the cluster and can be executed at the specified time
}
func TestCluster_AddEveryMinute(t *testing.T) {
ctx := context.Background()
redis := redisInit()
defer redis.Close()
cluster, _ := timerx.InitCluster(ctx, redis, "test")
taskId := "testTask"
second := 4
callback := func(ctx context.Context, data interface{}) error {
// do something
fmt.Println("Task executed:", data)
return nil
}
extendData := "testData"
err := cluster.EveryMinute(ctx, taskId, second, callback, extendData)
if err != nil {
t.Errorf("AddEveryMinute failed, err: %v", err)
}
// TODO: verify the job is added to the cluster and can be executed at the specified time
}
func TestCluster_Add(t *testing.T) {
fmt.Println("66666")
ctx := context.Background()
fmt.Println("66666")
redis := redisInit()
defer redis.Close()
t.Log("6666")
cluster, _ := timerx.InitCluster(ctx, redis, "test")
taskId := "testTask"
dur := time.Second
callback := func(ctx context.Context, data interface{}) error {
// do something
fmt.Println("Task executed:", data)
return nil
}
extendData := "testData"
err := cluster.EverySpace(ctx, taskId, dur, callback, extendData)
if err != nil {
t.Errorf("Add failed,1 err: %v", err)
}
time.Sleep(time.Second * 20)
// TODO: verify the job is added to the cluster and can be executed after the specified duration
}
+190 -85
View File
@@ -3,10 +3,12 @@ package main
import (
"context"
"fmt"
"os"
"time"
"code.yun.ink/open/timer"
"github.com/go-redis/redis/v8"
"github.com/redis/go-redis/v9"
"github.com/yuninks/timerx"
"github.com/yuninks/timerx/priority"
)
func main() {
@@ -24,46 +26,175 @@ func main() {
// re()
// d()
worker()
cluster()
// once()
// prioritys()
select {}
}
func prioritys() {
client := getRedis()
ctx := context.Background()
pro, _ := priority.InitPriority(ctx, client, "test", 10)
for {
b := pro.IsLatest(ctx)
fmt.Println("isLatest", b)
time.Sleep(time.Millisecond * 100)
}
}
func once() {
client := getRedis()
ctx := context.Background()
w := OnceWorker{}
ver, err := priority.PriorityByVersion("v2.2.3.4.5")
if err != nil {
panic(err)
}
ops := []timerx.Option{
timerx.WithPriority(ver),
}
one, err := timerx.InitOnce(ctx, client, "test_once", w, ops...)
if err != nil {
panic(err)
}
d := OnceData{
Num: 3,
}
// dy, _ := json.Marshal(d)
err = one.Create("test", "test3", 1*time.Second, d)
if err != nil {
fmt.Println(err)
}
// d = OnceData{
// Num: 4,
// }
// dd := 123
// dy, _ = json.Marshal(d)
// err = one.Save("test", "test4", 2*time.Second, dd)
// if err != nil {
// fmt.Println(err)
// }
// err = one.Save("test", "test5", 5*time.Second, dd)
// if err != nil {
// fmt.Println(err)
// }
}
type OnceData struct {
Num int
}
type OnceWorker struct{}
func (l OnceWorker) Worker(ctx context.Context, taskType timerx.OnceTaskType, taskId string, attachData interface{}) *timerx.OnceWorkerResp {
// 追加写入文件
file, err := os.OpenFile("./test.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
panic(err)
}
defer file.Close()
file.WriteString(fmt.Sprintf("执行时间:%s\n", time.Now().Format("2006-01-02 15:04:05")))
fmt.Println("执行时间:", time.Now().Format("2006-01-02 15:04:05"))
// fmt.Println(taskType, taskId)
// fmt.Printf("原来的参数:%+v %T\n", attachData, attachData)
// v, ok := attachData.(int64)
// fmt.Println("vvvvvvv", v, ok)
// fmt.Printf()
// d := OnceData{}
// json.Unmarshal(ab, &d)
// d.Num++
// fmt.Println(d)
// dy, _ := json.Marshal(d)
return &timerx.OnceWorkerResp{
Retry: true,
AttachData: attachData,
DelayTime: time.Second,
}
}
func cluster() {
client := getRedis()
ctx := context.Background()
// log := loggerx.NewLogger(ctx,loggerx.SetToConsole(),loggerx.SetEscapeHTML(false))
// _ = log
cluster, _ := timerx.InitCluster(ctx, client, "test", timerx.WithPriority(103))
err := cluster.EverySpace(ctx, "test_space1", 1*time.Second, aa, "这是秒任务1")
fmt.Println(err)
err = cluster.EverySpace(ctx, "test_space2", 2*time.Second, aa, "这是秒任务2")
fmt.Println(err)
err = cluster.EverySpace(ctx, "test_space3", 5*time.Second, aa, "这是秒任务3")
fmt.Println(err)
err = cluster.EveryMinute(ctx, "test_min1", 15, aa, "这是分钟任务1")
fmt.Println(err)
err = cluster.EveryMinute(ctx, "test_min2", 30, aa, "这是分钟任务2")
fmt.Println(err)
err = cluster.EveryHour(ctx, "test_hour1", 30, 0, aa, "这是小时任务1")
fmt.Println(err)
err = cluster.EveryHour(ctx, "test_hour2", 30, 15, aa, "这是小时任务2")
fmt.Println(err)
err = cluster.EveryDay(ctx, "test_day1", 5, 0, 0, aa, "这是天任务1")
fmt.Println(err)
err = cluster.EveryDay(ctx, "test_day2", 9, 20, 0, aa, "这是天任务2")
fmt.Println(err)
err = cluster.EveryDay(ctx, "test_day3", 10, 30, 30, aa, "这是天任务3")
fmt.Println(err)
}
// worker registers five identical demo jobs (differing only by unique key)
// on a timer worker and then blocks forever so the worker keeps running.
func worker() {
	client := getRedis()
	w := timer.InitWorker(context.Background(), client, &Worker{})
	for _, key := range []string{"test", "test2", "test3", "test4", "test5"} {
		w.Add(key, "test", 1*time.Second, map[string]interface{}{
			"test": "test",
		})
	}
	// Block the goroutine indefinitely; the worker runs in the background.
	select {}
}
// Worker is a demo implementation of the timer worker callback interface.
type Worker struct{}
// Worker is the timer callback for the demo Worker type: it logs the firing
// time, the job identifiers and the payload, then asks the scheduler to run
// the job again.
func (wk *Worker) Worker(uniqueKey string, jobType string, data map[string]interface{}) timer.WorkerCode {
	now := time.Now().Format("2006-01-02 15:04:05")
	fmt.Println("执行时间:", now)
	fmt.Println(uniqueKey, jobType)
	fmt.Println(data)
	return timer.WorkerCodeAgain
}
func getRedis() *redis.Client {
client := redis.NewClient(&redis.Options{
Addr: "127.0.0.1" + ":" + "6379",
Password: "", // no password set
DB: 0, // use default DB
Password: "123456", // no password set
DB: 0, // use default DB
})
if client == nil {
panic("redis init error")
@@ -76,63 +207,37 @@ func re() {
client := getRedis()
ctx := context.Background()
cl := timer.InitCluster(ctx, client)
cl.AddTimer(ctx, "test1", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text1",
},
})
cl.AddTimer(ctx, "test2", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text2",
},
})
cl.AddTimer(ctx, "test3", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text3",
},
})
cl.AddTimer(ctx, "test4", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text4",
},
})
cl.AddTimer(ctx, "test5", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text5",
},
})
cl.AddTimer(ctx, "test6", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text6",
},
})
cl.AddTimer(ctx, "test7", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text7",
},
})
cl.AddTimer(ctx, "test8", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text8",
},
})
cl.AddTimer(ctx, "test9", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text9",
},
})
cl, _ := timerx.InitCluster(ctx, client, "kkkk")
cl.EverySpace(ctx, "test1", 1*time.Millisecond, aa, "data")
cl.EverySpace(ctx, "test2", 1*time.Millisecond, aa, "data")
cl.EverySpace(ctx, "test3", 1*time.Millisecond, aa, "data")
cl.EverySpace(ctx, "test4", 1*time.Millisecond, aa, "data")
cl.EverySpace(ctx, "test5", 1*time.Millisecond, aa, "data")
cl.EverySpace(ctx, "test6", 1*time.Millisecond, aa, "data")
select {}
}
func aa(ctx context.Context) bool {
// fmt.Println(time.Now().Format(time.RFC3339))
// fmt.Println("gggggggggggggggggggggggggggg")
a, err := timer.GetExtendParams(ctx)
fmt.Printf("%+v %+v \n\n", a, err)
time.Sleep(time.Second * 5)
return true
func aa(ctx context.Context, data interface{}) error {
fmt.Println("-执行时间:", data, time.Now().Format("2006-01-02 15:04:05"))
// fmt.Println(data)
// time.Sleep(time.Second * 5)
// 追加到文件
file, err := os.OpenFile("./test.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
fmt.Println("打开文件失败:", err)
return err
}
defer file.Close()
_, err = file.WriteString(fmt.Sprintf("-执行时间:%v %s\n", data, time.Now().Format("2006-01-02 15:04:05")))
if err != nil {
fmt.Println("写入文件失败:", err)
return err
}
return nil
}
func d() {
@@ -147,7 +252,7 @@ func d() {
return
}
client.ZAdd(context.Background(), "lockx:test2", &redis.Z{
client.ZAdd(context.Background(), "lockx:test2", redis.Z{
Score: 50,
Member: "test",
})
+32
View File
@@ -0,0 +1,32 @@
package timerx
import "errors"
// Sentinel errors shared across the timerx package. Compare with errors.Is.
var (
	// ErrTimerNotFound: the referenced timer does not exist.
	ErrTimerNotFound = errors.New("timer not found")
	// ErrTaskIdEmpty: a task id is required but was empty.
	ErrTaskIdEmpty = errors.New("taskId can not be empty")
	// ErrMonthDay: day-of-month is outside the accepted range.
	ErrMonthDay = errors.New("month day must be between 0 and 31")
	// ErrHour: hour must be within 0-23.
	ErrHour = errors.New("hour must be between 0 and 23")
	// ErrMinute: minute must be within 0-59.
	ErrMinute = errors.New("minute must be between 0 and 59")
	// ErrSecond: second must be within 0-59.
	ErrSecond = errors.New("second must be between 0 and 59")
	// ErrCallbackEmpty: a callback function is required.
	ErrCallbackEmpty = errors.New("callback can not be empty")
	// ErrWeekday: weekday must be between Sunday and Saturday.
	ErrWeekday = errors.New("weekday must be between Sunday and Saturday")
	// ErrCreateTime: a create time is required.
	ErrCreateTime = errors.New("create time can not be empty")
	// ErrBaseTime: a base time is required.
	ErrBaseTime = errors.New("base time can not be empty")
	// ErrIntervalTime: the interval must be greater than zero.
	ErrIntervalTime = errors.New("interval time must be greater than 0")
	// ErrTaskIdExists: the task id is already registered.
	ErrTaskIdExists = errors.New("taskId already exists")
	// ErrTaskExecuted: the task has already been executed.
	ErrTaskExecuted = errors.New("task already executed")
)
-48
View File
@@ -1,48 +0,0 @@
package timer
import (
"fmt"
"testing"
"github.com/go-redis/redis/v8"
)
// 示例测试
// func exampleDemo(ctx context.Context) bool {
// fmt.Println("fff")
// return false
// }
// func ExampleB() {
// ctx := context.Background()
// timer.InitSingle(ctx)
// timer.AddToTimer(1, exampleDemo)
// // OutPut:
// }
func TestMain(m *testing.M) {
client := redis.NewClient(&redis.Options{
Addr: "127.0.0.1" + ":" + "6379",
Password: "", // no password set
DB: 0, // use default DB
})
if client == nil {
fmt.Println("redis init error")
return
}
// Redis = client
}
func TestRedis(t *testing.T) {
fmt.Println("6666")
t.Log("fffff")
// t.Fail()
// t.Error("ffff")
// Redis.Set(context.Background(), "dddd", "dddd", 0)
// str, err := Redis.Get(context.Background(), "dddd").Result()
// fmt.Println("ssss", str, err)
// t.Log(str, err)
// t.Fail()
}
+17 -6
View File
@@ -1,10 +1,21 @@
module code.yun.ink/open/timer
module github.com/yuninks/timerx
go 1.19
require github.com/go-redis/redis/v8 v8.11.5
go 1.24
require (
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/google/uuid v1.6.0
github.com/redis/go-redis/v9 v9.14.0
github.com/stretchr/testify v1.11.1
github.com/yuninks/cachex v1.0.5
github.com/yuninks/lockx v1.1.3
)
require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
+32 -13
View File
@@ -1,15 +1,34 @@
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/redis/go-redis/v9 v9.14.0 h1:u4tNCjXOyzfgeLN+vAZaW1xUooqWDqVEsZN0U01jfAE=
github.com/redis/go-redis/v9 v9.14.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/yuninks/cachex v1.0.5 h1:Y2NmTsuEgwEVYb7FVFh5tUN67kmrUioeksQqLbOAwsM=
github.com/yuninks/cachex v1.0.5/go.mod h1:5357qz18UvHTJSgZzkMamUzZoFzGeKG9+4tIUBXRSVM=
github.com/yuninks/lockx v1.1.3 h1:OA6rb4/XOj+M+1vKLs8fqsU4ZCadvga+oARTWTHpwx0=
github.com/yuninks/lockx v1.1.3/go.mod h1:+HyRozwQHMHrykyOFlotV4Z+z2yrgRSdDl8TxxRMFzw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+155
View File
@@ -0,0 +1,155 @@
package heartbeat
import (
"context"
"errors"
"strconv"
"sync"
"time"
"github.com/redis/go-redis/v9"
"github.com/yuninks/timerx/leader"
"github.com/yuninks/timerx/logger"
"github.com/yuninks/timerx/priority"
)
// HeartBeat periodically reports that this instance is alive by writing its
// instance id into a Redis sorted set scored with the current timestamp.
// Dependencies: leader and priority (both optional, may be nil).
type HeartBeat struct {
	ctx          context.Context
	cancel       context.CancelFunc
	redis        redis.UniversalClient // Redis connection holding the heartbeat set
	logger       logger.Logger
	priority     *priority.Priority // optional (nil allowed): global priority gate
	leader       *leader.Leader     // optional (nil allowed): leader-election handle
	heartbeatKey string             // sorted-set key: member=instanceId, score=last beat (ms)
	instanceId   string             // unique id of this process instance
	wg           sync.WaitGroup     // joins the daemon goroutines on Close
}
// InitHeartBeat builds a HeartBeat bound to keyPrefix and immediately starts
// its background goroutines (periodic reporting plus periodic cleanup).
// ref must be non-nil; logger, leader, priority, instanceId and source are
// supplied through the functional options.
func InitHeartBeat(ctx context.Context, ref redis.UniversalClient, keyPrefix string, opts ...Option) (*HeartBeat, error) {
	if ref == nil {
		return nil, errors.New("redis is nil")
	}
	op := newOptions(opts...)
	ctx, cancel := context.WithCancel(ctx)
	l := &HeartBeat{
		ctx:    ctx,
		cancel: cancel,
		// Key layout: fixed prefix + source tag + caller prefix.
		heartbeatKey: "timer:heartbeat_key" + op.source + keyPrefix,
		priority:     op.priority,
		redis:        ref,
		logger:       op.logger,
		leader:       op.leader,
		instanceId:   op.instanceId,
	}
	l.logger.Infof(l.ctx, "InitHeartBeat InstanceId %s lockKey:%s", l.instanceId, l.heartbeatKey)
	l.startDaemon()
	return l, nil
}
// Close removes this instance's own heartbeat entry, stops the daemon
// goroutines and waits for them to exit. Order matters: the entry is removed
// while l.ctx is still alive, and only then is the context cancelled.
func (l *HeartBeat) Close() {
	l.cleanHeartbeat(true)
	l.cancel()
	l.wg.Wait()
}
// startDaemon launches the two background loops and registers each with the
// wait group so Close can join them.
func (l *HeartBeat) startDaemon() {
	l.wg.Add(1)
	go l.heartbeatLoop()
	l.wg.Add(1)
	go l.cleanHeartbeatLoop()
}
// heartbeatLoop reports liveness once immediately and then every 5 seconds
// until the HeartBeat context is cancelled.
func (l *HeartBeat) heartbeatLoop() {
	defer l.wg.Done()
	// Report once up front so the instance is visible without waiting a tick.
	l.heartbeat()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-l.ctx.Done():
			return
		case <-ticker.C:
			l.heartbeat()
		}
	}
}
// heartbeat writes a single liveness record: this instance's id scored with
// the current unix-millisecond timestamp. The error is logged and returned.
func (l *HeartBeat) heartbeat() error {
	member := redis.Z{
		Score:  float64(time.Now().UnixMilli()),
		Member: l.instanceId,
	}
	if err := l.redis.ZAdd(l.ctx, l.heartbeatKey, member).Err(); err != nil {
		l.logger.Errorf(l.ctx, "heartbeat redis.ZAdd err:%v", err)
		return err
	}
	return nil
}
// cleanHeartbeatLoop periodically garbage-collects stale heartbeat entries.
// When a leader handle is configured only the elected leader performs the
// cleanup; without one every instance cleans (the operation is idempotent).
func (l *HeartBeat) cleanHeartbeatLoop() {
	defer l.wg.Done()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-l.ctx.Done():
			return
		case <-ticker.C:
			if l.leader != nil {
				if !l.leader.IsLeader() {
					// Follower instances skip the cleanup round.
					continue
				}
			}
			l.cleanHeartbeat(false)
		}
	}
}
// cleanHeartbeat removes heartbeat entries.
// With cleanSelf=true only this instance's own entry is removed (used on
// shutdown); otherwise every entry whose last beat is older than 15 seconds
// is dropped, garbage-collecting instances that stopped beating.
//
// Fix: the original discarded the error of ZRemRangeByScore and always
// returned nil; the error is now logged and propagated.
func (l *HeartBeat) cleanHeartbeat(cleanSelf bool) error {
	if cleanSelf {
		// Remove only our own member on shutdown.
		return l.redis.ZRem(l.ctx, l.heartbeatKey, l.instanceId).Err()
	}
	// Drop every member whose score (last beat, ms) is older than 15s.
	cutoff := strconv.FormatInt(time.Now().Add(-15*time.Second).UnixMilli(), 10)
	if err := l.redis.ZRemRangeByScore(l.ctx, l.heartbeatKey, "0", cutoff).Err(); err != nil {
		l.logger.Errorf(l.ctx, "cleanHeartbeat redis.ZRemRangeByScore err:%v", err)
		return err
	}
	return nil
}
+66
View File
@@ -0,0 +1,66 @@
package heartbeat
import (
"github.com/google/uuid"
"github.com/yuninks/timerx/leader"
"github.com/yuninks/timerx/logger"
"github.com/yuninks/timerx/priority"
)
// Options carries the configurable dependencies of a HeartBeat.
type Options struct {
	logger     logger.Logger      // log sink
	instanceId string             // unique id of this instance
	priority   *priority.Priority // global priority gate (optional)
	leader     *leader.Leader     // leader-election handle (optional)
	source     string             // source-service tag mixed into the heartbeat key
}
// defaultOptions returns the baseline configuration: the built-in stdlib
// logger and a freshly generated instance id.
//
// Fix: the original ignored the uuid.NewV7 error; if the random source is
// unavailable we now fall back to uuid.NewString so instanceId is never the
// zero UUID.
func defaultOptions() Options {
	u, err := uuid.NewV7()
	instanceId := u.String()
	if err != nil {
		instanceId = uuid.NewString()
	}
	return Options{
		logger:     logger.NewLogger(),
		instanceId: instanceId,
	}
}
// Option mutates an Options value during construction.
type Option func(*Options)

// newOptions applies opts on top of the defaults and returns the result.
func newOptions(opts ...Option) Options {
	o := defaultOptions()
	for _, opt := range opts {
		opt(&o)
	}
	return o
}
// WithLogger overrides the default console logger.
func WithLogger(log logger.Logger) Option {
	return func(o *Options) {
		o.logger = log
	}
}

// WithPriority attaches a global priority gate.
func WithPriority(p *priority.Priority) Option {
	return func(o *Options) {
		o.priority = p
	}
}

// WithLeader attaches a leader-election handle; heartbeat cleanup then runs
// on the leader only.
func WithLeader(l *leader.Leader) Option {
	return func(o *Options) {
		o.leader = l
	}
}

// WithInstanceId overrides the generated instance id.
func WithInstanceId(instanceId string) Option {
	return func(o *Options) {
		o.instanceId = instanceId
	}
}

// WithSource sets the source-service tag mixed into the heartbeat key.
func WithSource(source string) Option {
	return func(o *Options) {
		o.source = source
	}
}
+167
View File
@@ -0,0 +1,167 @@
package leader
// 竞选Leader
import (
"context"
"errors"
"sync"
"time"
"github.com/redis/go-redis/v9"
"github.com/yuninks/lockx"
"github.com/yuninks/timerx/logger"
"github.com/yuninks/timerx/priority"
)
// Leader implements leader election on top of a distributed Redis lock so
// that exactly one instance performs shared work (e.g. schedule computation)
// and the others stay idle. Dependency: priority (optional).
type Leader struct {
	ctx              context.Context
	cancel           context.CancelFunc
	isLeader         bool                  // whether this instance currently leads
	leaderLock       sync.RWMutex          // guards isLeader
	leaderUniLockKey string                // distributed lock key used for the election
	leaderKey        string                // key under which the current leader's id is reported
	redis            redis.UniversalClient // redis
	logger           logger.Logger
	priority         *priority.Priority // optional global priority gate
	wg               sync.WaitGroup     // joins the election goroutine on Close
	instanceId       string             // unique id of this instance
}
// InitLeader builds a Leader for keyPrefix and starts the background
// election loop. ref must be non-nil; logger, priority, instanceId and
// source are supplied through the functional options.
//
// Fix: the leaderKey field was never initialized, so getLeaderLock reported
// the current leader's instance id under the empty key "". It is now derived
// from source+keyPrefix like the lock key.
func InitLeader(ctx context.Context, ref redis.UniversalClient, keyPrefix string, opts ...Option) (*Leader, error) {
	if ref == nil {
		return nil, errors.New("redis is nil")
	}
	op := newOptions(opts...)
	ctx, cancel := context.WithCancel(ctx)
	l := &Leader{
		ctx:              ctx,
		cancel:           cancel,
		redis:            ref,
		leaderUniLockKey: "timer:leader_lockKey" + op.source + keyPrefix,
		// Key under which the elected leader publishes its instance id.
		leaderKey:  "timer:leader_key" + op.source + keyPrefix,
		priority:   op.priority,
		instanceId: op.instanceId,
		logger:     op.logger,
	}
	l.wg.Add(1)
	go l.leaderElection()
	l.logger.Infof(l.ctx, "InitLeader InstanceId %s lockKey:%s", l.instanceId, l.leaderUniLockKey)
	return l, nil
}
// Close cancels the election loop and waits for it to finish.
func (l *Leader) Close() {
	l.cancel()
	l.wg.Wait()
}
// leaderElection runs the election loop: attempt once immediately, then
// retry every 5 seconds until the Leader context is cancelled. Note that a
// successful attempt blocks inside getLeaderLock while leadership is held.
// Purpose: a single elected instance computes schedules and enqueues work so
// the other instances do not waste resources on the same computation.
func (l *Leader) leaderElection() {
	defer l.wg.Done()
	// First attempt without waiting for the ticker.
	l.getLeaderLock()
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			l.getLeaderLock()
		case <-l.ctx.Done():
			return
		}
	}
}
// getLeaderLock makes one attempt to acquire the global leader lock.
// On failure it marks this instance as a follower and returns immediately.
// On success it marks this instance as leader, publishes its id, and BLOCKS
// until the lock's own context expires (or the priority watchdog fires), so
// leadership is held for the duration of the call.
func (l *Leader) getLeaderLock() error {
	// Instances that are not on the latest priority generation do not
	// compete for leadership.
	if l.priority != nil && !l.priority.IsLatest(l.ctx) {
		return nil
	}
	ctx, cancel := context.WithCancel(l.ctx)
	defer cancel()
	// Try to take the distributed lock.
	lock, err := lockx.NewGlobalLock(ctx, l.redis, l.leaderUniLockKey)
	if err != nil {
		l.logger.Errorf(l.ctx, "getLeaderLock err:%+v", err)
		return err
	}
	if b, _ := lock.Lock(); !b {
		// Lock not acquired: we are a follower.
		l.leaderLock.Lock()
		l.isLeader = false
		l.leaderLock.Unlock()
		return nil
	}
	defer lock.Unlock()
	// Lock acquired: we are the leader.
	l.leaderLock.Lock()
	l.isLeader = true
	l.leaderLock.Unlock()
	// Publish which instance is the current leader.
	// NOTE(review): leaderKey may be empty if it was never initialized in
	// InitLeader — verify the key is set before relying on this report.
	l.redis.Set(l.ctx, l.leaderKey, l.instanceId, time.Hour*24)
	l.logger.Infof(l.ctx, "getLeaderLock Instance %s became leader", lock.GetValue())
	// Watchdog: give up leadership as soon as this instance falls off the
	// latest priority generation (polled every 100ms).
	go func() {
		if l.priority == nil {
			return
		}
		for {
			select {
			case <-ctx.Done():
				return
			default:
				if !l.priority.IsLatest(l.ctx) {
					cancel()
					return
				}
				time.Sleep(100 * time.Millisecond)
			}
		}
	}()
	// Block until the lock's context expires; the deferred Unlock releases it.
	<-lock.GetCtx().Done()
	// Expired. isLeader is deliberately left as-is until the next election
	// round (the reset below was intentionally disabled by the author).
	// l.leaderLock.Lock()
	// l.isLeader = false
	// l.leaderLock.Unlock()
	return nil
}
// IsLeader reports whether this instance currently holds leadership.
// Fix: receiver renamed from c to l for consistency with every other Leader
// method (Go convention: one receiver name per type).
func (l *Leader) IsLeader() bool {
	l.leaderLock.RLock()
	defer l.leaderLock.RUnlock()
	return l.isLeader
}
+58
View File
@@ -0,0 +1,58 @@
package leader
import (
"github.com/google/uuid"
"github.com/yuninks/timerx/logger"
"github.com/yuninks/timerx/priority"
)
// Options carries the configurable dependencies of a Leader.
type Options struct {
	logger     logger.Logger      // log sink
	instanceId string             // unique id of this instance
	priority   *priority.Priority // global priority gate (optional)
	source     string             // source-service tag mixed into the lock key
}
// defaultOptions returns the baseline configuration: the built-in stdlib
// logger and a freshly generated instance id.
//
// Fix: the original ignored the uuid.NewV7 error; if the random source is
// unavailable we now fall back to uuid.NewString so instanceId is never the
// zero UUID.
func defaultOptions() Options {
	u, err := uuid.NewV7()
	instanceId := u.String()
	if err != nil {
		instanceId = uuid.NewString()
	}
	return Options{
		logger:     logger.NewLogger(),
		instanceId: instanceId,
	}
}
// Option mutates an Options value during construction.
type Option func(*Options)

// newOptions applies opts on top of the defaults and returns the result.
func newOptions(opts ...Option) Options {
	o := defaultOptions()
	for _, opt := range opts {
		opt(&o)
	}
	return o
}
// WithLogger overrides the default console logger.
func WithLogger(log logger.Logger) Option {
	return func(o *Options) {
		o.logger = log
	}
}

// WithPriority attaches a global priority gate; only instances on the latest
// priority generation compete for leadership.
func WithPriority(p *priority.Priority) Option {
	return func(o *Options) {
		o.priority = p
	}
}

// WithInstanceId overrides the generated instance id.
func WithInstanceId(instanceId string) Option {
	return func(o *Options) {
		o.instanceId = instanceId
	}
}

// WithSource sets the source-service tag mixed into the lock key.
func WithSource(source string) Option {
	return func(o *Options) {
		o.source = source
	}
}
-120
View File
@@ -1,120 +0,0 @@
package lockx
import (
"context"
"fmt"
"log"
"time"
"github.com/go-redis/redis/v8"
)
// 全局锁
type globalLock struct {
redis *redis.Client
ctx context.Context
cancel context.CancelFunc
uniqueKey string
value string
}
func NewGlobalLock(ctx context.Context, red *redis.Client, uniqueKey string) *globalLock {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
return &globalLock{
redis: red,
ctx: ctx,
cancel: cancel,
uniqueKey: uniqueKey,
value: fmt.Sprintf("%d", time.Now().UnixNano()),
}
}
// 获取锁
func (g *globalLock) Lock() bool {
script := `
local token = redis.call('get',KEYS[1])
if token == false
then
return redis.call('set',KEYS[1],ARGV[1],'EX',ARGV[2])
end
return 'ERROR'
`
resp, err := g.redis.Eval(g.ctx, script, []string{g.uniqueKey}, g.value, 5).Result()
if resp != "OK" {
_ = err
log.Println("globalLock Lock", resp, err, g.uniqueKey, g.value)
}
if resp == "OK" {
g.refresh()
return true
}
return false
}
// 尝试获取锁
func (g *globalLock) Try(limitTimes int) bool {
for i := 0; i < limitTimes; i++ {
if g.Lock() {
return true
}
time.Sleep(time.Millisecond * 100)
}
return false
}
// 删除锁
func (g *globalLock) Unlock() bool {
script := `
local token = redis.call('get',KEYS[1])
if token == ARGV[1]
then
redis.call('del',KEYS[1])
return 'OK'
end
return 'ERROR'
`
resp, err := g.redis.Eval(g.ctx, script, []string{g.uniqueKey}, g.value).Result()
if resp != "OK" {
log.Println("globalLock Unlock", resp, err, g.uniqueKey, g.value)
}
g.cancel()
return false
}
// 刷新锁
func (g *globalLock) refresh() {
go func() {
t := time.NewTicker(time.Second)
for {
select {
case <-t.C:
g.refreshExec()
case <-g.ctx.Done():
t.Stop()
return
}
}
}()
}
func (g *globalLock) refreshExec() bool {
script := `
local token = redis.call('get',KEYS[1])
if token == ARGV[1]
then
redis.call('set',KEYS[1],ARGV[1],'EX',ARGV[2])
return 'OK'
end
return 'ERROR'
`
resp, err := g.redis.Eval(g.ctx, script, []string{g.uniqueKey}, g.value, 5).Result()
if resp != "OK" {
log.Println("globalLock refresh", resp, err, g.uniqueKey, g.value)
}
return resp == "OK"
}
-52
View File
@@ -1,52 +0,0 @@
package lockx_test
import (
"context"
"fmt"
"testing"
"code.yun.ink/open/timer/lockx"
"github.com/go-redis/redis/v8"
)
var Redis *redis.Client
// func TestMain(m *testing.M) {
// client := redis.NewClient(&redis.Options{
// Addr: "127.0.0.1" + ":" + "6379",
// Password: "", // no password set
// DB: 0, // use default DB
// })
// if client == nil {
// fmt.Println("redis init error")
// return
// }
// // fmt.Println("ffff")
// Redis = client
// }
func TestLockx(t *testing.T) {
client := redis.NewClient(&redis.Options{
Addr: "127.0.0.1" + ":" + "6379",
Password: "", // no password set
DB: 0, // use default DB
})
if client == nil {
fmt.Println("redis init error")
return
}
fmt.Println("begin")
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
lock := lockx.NewGlobalLock(ctx, client, "lockx:test")
if !lock.Lock() {
fmt.Println("lock error")
}
defer lock.Unlock()
fmt.Println("ssss")
}
+30
View File
@@ -0,0 +1,30 @@
package logger
import (
"context"
"log"
)
// Logger is the minimal logging contract used across the timerx packages.
type Logger interface {
	Infof(ctx context.Context, format string, v ...any)
	// Warnf(ctx context.Context, format string, v ...any)
	Errorf(ctx context.Context, format string, v ...any)
}
// defaultLogger writes through the standard library log package; it is used
// when the caller supplies no Logger option.
type defaultLogger struct{}

// NewLogger returns the built-in stdlib-backed logger.
func NewLogger() *defaultLogger {
	return &defaultLogger{}
}
// Infof logs an informational message. ctx is accepted for interface
// compatibility but is not used by the default implementation.
func (l *defaultLogger) Infof(ctx context.Context, format string, v ...any) {
	log.Printf("[INFO] "+format, v...)
}

// func (l *defaultLogger) Warnf(ctx context.Context, format string, v ...any) {
// 	log.Printf("[WARN] "+format, v...)
// }

// Errorf logs an error message. ctx is accepted for interface compatibility
// but is not used by the default implementation.
func (l *defaultLogger) Errorf(ctx context.Context, format string, v ...any) {
	log.Printf("[ERROR] "+format, v...)
}
+251
View File
@@ -0,0 +1,251 @@
package timerx
import (
"errors"
"time"
)
// GetNextTime computes the next execution time of job strictly after t.
// It validates the job's fields first, then dispatches on the job type.
// Returns a pointer to the next fire time, or an error when the job data is
// invalid or the job type is unknown.
//
// Fix: removed the redundant next/err plumbing after the switch — every
// case already produces the final result, so each branch returns directly.
func GetNextTime(t time.Time, job JobData) (*time.Time, error) {
	if err := validateJobData(job); err != nil {
		return nil, err
	}
	switch job.JobType {
	case JobTypeEveryMonth:
		return calculateNextMonthTime(t, job)
	case JobTypeEveryWeek:
		return calculateNextWeekTime(t, job)
	case JobTypeEveryDay:
		return calculateNextDayTime(t, job)
	case JobTypeEveryHour:
		return calculateNextHourTime(t, job)
	case JobTypeEveryMinute:
		return calculateNextMinuteTime(t, job)
	case JobTypeInterval:
		return calculateNextInterval(t, job)
	default:
		return nil, errors.New("未知的任务类型: " + string(job.JobType))
	}
}
// validateJobData checks the field ranges of job before scheduling.
// The switch validates the type-specific field first (so e.g. an invalid
// month-day is reported before an invalid hour); the trailing checks then
// validate the common hour/minute/second fields for every job type.
// NOTE: some cases overlap with the common checks; they are kept so the
// per-type error is reported first.
func validateJobData(job JobData) error {
	switch job.JobType {
	case JobTypeEveryMonth:
		// Day 0 is rejected here even though the error text says "0 and 31".
		if job.Day < 1 || job.Day > 31 {
			return ErrMonthDay
		}
	case JobTypeEveryWeek:
		if job.Weekday < time.Sunday || job.Weekday > time.Saturday {
			return ErrWeekday
		}
	case JobTypeEveryDay:
		if job.Hour < 0 || job.Hour > 23 {
			return ErrHour
		}
	case JobTypeEveryHour:
		if job.Minute < 0 || job.Minute > 59 {
			return ErrMinute
		}
	case JobTypeEveryMinute:
		if job.Second < 0 || job.Second > 59 {
			return ErrSecond
		}
	case JobTypeInterval:
		if job.IntervalTime <= 0 {
			return ErrIntervalTime
		}
		if job.BaseTime.IsZero() {
			return ErrBaseTime
		}
	}
	// Common range checks applied to every job type.
	if job.Hour < 0 || job.Hour > 23 {
		return ErrHour
	}
	if job.Minute < 0 || job.Minute > 59 {
		return ErrMinute
	}
	if job.Second < 0 || job.Second > 59 {
		return ErrSecond
	}
	return nil
}
// calculateNextInterval returns the next fire time of an interval job after
// t. The time is derived from the job's fixed BaseTime so that every
// instance in the cluster aligns on the same interval grid.
func calculateNextInterval(t time.Time, job JobData) (*time.Time, error) {
	if job.BaseTime.IsZero() {
		return nil, ErrBaseTime
	}
	if job.IntervalTime <= 0 {
		return nil, ErrIntervalTime
	}
	// Whole intervals elapsed between the base time and now.
	elapsed := t.Sub(job.BaseTime)
	intervals := elapsed / job.IntervalTime
	// Candidate: one interval past the elapsed count.
	next := job.BaseTime.Add((intervals + 1) * job.IntervalTime)
	// Snap to a whole interval boundary so ticks stay on the grid.
	next = next.Round(job.IntervalTime)
	// Rounding can land at or before t; push forward one interval if so.
	if next.Before(t) || next.Equal(t) {
		next = next.Add(job.IntervalTime)
	}
	return &next, nil
}
// calculateNextMonthTime returns the next monthly fire time after t.
// If the configured day does not exist in a month (e.g. day 30 in February),
// the run is clamped to that month's last day.
func calculateNextMonthTime(t time.Time, job JobData) (*time.Time, error) {
	// Build the candidate in the current month. time.Date normalizes
	// overflow (2025-02-30 becomes 2025-03-02), so a changed Day() reveals
	// that the month is too short for job.Day.
	currentMonthTime := time.Date(t.Year(), t.Month(), job.Day, job.Hour, job.Minute, job.Second, 0, t.Location())
	if currentMonthTime.Day() != job.Day {
		// Day 0 of the next month is the last day of the current month.
		// time.Date(2025,2,0,...) => 2025-01-31.
		lastDay := time.Date(t.Year(), t.Month()+1, 0, 0, 0, 0, 0, t.Location()).Day()
		if job.Day > lastDay {
			currentMonthTime = time.Date(t.Year(), t.Month(), lastDay, job.Hour, job.Minute, job.Second, 0, t.Location())
		}
	}
	if currentMonthTime.After(t) {
		return &currentMonthTime, nil
	}
	// Current month's slot already passed: schedule in the following month.
	nextMonth := t.Month() + 1
	year := t.Year()
	if nextMonth > 12 {
		nextMonth = 1
		year++
	}
	nextMonthTime := time.Date(year, nextMonth, job.Day, job.Hour, job.Minute, job.Second, 0, t.Location())
	// Clamp again if the configured day overflows the next month too.
	if nextMonthTime.Day() != job.Day {
		lastDay := time.Date(year, nextMonth+1, 0, 0, 0, 0, 0, t.Location()).Day()
		if job.Day > lastDay {
			nextMonthTime = time.Date(year, nextMonth, lastDay, job.Hour, job.Minute, job.Second, 0, t.Location())
		}
	}
	return &nextMonthTime, nil
}
// calculateNextWeekTime returns the next weekly fire time after t: the
// configured weekday at Hour:Minute:Second within the current week if that
// is still ahead, otherwise the same slot one week later.
func calculateNextWeekTime(t time.Time, job JobData) (*time.Time, error) {
	currentWeekday := t.Weekday()
	targetWeekday := job.Weekday
	// Days until the target weekday (0 when today is the target day).
	daysToAdd := int(targetWeekday - currentWeekday)
	if daysToAdd < 0 {
		daysToAdd += 7
	}
	// Candidate within the current week.
	thisWeekTime := time.Date(t.Year(), t.Month(), t.Day()+daysToAdd, job.Hour, job.Minute, job.Second, 0, t.Location())
	if thisWeekTime.After(t) {
		return &thisWeekTime, nil
	}
	// Today's slot already passed: same slot next week.
	nextWeekTime := time.Date(t.Year(), t.Month(), t.Day()+daysToAdd+7, job.Hour, job.Minute, job.Second, 0, t.Location())
	return &nextWeekTime, nil
}
// calculateNextDayTime returns the next daily fire time strictly after t:
// today at job.Hour:job.Minute:job.Second when that slot is still ahead,
// otherwise the same wall-clock slot tomorrow.
func calculateNextDayTime(t time.Time, job JobData) (*time.Time, error) {
	fireAt := time.Date(t.Year(), t.Month(), t.Day(), job.Hour, job.Minute, job.Second, 0, t.Location())
	if fireAt.After(t) {
		return &fireAt, nil
	}
	// Today's slot already passed — roll over to tomorrow (time.Date
	// normalizes Day()+1 across month/year boundaries).
	fireAt = time.Date(t.Year(), t.Month(), t.Day()+1, job.Hour, job.Minute, job.Second, 0, t.Location())
	return &fireAt, nil
}
// calculateNextHourTime returns the next hourly fire time strictly after t:
// job.Minute:job.Second within the current hour when still ahead, otherwise
// the same slot in the next hour.
func calculateNextHourTime(t time.Time, job JobData) (*time.Time, error) {
	fireAt := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), job.Minute, job.Second, 0, t.Location())
	if !fireAt.After(t) {
		// Slot already passed in this hour — move to the next hour
		// (time.Date normalizes Hour()+1 across day boundaries).
		fireAt = time.Date(t.Year(), t.Month(), t.Day(), t.Hour()+1, job.Minute, job.Second, 0, t.Location())
	}
	return &fireAt, nil
}
// calculateNextMinuteTime returns the next per-minute fire time strictly
// after t: second job.Second within the current minute when still ahead,
// otherwise the same second in the next minute.
func calculateNextMinuteTime(t time.Time, job JobData) (*time.Time, error) {
	fireAt := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), job.Second, 0, t.Location())
	if !fireAt.After(t) {
		// Slot already passed in this minute — move to the next minute
		// (time.Date normalizes Minute()+1 across hour boundaries).
		fireAt = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute()+1, job.Second, 0, t.Location())
	}
	return &fireAt, nil
}
// canRun reports whether the job could still fire within the current period
// (month/week/day/hour/minute depending on the job type).
//
// Deprecated: superseded by the GetNextTime calculation; kept only for
// backward compatibility.
func canRun(t time.Time, job JobData) bool {
	targetTime := time.Date(t.Year(), t.Month(), t.Day(), job.Hour, job.Minute, job.Second, 0, t.Location())
	switch job.JobType {
	case JobTypeEveryMonth:
		// Monthly: compare against the configured day of the current month.
		targetTime = time.Date(t.Year(), t.Month(), job.Day, job.Hour, job.Minute, job.Second, 0, t.Location())
		return !targetTime.Before(t)
	case JobTypeEveryWeek:
		// Weekly: a later weekday this week still qualifies; the same
		// weekday qualifies only if the time of day has not passed yet.
		currentWeekday := t.Weekday()
		if currentWeekday < job.Weekday {
			return true
		}
		if currentWeekday == job.Weekday {
			return targetTime.After(t) || targetTime.Equal(t)
		}
		return false
	case JobTypeEveryDay:
		return targetTime.After(t) || targetTime.Equal(t)
	case JobTypeEveryHour:
		hourTarget := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), job.Minute, job.Second, 0, t.Location())
		return hourTarget.After(t) || hourTarget.Equal(t)
	case JobTypeEveryMinute:
		minuteTarget := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), job.Second, 0, t.Location())
		return minuteTarget.After(t) || minuteTarget.Equal(t)
	default:
		// Unknown job types never qualify.
		return false
	}
}
+840
View File
@@ -0,0 +1,840 @@
package timerx
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestGetNextTime table-tests GetNextTime against a fixed reference instant
// for every supported job type plus the unknown-type error path.
func TestGetNextTime(t *testing.T) {
	// Fixed reference instant: 2025-10-16 10:30:05 local time.
	tt := time.Date(2025, 10, 16, 10, 30, 5, 0, time.Local)
	// Test cases
	tests := []struct {
		name          string
		job           JobData
		expectedTime  time.Time
		expectedError error
	}{
		{
			name: "Test JobTypeEveryMonth",
			job: JobData{
				JobType: JobTypeEveryMonth,
				Day:     15,
				Hour:    10,
				Minute:  0,
				Second:  0,
			},
			// The 15th 10:00 of the current month is already before tt,
			// so the next run is the 15th of the following month.
			expectedTime:  time.Date(tt.Year(), tt.Month()+1, 15, 10, 0, 0, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeEveryWeek",
			job: JobData{
				JobType: JobTypeEveryWeek,
				Weekday: time.Tuesday,
				Hour:    10,
				Minute:  0,
				Second:  0,
			},
			// tt (2025-10-16) is a Thursday; the next Tuesday is 2025-10-21.
			expectedTime:  time.Date(2025, 10, 21, 10, 0, 0, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeEveryDay",
			job: JobData{
				JobType: JobTypeEveryDay,
				Hour:    10,
				Minute:  0,
				Second:  0,
			},
			// 10:00 today is already past 10:30:05, so tomorrow at 10:00.
			expectedTime:  time.Date(2025, 10, 17, 10, 0, 0, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeEveryHour",
			job: JobData{
				JobType: JobTypeEveryHour,
				Minute:  0,
				Second:  0,
			},
			// xx:00:00 of the current hour has passed, so the next hour.
			expectedTime:  time.Date(2025, 10, 16, 11, 0, 0, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeEveryMinute",
			job: JobData{
				JobType: JobTypeEveryMinute,
				Second:  12,
			},
			// Second 12 of the current minute is still ahead of 10:30:05.
			expectedTime:  time.Date(tt.Year(), tt.Month(), tt.Day(), tt.Hour(), tt.Minute(), 12, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeIntervalHour",
			job: JobData{
				JobType:      JobTypeInterval,
				BaseTime:     tt,
				IntervalTime: 1 * time.Hour,
			},
			// tt+1h rounded to a whole hour boundary falls before tt+1h,
			// so the grid pushes the run out to 12:00.
			expectedTime:  time.Date(2025, 10, 16, 12, 00, 0, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeIntervalMinute",
			job: JobData{
				JobType:      JobTypeInterval,
				BaseTime:     tt,
				IntervalTime: 1 * time.Minute,
			},
			// One minute after tt, snapped to the whole-minute boundary.
			expectedTime:  time.Date(2025, 10, 16, 10, 31, 0, 0, time.Local),
			expectedError: nil,
		},
		{
			name: "Test JobTypeIntervalSecond",
			job: JobData{
				JobType:      JobTypeInterval,
				BaseTime:     tt,
				IntervalTime: 1 * time.Second,
			},
			// Whole-second grid: exactly one second after the base time.
			expectedTime:  tt.Add(1 * time.Second),
			expectedError: nil,
		},
		{
			name: "Test unknown JobType",
			job: JobData{
				JobType: JobType("100"),
			},
			expectedTime:  time.Time{},
			expectedError: errors.New("未知的任务类型: 100"),
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			nextTime, err := GetNextTime(tt, test.job)
			if err != nil {
				if test.expectedError == nil || err.Error() != test.expectedError.Error() {
					t.Errorf("Expected error: %v, Got error: %v", test.expectedError, err)
				}
			} else {
				if nextTime.IsZero() != (test.expectedTime == time.Time{}) || (!nextTime.Equal(test.expectedTime)) {
					t.Errorf("Expected time: %v, Got time: %v", test.expectedTime, nextTime)
				}
			}
		})
	}
}
// TestValidateJobData table-tests the per-type and common field range checks
// of validateJobData.
func TestValidateJobData(t *testing.T) {
	tests := []struct {
		name     string
		job      JobData
		expected error
	}{
		{
			name: "有效月任务",
			job: JobData{
				JobType: JobTypeEveryMonth,
				Day:     15,
				Hour:    12,
				Minute:  30,
				Second:  0,
			},
			expected: nil,
		},
		{
			name: "无效月任务-日期太小",
			job: JobData{
				JobType: JobTypeEveryMonth,
				Day:     0,
				Hour:    12,
				Minute:  30,
				Second:  0,
			},
			expected: ErrMonthDay,
		},
		{
			name: "无效月任务-日期太大",
			job: JobData{
				JobType: JobTypeEveryMonth,
				Day:     32,
				Hour:    12,
				Minute:  30,
				Second:  0,
			},
			expected: ErrMonthDay,
		},
		{
			name: "有效周任务",
			job: JobData{
				JobType: JobTypeEveryWeek,
				Weekday: time.Monday,
				Hour:    12,
				Minute:  30,
				Second:  0,
			},
			expected: nil,
		},
		{
			name: "无效周任务-星期超出范围",
			job: JobData{
				JobType: JobTypeEveryWeek,
				Weekday: time.Weekday(7), // out of range (valid: 0-6)
				Hour:    12,
				Minute:  30,
				Second:  0,
			},
			expected: ErrWeekday,
		},
		{
			name: "有效间隔任务",
			job: JobData{
				JobType:      JobTypeInterval,
				BaseTime:     time.Now(),
				IntervalTime: time.Minute,
				Hour:         12,
				Minute:       30,
				Second:       0,
			},
			expected: nil,
		},
		{
			name: "无效间隔任务-间隔时间为0",
			job: JobData{
				JobType:      JobTypeInterval,
				BaseTime:     time.Now(),
				IntervalTime: 0,
				Hour:         12,
				Minute:       30,
				Second:       0,
			},
			expected: ErrIntervalTime,
		},
		{
			name: "无效间隔任务-创建时间为空",
			job: JobData{
				JobType:      JobTypeInterval,
				BaseTime:     time.Time{},
				IntervalTime: time.Minute,
				Hour:         12,
				Minute:       30,
				Second:       0,
			},
			expected: ErrBaseTime,
		},
		{
			name: "无效小时",
			job: JobData{
				JobType: JobTypeEveryDay,
				Hour:    24, // invalid hour (valid: 0-23)
				Minute:  30,
				Second:  0,
			},
			expected: ErrHour,
		},
		{
			name: "无效分钟",
			job: JobData{
				JobType: JobTypeEveryDay,
				Hour:    12,
				Minute:  60, // invalid minute (valid: 0-59)
				Second:  0,
			},
			expected: ErrMinute,
		},
		{
			name: "无效秒数",
			job: JobData{
				JobType: JobTypeEveryDay,
				Hour:    12,
				Minute:  30,
				Second:  60, // invalid second (valid: 0-59)
			},
			expected: ErrSecond,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := validateJobData(tt.job)
			assert.Equal(t, tt.expected, err)
		})
	}
}
// TestCalculateNextInterval checks interval scheduling: the next run is
// aligned to whole multiples of the interval measured from the base time.
func TestCalculateNextInterval(t *testing.T) {
	base := time.Date(2023, 6, 15, 10, 0, 0, 0, time.UTC)
	noon := time.Date(2023, 6, 15, 12, 0, 0, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "间隔1小时-当前时间在创建时间之后", job: JobData{JobType: JobTypeInterval, BaseTime: base, IntervalTime: time.Hour}, now: noon, want: time.Date(2023, 6, 15, 13, 0, 0, 0, time.UTC)},
		// Landing exactly on a boundary rolls over to the following slot.
		{name: "间隔30分钟-刚好在间隔点上", job: JobData{JobType: JobTypeInterval, BaseTime: base, IntervalTime: 30 * time.Minute}, now: time.Date(2023, 6, 15, 12, 30, 0, 0, time.UTC), want: time.Date(2023, 6, 15, 13, 0, 0, 0, time.UTC)},
		{name: "间隔1天-跨天", job: JobData{JobType: JobTypeInterval, BaseTime: base, IntervalTime: 24 * time.Hour}, now: noon, want: time.Date(2023, 6, 16, 0, 0, 0, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := calculateNextInterval(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestCalculateNextMonthTime checks monthly scheduling: same-month vs
// next-month rollover, clamping of out-of-range days (Feb 30 -> Feb 28),
// leap years, and year rollover.
func TestCalculateNextMonthTime(t *testing.T) {
	base := time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "本月还能执行", job: JobData{JobType: JobTypeEveryMonth, Day: 20, Hour: 12, Minute: 30, Second: 45}, now: base, want: time.Date(2023, 6, 20, 12, 30, 45, 0, time.UTC)},
		{name: "本月已过,下个月执行", job: JobData{JobType: JobTypeEveryMonth, Day: 10, Hour: 12, Minute: 30, Second: 45}, now: base, want: time.Date(2023, 7, 10, 12, 30, 45, 0, time.UTC)},
		{name: "2月30日调整到2月28日", job: JobData{JobType: JobTypeEveryMonth, Day: 30, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2023, 2, 15, 12, 30, 45, 0, time.UTC), want: time.Date(2023, 2, 28, 12, 30, 45, 0, time.UTC)},
		// 2024 is a leap year, so day 29 exists in February.
		{name: "闰年2月29日", job: JobData{JobType: JobTypeEveryMonth, Day: 29, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2024, 2, 15, 12, 30, 45, 0, time.UTC), want: time.Date(2024, 2, 29, 12, 30, 45, 0, time.UTC)},
		{name: "跨年", job: JobData{JobType: JobTypeEveryMonth, Day: 15, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2023, 12, 20, 12, 30, 45, 0, time.UTC), want: time.Date(2024, 1, 15, 12, 30, 45, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := calculateNextMonthTime(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestCalculateNextMonthTimeOnce pins one clamping case: scheduling day 30
// from the end of January lands on Feb 28 in a non-leap year.
func TestCalculateNextMonthTimeOnce(t *testing.T) {
	t.Run("2月30日调整到2月28日", func(t *testing.T) {
		job := JobData{JobType: JobTypeEveryMonth, Day: 30, Hour: 12, Minute: 30, Second: 45}
		now := time.Date(2023, 1, 31, 12, 30, 45, 0, time.UTC)
		got, err := calculateNextMonthTime(now, job)
		assert.NoError(t, err)
		assert.Equal(t, time.Date(2023, 2, 28, 12, 30, 45, 0, time.UTC), *got)
	})
}
// TestCalculateNextWeekTime checks weekly scheduling from a Thursday base:
// later-in-week, next-week rollover, same-day-but-time-passed, and a
// month-crossing rollover.
func TestCalculateNextWeekTime(t *testing.T) {
	thursday := time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "本周还能执行-周五", job: JobData{JobType: JobTypeEveryWeek, Weekday: time.Friday, Hour: 12, Minute: 30, Second: 45}, now: thursday, want: time.Date(2023, 6, 16, 12, 30, 45, 0, time.UTC)},
		{name: "本周已过,下周执行-周三", job: JobData{JobType: JobTypeEveryWeek, Weekday: time.Wednesday, Hour: 12, Minute: 30, Second: 45}, now: thursday, want: time.Date(2023, 6, 21, 12, 30, 45, 0, time.UTC)},
		// Target hour (10) is earlier than the current time, so next Thursday.
		{name: "同一天但时间已过", job: JobData{JobType: JobTypeEveryWeek, Weekday: time.Thursday, Hour: 10, Minute: 30, Second: 45}, now: thursday, want: time.Date(2023, 6, 22, 10, 30, 45, 0, time.UTC)},
		// Friday June 30 -> following Monday, July 3.
		{name: "跨月", job: JobData{JobType: JobTypeEveryWeek, Weekday: time.Monday, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2023, 6, 30, 12, 30, 45, 0, time.UTC), want: time.Date(2023, 7, 3, 12, 30, 45, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := calculateNextWeekTime(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestCalculateNextDayTime checks daily scheduling: same-day execution,
// rollover to tomorrow, and a month-crossing rollover.
func TestCalculateNextDayTime(t *testing.T) {
	base := time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "今天还能执行", job: JobData{JobType: JobTypeEveryDay, Hour: 14, Minute: 30, Second: 45}, now: base, want: time.Date(2023, 6, 15, 14, 30, 45, 0, time.UTC)},
		{name: "今天已过,明天执行", job: JobData{JobType: JobTypeEveryDay, Hour: 10, Minute: 30, Second: 45}, now: base, want: time.Date(2023, 6, 16, 10, 30, 45, 0, time.UTC)},
		{name: "跨月", job: JobData{JobType: JobTypeEveryDay, Hour: 10, Minute: 30, Second: 45}, now: time.Date(2023, 6, 30, 12, 30, 45, 0, time.UTC), want: time.Date(2023, 7, 1, 10, 30, 45, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := calculateNextDayTime(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestCalculateNextHourTime checks hourly scheduling: same-hour execution,
// rollover to the next hour, and a day-crossing rollover.
func TestCalculateNextHourTime(t *testing.T) {
	base := time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "本小时还能执行", job: JobData{JobType: JobTypeEveryHour, Minute: 45, Second: 0}, now: base, want: time.Date(2023, 6, 15, 12, 45, 0, 0, time.UTC)},
		{name: "本小时已过,下小时执行", job: JobData{JobType: JobTypeEveryHour, Minute: 15, Second: 0}, now: base, want: time.Date(2023, 6, 15, 13, 15, 0, 0, time.UTC)},
		{name: "跨天", job: JobData{JobType: JobTypeEveryHour, Minute: 15, Second: 0}, now: time.Date(2023, 6, 15, 23, 30, 45, 0, time.UTC), want: time.Date(2023, 6, 16, 0, 15, 0, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := calculateNextHourTime(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestCalculateNextMinuteTime checks per-minute scheduling: same-minute
// execution, rollover to the next minute, and an hour-crossing rollover.
func TestCalculateNextMinuteTime(t *testing.T) {
	base := time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "本分钟还能执行", job: JobData{JobType: JobTypeEveryMinute, Second: 50}, now: base, want: time.Date(2023, 6, 15, 12, 30, 50, 0, time.UTC)},
		{name: "本分钟已过,下分钟执行", job: JobData{JobType: JobTypeEveryMinute, Second: 30}, now: base, want: time.Date(2023, 6, 15, 12, 31, 30, 0, time.UTC)},
		{name: "跨小时", job: JobData{JobType: JobTypeEveryMinute, Second: 30}, now: time.Date(2023, 6, 15, 12, 59, 45, 0, time.UTC), want: time.Date(2023, 6, 15, 13, 0, 30, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := calculateNextMinuteTime(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestGetNextTime_Integration drives the public GetNextTime entry point
// through one month, one week and one interval job.
func TestGetNextTime_Integration(t *testing.T) {
	now := time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC)
	cases := []struct {
		name string
		job  JobData
		want time.Time
	}{
		{name: "月任务集成测试", job: JobData{JobType: JobTypeEveryMonth, Day: 20, Hour: 12, Minute: 30, Second: 45}, want: time.Date(2023, 6, 20, 12, 30, 45, 0, time.UTC)},
		{name: "周任务集成测试", job: JobData{JobType: JobTypeEveryWeek, Weekday: time.Friday, Hour: 12, Minute: 30, Second: 45}, want: time.Date(2023, 6, 16, 12, 30, 45, 0, time.UTC)},
		{name: "间隔任务集成测试", job: JobData{JobType: JobTypeInterval, BaseTime: time.Date(2023, 6, 15, 10, 0, 0, 0, time.UTC), IntervalTime: time.Hour}, want: time.Date(2023, 6, 15, 13, 0, 0, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := GetNextTime(now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestGetNextTime_ErrorCases checks that invalid jobs produce an error
// and a nil result: an unknown job type and an out-of-range month day.
func TestGetNextTime_ErrorCases(t *testing.T) {
	now := time.Now()
	cases := []struct {
		name string
		job  JobData
		want error
	}{
		{name: "未知任务类型", job: JobData{JobType: "99"}, want: errors.New("未知的任务类型: 99")},
		{name: "无效月任务日期", job: JobData{JobType: JobTypeEveryMonth, Day: 32, Hour: 12, Minute: 30, Second: 0}, want: ErrMonthDay},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := GetNextTime(now, tc.job)
			assert.Nil(t, got)
			assert.Equal(t, tc.want, err)
		})
	}
}
// TestGetNextTime_EdgeCases pins boundary behavior: being exactly on the
// execution instant rolls to the next period, one second earlier does
// not, and February day-29 handling in leap and non-leap years.
func TestGetNextTime_EdgeCases(t *testing.T) {
	cases := []struct {
		name string
		job  JobData
		now  time.Time
		want time.Time
	}{
		{name: "刚好在执行时间点上-应该到下一个周期", job: JobData{JobType: JobTypeEveryDay, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2023, 6, 15, 12, 30, 45, 0, time.UTC), want: time.Date(2023, 6, 16, 12, 30, 45, 0, time.UTC)},
		{name: "刚好在执行时间点上-应该到下一个周期-秒", job: JobData{JobType: JobTypeEveryDay, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2023, 6, 16, 12, 30, 44, 0, time.UTC), want: time.Date(2023, 6, 16, 12, 30, 45, 0, time.UTC)},
		{name: "闰年2月29日", job: JobData{JobType: JobTypeEveryMonth, Day: 29, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2024, 2, 15, 12, 30, 45, 0, time.UTC), want: time.Date(2024, 2, 29, 12, 30, 45, 0, time.UTC)},
		{name: "非闰年2月29日调整到28日", job: JobData{JobType: JobTypeEveryMonth, Day: 29, Hour: 12, Minute: 30, Second: 45}, now: time.Date(2023, 2, 15, 12, 30, 45, 0, time.UTC), want: time.Date(2023, 2, 28, 12, 30, 45, 0, time.UTC)},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := GetNextTime(tc.now, tc.job)
			assert.NoError(t, err)
			assert.Equal(t, tc.want, *got)
		})
	}
}
// TestGetNextTime_Timezone verifies the computed next run stays in the
// same location as the input time across several fixed offsets.
func TestGetNextTime_Timezone(t *testing.T) {
	for _, loc := range []*time.Location{
		time.UTC,
		time.FixedZone("TEST+8", 8*60*60),
		time.FixedZone("TEST-5", -5*60*60),
	} {
		t.Run(loc.String(), func(t *testing.T) {
			now := time.Date(2023, 6, 15, 12, 30, 45, 0, loc)
			job := JobData{JobType: JobTypeEveryDay, Hour: 14, Minute: 30, Second: 45}
			got, err := GetNextTime(now, job)
			assert.NoError(t, err)
			assert.Equal(t, time.Date(2023, 6, 15, 14, 30, 45, 0, loc), *got)
			assert.Equal(t, loc, got.Location())
		})
	}
}
+530
View File
@@ -0,0 +1,530 @@
package timerx
import (
"context"
"encoding/json"
"errors"
"fmt"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"github.com/redis/go-redis/v9"
"github.com/google/uuid"
"github.com/yuninks/lockx"
"github.com/yuninks/timerx/heartbeat"
"github.com/yuninks/timerx/leader"
"github.com/yuninks/timerx/logger"
"github.com/yuninks/timerx/priority"
)
// Once is a one-shot distributed task queue.
// Features:
//  1. Tasks can be published from any node.
//  2. Each task is executed at most once across the whole cluster.
//  3. A failed task can easily be re-queued for retry.
type Once struct {
	ctx              context.Context       // root context for all background goroutines
	cancel           context.CancelFunc    // cancels ctx during Close
	logger           logger.Logger         // log sink
	zsetKey          string                // schedule: sorted set scored by execution time (ms)
	listKey          string                // ready queue: list of due task keys
	globalLockPrefix string                // prefix of the per-task global lock key
	redis            redis.UniversalClient // Redis client
	worker           Callback              // user callback that executes tasks
	keyPrefix        string                // namespace prefix for payload keys
	priority         *priority.Priority    // global version-priority tracker (may be nil)
	usePriority      bool                  // whether priority gating is enabled
	batchSize        int                   // tasks moved per scheduling pass
	wg               sync.WaitGroup        // tracks background goroutines
	stopChan         chan struct{}         // closed once in Close to stop the loops
	instanceId       string                // unique id of this process instance
	leader           *leader.Leader        // leader-election handle
	heartbeat        *heartbeat.HeartBeat  // liveness reporting
	executeInfoKey   string                // sorted set holding execution reports
	keySeparator     string                // separator between task type and task id
	timeout          time.Duration         // per-task execution timeout
	maxRetryCount    int                   // max retries per task; 0 means unlimited
}
// OnceWorkerResp is returned by a worker to control what happens next.
type OnceWorkerResp struct {
	Retry      bool          // true re-queues the task for another attempt
	DelayTime  time.Duration // optional new delay before the retry (>0 overrides the old one)
	AttachData any           // optional replacement payload for the retry
}
type OnceTaskType string
// Callback is implemented by users to execute due tasks.
// Implementations should account for failed executions being re-queued.
type Callback interface {
	// Worker executes a single task.
	// @param taskType   task category
	// @param taskId     unique task identifier
	// @param attachData payload supplied when the task was queued
	// @return *OnceWorkerResp controls retry behavior: nil or Retry=false
	//         marks the task done and deletes it; Retry=true re-queues it,
	//         optionally with a new delay/payload.
	Worker(ctx context.Context, taskType OnceTaskType, taskId string, attachData any) *OnceWorkerResp
}
// extendData is the JSON payload stored per task in redis.
type extendData struct {
	Delay      time.Duration // delay used when the task was (re)queued
	Data       any           // caller-supplied attachment, echoed back to the worker
	RetryCount int           // how many times the task has been retried so far
}
// InitOnce creates a Once queue bound to the given redis client.
// keyPrefix namespaces every redis key so several queues can share one
// redis instance; call receives due tasks; opts tune batch size, timeout,
// retry limit, priority, etc. The background daemons (scheduler, executor,
// cleaner) are started before returning.
//
// Fix: the derived context's cancel func is now called on every error
// path after context.WithCancel — previously those paths leaked the
// context (go vet "lostcancel").
func InitOnce(ctx context.Context, re redis.UniversalClient, keyPrefix string, call Callback, opts ...Option) (*Once, error) {
	op := newOptions(opts...)
	if re == nil {
		op.logger.Errorf(ctx, "redis client is nil")
		return nil, errors.New("redis client is nil")
	}
	ctx, cancel := context.WithCancel(ctx)
	u, _ := uuid.NewV7()
	wo := &Once{
		ctx:              ctx,
		cancel:           cancel,
		logger:           op.logger,
		zsetKey:          "timer:once_zsetkey" + keyPrefix,
		listKey:          "timer:once_listkey" + keyPrefix,
		executeInfoKey:   "timer:once_executeInfoKey" + keyPrefix,
		globalLockPrefix: "timer:once_globalLockPrefix" + keyPrefix,
		usePriority:      op.usePriority,
		redis:            re,
		worker:           call,
		keyPrefix:        keyPrefix,
		batchSize:        op.batchSize,
		stopChan:         make(chan struct{}),
		instanceId:       u.String(),
		keySeparator:     "[:]",
		timeout:          op.timeout,
		maxRetryCount:    op.maxRetryCount,
	}
	// Optional global priority tracker (multi-version gating).
	if wo.usePriority {
		pri, err := priority.InitPriority(
			ctx,
			re,
			keyPrefix,
			op.priorityVal,
			priority.WithLogger(wo.logger),
		)
		if err != nil {
			wo.logger.Errorf(ctx, "InitPriority err:%v", err)
			cancel() // release the derived context on failure
			return nil, err
		}
		wo.priority = pri
	}
	// Leader election: only the leader schedules tasks.
	le, err := leader.InitLeader(
		ctx,
		re,
		wo.keyPrefix,
		leader.WithLogger(wo.logger),
		leader.WithPriority(wo.priority),
		leader.WithInstanceId(wo.instanceId),
	)
	if err != nil {
		wo.logger.Infof(ctx, "InitLeader err:%v", err)
		cancel() // release the derived context on failure
		return nil, err
	}
	wo.leader = le
	// Heartbeat: reports this instance's liveness.
	heart, err := heartbeat.InitHeartBeat(
		ctx,
		re,
		wo.keyPrefix,
		heartbeat.WithInstanceId(wo.instanceId),
		heartbeat.WithLeader(wo.leader),
		heartbeat.WithLogger(wo.logger),
		heartbeat.WithPriority(wo.priority),
		heartbeat.WithSource("once"),
	)
	if err != nil {
		wo.logger.Errorf(ctx, "InitHeartBeat err:%v", err)
		cancel() // release the derived context on failure
		return nil, err
	}
	wo.heartbeat = heart
	wo.startDaemon()
	return wo, nil
}
// Close stops the cluster timer: it signals all loops via stopChan,
// releases the priority, leader and heartbeat resources, cancels the
// context and waits for every background goroutine to exit.
// The shutdown order below is deliberate; call Close exactly once —
// a second call would panic on the already-closed stopChan.
func (l *Once) Close() {
	close(l.stopChan)
	if l.usePriority && l.priority != nil {
		l.priority.Close()
	}
	if l.leader != nil {
		l.leader.Close()
	}
	if l.heartbeat != nil {
		l.heartbeat.Close()
	}
	l.cancel()
	l.wg.Wait()
}
// startDaemon launches the three background goroutines: the leader-side
// scheduler, the task executor, and the execution-info cleaner.
func (l *Once) startDaemon() {
	l.wg.Add(3)
	go l.scheduleTasks()
	go l.executeTasks()
	go l.cleanExecuteInfoLoop()
}
// cleanExecuteInfoLoop periodically prunes old execution reports; only
// the current leader performs the cleanup.
//
// Fix: wg.Done was called immediately (missing defer), which released
// the WaitGroup at startup and let Close return while this loop was
// still running.
func (l *Once) cleanExecuteInfoLoop() {
	defer l.wg.Done()
	ticker := time.NewTicker(time.Minute * 5)
	defer ticker.Stop()
	for {
		select {
		case <-l.stopChan:
			return
		case <-l.ctx.Done():
			return
		case <-ticker.C:
			if l.leader.IsLeader() {
				l.cleanExecuteInfo()
			}
		}
	}
}
// cleanExecuteInfo removes execution reports older than 15 minutes from
// the execution-info sorted set.
//
// Fix: the redis error was computed and silently discarded while the
// function unconditionally returned nil; it is now logged and returned.
func (l *Once) cleanExecuteInfo() error {
	cutoff := strconv.FormatInt(time.Now().Add(-15*time.Minute).UnixMilli(), 10)
	if err := l.redis.ZRemRangeByScore(l.ctx, l.executeInfoKey, "0", cutoff).Err(); err != nil {
		l.logger.Errorf(l.ctx, "cleanExecuteInfo err:%v", err)
		return err
	}
	return nil
}
// scheduleTasks is the leader loop: every 200ms the elected leader moves
// due tasks from the schedule zset into the ready list.
func (l *Once) scheduleTasks() {
	defer l.wg.Done()
	ticker := time.NewTicker(time.Millisecond * 200)
	defer ticker.Stop()
	for {
		select {
		case <-l.stopChan:
			return
		case <-l.ctx.Done():
			return
		case <-ticker.C:
			// With priority gating on, stale versions must not schedule.
			if l.usePriority && !l.priority.IsLatest(l.ctx) {
				continue
			}
			// Only the elected leader dispatches tasks.
			if !l.leader.IsLeader() {
				continue
			}
			l.batchGetTasks()
		}
	}
}
// executeTasks blocks on the ready list and hands each popped task to a
// worker goroutine. It backs off when this instance is not the latest
// version or when redis misbehaves.
//
// Fixes: the redis.Nil comparison now uses errors.Is (go-redis may wrap
// errors), and each processTask goroutine is tracked by the WaitGroup so
// Close waits for in-flight tasks instead of returning mid-execution.
func (l *Once) executeTasks() {
	defer l.wg.Done()
	for {
		if l.usePriority {
			if !l.priority.IsLatest(l.ctx) {
				time.Sleep(time.Second * 5)
				continue
			}
		}
		select {
		case <-l.stopChan:
			return
		case <-l.ctx.Done():
			return
		default:
			keys, err := l.redis.BLPop(l.ctx, time.Second*10, l.listKey).Result()
			if err != nil {
				if !errors.Is(err, redis.Nil) {
					l.logger.Errorf(l.ctx, "Failed to pop task: %v", err)
					// Redis error: sleep a bit before retrying.
					time.Sleep(time.Second * 5)
				}
				continue
			}
			// keys[0] is the list name, keys[1] the popped member.
			l.wg.Add(1)
			go func(member string) {
				defer l.wg.Done()
				l.processTask(member)
			}(keys[1])
		}
	}
}
// buildRedisKey joins the task type and id with the configured separator
// to form the member stored in redis.
func (l *Once) buildRedisKey(taskType OnceTaskType, taskId string) string {
	return string(taskType) + l.keySeparator + taskId
}
// parseRedisKey splits a redis member back into task type and task id.
//
// Fix: SplitN with a limit of 2 keeps any separator occurrences inside
// the task id intact; the previous plain Split truncated such ids at the
// first separator, so Get/Delete would then target the wrong key.
func (l *Once) parseRedisKey(key string) (OnceTaskType, string, error) {
	parts := strings.SplitN(key, l.keySeparator, 2)
	if len(parts) < 2 {
		return "", "", fmt.Errorf("invalid key format: %s", key)
	}
	return OnceTaskType(parts[0]), parts[1], nil
}
// Save enqueues a task, overwriting any existing task with the same
// type+id (inserting again means replacing).
// @param taskType   task category
// @param taskId     unique task identifier
// @param delayTime  how long to wait before execution (must be > 0)
// @param attachData payload handed back to the worker
func (l *Once) Save(taskType OnceTaskType, taskId string, delayTime time.Duration, attachData interface{}) error {
	// A fresh save always starts with a retry count of zero.
	return l.save(taskType, taskId, delayTime, attachData, 0)
}
// save writes the task payload and its schedule entry atomically.
// The payload lives under keyPrefix+redisKey with a TTL of delay+30min;
// the schedule zset scores by absolute execution time in milliseconds.
//
// Fixes: the json.Marshal error was silently discarded (a non-serializable
// attachData would have scheduled an empty payload); the receiver is
// renamed w->l for consistency with the rest of the file.
func (l *Once) save(taskType OnceTaskType, taskId string, delayTime time.Duration, attachData interface{}, retryCount int) error {
	if delayTime <= 0 {
		return fmt.Errorf("delay time must be positive")
	}
	redisKey := l.buildRedisKey(taskType, taskId)
	executeTime := time.Now().Add(delayTime)
	ed := extendData{
		Delay:      delayTime,
		Data:       attachData,
		RetryCount: retryCount,
	}
	b, err := json.Marshal(ed)
	if err != nil {
		l.logger.Errorf(l.ctx, "save marshal failed:%v", err)
		return err
	}
	// A transaction pipeline keeps payload and schedule entry consistent.
	pipe := l.redis.TxPipeline()
	dataExpire := delayTime + time.Minute*30
	pipe.SetEx(l.ctx, l.keyPrefix+redisKey, b, dataExpire)
	pipe.ZAdd(l.ctx, l.zsetKey, redis.Z{
		Score:  float64(executeTime.UnixMilli()),
		Member: redisKey,
	})
	if _, err := pipe.Exec(l.ctx); err != nil {
		l.logger.Errorf(l.ctx, "save task failed:%v", err)
		return err
	}
	return nil
}
// Create enqueues a task only if no task with the same type+id is already
// scheduled; an existing task makes it return an error instead of
// overwriting (contrast with Save).
func (l *Once) Create(taskType OnceTaskType, taskId string, delayTime time.Duration, attachData any) error {
	return l.create(taskType, taskId, delayTime, attachData, 0)
}
// create schedules the task unless an entry already exists in the zset,
// carrying the retry count through to the stored payload.
//
// Fix: the "not scheduled yet" path called the exported Save, which reset
// retryCount to 0 and defeated maxRetryCount for retried tasks; it now
// calls the internal save with the caller's retryCount.
func (l *Once) create(taskType OnceTaskType, taskId string, delayTime time.Duration, attachData any, retryCount int) error {
	if delayTime <= 0 {
		return fmt.Errorf("delay time must be positive")
	}
	redisKey := l.buildRedisKey(taskType, taskId)
	score, err := l.redis.ZScore(l.ctx, l.zsetKey, redisKey).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			// No schedule entry yet: insert, preserving the retry count.
			return l.save(taskType, taskId, delayTime, attachData, retryCount)
		}
		l.logger.Errorf(l.ctx, "redis.ZScore err:%v", err)
		return err
	}
	if score > 0 {
		return fmt.Errorf("task already exists")
	}
	return l.save(taskType, taskId, delayTime, attachData, retryCount)
}
// Delete removes a scheduled task: both its payload key and its entry in
// the schedule zset, atomically via a transaction pipeline.
//
// Fix: the payload is stored under keyPrefix+redisKey (see save and
// processTask), but Del was issued on the bare redisKey — the payload was
// never deleted and lingered until its TTL expired.
func (w *Once) Delete(taskType OnceTaskType, taskId string) error {
	redisKey := w.buildRedisKey(taskType, taskId)
	pipe := w.redis.TxPipeline()
	pipe.Del(w.ctx, w.keyPrefix+redisKey)
	pipe.ZRem(w.ctx, w.zsetKey, redisKey)
	_, err := pipe.Exec(w.ctx)
	if err != nil {
		w.logger.Errorf(w.ctx, "delete task failed:%v", err)
		return err
	}
	return nil
}
// Get is a placeholder for looking up a scheduled task by type and id.
func (l *Once) Get(taskType OnceTaskType, taskId string) {
	// TODO: not implemented yet — currently a no-op.
}
// batchGetTasks (leader only) atomically moves up to batchSize due tasks
// from the schedule zset into the ready list via a Lua script, so no two
// schedulers can move the same task.
//
// Fix: the redis.Nil comparison now uses errors.Is, since go-redis errors
// may be wrapped.
func (l *Once) batchGetTasks() {
	script := `
	local tasks = redis.call('zrangebyscore', KEYS[1], 0, ARGV[1], 'LIMIT', 0, ARGV[2])
	if #tasks == 0 then return 0 end
	for i, task in ipairs(tasks) do
	redis.call('zrem', KEYS[1], task)
	redis.call('lpush', KEYS[2], task)
	end
	return #tasks
	`
	result, err := l.redis.Eval(
		l.ctx,
		script,
		[]string{l.zsetKey, l.listKey},
		time.Now().UnixMilli(),
		l.batchSize,
	).Result()
	if err != nil && !errors.Is(err, redis.Nil) {
		l.logger.Errorf(l.ctx, "batch get tasks failed: %s", err.Error())
		return
	}
	if count, ok := result.(int64); ok && count > 0 {
		l.logger.Infof(l.ctx, "moved %d tasks to ready queue", count)
	}
}
// processTask executes one due task popped from the ready list: it takes
// a global lock so the task runs at most once cluster-wide, reports the
// execution, invokes the worker, then either deletes the task (done) or
// re-queues it (retry requested).
//
// Fixes: the panic-recover is registered up front so a panic anywhere in
// this function is caught (it previously covered only the worker call and
// later); the json.Unmarshal error is no longer silently ignored; the
// duplicated "processTask delete errprocessTask delete err" log text is
// corrected.
func (l *Once) processTask(key string) {
	begin := time.Now()
	ctx, cancel := context.WithTimeout(l.ctx, l.timeout)
	defer cancel()
	u, _ := uuid.NewV7()
	ctx = context.WithValue(ctx, "trace_id", u.String())
	defer func() {
		if err := recover(); err != nil {
			l.logger.Errorf(ctx, "processTask panic:%s stack:%s", err, string(debug.Stack()))
		}
	}()
	l.logger.Infof(ctx, "processTask start key:%s", key)
	taskType, taskId, err := l.parseRedisKey(key)
	if err != nil {
		l.logger.Errorf(ctx, "processTask parseRedisKey:%v key:%s", err, key)
		return
	}
	// Global lock: guarantees the task is executed by only one instance.
	lock, err := lockx.NewGlobalLock(ctx, l.redis, l.globalLockPrefix+key)
	if err != nil {
		l.logger.Errorf(ctx, "processTask timer:获取锁失败:%s", taskId)
		return
	}
	if b, err := lock.Lock(); !b {
		l.logger.Errorf(ctx, "processTask timer:获取锁失败:%s %+v", taskId, err)
		return
	}
	defer lock.Unlock()
	// Report the execution (who ran what, and when).
	executeVal := fmt.Sprintf("tid:%s|insId:%s|uuid:%s|time:%s", key, l.instanceId, u.String(), begin.Format(time.RFC3339Nano))
	l.redis.ZAdd(ctx, l.executeInfoKey, redis.Z{
		Score:  float64(begin.UnixMilli()),
		Member: executeVal,
	})
	// Load the task payload.
	redisKey := l.keyPrefix + l.buildRedisKey(taskType, taskId)
	str, err := l.redis.Get(ctx, redisKey).Result()
	if err != nil {
		l.logger.Errorf(ctx, "processTask redis.Get key:%s err:%s", key, err)
		return
	}
	ed := extendData{}
	if err := json.Unmarshal([]byte(str), &ed); err != nil {
		// Corrupt payload: log it, then run the worker with zero-value
		// data (best effort, matching the previous silent behavior).
		l.logger.Errorf(ctx, "processTask json.Unmarshal key:%s err:%v", key, err)
	}
	resp := l.worker.Worker(ctx, taskType, taskId, ed.Data)
	l.logger.Infof(ctx, "processTask exec key:%s resp:%+v data:%s", key, resp, str)
	if resp == nil || !resp.Retry {
		// Finished: remove the task.
		l.logger.Infof(ctx, "processTask delete key:%s", key)
		if err := l.Delete(taskType, taskId); err != nil {
			l.logger.Errorf(ctx, "processTask delete err:%v", err)
		}
		return
	}
	// Retry requested: push the task back into the queue.
	if err := l.handleRetry(ctx, taskType, taskId, &ed, resp); err != nil {
		l.logger.Errorf(ctx, "processTask handleRetry err:%v", err)
	}
}
// handleRetry re-queues a task the worker asked to retry, bumping the
// retry counter and honoring the configured maximum.
func (l *Once) handleRetry(ctx context.Context, taskType OnceTaskType, taskId string,
	ed *extendData, resp *OnceWorkerResp) error {
	// Enforce the retry limit; maxRetryCount == 0 means unlimited.
	ed.RetryCount++
	if l.maxRetryCount > 0 && ed.RetryCount > l.maxRetryCount {
		l.logger.Infof(ctx, "handleRetry task exceeded retry limit: %s %s %d", taskType, taskId, l.maxRetryCount)
		return nil
	}
	// The worker may override the delay and the payload for the next run.
	if resp.DelayTime > 0 {
		ed.Delay = resp.DelayTime
	}
	if resp.AttachData != nil {
		ed.Data = resp.AttachData
	}
	l.logger.Infof(ctx, "handleRetry retrying task: %s:%s, retry count: %d",
		taskType, taskId, ed.RetryCount)
	// Non-overwriting insert: a task that was already rescheduled is left alone.
	return l.create(taskType, taskId, ed.Delay, ed.Data, ed.RetryCount)
}
+7
View File
@@ -0,0 +1,7 @@
package timerx
import "testing"
// Test2 is an empty placeholder test; it currently exercises nothing.
func Test2(t *testing.T) {
}
+86
View File
@@ -0,0 +1,86 @@
package timerx
import (
"time"
"github.com/yuninks/timerx/logger"
)
// Options holds the tunable configuration for timerx components.
type Options struct {
	logger        logger.Logger  // log sink used by all components
	location      *time.Location // timezone used for schedule calculations
	timeout       time.Duration  // maximum wall-clock time a single task may run
	usePriority   bool           // whether version-priority gating is enabled
	priorityVal   int64          // this instance's priority value (higher wins)
	batchSize     int            // due tasks moved per scheduling pass
	maxRetryCount int            // max retries per task; 0 means unlimited
}
// defaultOptions returns the baseline configuration: local timezone, a
// one-hour task timeout, batches of 100, priority gating off, unlimited
// retries.
func defaultOptions() Options {
	o := Options{
		logger:        logger.NewLogger(),
		location:      time.Local,
		timeout:       time.Hour,
		usePriority:   false,
		priorityVal:   0,
		batchSize:     100,
		maxRetryCount: 0,
	}
	return o
}
type Option func(*Options)
// newOptions applies the given Option funcs on top of the defaults.
func newOptions(opts ...Option) Options {
	o := defaultOptions()
	for _, apply := range opts {
		apply(&o)
	}
	return o
}
// WithLogger overrides the default logger.
func WithLogger(l logger.Logger) Option {
	return func(o *Options) { o.logger = l }
}
// WithLocation sets the timezone used for schedule calculations.
func WithLocation(loc *time.Location) Option {
	return func(o *Options) { o.location = loc }
}
// WithTimeout caps how long a single task execution may run.
func WithTimeout(timeout time.Duration) Option {
	return func(o *Options) { o.timeout = timeout }
}
// WithPriority enables version-priority gating and sets this instance's
// priority value (higher values win).
func WithPriority(val int64) Option {
	return func(o *Options) {
		o.usePriority = true
		o.priorityVal = val
	}
}
// WithBatchSize sets how many due tasks are moved per scheduling pass;
// values of 1 or below are clamped to 1.
func WithBatchSize(n int) Option {
	return func(o *Options) {
		if n <= 1 {
			n = 1
		}
		o.batchSize = n
	}
}
// WithMaxRetryCount limits retries per task; negative values are treated
// as 0, which means unlimited.
func WithMaxRetryCount(n int) Option {
	return func(o *Options) {
		if n < 0 {
			n = 0
		}
		o.maxRetryCount = n
	}
}
+70
View File
@@ -0,0 +1,70 @@
package priority
import (
"time"
"github.com/google/uuid"
"github.com/yuninks/timerx/logger"
)
// Options configures a Priority tracker.
type Options struct {
	getInterval    time.Duration // how often the global max priority is polled
	updateInterval time.Duration // how often this instance publishes its priority
	expireTime     time.Duration // TTL of the published priority key
	logger         logger.Logger // log sink
	source         string        // owning service name; becomes part of the redis key
	instanceId     string        // unique id of this process instance
}
// defaultOptions returns the baseline priority configuration: poll every
// 2s, publish every 4s, expire after 8s, with a fresh UUIDv7 instance id.
// The source field stays empty unless WithSource is applied.
func defaultOptions() Options {
	id, _ := uuid.NewV7()
	return Options{
		getInterval:    time.Second * 2,
		updateInterval: time.Second * 4,
		expireTime:     time.Second * 8,
		logger:         logger.NewLogger(),
		instanceId:     id.String(),
	}
}
type Option func(*Options)
// newOptions applies the given Option funcs on top of the defaults.
func newOptions(opts ...Option) Options {
	o := defaultOptions()
	for _, apply := range opts {
		apply(&o)
	}
	return o
}
// WithLogger overrides the default logger.
func WithLogger(l logger.Logger) Option {
	return func(o *Options) { o.logger = l }
}
// WithUpdateInterval sets how often this instance publishes its priority.
// The poll interval is derived as a third of it and the key TTL covers
// two missed updates plus one second of slack.
//
// Fix: the previous guard used d.Abs() < time.Second, which let negative
// durations with magnitude >= 1s through; a negative interval would later
// panic in time.NewTicker. Anything below one second (including all
// negative values) now falls back to the 5s default.
func WithUpdateInterval(d time.Duration) Option {
	if d < time.Second {
		d = time.Second * 5
	}
	return func(o *Options) {
		o.updateInterval = d
		o.expireTime = d*2 + time.Second
		o.getInterval = d / 3
	}
}
// WithInstanceId overrides the auto-generated instance id.
func WithInstanceId(id string) Option {
	return func(o *Options) { o.instanceId = id }
}
// WithSource sets the owning service name, which becomes part of the
// redis key so different services don't collide.
func WithSource(source string) Option {
	return func(o *Options) { o.source = source }
}
+256
View File
@@ -0,0 +1,256 @@
package priority
import (
"context"
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/redis/go-redis/v9"
"github.com/yuninks/timerx/logger"
)
// Priority decides, in a multi-version deployment, whether this instance
// runs the newest version: every instance publishes its numeric priority
// to a shared redis key and polls the global maximum.
type Priority struct {
	ctx         context.Context       // context for the background loops
	cancel      context.CancelFunc    // cancels ctx in Close
	priority    int64                 // this instance's priority value
	redis       redis.UniversalClient // redis client
	redisKey    string                // key holding the global max priority
	logger      logger.Logger         // log sink
	expireTime  time.Duration         // TTL applied to the published priority
	setInterval time.Duration         // how often to publish (set)
	getInterval time.Duration         // how often to poll (get)
	wg          sync.WaitGroup        // tracks the two background loops
	isLatest    bool                  // cached "am I the newest version" answer
	latestMux   sync.RWMutex          // guards isLatest
	instanceId  string                // unique id of this process instance
}
// InitPriority creates a Priority tracker and starts its background
// publish/poll loops before returning. keyPrefix namespaces the redis
// key; priority is this instance's value (higher wins).
func InitPriority(ctx context.Context, re redis.UniversalClient, keyPrefix string, priority int64, opts ...Option) (*Priority, error) {
	if re == nil {
		return nil, errors.New("redis is nil")
	}
	opt := newOptions(opts...)
	ctx, cancel := context.WithCancel(ctx)
	p := &Priority{
		ctx:         ctx,
		cancel:      cancel,
		priority:    priority,
		redis:       re,
		logger:      opt.logger,
		redisKey:    "timer:priority_" + opt.source + keyPrefix,
		expireTime:  opt.expireTime,
		setInterval: opt.updateInterval,
		getInterval: opt.getInterval,
		instanceId:  opt.instanceId,
	}
	p.startDaemon()
	return p, nil
}
// Close stops the background loops and waits for them to exit.
func (p *Priority) Close() {
	if p.cancel != nil {
		p.cancel()
	}
	p.wg.Wait()
}
// startDaemon launches the two background loops: one publishes this
// instance's priority, the other polls for the global maximum.
func (p *Priority) startDaemon() {
	p.wg.Add(2)
	go p.runUpdateLoop()
	go p.getLatestLoop()
}
// runUpdateLoop periodically publishes this instance's priority so peers
// can see it; it runs until the context is cancelled.
func (p *Priority) runUpdateLoop() {
	defer p.wg.Done()
	// Publish once right away instead of waiting a full tick.
	if _, err := p.setPriority(); err != nil {
		p.logger.Errorf(p.ctx, "Initial priority set failed: %v", err)
	}
	ticker := time.NewTicker(p.setInterval)
	defer ticker.Stop()
	for {
		select {
		case <-p.ctx.Done():
			return
		case <-ticker.C:
			if _, err := p.setPriority(); err != nil {
				p.logger.Errorf(p.ctx, "Priority update failed: %v", err)
			}
		}
	}
}
// getLatestLoop periodically refreshes the cached isLatest answer by
// polling the global maximum priority; it runs until the context is
// cancelled, polling once immediately so IsLatest is meaningful right
// after startup.
//
// Fix: the error logs said "Priority update failed" (copy-pasted from
// runUpdateLoop), misattributing getLatest errors to the publish side.
func (l *Priority) getLatestLoop() {
	defer l.wg.Done()
	if err := l.getLatest(); err != nil {
		l.logger.Errorf(l.ctx, "Priority getLatest failed: %v", err)
	}
	ticker := time.NewTicker(l.getInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := l.getLatest(); err != nil {
				l.logger.Errorf(l.ctx, "Priority getLatest failed: %v", err)
			}
		case <-l.ctx.Done():
			return
		}
	}
}
// IsLatest reports whether this instance currently holds the highest
// known priority. The ctx parameter is accepted for call-site symmetry
// but is not used by the cached lookup.
func (p *Priority) IsLatest(ctx context.Context) bool {
	p.latestMux.RLock()
	latest := p.isLatest
	p.latestMux.RUnlock()
	return latest
}
// setPriority publishes this instance's priority via a Lua script that
// only ever raises (never lowers) the stored value. The script returns a
// single-element array with one of these status tokens:
//   "SET"    - key was absent, value written
//   "RESET"  - stored value was lower, overwritten
//   "UPDATE" - equal value, TTL refreshed
//   "NOAUCH" - stored value is higher, nothing done (sic; kept as-is
//              since it is a runtime token — TODO confirm intended name)
// NOTE(review): expireTime is passed as float64 seconds; redis EXPIRE /
// SET EX take an integer — presumably expireTime is always whole seconds
// here; verify before configuring sub-second TTLs.
func (p *Priority) setPriority() (string, error) {
	script := `
	-- KEYS[1] 是全局优先级的key
	local priorityKey = KEYS[1]
	-- ARGV[1] 是新的优先级
	local priority = ARGV[1]
	-- ARGV[2] 是过期时间
	local expireTime = ARGV[2]
	-- 校验参数完整性
	if not priorityKey or not priority or not expireTime then
	return redis.error_reply("Missing required arguments")
	end
	-- 尝试将字符串转换为数字
	local currentPriority = redis.call('get', priorityKey)
	local currentPriorityNum = tonumber(currentPriority)
	local newPriorityNum = tonumber(priority)
	if not currentPriority then
	-- 如果当前优先级不存在则设置新优先级并设置TTL
	redis.call('set', priorityKey, priority, 'ex', expireTime)
	return { "SET" }
	elseif currentPriorityNum < newPriorityNum then
	-- 如果当前优先级小于新优先级则更新优先级并更新TTL
	redis.call('set', priorityKey, priority, 'ex', expireTime)
	return { "RESET" }
	elseif currentPriorityNum == newPriorityNum then
	-- 优先级相同则更新TTL
	redis.call('expire', priorityKey, expireTime)
	return { "UPDATE" }
	else
	-- 如果当前优先级大于新优先级则不更新
	return { "NOAUCH" }
	end
	`
	newPriorityStr := strconv.FormatInt(p.priority, 10)
	result, err := p.redis.Eval(p.ctx, script, []string{p.redisKey}, newPriorityStr, p.expireTime.Seconds()).Result()
	// p.logger.Infof(p.ctx, "Priority update result:%+v err:%+v", result, err)
	if err != nil {
		p.logger.Errorf(p.ctx, "Priority update err:%s", err.Error())
		return "", err
	}
	// The script returns a single-element array; unwrap the status token.
	if resultMap, ok := result.([]interface{}); ok && len(resultMap) == 1 {
		resultStr := resultMap[0].(string)
		return resultStr, nil
	}
	return "", fmt.Errorf("script error: %v", result)
}
// getLatest refreshes the cached isLatest flag by comparing this
// instance's priority against the global maximum stored in redis.
func (l *Priority) getLatest() error {
	currentPriority, err := l.getCurrentPriority()
	l.logger.Infof(l.ctx, "Priority getLatest currentPriority:%d l.priority:%d err:%+v", currentPriority, l.priority, err)
	if err != nil {
		l.logger.Errorf(l.ctx, "Priority getLatest getCurrentPriority err:%s", err.Error())
		return err
	}
	// We are the latest unless someone published a strictly higher value.
	latest := currentPriority <= l.priority
	l.latestMux.Lock()
	l.isLatest = latest
	l.latestMux.Unlock()
	return nil
}
// getCurrentPriority reads the global maximum priority from redis,
// returning 0 when no instance has published one yet.
//
// Fix: the redis.Nil comparison now uses errors.Is, since go-redis
// errors may be wrapped; the ParseInt result is returned directly.
func (p *Priority) getCurrentPriority() (int64, error) {
	result, err := p.redis.Get(p.ctx, p.redisKey).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			// Missing key: nobody has published a priority yet.
			return 0, nil
		}
		return 0, err
	}
	return strconv.ParseInt(result, 10, 64)
}
// ForceRefresh publishes this instance's priority and re-reads the global
// maximum immediately, bypassing the periodic loops; intended for urgent
// situations where waiting for the next tick is not acceptable.
func (p *Priority) ForceRefresh() error {
	if _, err := p.setPriority(); err != nil {
		p.logger.Errorf(p.ctx, "Priority ForceRefresh setPriority err:%s", err.Error())
		return err
	}
	if err := p.getLatest(); err != nil {
		p.logger.Errorf(p.ctx, "Priority ForceRefresh getLatest err:%s", err.Error())
		return err
	}
	return nil
}
// GetCurrentMaxPriority returns the highest priority currently published
// in redis (0 if none). The ctx parameter is currently unused — the
// lookup runs on the Priority's own context.
func (p *Priority) GetCurrentMaxPriority(ctx context.Context) (int64, error) {
	return p.getCurrentPriority()
}
+208
View File
@@ -0,0 +1,208 @@
package priority
import (
"context"
"fmt"
"sync"
"testing"
"time"
"github.com/redis/go-redis/v9"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// getRedis builds the redis client used by the tests in this file;
// connection parameters are hard-coded for a local test instance.
//
// Fix: the `client == nil` check was dead code — redis.NewClient never
// returns nil; connectivity problems surface on the first command.
func getRedis() *redis.Client {
	return redis.NewClient(&redis.Options{
		Addr:     "127.0.0.1" + ":" + "6379",
		Password: "123456",
		DB:       0, // use default DB
	})
}
// TestPriority runs two trackers against a live redis: a low-priority
// one (0) and, five seconds later, a higher one (10), printing what each
// believes about being the latest so the takeover can be observed.
func TestPriority(t *testing.T) {
	re := getRedis()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	fmt.Println("ff")
	go func() {
		time.Sleep(time.Second * 5)
		innerCtx, innerCancel := context.WithCancel(ctx)
		defer innerCancel()
		high, _ := InitPriority(innerCtx, re, "test", 10, WithUpdateInterval(time.Second*1))
		for i := 0; i < 10; i++ {
			fmt.Println("cc:", high.IsLatest(innerCtx))
			time.Sleep(time.Second)
		}
	}()
	low, _ := InitPriority(ctx, re, "test", 0, WithUpdateInterval(time.Second*1))
	for i := 0; i < 25; i++ {
		fmt.Println("bb:", low.IsLatest(ctx))
		time.Sleep(time.Second)
	}
}
// MockRedisClient overrides the redis.UniversalClient methods used by
// Priority with testify mocks.
// NOTE(review): not referenced by any test in this file — confirm it is
// still needed.
type MockRedisClient struct {
	redis.UniversalClient
	mock.Mock
}
// Eval records the call with testify and returns the preconfigured *redis.Cmd.
func (m *MockRedisClient) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *redis.Cmd {
	arguments := m.Called(ctx, script, keys, args)
	return arguments.Get(0).(*redis.Cmd)
}
// Get records the call with testify and returns the preconfigured *redis.StringCmd.
func (m *MockRedisClient) Get(ctx context.Context, key string) *redis.StringCmd {
	arguments := m.Called(ctx, key)
	return arguments.Get(0).(*redis.StringCmd)
}
// Set records the call with testify and returns the preconfigured *redis.StatusCmd.
func (m *MockRedisClient) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd {
	arguments := m.Called(ctx, key, value, expiration)
	return arguments.Get(0).(*redis.StatusCmd)
}
// TestInitPriority checks that InitPriority returns a non-nil instance
// carrying the requested priority value (requires a local Redis).
func TestInitPriority(t *testing.T) {
	ctx := context.Background()
	// Normal initialisation.
	priority, _ := InitPriority(ctx, getRedis(), "test", 100)
	assert.NotNil(t, priority)
	assert.Equal(t, int64(100), priority.priority)
}
// TestSetPriorityScenarios drives setPriority through the four expected
// Redis states (requires a local Redis instance).
// NOTE(review): expectedStatus/expectedValue are currently unused because
// the status assertion at the bottom is commented out — either re-enable
// it or drop the fields.
func TestSetPriorityScenarios(t *testing.T) {
	testCases := []struct {
		name           string
		currentRedis   interface{}
		newPriority    int64
		expectedStatus string
		expectedValue  int64
	}{
		{
			name:           "首次设置优先级",
			currentRedis:   nil, // key absent in Redis
			newPriority:    100,
			expectedStatus: "SET",
			expectedValue:  100,
		},
		{
			name:           "更新更高优先级",
			currentRedis:   "50", // lower priority already stored
			newPriority:    100,
			expectedStatus: "UPDATED",
			expectedValue:  100,
		},
		{
			name:           "保持相同优先级",
			currentRedis:   "100", // same priority already stored
			newPriority:    100,
			expectedStatus: "EXTENDED",
			expectedValue:  100,
		},
		{
			name:           "忽略较低优先级",
			currentRedis:   "150", // higher priority already stored
			newPriority:    100,
			expectedStatus: "IGNORED",
			expectedValue:  150,
		},
	}
	ctx := context.Background()
	redisConn := getRedis()
	// Start from a clean key.
	redisConn.Del(ctx, "timer:priority_test22")
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			priority, _ := InitPriority(ctx, redisConn, "test22", tc.newPriority)
			defer priority.Close()
			time.Sleep(time.Second * 1)
			_, err := priority.setPriority()
			assert.NoError(t, err)
			// assert.Equal(t, tc.expectedStatus, sta)
		})
	}
}
// TestConcurrentAccess checks that IsLatest is safe and consistent under
// 100 concurrent readers (requires a local Redis instance).
func TestConcurrentAccess(t *testing.T) {
	ctx := context.Background()
	priority, _ := InitPriority(ctx, getRedis(), "testacc", 100)
	// FIX: release the instance's background resources when the test ends;
	// the sibling tests pair InitPriority with Close but this one leaked.
	defer priority.Close()
	time.Sleep(time.Second * 1)
	// Read IsLatest concurrently.
	var wg sync.WaitGroup
	results := make(chan bool, 100)
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			results <- priority.IsLatest(ctx)
		}()
	}
	wg.Wait()
	close(results)
	// All results must agree with the first one.
	firstResult := <-results
	for result := range results {
		t.Log(result)
		assert.Equal(t, firstResult, result)
	}
}
// TestErrorScenarios exercises failure paths of setPriority/getCurrentPriority.
// NOTE(review): both subtests look environment-dependent — the first uses a
// working local client yet asserts setPriority fails, and the second only
// fails if the value stored at the key is non-numeric. Confirm the intended
// fixtures; as written these may pass or fail depending on Redis state.
func TestErrorScenarios(t *testing.T) {
	t.Run("Redis连接失败", func(t *testing.T) {
		ctx := context.Background()
		priority, _ := InitPriority(ctx, getRedis(), "test", 100)
		_, err := priority.setPriority()
		assert.Error(t, err)
	})
	t.Run("Redis返回值解析错误", func(t *testing.T) {
		ctx := context.Background()
		// Bare Priority (no logger / update loop) to hit getCurrentPriority directly.
		priority := &Priority{
			redis:    getRedis(),
			redisKey: "timer:priority_test",
			priority: 100,
			ctx:      ctx,
		}
		_, err := priority.getCurrentPriority()
		assert.Error(t, err)
	})
}
+55
View File
@@ -0,0 +1,55 @@
package priority
import (
"errors"
"math"
"strconv"
"strings"
)
var (
	ErrVersionFormat = errors.New("version format error")
)

// PriorityByVersion converts a dotted version string (e.g. "v1.2.3") into a
// single comparable priority number. Each of up to five dot-separated
// segments must be an integer in [0, 999]; segment k is weighted by
// 10^((4-k)*3), so "1.2.3" -> 1002003000000. A leading 'v' or 'V' is
// stripped before parsing.
func PriorityByVersion(version string) (priority int64, err error) {
	if version == "" {
		return 0, ErrVersionFormat
	}
	// Drop an optional v/V prefix.
	if version[0] == 'v' || version[0] == 'V' {
		version = version[1:]
	}
	parts := strings.Split(version, ".")
	if len(parts) > 5 {
		// At most five segments are supported.
		return 0, ErrVersionFormat
	}
	// Accumulate each segment into its own 3-digit decimal slot.
	var total float64
	for pos, part := range parts {
		if part == "" {
			return 0, ErrVersionFormat
		}
		n, parseErr := strconv.ParseInt(part, 10, 64)
		if parseErr != nil || n < 0 || n > 999 {
			// Every segment must be a number in [0, 999].
			return 0, ErrVersionFormat
		}
		// Segment pos occupies the (4-pos)-th slot of three decimal digits.
		total += float64(n) * math.Pow10((4-pos)*3)
	}
	return int64(total), nil
}
+109
View File
@@ -0,0 +1,109 @@
package priority_test
import (
"testing"
"github.com/yuninks/timerx/priority"
)
// TestVersionToPriority is a table-driven test for priority.PriorityByVersion
// covering prefix stripping, slot weighting, and every rejection path
// (empty input, bad digits, out-of-range segments, too many segments,
// empty segments).
func TestVersionToPriority(t *testing.T) {
	tests := []struct {
		name    string
		version string
		want    int64
		wantErr bool
	}{
		{
			name:    "standard version",
			version: "1.2.3",
			want:    1002003000000,
			wantErr: false,
		},
		{
			name:    "version with v prefix",
			version: "v1.2.3",
			want:    1002003000000,
			wantErr: false,
		},
		{
			name:    "version with V prefix",
			version: "V1.2.3",
			want:    1002003000000,
			wantErr: false,
		},
		{
			name:    "single digit version",
			version: "5",
			want:    5000000000000,
			wantErr: false,
		},
		{
			name:    "max digits version",
			version: "999.999.999.999.999",
			want:    999999999999999,
			wantErr: false,
		},
		{
			name:    "empty version",
			version: "",
			want:    0,
			wantErr: true,
		},
		{
			name:    "invalid character",
			version: "1.a.3",
			want:    0,
			wantErr: true,
		},
		{
			name:    "zero version part",
			version: "1.0.3",
			want:    1000003000000,
			wantErr: false,
		},
		{
			name:    "zero version part 2",
			version: "1.0.3.",
			want:    0,
			wantErr: true,
		},
		{
			name:    "negative version part",
			version: "1.-2.3",
			want:    0,
			wantErr: true,
		},
		{
			name:    "version part too large",
			version: "1.1000.3",
			want:    0,
			wantErr: true,
		},
		{
			name:    "too many parts",
			version: "1.2.3.4.5.6",
			want:    0,
			wantErr: true,
		},
		{
			name:    "empty part",
			version: "1..3",
			want:    0,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := priority.PriorityByVersion(tt.version)
			if (err != nil) != tt.wantErr {
				t.Errorf("VersionToPriority() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("VersionToPriority() = %v, want %v", got, tt.want)
			}
		})
	}
}
+31 -9
View File
@@ -1,14 +1,36 @@
开发目标
# 功能支持
1. 支持本地任务
2. 支持集群任务
3. 支持单次任务
# 功能说明
# 功能实现
1. 集群间任务调度和任务的唯一依赖于redis进行实现
# 缺陷
1. 针对月的任务,需要注意日期有效性,且在月末的最后一天,需要考虑月末的最后一天的下一个任务执行时间
1. 集群部署时,存在新旧的代码混合问题,任务调度可能存在问题(需要根据实际需要进行版本上线/下线操作)
## 方案一
1. 启动的时候定时向redis注册任务项
2. 每次计算执行时间的时候根据注册的任务项进行任务计算
3. 注册任务项需要有下线机制,避免能运行它的节点下线了它还被执行
现在有按系统时间整点运行任务的需求,这比简单的定时重复更复杂:不但要按时执行,还不能重复执行,因此需要全局记录任务的执行状态;又由于任务的间隔时间不确定,该执行状态的保存周期也是变化的
# 待实现
- [ ] 允许执行完重置任务倒计时
1. 支持单机定时
2. 支持集群定时
3. 支持间隔定时
4. 支持固定时间
5. 支持全局唯一
设计思想
1. 不再单独区分单机还是集群,统一按集群处理,单机只是集群里面只有一个节点
2. 计算和执行分离,计算只负责计算,执行只负责执行,计算和执行之间通过消息队列进行通信
+417 -158
View File
@@ -1,214 +1,473 @@
package timer
package timerx
// 作者:黄新云
import (
"context"
"errors"
"fmt"
"log"
"runtime/debug"
"sync"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/yuninks/timerx/logger"
)
// 定时器
// 原理:每毫秒的时间触发
// 简单定时器
// 1. 这个定时器的作用范围是本机
// 2. 适用简单的时间间隔定时任务
// uuid -> timerStr
var timerMap = make(map[string]*timerStr)
var timerMapMux sync.Mutex
var timerCount int // 当前定时数目
var onceLimit sync.Once // 实现单例
var nextTime = time.Now() // 下一次执行的时间
type ContextValueKey string // 定义context 传递的Key类型
const (
extendParamKey ContextValueKey = "extend_param"
)
type single struct{}
var sin *single = nil
type Single struct {
ctx context.Context
cancel context.CancelFunc
logger logger.Logger
location *time.Location
nextTime time.Time
nextTimeMux sync.RWMutex
wg sync.WaitGroup
workerList sync.Map
timerIndex int64
stopChan chan struct{}
hasRun sync.Map
timeout time.Duration
}
// 定时器类
func InitSingle(ctx context.Context) *single {
onceLimit.Do(func() {
sin = &single{}
// @param ctx context.Context 上下文
// @param opts ...Option 配置项
func InitSingle(ctx context.Context, opts ...Option) *Single {
op := newOptions(opts...)
ctx, cancel := context.WithCancel(ctx)
timer := time.NewTicker(time.Millisecond * 200)
go func(ctx context.Context) {
Loop:
for {
select {
case t := <-timer.C:
if t.Before(nextTime) {
// 当前时间小于下次发送时间:跳过
continue
}
// 迭代定时器
sin.iteratorTimer(ctx, t)
// fmt.Println("timer: 执行")
case <-ctx.Done():
// 跳出循环
break Loop
}
}
log.Println("timer: initend")
}(ctx)
})
sin := &Single{
ctx: ctx,
cancel: cancel,
logger: op.logger,
location: op.location,
nextTime: time.Now(),
stopChan: make(chan struct{}),
timeout: op.timeout,
}
sin.startDaemon()
return sin
}
// 间隔定时器
func (s *single) AddTimer(space time.Duration, call callback, extend ExtendParams) (int, error) {
timerMapMux.Lock()
defer timerMapMux.Unlock()
func (l *Single) startDaemon() {
if space != space.Abs() {
return 0, errors.New("space must be positive")
}
l.wg.Add(1)
go l.timerLoop()
timerCount += 1
l.wg.Add(1)
go l.cleanupLoop()
nowTime := time.Now()
t := timerStr{
Callback: call,
BeginTime: nowTime,
NextTime: nowTime, // nowTime.Add(space), // 添加任务的时候就执行一次
SpaceTime: space,
CanRunning: make(chan struct{}, 1),
UniqueKey: "",
Extend: extend,
}
timerMap[fmt.Sprintf("%d", timerCount)] = &t
if t.NextTime.Before(nextTime) {
// 本条规则下次需要发送的时间小于系统下次发送时间:替换
nextTime = t.NextTime
}
return timerCount, nil
}
// 添加需要定时的规则
func (s *single) AddToTimer(space time.Duration, call callback) int {
extend := ExtendParams{}
count, _ := s.AddTimer(space, call, extend)
// Stop shuts down the timer: closes the stop channel, cancels the internal
// context, and blocks until the timer and cleanup goroutines have exited.
// NOTE(review): a second Stop call would panic on the double close of
// l.stopChan — confirm callers only stop once, or guard with sync.Once.
func (l *Single) Stop() {
	close(l.stopChan)
	if l.cancel != nil {
		l.cancel()
	}
	l.wg.Wait()
	l.logger.Infof(l.ctx, "timer single: stopped")
}
// TaskCount returns the number of currently registered tasks by walking the
// worker list (O(n) over the sync.Map).
func (l *Single) TaskCount() int {
	count := 0
	l.workerList.Range(func(k, v interface{}) bool {
		count++
		return true
	})
	return count
}
func (s *single) DelToTimer(index string) {
timerMapMux.Lock()
defer timerMapMux.Unlock()
delete(timerMap, index)
// MaxIndex returns one past the highest timer index handed out so far
// (indices start at 1; see addJob's atomic.AddInt64).
func (l *Single) MaxIndex() int64 {
	return atomic.LoadInt64(&l.timerIndex) + 1
}
// timerLoop is the scheduler's main loop: every 100ms it compares the
// cached global next-fire time with the tick time and, once due, runs one
// iteration over the registered jobs. Exits on context cancel or Stop.
func (l *Single) timerLoop() {
	defer l.wg.Done()
	ticker := time.NewTicker(100 * time.Millisecond) // 100ms resolution
	defer ticker.Stop()
	for {
		select {
		case t := <-ticker.C:
			// Snapshot the next due time under the read lock.
			l.nextTimeMux.RLock()
			nextTime := l.nextTime
			l.nextTimeMux.RUnlock()
			if t.Before(nextTime) {
				// Nothing due yet.
				continue
			}
			l.iterator(l.ctx)
		case <-l.ctx.Done():
			l.logger.Infof(l.ctx, "timer: context cancelled, stopping timer loop")
			return
		case <-l.stopChan:
			l.logger.Infof(l.ctx, "timer: received stop signal, stopping timer loop")
			return
		}
	}
}
// cleanupLoop prunes the de-duplication map once per minute, deleting
// entries recorded more than 2 minutes ago (and any entry whose value is
// not a time.Time). Exits on context cancel or Stop.
func (s *Single) cleanupLoop() {
	defer s.wg.Done()
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			now := time.Now()
			cleanupTime := now.Add(-2 * time.Minute) // drop records older than 2 minutes
			s.hasRun.Range(func(k, v any) bool {
				t, ok := v.(time.Time)
				if !ok || t.Before(cleanupTime) {
					s.hasRun.Delete(k)
				}
				return true
			})
		case <-s.ctx.Done():
			s.logger.Infof(s.ctx, "timer: context cancelled, stopping cleanup loop")
			return
		case <-s.stopChan:
			s.logger.Infof(s.ctx, "timer: received stop signal, stopping cleanup loop")
			return
		}
	}
}
// EveryMonth schedules a job to run once a month.
// @param ctx        context used for validation logging
// @param taskId     unique task id
// @param day        day of month (addJob accepts 0-31; confirm whether 0 is intended)
// @param hour       hour of day
// @param minute     minute
// @param second     second
// @param callback   function invoked when due
// @param extendData opaque data handed back to the callback
// @return timer index and validation error
func (c *Single) EveryMonth(ctx context.Context, taskId string, day int, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) (int64, error) {
	// nowTime := time.Now().In(c.location)
	jobData := JobData{
		JobType: JobTypeEveryMonth,
		TaskId:  taskId,
		// CreateTime: nowTime,
		Day:    day,
		Hour:   hour,
		Minute: minute,
		Second: second,
	}
	return c.addJob(ctx, jobData, callback, extendData)
}
// EveryWeek schedules a job to run once a week.
// @param ctx    context used for validation logging
// @param taskId unique task id
// @param week   weekday on which to fire
// @param hour   hour of day
// @param minute minute
// @param second second
func (c *Single) EveryWeek(ctx context.Context, taskId string, week time.Weekday, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) (int64, error) {
	// nowTime := time.Now().In(c.location)
	jobData := JobData{
		JobType: JobTypeEveryWeek,
		TaskId:  taskId,
		// CreateTime: nowTime,
		Weekday: week,
		Hour:    hour,
		Minute:  minute,
		Second:  second,
	}
	return c.addJob(ctx, jobData, callback, extendData)
}
// EveryDay schedules a job to run once a day at hour:minute:second.
func (c *Single) EveryDay(ctx context.Context, taskId string, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) (int64, error) {
	// nowTime := time.Now().In(c.location)
	jobData := JobData{
		JobType: JobTypeEveryDay,
		TaskId:  taskId,
		// CreateTime: nowTime,
		Hour:   hour,
		Minute: minute,
		Second: second,
	}
	return c.addJob(ctx, jobData, callback, extendData)
}
// EveryHour schedules a job to run once an hour at minute:second.
func (c *Single) EveryHour(ctx context.Context, taskId string, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) (int64, error) {
	// nowTime := time.Now().In(c.location)
	jobData := JobData{
		JobType: JobTypeEveryHour,
		TaskId:  taskId,
		// CreateTime: nowTime,
		Minute: minute,
		Second: second,
	}
	return c.addJob(ctx, jobData, callback, extendData)
}
// EveryMinute schedules a job to run once a minute at the given second.
func (c *Single) EveryMinute(ctx context.Context, taskId string, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) (int64, error) {
	// nowTime := time.Now().In(c.location)
	jobData := JobData{
		JobType: JobTypeEveryMinute,
		TaskId:  taskId,
		// CreateTime: nowTime,
		Second: second,
	}
	return c.addJob(ctx, jobData, callback, extendData)
}
// EverySpace schedules a job at a fixed interval. BaseTime is set to
// today's midnight in the configured location; interval anchoring against
// that base happens in GetNextTime.
// NOTE(review): only spaceTime < 0 is rejected here; a zero interval falls
// through to addJob/GetNextTime — confirm it is rejected there.
func (c *Single) EverySpace(ctx context.Context, taskId string, spaceTime time.Duration, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) (int64, error) {
	nowTime := time.Now().In(c.location)
	if spaceTime < 0 {
		c.logger.Errorf(ctx, "间隔时间不能小于0")
		return 0, ErrIntervalTime
	}
	// Today's midnight in the configured location, used as the interval base.
	zeroTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), 0, 0, 0, 0, nowTime.Location())
	jobData := JobData{
		JobType: JobTypeInterval,
		TaskId:  taskId,
		// CreateTime: nowTime,
		BaseTime:     zeroTime,
		IntervalTime: spaceTime,
	}
	return c.addJob(ctx, jobData, callback, extendData)
}
// addJob validates jobData, computes its first run time, and registers the
// job in the worker list under a fresh atomic index.
// @param ctx     context for validation logging
// @param jobData schedule description (type, task id, time fields)
// @param call    callback invoked on each firing
// @param extend  opaque data passed back to the callback
// @return int64  timer index usable with Del
// @return error  validation or next-time computation failure
func (l *Single) addJob(ctx context.Context, jobData JobData, call func(ctx context.Context, extendData interface{}) error, extend interface{}) (int64, error) {
	if jobData.TaskId == "" {
		l.logger.Errorf(ctx, "任务ID不能为空")
		return 0, ErrTaskIdEmpty
	}
	// Field range checks; zero values are allowed for fields the job type
	// does not use.
	if jobData.Day < 0 || jobData.Day > 31 {
		l.logger.Errorf(ctx, "每月的天数必须在0-31之间")
		return 0, ErrMonthDay
	}
	if jobData.Hour < 0 || jobData.Hour > 23 {
		l.logger.Errorf(ctx, "小时必须在0-23之间")
		return 0, ErrHour
	}
	if jobData.Minute < 0 || jobData.Minute > 59 {
		l.logger.Errorf(ctx, "分钟必须在0-59之间")
		return 0, ErrMinute
	}
	if jobData.Second < 0 || jobData.Second > 59 {
		l.logger.Errorf(ctx, "秒必须在0-59之间")
		return 0, ErrSecond
	}
	if call == nil {
		l.logger.Errorf(ctx, "回调函数不能为空")
		return 0, ErrCallbackEmpty
	}
	// First due time for this schedule.
	nextTime, err := GetNextTime(time.Now().In(l.location), jobData)
	if err != nil {
		l.logger.Errorf(ctx, "获取下次执行时间失败:%s", err.Error())
		return 0, err
	}
	jobData.NextTime = *nextTime
	// Hand out a unique, monotonically increasing index.
	index := atomic.AddInt64(&l.timerIndex, 1)
	t := timerStr{
		Callback:   call,
		CanRunning: make(chan struct{}, 1),
		ExtendData: extend,
		TaskId:     jobData.TaskId,
		JobData:    &jobData,
	}
	l.workerList.Store(index, t)
	// Pull the global next-fire time forward if this job is due sooner.
	l.updateNextTimeIfEarlier(*nextTime)
	return index, nil
}
// updateNextTimeIfEarlier pulls the global next-fire time forward when the
// candidate is due sooner than the currently scheduled wake-up.
func (s *Single) updateNextTimeIfEarlier(candidate time.Time) {
	s.nextTimeMux.Lock()
	if candidate.Before(s.nextTime) {
		s.nextTime = candidate
	}
	s.nextTimeMux.Unlock()
}
// Del removes the timer registered under the given index; deleting an
// unknown index is a no-op.
func (l *Single) Del(index int64) {
	// sync.Map.Delete tolerates absent keys, so the previous
	// Load-then-Delete was a redundant double lookup.
	l.workerList.Delete(index)
}
// DelByTaskId removes every registered timer whose TaskId matches taskId.
func (l *Single) DelByTaskId(taskId string) {
	l.workerList.Range(func(key, value interface{}) bool {
		if entry, ok := value.(timerStr); ok && entry.TaskId == taskId {
			l.workerList.Delete(key)
		}
		return true
	})
}
// 迭代定时器列表
func (s *single) iteratorTimer(ctx context.Context, nowTime time.Time) {
timerMapMux.Lock()
defer timerMapMux.Unlock()
// fmt.Println("nowTime:", nowTime.Format("2006-01-02 15:04:05.000"))
func (l *Single) iterator(ctx context.Context) {
// 当前时间
nowTime := time.Now().In(l.location)
// 默认5秒后(如果没有值就暂停进来5秒)
newNextTime := nowTime.Add(time.Second * 5)
index := 0
for _, v := range timerMap {
index++
v := v
// 判断执行的时机
if v.NextTime.Before(nowTime) {
// fmt.Println("NextTime", v.NextTime.Format("2006-01-02 15:04:05.000"))
l.workerList.Range(func(k, v interface{}) bool {
timeStr, ok := v.(timerStr)
if !ok {
l.logger.Errorf(ctx, "timer: 类型断言失败,跳过该任务")
l.workerList.Delete(k)
return true
}
v.NextTime = v.NextTime.Add(v.SpaceTime)
if timeStr.JobData.NextTime.Before(nowTime) || timeStr.JobData.NextTime.Equal(nowTime) {
// 判断下次执行时间与当前时间
if v.NextTime.Before(nowTime) {
v.NextTime = nowTime.Add(v.SpaceTime)
originTime := timeStr.JobData.NextTime
// 计算下次执行时间
nextTime, err := GetNextTime(nowTime, *timeStr.JobData)
if err != nil {
l.logger.Errorf(ctx, "timer: 计算下次执行时间失败:%s", err.Error())
return true
}
// 更新下次执行时间
timeStr.JobData.NextTime = *nextTime
if index == 1 {
// 循环的第一个需要替换默认值
newNextTime = v.NextTime
}
// 获取最小的
if v.NextTime.Before(newNextTime) {
if nextTime.Before(newNextTime) {
// 本规则下次发送时间小于系统下次需要执行的时间:替换
newNextTime = v.NextTime
newNextTime = *nextTime
}
// 处理中就跳过本次
go func(ctx context.Context, v *timerStr) {
select {
case v.CanRunning <- struct{}{}:
defer func() {
// fmt.Printf("timer: 执行完成 %v %v \n", k, v.Tag)
select {
case <-v.CanRunning:
return
default:
return
}
}()
// fmt.Printf("timer: 准备执行 %v %v \n", k, v.Tag)
s.timerAction(ctx, v.Callback, v.UniqueKey, v.Extend)
default:
// fmt.Printf("timer: 已在执行 %v %v \n", k, v.Tag)
return
}
}(ctx, v)
}
}
go l.executeTask(ctx, timeStr, originTime)
// 实际下次时间小于预期下次时间:替换
if nextTime.Before(newNextTime) {
// 判断一下避免异常
if newNextTime.Before(nowTime) {
// 比当前时间小
nextTime = nowTime
} else {
nextTime = newNextTime
}
}
// fmt.Println("timer: one finish")
return true
})
l.updateNextTime(newNextTime)
}
// 定义各个回调函数
type callback func(ctx context.Context) bool
// executeTask runs one scheduled firing of a task: it attaches a trace id,
// applies the configured execution timeout, and uses the task's CanRunning
// channel (capacity 1) as a non-blocking slot so overlapping firings of the
// same task are skipped rather than queued.
// NOTE(review): context.WithValue is called with the bare string "trace_id"
// as key — go vet recommends a package-private key type to avoid collisions.
func (s *Single) executeTask(ctx context.Context, timer timerStr, originTime time.Time) {
	// Context carrying a fresh UUIDv7 trace id for this firing.
	u, _ := uuid.NewV7()
	traceCtx := context.WithValue(ctx, "trace_id", u.String())
	s.logger.Infof(traceCtx, "timer Single begin taskId:%s originTime:%d", timer.TaskId, originTime.UnixMilli())
	traceCtx, cancel := context.WithTimeout(traceCtx, s.timeout) // execution timeout
	defer cancel()
	select {
	case timer.CanRunning <- struct{}{}:
		// Acquired the per-task slot; release it when done.
		defer func() {
			select {
			case <-timer.CanRunning:
			default:
			}
		}()
		// Run the callback and log its duration.
		begin := time.Now()
		if err := s.doTask(traceCtx, timer, originTime); err != nil {
			s.logger.Errorf(traceCtx, "timer: 任务执行失败: %s", err.Error())
		}
		s.logger.Infof(traceCtx, "timer Single end taskId:%s originTime:%d cost:%dms", timer.TaskId, originTime.UnixMilli(), time.Since(begin).Milliseconds())
	case <-traceCtx.Done():
		s.logger.Errorf(traceCtx, "timer: 任务执行超时: %s", timer.TaskId)
	default:
		// A previous firing is still running; skip this one.
		s.logger.Infof(traceCtx, "timer: 任务正在执行中,跳过本次 %s", timer.TaskId)
	}
}
// 定时器操作类
// 这里不应painc
func (s *single) timerAction(ctx context.Context, call callback, uniqueKey string, extend ExtendParams) bool {
func (l *Single) doTask(ctx context.Context, timeStr timerStr, originTime time.Time) error {
// 检查任务是否已执行
taskKey := fmt.Sprintf("%s:%d", timeStr.TaskId, originTime.UnixMilli())
if _, loaded := l.hasRun.LoadOrStore(taskKey, time.Now()); loaded {
l.logger.Errorf(ctx, "timer: 任务已执行,跳过本次执行 %s", timeStr.TaskId)
return ErrTaskExecuted
}
defer func() {
if err := recover(); err != nil {
fmt.Println("timer:定时器出错", err)
log.Println("errStack", string(debug.Stack()))
l.logger.Errorf(ctx, "timer Single call panic err:%+v stack:%s", err, string(debug.Stack()))
}
}()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// 附加数据
ctx = context.WithValue(ctx, extendParamKey, extend)
return call(ctx)
}
// 快捷方法
func GetExtendParams(ctx context.Context) (*ExtendParams, error) {
val := ctx.Value(extendParamKey)
params, ok := val.(ExtendParams)
if !ok {
return nil, errors.New("没找到参数")
err := timeStr.Callback(ctx, timeStr.ExtendData)
if err != nil {
l.logger.Errorf(ctx, "timer Single call back %s, err: %v", timeStr.TaskId, err)
return err
}
return nil
}
// 更新下次执行时间
func (s *Single) updateNextTime(newTime time.Time) {
s.nextTimeMux.Lock()
defer s.nextTimeMux.Unlock()
now := time.Now()
if newTime.Before(now) {
s.nextTime = now
} else {
s.nextTime = newTime
}
return &params, nil
}
+509
View File
@@ -0,0 +1,509 @@
package timerx_test
import (
"context"
"fmt"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/yuninks/timerx"
)
// MockLogger 用于测试的日志记录器
type MockLogger struct {
Infos []string
Errors []string
Warns []string
mu sync.Mutex
}
func (m *MockLogger) Infof(ctx context.Context, format string, args ...interface{}) {
m.mu.Lock()
defer m.mu.Unlock()
m.Infos = append(m.Infos, fmt.Sprintf(format, args...))
}
func (m *MockLogger) Errorf(ctx context.Context, format string, args ...interface{}) {
m.mu.Lock()
defer m.mu.Unlock()
m.Errors = append(m.Errors, fmt.Sprintf(format, args...))
}
func (m *MockLogger) Warnf(ctx context.Context, format string, args ...interface{}) {
m.mu.Lock()
defer m.mu.Unlock()
m.Warns = append(m.Warns, fmt.Sprintf(format, args...))
}
func (m *MockLogger) Clear() {
m.mu.Lock()
defer m.mu.Unlock()
m.Infos = nil
m.Errors = nil
m.Warns = nil
}
// 测试基础功能
func TestSingleTimer_Basic(t *testing.T) {
ctx := context.Background()
mockLogger := &MockLogger{}
timer := timerx.InitSingle(ctx,
timerx.WithLogger(mockLogger),
timerx.WithLocation(time.UTC))
defer timer.Stop()
// 测试任务计数
assert.Equal(t, 0, timer.TaskCount())
var executionCount int32
taskFunc := func(ctx context.Context, data interface{}) error {
atomic.AddInt32(&executionCount, 1)
return nil
}
// 添加间隔任务
index, err := timer.EverySpace(ctx, "test-task", 100*time.Millisecond, taskFunc, nil)
assert.NoError(t, err)
assert.Greater(t, index, int64(0))
assert.Equal(t, 1, timer.TaskCount())
// 等待任务执行
time.Sleep(300 * time.Millisecond)
assert.GreaterOrEqual(t, atomic.LoadInt32(&executionCount), int32(2))
// 删除任务
timer.Del(index)
assert.Equal(t, 0, timer.TaskCount())
}
// 测试错误参数
func TestSingleTimer_InvalidParams(t *testing.T) {
ctx := context.Background()
timer := timerx.InitSingle(ctx)
defer timer.Stop()
validFunc := func(ctx context.Context, data interface{}) error { return nil }
// 测试空taskId
_, err := timer.EverySpace(ctx, "", time.Second, validFunc, nil)
assert.Error(t, err)
// 测试nil回调函数
_, err = timer.EverySpace(ctx, "test", time.Second, nil, nil)
assert.Error(t, err)
// 测试无效间隔时间
_, err = timer.EverySpace(ctx, "test", -time.Second, validFunc, nil)
assert.Error(t, err)
_, err = timer.EverySpace(ctx, "test", 0, validFunc, nil)
assert.Error(t, err)
}
// 测试任务去重
func TestSingleTimer_Deduplication(t *testing.T) {
ctx := context.Background()
mockLogger := &MockLogger{}
timer := timerx.InitSingle(ctx, timerx.WithLogger(mockLogger))
defer timer.Stop()
var executionCount int32
taskFunc := func(ctx context.Context, data interface{}) error {
atomic.AddInt32(&executionCount, 1)
time.Sleep(100 * time.Millisecond) // 模拟耗时任务
return nil
}
// 添加短间隔任务
_, err := timer.EverySpace(ctx, "dedup-test", 50*time.Millisecond, taskFunc, nil)
assert.NoError(t, err)
// 等待一段时间,检查去重是否生效
time.Sleep(250 * time.Millisecond)
// 应该只有1次执行(因为任务执行需要100ms,50ms的间隔会被去重)
assert.Equal(t, int32(1), atomic.LoadInt32(&executionCount))
// t.Logf("warn: %+v", mockLogger.Warns)
// t.Logf("info: %+v", mockLogger.Infos)
fmt.Println("info:", mockLogger.Infos)
fmt.Println("warn:", mockLogger.Warns)
// 检查是否有去重日志
assert.Contains(t, mockLogger.Infos, "timer: 任务正在执行中,跳过本次 dedup-test")
}
// 测试并发安全
func TestSingleTimer_Concurrency(t *testing.T) {
ctx := context.Background()
timer := timerx.InitSingle(ctx)
defer timer.Stop()
var wg sync.WaitGroup
var executionCount int32
// 并发添加任务
for i := 0; i < 10; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
taskFunc := func(ctx context.Context, data interface{}) error {
atomic.AddInt32(&executionCount, 1)
return nil
}
_, err := timer.EverySpace(ctx, fmt.Sprintf("concurrent-%d", i),
time.Duration(i+1)*100*time.Millisecond, taskFunc, nil)
assert.NoError(t, err)
}(i)
}
wg.Wait()
assert.Equal(t, 10, timer.TaskCount())
// 等待任务执行
time.Sleep(500 * time.Millisecond)
assert.Greater(t, atomic.LoadInt32(&executionCount), int32(0))
// 并发删除任务
timer.TaskCount()
maxIndex := timer.MaxIndex()
for i := int64(1); i < maxIndex; i++ {
wg.Add(1)
go func(index int64) {
defer wg.Done()
timer.Del(index)
}(i)
}
wg.Wait()
assert.Equal(t, 0, timer.TaskCount())
}
// 测试任务超时
func TestSingleTimer_Timeout(t *testing.T) {
ctx := context.Background()
mockLogger := &MockLogger{}
timer := timerx.InitSingle(ctx, timerx.WithLogger(mockLogger), timerx.WithTimeout(1*time.Second))
defer timer.Stop()
// 长时间运行的任务
longTask := func(ctx context.Context, data interface{}) error {
fmt.Println("long task start")
select {
case <-time.After(2 * time.Second): // 超过超时时间
case <-ctx.Done():
return ctx.Err()
}
return nil
}
_, err := timer.EverySpace(ctx, "timeout-test", 100*time.Millisecond, longTask, nil)
assert.NoError(t, err)
time.Sleep(time.Second * 5)
// 检查是否有超时相关的错误日志
if len(mockLogger.Errors) == 0 {
t.Fatalf("expected timeout error log, got none")
}
isTimeout := false
for _, err := range mockLogger.Errors {
isTimeout = strings.Contains(err, "context deadline exceeded")
if isTimeout {
break
}
}
assert.True(t, isTimeout)
}
// 测试panic恢复
func TestSingleTimer_PanicRecovery(t *testing.T) {
ctx := context.Background()
mockLogger := &MockLogger{}
timer := timerx.InitSingle(ctx, timerx.WithLogger(mockLogger))
defer timer.Stop()
panicTask := func(ctx context.Context, data interface{}) error {
panic("test panic")
}
_, err := timer.EverySpace(ctx, "panic-test", 100*time.Millisecond, panicTask, nil)
assert.NoError(t, err)
time.Sleep(200 * time.Millisecond)
// 检查是否有panic恢复日志
if len(mockLogger.Errors) == 0 {
t.Fatalf("expected panic recovery log, got none")
}
isPanic := false
for _, err := range mockLogger.Errors {
isPanic = strings.Contains(err, "timer Single call panic err")
if isPanic {
break
}
}
assert.True(t, isPanic)
}
// TestSingleTimer_DifferentJobTypes registers monthly, weekly, and interval
// jobs and checks their firing counts after one second.
// NOTE(review): the exact-count assertions are timing-sensitive (exactly 9
// interval firings, exactly 1 month/week firing) and can flake on a loaded
// machine; also now.Second()+1 becomes 60 when run at second 59, which
// addJob's 0-59 validation would reject.
func TestSingleTimer_DifferentJobTypes(t *testing.T) {
	ctx := context.Background()
	timer := timerx.InitSingle(ctx, timerx.WithLocation(time.UTC))
	defer timer.Stop()
	// day/hour/minute counters are declared but never used — candidates for removal.
	var counts struct {
		month  int32
		week   int32
		day    int32
		hour   int32
		minute int32
		space  int32
	}
	now := time.Now().UTC()
	// Monthly job due one second from now.
	_, err := timer.EveryMonth(ctx, "month-job", now.Day(), now.Hour(), now.Minute(), now.Second()+1,
		func(ctx context.Context, data interface{}) error {
			atomic.AddInt32(&counts.month, 1)
			return nil
		}, nil)
	assert.NoError(t, err)
	// Weekly job due one second from now.
	_, err = timer.EveryWeek(ctx, "week-job", now.Weekday(), now.Hour(), now.Minute(), now.Second()+1,
		func(ctx context.Context, data interface{}) error {
			atomic.AddInt32(&counts.week, 1)
			return nil
		}, nil)
	assert.NoError(t, err)
	// Interval job, fires repeatedly.
	_, err = timer.EverySpace(ctx, "space-job", 100*time.Millisecond,
		func(ctx context.Context, data interface{}) error {
			atomic.AddInt32(&counts.space, 1)
			return nil
		}, nil)
	assert.NoError(t, err)
	time.Sleep(time.Second)
	// Expected firing counts after one second.
	assert.Equal(t, int32(9), atomic.LoadInt32(&counts.space))
	assert.Equal(t, int32(1), atomic.LoadInt32(&counts.month))
	assert.Equal(t, int32(1), atomic.LoadInt32(&counts.week))
}
// 测试上下文取消
func TestSingleTimer_ContextCancellation(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
mockLogger := &MockLogger{}
timer := timerx.InitSingle(ctx, timerx.WithLogger(mockLogger))
var executionCount int32
_, err := timer.EverySpace(ctx, "cancel-test", 100*time.Millisecond,
func(ctx context.Context, data interface{}) error {
atomic.AddInt32(&executionCount, 1)
return nil
}, nil)
assert.NoError(t, err)
// 让任务执行一次
time.Sleep(150 * time.Millisecond)
initialCount := atomic.LoadInt32(&executionCount)
// 取消上下文
cancel()
time.Sleep(100 * time.Millisecond) // 等待停止
// 检查是否停止了执行
finalCount := atomic.LoadInt32(&executionCount)
assert.Equal(t, initialCount, finalCount) // 计数不应该再增加
// 检查是否有停止日志
assert.Contains(t, mockLogger.Infos, "timer: context cancelled, stopping timer loop")
}
// TestSingleTimer_ExtendData verifies the opaque payload given at
// registration reaches the callback unchanged.
// NOTE(review): receivedData is written from the timer goroutine and read
// here with no synchronization — a data race under `go test -race`; guard
// it with a mutex or an atomic pointer.
func TestSingleTimer_ExtendData(t *testing.T) {
	ctx := context.Background()
	timer := timerx.InitSingle(ctx)
	defer timer.Stop()
	type TestData struct {
		Message string
		Count   int
	}
	testData := &TestData{Message: "hello", Count: 42}
	var receivedData *TestData
	_, err := timer.EverySpace(ctx, "data-test", 100*time.Millisecond,
		func(ctx context.Context, data interface{}) error {
			fmt.Println("data:", data)
			if data != nil {
				receivedData = data.(*TestData)
			}
			return nil
		}, testData)
	assert.NoError(t, err)
	time.Sleep(time.Second)
	t.Logf("receivedData: %+v", receivedData)
	assert.NotNil(t, receivedData)
	assert.Equal(t, "hello", receivedData.Message)
	assert.Equal(t, 42, receivedData.Count)
}
// 测试任务删除
func TestSingleTimer_TaskDeletion(t *testing.T) {
ctx := context.Background()
timer := timerx.InitSingle(ctx)
defer timer.Stop()
var executionCount int32
// 添加多个任务
index1, err := timer.EverySpace(ctx, "task-1", 100*time.Millisecond,
func(ctx context.Context, data interface{}) error {
atomic.AddInt32(&executionCount, 1)
return nil
}, nil)
assert.NoError(t, err)
index2, err := timer.EverySpace(ctx, "task-2", 100*time.Millisecond,
func(ctx context.Context, data interface{}) error {
atomic.AddInt32(&executionCount, 1)
return nil
}, nil)
assert.NoError(t, err)
assert.Equal(t, 2, timer.TaskCount())
// 删除一个任务
timer.Del(index1)
assert.Equal(t, 1, timer.TaskCount())
// 等待执行
time.Sleep(200 * time.Millisecond)
count := atomic.LoadInt32(&executionCount)
// 应该只有task-2执行
assert.True(t, count >= 1 && count <= 2)
// 删除另一个任务
timer.Del(index2)
assert.Equal(t, 0, timer.TaskCount())
}
// 测试GetNextTime函数(需要根据实际实现调整)
func TestGetNextTime2(t *testing.T) {
now := time.Now().UTC()
// 测试间隔任务
jobData := timerx.JobData{
JobType: timerx.JobTypeInterval,
IntervalTime: time.Minute,
// CreateTime: now,
BaseTime: now,
}
tt := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), 0, 0, time.UTC)
nextTime, err := timerx.GetNextTime(now, jobData)
assert.NoError(t, err)
assert.WithinDuration(t, tt.Add(time.Minute), *nextTime, time.Second)
}
// 基准测试
func BenchmarkSingleTimer_AddAndExecute(b *testing.B) {
ctx := context.Background()
timer := timerx.InitSingle(ctx)
defer timer.Stop()
b.ResetTimer()
for i := 0; i < b.N; i++ {
timer.EverySpace(ctx, fmt.Sprintf("bench-%d", i), time.Millisecond,
func(ctx context.Context, data interface{}) error {
return nil
}, nil)
}
}
// TestSingleTimer_Logging verifies that a panicking task produces an error
// log entry containing the panic message.
func TestSingleTimer_Logging(t *testing.T) {
	ctx := context.Background()
	mockLogger := &MockLogger{}
	timer := timerx.InitSingle(ctx, timerx.WithLogger(mockLogger))
	defer timer.Stop()
	// Register a task that panics on every run.
	_, err := timer.EverySpace(ctx, "logging-test", 100*time.Millisecond,
		func(ctx context.Context, data interface{}) error {
			panic("test panic for logging")
		}, nil)
	assert.NoError(t, err)
	time.Sleep(200 * time.Millisecond)
	// Check the recorded error logs.
	assert.NotEmpty(t, mockLogger.Errors)
	if len(mockLogger.Errors) == 0 {
		t.Fatalf("expected panic recovery log, got none")
	}
	isPanic := false
	for _, msg := range mockLogger.Errors {
		// FIX: the original loop overwrote isPanic on every iteration and
		// never broke, so only the LAST error message was considered.
		if strings.Contains(msg, "test panic for logging") {
			isPanic = true
			break
		}
	}
	assert.True(t, isPanic)
}
// 测试时区处理
func TestSingleTimer_Timezone(t *testing.T) {
// 测试不同时区
locations := []*time.Location{
time.UTC,
time.FixedZone("TEST+8", 8*60*60),
time.FixedZone("TEST-5", -5*60*60),
}
for _, loc := range locations {
t.Run(loc.String(), func(t *testing.T) {
ctx := context.Background()
timer := timerx.InitSingle(ctx, timerx.WithLocation(loc))
defer timer.Stop()
var executed bool
// now := time.Now().In(loc)
// 添加下一秒执行的任务
_, err := timer.EverySpace(ctx, "tz-test", time.Second,
func(ctx context.Context, data interface{}) error {
fmt.Println("executed in location:", loc)
executed = true
return nil
}, nil)
assert.NoError(t, err)
time.Sleep(5 * time.Second)
assert.True(t, executed)
})
}
}
-21
View File
@@ -1,21 +0,0 @@
package timer_test
import "testing"
// 单元测试
func TestHelloWorld(t *testing.T) {
// 日志
t.Log("hello world")
s := "ddd"
t.Logf("Log测试%s", s)
// t.Errorf("ErrorF %s", s)
// 标记错误(继续运行)
// t.Fail()
// 终止运行
// t.FailNow()
}
+37 -12
View File
@@ -1,18 +1,43 @@
package timer
package timerx
import "time"
import (
"context"
"time"
)
type timerStr struct {
Callback callback // 需要回调的方法
CanRunning chan (struct{}) // 是否允许执行
BeginTime time.Time // 初始化任务的时间
NextTime time.Time // [删]下一次执行的时间
SpaceTime time.Duration // 任务间隔时间
UniqueKey string // 全局唯一键
Extend ExtendParams // 附加参数
Callback func(ctx context.Context, extendData any) error // 需要回调的方法
CanRunning chan (struct{}) // 是否允许执行(only single)
TaskId string // 任务ID 全局唯一键
ExtendData any // 附加参数
JobData *JobData // 任务时间数据
}
// 扩展参数
type ExtendParams struct {
Params map[string]interface{} // 带出去的参数
// JobType identifies a job's scheduling pattern.
type JobType string

const (
	JobTypeEveryMonth  JobType = "every_month"  // once a month
	JobTypeEveryWeek   JobType = "every_week"   // once a week
	JobTypeEveryDay    JobType = "every_day"    // once a day
	JobTypeEveryHour   JobType = "every_hour"   // once an hour
	JobTypeEveryMinute JobType = "every_minute" // once a minute
	JobTypeEverySecond JobType = "every_second" // once a second
	JobTypeInterval    JobType = "interval"     // fixed custom interval
)
// JobData carries a job's scheduling state. Which calendar fields below are
// meaningful depends on JobType (e.g. Weekday only for every_week,
// IntervalTime/BaseTime only for interval jobs) — TODO confirm against the
// scheduler that consumes this struct.
type JobData struct {
	JobType      JobType       // scheduling rule for this job
	TaskId       string        // task ID, globally unique key (cluster mode only)
	NextTime     time.Time     // next scheduled execution time
	BaseTime     time.Time     // reference time intervals are measured from
	IntervalTime time.Duration // gap between runs for interval jobs
	Month        time.Month    // month of the year to run
	Weekday      time.Weekday  // day of the week to run
	Day          int           // day of the month to run
	Hour         int           // hour of the day to run
	Minute       int           // minute of the hour to run
	Second       int           // second of the minute to run
}
// 定义各个回调函数
// type callback func(ctx context.Context, extendData interface{}) error
-159
View File
@@ -1,159 +0,0 @@
package timer
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
)
// worker implements a Redis-backed one-shot delayed-job queue: jobs wait in a
// sorted set until due, are then moved to a list, and finally executed via the
// user-supplied WorkerInterface.
type worker struct {
	ctx     context.Context
	zsetKey string // sorted set: member = job key, score = due time in ms
	listKey string // list of due job keys awaiting execution
	redis   *redis.Client
	worker  WorkerInterface // callback that actually runs each job
}
// WorkerCode is the result a Worker callback returns to the queue.
type WorkerCode int

const (
	WorkerCodeSuccess WorkerCode = 0  // job finished; drop it
	WorkerCodeAgain   WorkerCode = -1 // re-queue the job with its original delay
)
// WorkerInterface executes a due job. Implementations should account for
// failed executions by returning WorkerCodeAgain, which puts the job back in
// the queue with its original delay.
type WorkerInterface interface {
	Worker(uniqueKey string, jobType string, data map[string]interface{}) WorkerCode
}
// Process-wide singleton state guarded by sync.Once; set exactly once by
// InitWorker.
var wo *worker = nil
var once sync.Once
// extendData is the JSON payload stored in Redis per job.
type extendData struct {
	Delay time.Duration          // original delay, reused when the job is re-queued
	Data  map[string]interface{} // caller-supplied arguments handed to Worker
}
// InitWorker builds the process-wide worker singleton and starts its two
// background loops: one moving due jobs from the sorted set to the list, one
// executing them. NOTE: only the FIRST call's arguments take effect — any
// later call returns the existing instance and silently ignores its ctx,
// client and callback (sync.Once).
func InitWorker(ctx context.Context, re *redis.Client, w WorkerInterface) *worker {
	once.Do(func() {
		wo = &worker{
			ctx:     ctx,
			zsetKey: "timer:job_zsetkey",
			listKey: "timer:job_listkey",
			redis:   re,
			worker:  w,
		}
		// Mover: zset -> list when jobs come due.
		go wo.getTask()
		// Executor: pops the list and runs jobs.
		go wo.execTask()
	})
	return wo
}
// Add schedules a one-shot job to run after delayTime. Re-adding the same
// uniqueKey+jobType pair overwrites the pending job (both payload and score).
// delayTime must be strictly positive.
func (w *worker) Add(uniqueKey string, jobType string, delayTime time.Duration, data map[string]interface{}) error {
	// Idiomatic negativity check (was: delayTime.Abs() != delayTime).
	if delayTime < 0 {
		return fmt.Errorf("时间间隔不能为负数")
	}
	if delayTime == 0 {
		return fmt.Errorf("时间间隔不能为0")
	}
	redisKey := fmt.Sprintf("%s[:]%s", uniqueKey, jobType)
	ed := extendData{
		Delay: delayTime,
		Data:  data,
	}
	// Surface serialization failures instead of silently storing garbage
	// (the original discarded this error).
	b, err := json.Marshal(ed)
	if err != nil {
		return err
	}
	// Payload TTL outlives the due time by 5s so the executor can still read it.
	if _, err := w.redis.SetEX(w.ctx, redisKey, b, delayTime+time.Second*5).Result(); err != nil {
		return err
	}
	// Park the key in the sorted set, scored by its due time in milliseconds.
	_, err = w.redis.ZAdd(w.ctx, w.zsetKey, &redis.Z{
		Score:  float64(time.Now().Add(delayTime).UnixMilli()),
		Member: redisKey,
	}).Result()
	return err
}
// Del removes a pending job: its payload key and its sorted-set entry.
// Both deletions are attempted even if the first fails; the first error
// encountered is returned (the original dropped both errors and always
// returned nil, hiding Redis failures from callers).
func (w *worker) Del(uniqueKey string, jobType string) error {
	redisKey := fmt.Sprintf("%s[:]%s", uniqueKey, jobType)
	_, delErr := w.redis.Del(w.ctx, redisKey).Result()
	_, remErr := w.redis.ZRem(w.ctx, w.zsetKey, redisKey).Result()
	if delErr != nil {
		return delErr
	}
	return remErr
}
// getTask polls every 200ms and atomically moves every due job (score between
// 0 and now, in ms) from the sorted set into the work list via a Lua script,
// so no job can be claimed twice. Eval errors are deliberately ignored — the
// move is best-effort and the next tick retries (see the commented debug
// print). Exits when the worker's context is canceled.
func (w *worker) getTask() {
	timer := time.NewTicker(time.Millisecond * 200)
	defer timer.Stop()
Loop:
	for {
		select {
		case <-timer.C:
			// Runs atomically inside Redis: pop each due member from the
			// zset and push it onto the list for the executor.
			script := `
			local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])
			for i,v in ipairs(token) do
				redis.call('zrem',KEYS[1],v)
				redis.call('lpush',KEYS[2],v)
			end
			return "OK"
			`
			w.redis.Eval(w.ctx, script, []string{w.zsetKey, w.listKey}, 0, time.Now().UnixMilli()).Result()
			// fmt.Println(i, err)
		case <-w.ctx.Done():
			break Loop
		}
	}
}
// execTask blocks on the work list and runs each due job in its own goroutine.
// A job returning WorkerCodeAgain is re-queued with its original delay.
func (w *worker) execTask() {
	for {
		keys, err := w.redis.BLPop(w.ctx, time.Second*10, w.listKey).Result()
		if err != nil {
			// Once the context is canceled BLPop fails immediately every
			// time; the original spun in a hot loop printing "watch err"
			// forever. Exit instead.
			if w.ctx.Err() != nil {
				return
			}
			// redis.Nil just means the 10s poll timed out with no job —
			// not an error worth logging.
			if err == redis.Nil {
				continue
			}
			fmt.Println("watch err:", err)
			continue
		}
		go func() {
			// keys[1] is the popped member: "<uniqueKey>[:]<jobType>".
			s := strings.Split(keys[1], "[:]")
			if len(s) != 2 {
				fmt.Println("execJob bad key:", keys[1])
				return
			}
			// Load the payload written by Add.
			str, err := w.redis.Get(w.ctx, keys[1]).Result()
			if err != nil {
				fmt.Println("execJob err:", err)
				return
			}
			ed := extendData{}
			// The original ignored this error and would invoke Worker with
			// zero-value data on a corrupt payload.
			if err := json.Unmarshal([]byte(str), &ed); err != nil {
				fmt.Println("execJob unmarshal err:", err)
				return
			}
			fmt.Println("开始时间:", time.Now().Format("2006-01-02 15:04:05"))
			code := w.worker.Worker(s[0], s[1], ed.Data)
			if code == WorkerCodeAgain {
				// Put the job back with its original delay.
				fmt.Println("重入时间:", time.Now().Format("2006-01-02 15:04:05"))
				w.Add(s[0], s[1], ed.Delay, ed.Data)
			}
		}()
	}
}