2023-11-27 22:37:33 +08:00
|
|
|
package timerx
|
2023-08-27 23:39:58 +08:00
|
|
|
|
|
|
|
|
import (
|
|
|
|
|
"context"
|
2024-05-27 20:28:11 +08:00
|
|
|
"encoding/json"
|
2023-08-27 23:39:58 +08:00
|
|
|
"errors"
|
2024-05-22 15:02:39 +08:00
|
|
|
"fmt"
|
2023-08-27 23:39:58 +08:00
|
|
|
"runtime/debug"
|
2025-06-11 15:12:08 +08:00
|
|
|
"strconv"
|
2023-08-27 23:39:58 +08:00
|
|
|
"sync"
|
|
|
|
|
"time"
|
|
|
|
|
|
|
|
|
|
"github.com/go-redis/redis/v8"
|
2024-05-28 17:28:20 +08:00
|
|
|
uuid "github.com/satori/go.uuid"
|
2024-05-22 15:02:39 +08:00
|
|
|
"github.com/yuninks/cachex"
|
|
|
|
|
"github.com/yuninks/lockx"
|
2025-07-24 17:13:17 +08:00
|
|
|
"github.com/yuninks/timerx/logger"
|
2023-08-27 23:39:58 +08:00
|
|
|
)
|
|
|
|
|
|
2023-11-13 23:49:42 +08:00
|
|
|
// 功能描述
|
2024-04-04 10:58:57 +08:00
|
|
|
|
2023-11-13 23:49:42 +08:00
|
|
|
// 这是基于Redis的定时任务调度器,能够有效的在服务集群里面调度任务,避免了单点压力过高或单点故障问题
|
|
|
|
|
// 由于所有的服务代码是一致的,也就是一个定时任务将在所有的服务都有注册,具体调度到哪个服务运行看调度结果
|
|
|
|
|
|
|
|
|
|
// 暂不支持删除定时器,因为这个定时器的设计是基于全局的,如果删除了,那么其他服务就不知道了
|
|
|
|
|
|
2023-08-27 23:39:58 +08:00
|
|
|
// Singleton guard: ensures the InitCluster body runs at most once per process.
var clusterOnceLimit sync.Once
|
|
|
|
|
|
|
|
|
|
// Registry of locally registered tasks: taskId (string) -> timerStr.
// Shared by the registration path (addJob) and the dispatch path (watch/doTask).
var clusterWorkerList sync.Map
|
|
|
|
|
|
2023-11-13 23:49:42 +08:00
|
|
|
// Cluster is a Redis-backed distributed task scheduler. Every service
// instance registers the same task set; Redis decides which instance
// actually fires each run, avoiding single-point load and failure.
type Cluster struct {
	ctx     context.Context       // root context for all background goroutines
	redis   redis.UniversalClient // shared Redis connection
	cache   *cachex.Cache         // local cache used to deduplicate schedule writes
	timeout time.Duration         // per-task execution timeout (also the lock TTL)
	logger  logger.Logger
	keyPrefix string // prefix appended to every Redis key (namespace per deployment)

	location *time.Location // timezone used for all schedule computations

	lockKey string // global lock key guarding next-run-time computation

	zsetKey string // sorted set: member = taskId, score = due time (unix millis)
	listKey string // list of task ids that are due and ready to run
	setKey  string // set of re-entry jobs (tasks popped by an instance that doesn't know them)

	priority    int    // this instance's global priority
	priorityKey string // Redis key holding the current highest published priority
}
|
|
|
|
|
|
2023-11-13 23:49:42 +08:00
|
|
|
// clu is the process-wide Cluster singleton, populated once by InitCluster.
// Idiomatic zero-value declaration: `= nil` on a pointer var is redundant.
var clu *Cluster
|
2023-08-27 23:39:58 +08:00
|
|
|
|
2024-04-04 10:58:57 +08:00
|
|
|
// 初始化定时器
|
|
|
|
|
// 全局只需要初始化一次
|
2024-05-23 23:24:18 +08:00
|
|
|
func InitCluster(ctx context.Context, red redis.UniversalClient, keyPrefix string, opts ...Option) *Cluster {
|
2024-04-04 10:58:57 +08:00
|
|
|
|
2023-08-27 23:39:58 +08:00
|
|
|
clusterOnceLimit.Do(func() {
|
2024-05-20 09:35:12 +08:00
|
|
|
op := newOptions(opts...)
|
|
|
|
|
|
2023-11-13 23:49:42 +08:00
|
|
|
clu = &Cluster{
|
2025-06-11 15:12:08 +08:00
|
|
|
ctx: ctx,
|
|
|
|
|
redis: red,
|
|
|
|
|
cache: cachex.NewCache(),
|
|
|
|
|
timeout: op.timeout,
|
|
|
|
|
logger: op.logger,
|
|
|
|
|
keyPrefix: keyPrefix,
|
|
|
|
|
location: op.location,
|
|
|
|
|
priority: op.priority,
|
|
|
|
|
lockKey: "timer:cluster_globalLockKey" + keyPrefix, // 定时器的全局锁
|
|
|
|
|
zsetKey: "timer:cluster_zsetKey" + keyPrefix, // 有序集合
|
|
|
|
|
listKey: "timer:cluster_listKey" + keyPrefix, // 列表
|
|
|
|
|
setKey: "timer:cluster_setKey" + keyPrefix, // 重入集合
|
|
|
|
|
priorityKey: "timer:cluster_priorityKey" + keyPrefix, // 全局优先级的key
|
2023-08-27 23:39:58 +08:00
|
|
|
}
|
|
|
|
|
|
2024-06-22 15:34:49 +08:00
|
|
|
// 设置锁的超时时间
|
|
|
|
|
lockx.InitOption(lockx.SetTimeout(op.timeout))
|
|
|
|
|
|
2023-09-02 12:19:27 +08:00
|
|
|
// 监听任务
|
|
|
|
|
go clu.watch()
|
|
|
|
|
|
2025-06-11 15:12:08 +08:00
|
|
|
priorityTime := time.NewTicker(time.Second * 10)
|
|
|
|
|
go func(ctx context.Context) {
|
|
|
|
|
clu.setPriority()
|
|
|
|
|
Loop:
|
|
|
|
|
for {
|
|
|
|
|
select {
|
|
|
|
|
case <-priorityTime.C:
|
|
|
|
|
clu.setPriority()
|
|
|
|
|
case <-ctx.Done():
|
|
|
|
|
break Loop
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}(ctx)
|
|
|
|
|
|
2023-09-09 23:32:37 +08:00
|
|
|
timer := time.NewTicker(time.Millisecond * 200)
|
2023-08-27 23:39:58 +08:00
|
|
|
|
2024-05-23 23:24:18 +08:00
|
|
|
go func(ctx context.Context) {
|
2023-08-27 23:39:58 +08:00
|
|
|
Loop:
|
|
|
|
|
for {
|
|
|
|
|
select {
|
|
|
|
|
case <-timer.C:
|
2025-06-11 15:12:08 +08:00
|
|
|
if !clu.canRun() {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2023-09-02 12:19:27 +08:00
|
|
|
clu.getTask()
|
|
|
|
|
clu.getNextTime()
|
2023-08-27 23:39:58 +08:00
|
|
|
case <-ctx.Done():
|
|
|
|
|
break Loop
|
|
|
|
|
}
|
|
|
|
|
}
|
2024-05-23 23:24:18 +08:00
|
|
|
}(ctx)
|
2023-08-27 23:39:58 +08:00
|
|
|
})
|
|
|
|
|
return clu
|
|
|
|
|
}
|
|
|
|
|
|
2025-06-11 15:12:08 +08:00
|
|
|
// 判断是否可执行
|
|
|
|
|
func (l *Cluster) canRun() bool {
|
|
|
|
|
// 加缓存
|
|
|
|
|
str, err := l.redis.Get(l.ctx, l.priorityKey).Result()
|
|
|
|
|
|
|
|
|
|
fmt.Println(str, err)
|
|
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
|
if err == redis.Nil && l.priority == 0 {
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
l.logger.Errorf(l.ctx, "获取全局优先级失败:%s", err.Error())
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
strPriority, err := strconv.Atoi(str)
|
|
|
|
|
if err != nil {
|
|
|
|
|
l.logger.Errorf(l.ctx, "全局优先级转换失败:%s", err.Error())
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
if l.priority >= strPriority {
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// 设置全局优先级
|
|
|
|
|
func (l *Cluster) setPriority() bool {
|
|
|
|
|
// redis lua脚本
|
|
|
|
|
// 如果redis的可以不存在则设置值,如果存在且redis内的值比当前大则不处理,如果存在redis内的值比当前小或等于则更新值且更新ttl
|
|
|
|
|
script := `
|
|
|
|
|
-- KEYS[1] 是全局优先级的key
|
|
|
|
|
local priorityKey = KEYS[1]
|
|
|
|
|
-- ARGV[1] 是新的优先级
|
|
|
|
|
local priority = ARGV[1]
|
|
|
|
|
-- ARGV[2] 是过期时间
|
|
|
|
|
local expireTime = ARGV[2]
|
|
|
|
|
|
|
|
|
|
-- 校验参数完整性
|
|
|
|
|
if not priorityKey or not priority or not expireTime then
|
|
|
|
|
return redis.error_reply("Missing required arguments")
|
|
|
|
|
end
|
|
|
|
|
|
|
|
|
|
-- 尝试将字符串转换为数字
|
|
|
|
|
local currentPriority = redis.call('get', priorityKey)
|
|
|
|
|
local currentPriorityNum = tonumber(currentPriority)
|
|
|
|
|
local newPriorityNum = tonumber(priority)
|
|
|
|
|
|
|
|
|
|
if not currentPriority then
|
|
|
|
|
-- 如果当前优先级不存在,则设置新优先级并设置TTL
|
|
|
|
|
redis.call('set', priorityKey, priority, 'ex', expireTime)
|
|
|
|
|
return { "SET", expireTime }
|
|
|
|
|
elseif currentPriorityNum < newPriorityNum then
|
|
|
|
|
-- 如果当前优先级小于新优先级,则更新优先级并更新TTL
|
|
|
|
|
redis.call('set', priorityKey, priority, 'ex', expireTime)
|
|
|
|
|
return { "RESET", expireTime }
|
|
|
|
|
elseif currentPriorityNum == newPriorityNum then
|
|
|
|
|
-- 优先级相同则更新TTL
|
|
|
|
|
redis.call('expire', priorityKey, expireTime)
|
|
|
|
|
return { "UPDATE", expireTime }
|
|
|
|
|
else
|
|
|
|
|
-- 如果当前优先级大于新优先级,则不更新
|
|
|
|
|
return { "NOAUCH", '0' }
|
|
|
|
|
end
|
|
|
|
|
`
|
|
|
|
|
priority := fmt.Sprintf("%d", l.priority)
|
|
|
|
|
|
|
|
|
|
expireTime := (time.Second*30).Seconds() // 设置过期时间为1分钟
|
|
|
|
|
res, err := l.redis.Eval(l.ctx, script, []string{l.priorityKey}, priority, expireTime).Result()
|
|
|
|
|
if err != nil {
|
|
|
|
|
l.logger.Errorf(l.ctx, "设置全局优先级失败:%s", err.Error())
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
fmt.Printf("设置全局优先级返回值:%+v", res)
|
|
|
|
|
|
|
|
|
|
// 处理返回值,包含操作结果和 TTL
|
|
|
|
|
resultArray := res.([]interface{})
|
|
|
|
|
if len(resultArray) < 2 {
|
|
|
|
|
l.logger.Errorf(l.ctx, "设置全局优先级失败: 返回值格式不正确")
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
operationResult := resultArray[0].(string)
|
|
|
|
|
ttl := resultArray[1].(string)
|
|
|
|
|
|
|
|
|
|
if operationResult == "SET" || operationResult == "UPDATE" {
|
|
|
|
|
l.logger.Infof(l.ctx, "设置全局优先级成功:%s", priority)
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
_ = ttl
|
|
|
|
|
l.logger.Infof(l.ctx, "设置全局优先级未更新:%s", priority)
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 每月执行一次
|
|
|
|
|
// @param ctx 上下文
|
|
|
|
|
// @param taskId 任务ID
|
|
|
|
|
// @param day 每月的几号
|
|
|
|
|
// @param hour 小时
|
|
|
|
|
// @param minute 分钟
|
|
|
|
|
// @param second 秒
|
|
|
|
|
// @param callback 回调函数
|
|
|
|
|
// @param extendData 扩展数据
|
|
|
|
|
// @return error
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) EveryMonth(ctx context.Context, taskId string, day int, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-05-31 09:52:10 +08:00
|
|
|
nowTime := time.Now().In(c.location)
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
jobData := JobData{
|
|
|
|
|
JobType: JobTypeEveryMonth,
|
|
|
|
|
CreateTime: nowTime,
|
|
|
|
|
Day: day,
|
|
|
|
|
Hour: hour,
|
|
|
|
|
Minute: minute,
|
|
|
|
|
Second: second,
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
return c.addJob(ctx, taskId, jobData, callback, extendData)
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 每周执行一次
|
|
|
|
|
// @param ctx context.Context 上下文
|
|
|
|
|
// @param taskId string 任务ID
|
|
|
|
|
// @param week time.Weekday 周
|
|
|
|
|
// @param hour int 小时
|
|
|
|
|
// @param minute int 分钟
|
|
|
|
|
// @param second int 秒
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) EveryWeek(ctx context.Context, taskId string, week time.Weekday, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-05-31 09:52:10 +08:00
|
|
|
nowTime := time.Now().In(c.location)
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
jobData := JobData{
|
2024-05-22 15:02:39 +08:00
|
|
|
JobType: JobTypeEveryWeek,
|
2024-05-20 09:35:12 +08:00
|
|
|
CreateTime: nowTime,
|
|
|
|
|
Weekday: week,
|
|
|
|
|
Hour: hour,
|
|
|
|
|
Minute: minute,
|
|
|
|
|
Second: second,
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
return c.addJob(ctx, taskId, jobData, callback, extendData)
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 每天执行一次
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) EveryDay(ctx context.Context, taskId string, hour int, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-05-31 09:52:10 +08:00
|
|
|
nowTime := time.Now().In(c.location)
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
jobData := JobData{
|
2024-05-22 15:02:39 +08:00
|
|
|
JobType: JobTypeEveryDay,
|
2024-05-20 09:35:12 +08:00
|
|
|
CreateTime: nowTime,
|
|
|
|
|
Hour: hour,
|
|
|
|
|
Minute: minute,
|
|
|
|
|
Second: second,
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
return c.addJob(ctx, taskId, jobData, callback, extendData)
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 每小时执行一次
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) EveryHour(ctx context.Context, taskId string, minute int, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-05-31 09:52:10 +08:00
|
|
|
nowTime := time.Now().In(c.location)
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
jobData := JobData{
|
2024-05-22 15:02:39 +08:00
|
|
|
JobType: JobTypeEveryHour,
|
2024-05-20 09:35:12 +08:00
|
|
|
CreateTime: nowTime,
|
|
|
|
|
Minute: minute,
|
|
|
|
|
Second: second,
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
return c.addJob(ctx, taskId, jobData, callback, extendData)
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 每分钟执行一次
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) EveryMinute(ctx context.Context, taskId string, second int, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-05-31 09:52:10 +08:00
|
|
|
nowTime := time.Now().In(c.location)
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
jobData := JobData{
|
2024-05-22 15:02:39 +08:00
|
|
|
JobType: JobTypeEveryMinute,
|
2024-05-20 09:35:12 +08:00
|
|
|
CreateTime: nowTime,
|
|
|
|
|
Second: second,
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
return c.addJob(ctx, taskId, jobData, callback, extendData)
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 特定时间间隔
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) EverySpace(ctx context.Context, taskId string, spaceTime time.Duration, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-05-31 09:52:10 +08:00
|
|
|
nowTime := time.Now().In(c.location)
|
2024-05-20 09:35:12 +08:00
|
|
|
|
|
|
|
|
if spaceTime < 0 {
|
|
|
|
|
c.logger.Errorf(ctx, "间隔时间不能小于0")
|
|
|
|
|
return errors.New("间隔时间不能小于0")
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-22 15:02:39 +08:00
|
|
|
// 获取当天的零点时间
|
|
|
|
|
zeroTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), 0, 0, 0, 0, nowTime.Location())
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
jobData := JobData{
|
2024-05-22 15:02:39 +08:00
|
|
|
JobType: JobTypeInterval,
|
|
|
|
|
BaseTime: zeroTime, // 默认当天的零点
|
2024-05-20 09:35:12 +08:00
|
|
|
CreateTime: nowTime,
|
|
|
|
|
IntervalTime: spaceTime,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return c.addJob(ctx, taskId, jobData, callback, extendData)
|
2024-04-04 10:58:57 +08:00
|
|
|
}
|
|
|
|
|
|
2024-05-20 09:35:12 +08:00
|
|
|
// 统一添加任务
|
|
|
|
|
// @param ctx context.Context 上下文
|
|
|
|
|
// @param taskId string 任务ID
|
|
|
|
|
// @param jobData *JobData 任务数据
|
|
|
|
|
// @param callback callback 回调函数
|
|
|
|
|
// @param extendData interface{} 扩展数据
|
|
|
|
|
// @return error
|
2024-05-31 13:05:51 +08:00
|
|
|
func (c *Cluster) addJob(ctx context.Context, taskId string, jobData JobData, callback func(ctx context.Context, extendData interface{}) error, extendData interface{}) error {
|
2024-04-04 10:58:57 +08:00
|
|
|
_, ok := clusterWorkerList.Load(taskId)
|
2023-08-27 23:39:58 +08:00
|
|
|
if ok {
|
2024-04-04 10:58:57 +08:00
|
|
|
c.logger.Errorf(ctx, "key已存在:%s", taskId)
|
2023-08-27 23:39:58 +08:00
|
|
|
return errors.New("key已存在")
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-30 11:02:44 +08:00
|
|
|
_, err := GetNextTime(time.Now().In(c.location), jobData)
|
2024-05-20 09:35:12 +08:00
|
|
|
if err != nil {
|
|
|
|
|
c.logger.Errorf(ctx, "获取下次执行时间失败:%s", err.Error())
|
|
|
|
|
return err
|
2023-08-27 23:39:58 +08:00
|
|
|
}
|
|
|
|
|
|
2024-06-19 16:31:51 +08:00
|
|
|
// ctx, cancel := context.WithCancel(ctx)
|
|
|
|
|
// defer cancel()
|
|
|
|
|
|
|
|
|
|
// lock := lockx.NewGlobalLock(ctx, c.redis, taskId)
|
|
|
|
|
// tB := lock.Try(2)
|
|
|
|
|
// if !tB {
|
|
|
|
|
// c.logger.Errorf(ctx, "添加失败:%s", taskId)
|
|
|
|
|
// return errors.New("添加失败")
|
|
|
|
|
// }
|
|
|
|
|
// defer lock.Unlock()
|
2023-08-27 23:39:58 +08:00
|
|
|
|
|
|
|
|
t := timerStr{
|
2023-11-13 23:49:42 +08:00
|
|
|
Callback: callback,
|
|
|
|
|
ExtendData: extendData,
|
2024-04-04 10:58:57 +08:00
|
|
|
TaskId: taskId,
|
2024-05-20 09:35:12 +08:00
|
|
|
JobData: &jobData,
|
2023-08-27 23:39:58 +08:00
|
|
|
}
|
|
|
|
|
|
2024-04-04 10:58:57 +08:00
|
|
|
clusterWorkerList.Store(taskId, t)
|
2023-08-27 23:39:58 +08:00
|
|
|
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// getNextTime computes, under a global lock, the next fire time for every
// locally registered task and schedules it in the Redis sorted set.
// A per-(task, fireTime) dedup key (SET NX in the Lua script) guarantees the
// same fire time is only enqueued once cluster-wide; a local cache limits
// retries of the script to 3 attempts per fire time.
//
// TODO (original): registered tasks should live centrally in Redis — with
// only local registries, instances running different code disagree; needs an
// expiry mechanism for tasks of retired services.
// TODO (original): instances with skewed clocks may compute different next
// times, risking duplicate execution.
func (c *Cluster) getNextTime() {
	lock := lockx.NewGlobalLock(c.ctx, c.redis, c.lockKey)
	// Only one instance at a time recomputes the schedule.
	lockBool := lock.Lock()
	if !lockBool {
		// log.Println("timer:获取锁失败")
		return
	}
	defer lock.Unlock()

	// Compute the next fire time for each task.

	// p := c.redis.Pipeline()

	// Walk the locally registered task list.
	clusterWorkerList.Range(func(key, value interface{}) bool {
		val := value.(timerStr)
		// NOTE(review): the GetNextTime error is discarded; an invalid
		// schedule would silently produce a zero nextTime here.
		nextTime, _ := GetNextTime(time.Now().In(c.location), *val.JobData)

		// fmt.Println(val.ExtendData, val.JobData, nextTime)

		// Local dedup: one cache entry per (prefix, task, fire-time-millis).
		cacheKey := fmt.Sprintf("%s_%s_%d", c.keyPrefix, val.TaskId, nextTime.UnixMilli())
		cacheVal, err := c.cache.Get(cacheKey)
		if err == nil {
			// Already handled this fire time (cache hit) — skip.
			return true
		}
		// Retry counter for this fire time; cacheVal is non-nil when a
		// previous attempt stored a count.
		valueNum := int(0)
		if cacheVal != nil {
			valueNum = cacheVal.(int)
		}
		if valueNum > 2 {
			// Give up after 2 failed attempts for this fire time.
			return true
		}
		// fmt.Println("计算时间1", val.ExtendData, time.UnixMilli(nextTime.UnixMilli()).Format("2006-01-02 15:04:05"))

		// Lua: SET NX the dedup key with a TTL; only the winner ZADDs the
		// (score=fire time, member=taskId) entry into the sorted set.
		script := `
		local zsetKey = KEYS[1]

		local cacheKey = ARGV[1]
		local expireTime = ARGV[2]

		local score = ARGV[3]
		local member = ARGV[4]

		local res = redis.call('set', cacheKey, '', 'nx', 'ex', expireTime)

		if res then
			redis.call('zadd', zsetKey, score, member)
			return "SUCCESS"
		end
		return "ERROR"
		`

		// TODO:
		expireTime := time.Minute

		res, err := c.redis.Eval(c.ctx, script, []string{c.zsetKey}, cacheKey, expireTime.Seconds(), nextTime.UnixMilli(), val.TaskId).Result()

		valueNum++

		if err == nil && res.(string) == "SUCCESS" {
			// Won the NX race: mark this fire time as fully handled
			// (10 > 2, so no further attempts).
			valueNum = 10

			// fmt.Println("计算时间2", val.ExtendData, time.UnixMilli(nextTime.UnixMilli()).Format("2006-01-02 15:04:05"))
		}

		c.cache.Set(cacheKey, valueNum, expireTime)

		return true
	})

	// _, err := p.Exec(ctx)
	// _ = err
}
|
|
|
|
|
|
|
|
|
|
// 获取任务
|
2023-11-13 23:49:42 +08:00
|
|
|
func (c *Cluster) getTask() {
|
2023-08-27 23:39:58 +08:00
|
|
|
// 定时去Redis获取任务
|
2023-09-02 13:32:04 +08:00
|
|
|
script := `
|
|
|
|
|
local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])
|
|
|
|
|
for i,v in ipairs(token) do
|
|
|
|
|
redis.call('zrem',KEYS[1],v)
|
|
|
|
|
redis.call('lpush',KEYS[2],v)
|
|
|
|
|
end
|
|
|
|
|
return "OK"
|
|
|
|
|
`
|
|
|
|
|
c.redis.Eval(c.ctx, script, []string{c.zsetKey, c.listKey}, 0, time.Now().UnixMilli()).Result()
|
2023-09-02 12:19:27 +08:00
|
|
|
|
|
|
|
|
}
|
2023-08-27 23:39:58 +08:00
|
|
|
|
2023-09-02 12:19:27 +08:00
|
|
|
// watch starts two long-lived goroutines:
//  1. dispatch: BLPops due task ids from the runnable list and runs them;
//     ids unknown to this instance are pushed into the re-entry set so
//     another instance (with that task registered) can pick them up.
//  2. re-entry: SPops the re-entry set and retries dispatch, giving up
//     after 3 attempts.
//
// NOTE(review): both loops run for the life of c.ctx; neither selects on
// ctx.Done() directly — they rely on BLPop/SPop erroring once the context
// is cancelled.
func (c *Cluster) watch() {
	// Dispatch loop.
	go func() {
		for {
			if !c.canRun() {
				// Outranked by a higher global priority — back off.
				time.Sleep(time.Second * 5)
				continue
			}

			// keys[0] is the list key, keys[1] the popped task id.
			keys, err := c.redis.BLPop(c.ctx, time.Second*10, c.listKey).Result()
			if err != nil {
				if err != redis.Nil {
					c.logger.Errorf(c.ctx, "BLPop watch err:%+v", err)
				}
				continue
			}
			_, ok := clusterWorkerList.Load(keys[1])
			if !ok {
				c.logger.Errorf(c.ctx, "watch timer:任务不存在%+v", keys[1])

				// Not registered locally: hand the task to the re-entry set
				// with an initial attempt count of 1.
				rd := ReJobData{
					TaskId: keys[1],
					Times:  1,
				}
				rdb, _ := json.Marshal(rd)

				c.redis.SAdd(c.ctx, c.setKey, string(rdb))
				continue
			}
			go c.doTask(c.ctx, keys[1])
		}
	}()

	// Re-entry loop: retry tasks that landed on instances not knowing them.
	go func() {
		for {
			if !c.canRun() {
				// Outranked by a higher global priority — back off.
				time.Sleep(time.Second * 5)
				continue
			}

			res, err := c.redis.SPop(c.ctx, c.setKey).Result()
			if err != nil {
				if err == redis.Nil {
					// Set is empty — avoid a busy loop.
					time.Sleep(time.Second)
				} else {
					c.logger.Errorf(c.ctx, "SPop watch err:%+v", err)
				}
				continue
			}

			var rd ReJobData
			err = json.Unmarshal([]byte(res), &rd)
			if err != nil {
				c.logger.Errorf(c.ctx, "json.Unmarshal err:%+v", err)
				continue
			}

			_, ok := clusterWorkerList.Load(rd.TaskId)
			if !ok {
				c.logger.Errorf(c.ctx, "watch timer:任务不存在%+v", rd.TaskId)

				if rd.Times >= 3 {
					// Give up after 3 attempts.
					continue
				}
				rd.Times++

				rdb, _ := json.Marshal(rd)

				c.redis.SAdd(c.ctx, c.setKey, string(rdb))
				continue
			}
			go c.doTask(c.ctx, rd.TaskId)
		}
	}()
}
|
|
|
|
|
|
2024-05-27 20:28:11 +08:00
|
|
|
// ReJobData is the JSON payload stored in the re-entry set (setKey) for a
// task that was popped by an instance that has no local registration for it.
// Field names are part of the serialized wire format — do not rename.
type ReJobData struct {
	TaskId string // task identifier
	Times  int    // dispatch attempt count; retried until it reaches 3
}
|
|
|
|
|
|
2023-08-27 23:39:58 +08:00
|
|
|
// doTask executes one registered task under a per-task global lock, with the
// configured timeout and panic recovery. Exactly one instance cluster-wide
// runs a given task id at a time (lockx global lock keyed on taskId).
func (c *Cluster) doTask(ctx context.Context, taskId string) {
	// Bound the callback's run time by the configured timeout.
	ctx, cancel := context.WithTimeout(ctx, c.timeout)
	defer cancel()

	val, ok := clusterWorkerList.Load(taskId)
	if !ok {
		c.logger.Errorf(ctx, "doTask timer:任务不存在:%s", taskId)
		return
	}
	t := val.(timerStr)

	// Per-task global lock: prevents concurrent execution of the same task
	// across the cluster.
	lock := lockx.NewGlobalLock(ctx, c.redis, taskId)
	tB := lock.Lock()
	if !tB {
		c.logger.Errorf(ctx, "doTask timer:获取锁失败:%s", taskId)
		return
	}
	defer lock.Unlock()

	// Registered after the Unlock defer, so on a callback panic this
	// recover runs first and the lock is still released afterwards.
	defer func() {
		if err := recover(); err != nil {
			c.logger.Errorf(ctx, "timer:回调任务panic err:%+v stack:%s", err, string(debug.Stack()))
		}
	}()

	// NOTE(review): plain string context key ("trace_id") — go vet flags
	// this; callbacks presumably read it via ctx.Value("trace_id"), so
	// changing it to a typed key would break them. Confirm before touching.
	ctx = context.WithValue(ctx, "trace_id", uuid.NewV4().String())

	// Run the callback. NOTE(review): its error return is discarded here.
	t.Callback(ctx, t.ExtendData)
}
|