27 Commits

Author SHA1 Message Date
yun 88056ee8e9 修改mod 2024-04-04 21:57:26 +08:00
yun 4d07ce2c09 优化集群定时器的逻辑 2024-04-04 10:58:57 +08:00
yun 43d2798b41 优化定时器的表述 2023-12-27 17:19:52 +08:00
yun 1beafa934c 优化全局单次定时器的冲突问题 2023-12-27 17:11:33 +08:00
yun 362d1f455a 稳定版本 2023-11-27 22:37:33 +08:00
yun fbb74cdd6d 优化部分逻辑 2023-11-13 23:49:42 +08:00
yun 319d6b6db1 修改公共部分 2023-11-11 17:30:54 +08:00
Administrator 3cc3f0400b 更新.gitlab-ci.yml文件 2023-11-11 09:17:40 +00:00
Administrator 5793afbab7 更新.gitlab-ci.yml文件 2023-11-11 09:13:59 +00:00
Administrator 30093d0717 更新.gitlab-ci.yml文件 2023-11-11 09:10:31 +00:00
Administrator a4a0d86d74 更新.gitlab-ci.yml文件 2023-11-11 09:07:37 +00:00
Administrator 80bd6b4327 更新.gitlab-ci.yml文件 2023-11-11 09:03:01 +00:00
Administrator b9444c8bb6 更新.gitlab-ci.yml文件 2023-11-11 08:58:53 +00:00
Administrator 0dbd1ee9c2 更新.gitlab-ci.yml文件 2023-11-11 08:54:10 +00:00
Administrator aabce29211 更新.gitlab-ci.yml文件 2023-11-11 08:52:18 +00:00
Administrator 7475f9cd3b 更新.gitlab-ci.yml文件 2023-11-11 08:50:10 +00:00
Administrator fabf7f65d7 更新.gitlab-ci.yml文件 2023-11-11 08:47:42 +00:00
Administrator e6915aa766 更新.gitlab-ci.yml文件 2023-11-11 08:40:26 +00:00
Administrator de4a9c8f31 更新.gitlab-ci.yml文件 2023-11-11 08:36:44 +00:00
Administrator 356f843747 更新.gitlab-ci.yml文件 2023-11-11 08:34:32 +00:00
Administrator 52ed316cd1 更新.gitlab-ci.yml文件 2023-11-11 08:28:53 +00:00
Administrator 778bf75650 更新.gitlab-ci.yml文件 2023-11-11 08:16:43 +00:00
Administrator a87fee1f38 更新.gitlab-ci.yml文件 2023-11-11 08:15:06 +00:00
Administrator 1a70738e6d 更新.gitlab-ci.yml文件 2023-11-11 07:02:29 +00:00
yun c929d1a57d 提交 2023-11-10 23:43:32 +08:00
yun 4944efbf29 修改单机版的函数调用方式 2023-09-23 11:17:42 +08:00
yun e57f941001 更新mod 2023-09-10 10:02:07 +08:00
16 changed files with 522 additions and 563 deletions
+24 -25
View File
@@ -1,26 +1,25 @@
# You can override the included template(s) by including variable overrides
# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings
# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/#customizing-settings
# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings
# Note that environment variables can be set in several places
# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
stages: stages:
- build - deploy
- test
- deploy before_script:
- review - 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'
- dast - eval $(ssh-agent -s)
- staging - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
- canary - mkdir -p ~/.ssh
- production - chmod 700 ~/.ssh
- incremental rollout 10% - '[[ -f /.dockerenv ]] && echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config'
- incremental rollout 25%
- incremental rollout 50% deploy_job:
- incremental rollout 100% stage: deploy
- performance script:
- cleanup - git remote remove github || true
sast: - git remote add github git@github.com:yun-ink/timerx.git
stage: test - git remote -v
include: - git checkout master
- template: Auto-DevOps.gitlab-ci.yml - git fsck --full
- git prune
- git gc --prune=now --aggressive
- git push -u github master -f
only:
- master
- tags
+170 -78
View File
@@ -1,45 +1,61 @@
package timer package timerx
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"log"
"runtime/debug" "runtime/debug"
"sync" "sync"
"time" "time"
"code.yun.ink/open/timer/lockx" "code.yun.ink/pkg/lockx"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
) )
// 功能描述
// 这是基于Redis的定时任务调度器,能够有效的在服务集群里面调度任务,避免了单点压力过高或单点故障问题
// 由于所有的服务代码是一致的,也就是一个定时任务将在所有的服务都有注册,具体调度到哪个服务运行看调度结果
// 暂不支持删除定时器,因为这个定时器的设计是基于全局的,如果删除了,那么其他服务就不知道了
// TODO:如果获取到任务不能处理的,应放回队列(因为可能新旧代码同时上线,新的添加了任务处理)
// 单例模式 // 单例模式
var clusterOnceLimit sync.Once var clusterOnceLimit sync.Once
// 已注册的任务列表 // 已注册的任务列表
var clusterWorkerList sync.Map var clusterWorkerList sync.Map
type cluster struct { type Cluster struct {
ctx context.Context ctx context.Context
redis *redis.Client redis *redis.Client
logger Logger
lockKey string // 全局计算的key lockKey string // 全局计算的key
nextKey string // 下一次执行的key nextKey string // 下一次执行的key
zsetKey string // 有序集合的key zsetKey string // 有序集合的key
listKey string // 可执行的任务列表的key listKey string // 可执行的任务列表的key
setKey string // 重入集合的key
} }
var clu *cluster = nil var clu *Cluster = nil
// 初始化定时器
// 全局只需要初始化一次
func InitCluster(ctx context.Context, red *redis.Client, keyPrefix string, opts ...Option) *Cluster {
op := newOptions(opts...)
func InitCluster(ctx context.Context, red *redis.Client) *cluster {
clusterOnceLimit.Do(func() { clusterOnceLimit.Do(func() {
clu = &cluster{ clu = &Cluster{
ctx: ctx, ctx: ctx,
redis: red, redis: red,
lockKey: "timer:cluster_globalLockKey", logger: op.logger,
nextKey: "timer:cluster_nextKey", lockKey: keyPrefix + "timer:cluster_globalLockKey", // 定时器的全局锁
zsetKey: "timer:cluster_zsetKey", nextKey: keyPrefix + "timer:cluster_nextKey", // 下一次
listKey: "timer:cluster_listKey", zsetKey: keyPrefix + "timer:cluster_zsetKey", // 有序集合
listKey: keyPrefix + "timer:cluster_listKey", // 列表
setKey: keyPrefix + "timer:cluster_setKey", // 重入集合
} }
// 监听任务 // 监听任务
@@ -63,38 +79,108 @@ func InitCluster(ctx context.Context, red *redis.Client) *cluster {
return clu return clu
} }
func (c *cluster) AddTimer(ctx context.Context, uniqueKey string, spaceTime time.Duration, callback callback, extend ExtendParams) error { // TODO:指定执行时间
_, ok := clusterWorkerList.Load(uniqueKey) // 1.每月的1号2点执行(如果当月没有这个号就不执行)
// 2.每周的周一2点执行
// 3.每天的2点执行
// 4.每小时的2分执行
// 5.每分钟的2秒执行
func (c *Cluster) AddEveryMonth(ctx context.Context, taskId string, day int, hour int, minute int, second int, callback callback, extendData interface{}) error {
nowTime := time.Now()
// 计算下一次执行的时间
nextTime := time.Date(nowTime.Year(), nowTime.Month(), day, hour, minute, second, 0, nowTime.Location())
if nextTime.Before(nowTime) {
nextTime = nextTime.AddDate(0, 1, 0)
}
return c.addJob(ctx, taskId, nextTime, time.Hour*24*30, callback, extendData, JobTypeEveryMonth, &JobData{Day: &day, Hour: &hour, Minute: &minute, Second: &second})
}
func (c *Cluster) AddEveryWeek(ctx context.Context, taskId string, week time.Weekday, hour int, minute int, second int, callback callback, extendData interface{}) error {
nowTime := time.Now()
// 计算下一次执行的时间
nextTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), hour, minute, second, 0, nowTime.Location())
for nextTime.Weekday() != week {
nextTime = nextTime.AddDate(0, 0, 1)
}
if nextTime.Before(nowTime) {
nextTime = nextTime.AddDate(0, 0, 7)
}
return c.addJob(ctx, taskId, nextTime, time.Hour*24*7, callback, extendData, JobTypeInterval, nil)
}
func (c *Cluster) AddEveryDay(ctx context.Context, taskId string, hour int, minute int, second int, callback callback, extendData interface{}) error {
nowTime := time.Now()
// 计算下一次执行的时间
nextTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), hour, minute, second, 0, nowTime.Location())
if nextTime.Before(nowTime) {
nextTime = nextTime.AddDate(0, 0, 1)
}
return c.addJob(ctx, taskId, nextTime, time.Hour*24, callback, extendData, JobTypeInterval, nil)
}
func (c *Cluster) AddEveryHour(ctx context.Context, taskId string, minute int, second int, callback callback, extendData interface{}) error {
nowTime := time.Now()
// 计算下一次执行的时间
nextTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), nowTime.Hour(), minute, second, 0, nowTime.Location())
if nextTime.Before(nowTime) {
nextTime = nextTime.Add(time.Hour)
}
return c.addJob(ctx, taskId, nextTime, time.Hour, callback, extendData, JobTypeInterval, nil)
}
func (c *Cluster) AddEveryMinute(ctx context.Context, taskId string, second int, callback callback, extendData interface{}) error {
nowTime := time.Now()
// 计算下一次执行的时间
nextTime := time.Date(nowTime.Year(), nowTime.Month(), nowTime.Day(), nowTime.Hour(), nowTime.Minute(), second, 0, nowTime.Location())
if nextTime.Before(nowTime) {
nextTime = nextTime.Add(time.Minute)
}
return c.addJob(ctx, taskId, nextTime, time.Minute, callback, extendData, JobTypeInterval, nil)
}
func (c *Cluster) Add(ctx context.Context, taskId string, spaceTime time.Duration, callback callback, extendData interface{}) error {
return c.addJob(ctx, taskId, time.Now(), spaceTime, callback, extendData, JobTypeInterval, nil)
}
// 指定时间间隔
// TODO:
// 1.不同服务定的时间间隔不一致问题
// 2.后起的服务计算了时间覆盖前面原有的时间问题
func (c *Cluster) addJob(ctx context.Context, taskId string, beginTime time.Time, spaceTime time.Duration, callback callback, extendData interface{}, jobType JobType, jobData *JobData) error {
_, ok := clusterWorkerList.Load(taskId)
if ok { if ok {
c.logger.Errorf(ctx, "key已存在:%s", taskId)
return errors.New("key已存在") return errors.New("key已存在")
} }
if spaceTime != spaceTime.Abs() { if spaceTime != spaceTime.Abs() {
c.logger.Errorf(ctx, "时间间隔不能为负数:%s", taskId)
return errors.New("时间间隔不能为负数") return errors.New("时间间隔不能为负数")
} }
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
defer cancel() defer cancel()
lock := lockx.NewGlobalLock(ctx, c.redis, uniqueKey) lock := lockx.NewGlobalLock(ctx, c.redis, taskId)
tB := lock.Try(10) tB := lock.Try(10)
if !tB { if !tB {
c.logger.Errorf(ctx, "添加失败:%s", taskId)
return errors.New("添加失败") return errors.New("添加失败")
} }
defer lock.Unlock() defer lock.Unlock()
nowTime := time.Now()
t := timerStr{ t := timerStr{
BeginTime: nowTime, BeginTime: beginTime,
NextTime: nowTime, NextTime: beginTime,
SpaceTime: spaceTime, SpaceTime: spaceTime,
Callback: callback, Callback: callback,
Extend: extend, ExtendData: extendData,
UniqueKey: uniqueKey, TaskId: taskId,
JobType: jobType,
JobData: jobData,
} }
clusterWorkerList.Store(uniqueKey, t) clusterWorkerList.Store(taskId, t)
cacheStr, _ := c.redis.Get(ctx, c.nextKey).Result() cacheStr, _ := c.redis.Get(ctx, c.nextKey).Result()
execTime := make(map[string]time.Time) execTime := make(map[string]time.Time)
@@ -104,9 +190,9 @@ func (c *cluster) AddTimer(ctx context.Context, uniqueKey string, spaceTime time
p.ZAdd(ctx, c.zsetKey, &redis.Z{ p.ZAdd(ctx, c.zsetKey, &redis.Z{
Score: float64(nextTime.UnixMilli()), Score: float64(nextTime.UnixMilli()),
Member: uniqueKey, Member: taskId,
}) })
execTime[uniqueKey] = nextTime execTime[taskId] = nextTime
n, _ := json.Marshal(execTime) n, _ := json.Marshal(execTime)
// fmt.Println("execTime:", execTime, string(n)) // fmt.Println("execTime:", execTime, string(n))
p.Set(ctx, c.nextKey, string(n), 0) p.Set(ctx, c.nextKey, string(n), 0)
@@ -119,7 +205,8 @@ func (c *cluster) AddTimer(ctx context.Context, uniqueKey string, spaceTime time
} }
// 计算下一次执行的时间 // 计算下一次执行的时间
func (c *cluster) getNextTime() { // TODO:注册的任务需放在Redis集中存储,因为本地的话,如果有多个服务,那么就会出现不一致的情况。但是要注意服务如何进行下线,由于是主动上报的,需要有一个机制进行删除过期的任务(添加任务&定时器轮训注册)
func (c *Cluster) getNextTime() {
// log.Println("begin computer") // log.Println("begin computer")
ctx, cancel := context.WithCancel(c.ctx) ctx, cancel := context.WithCancel(c.ctx)
@@ -147,18 +234,18 @@ func (c *cluster) getNextTime() {
clusterWorkerList.Range(func(key, value interface{}) bool { clusterWorkerList.Range(func(key, value interface{}) bool {
val := value.(timerStr) val := value.(timerStr)
beforeTime := execTime[val.UniqueKey] beforeTime := execTime[val.TaskId]
if beforeTime.After(nowTime) { if beforeTime.After(nowTime) {
return true return true
} }
nextTime := getNextExecTime(beforeTime, val.SpaceTime) nextTime := getNextExecTime(val)
execTime[val.UniqueKey] = nextTime execTime[val.TaskId] = nextTime
p.ZAdd(ctx, c.zsetKey, &redis.Z{ p.ZAdd(ctx, c.zsetKey, &redis.Z{
Score: float64(nextTime.UnixMilli()), Score: float64(nextTime.UnixMilli()),
Member: val.UniqueKey, Member: val.TaskId,
}) })
// log.Println("computeTime add", c.zsetKey, val.UniqueKey, nextTime.UnixMilli()) // log.Println("computeTime add", c.zsetKey, val.taskId, nextTime.UnixMilli())
return true return true
}) })
@@ -171,44 +258,23 @@ func (c *cluster) getNextTime() {
} }
// 递归遍历获取执行时间 // 递归遍历获取执行时间
func getNextExecTime(beforeTime time.Time, spaceTime time.Duration) time.Time { // TODO:需要根据不同的任务类型计算下次定时时间
func getNextExecTime(ts timerStr) time.Time {
nowTime := time.Now() nowTime := time.Now()
if beforeTime.After(nowTime) { if ts.NextTime.After(nowTime) {
return beforeTime return ts.NextTime
} }
nextTime := beforeTime.Add(spaceTime) nextTime := ts.NextTime.Add(ts.SpaceTime)
ts.NextTime = nextTime
if nextTime.Before(nowTime) { if nextTime.Before(nowTime) {
nextTime = getNextExecTime(nextTime, spaceTime) nextTime = getNextExecTime(ts)
} }
return nextTime return nextTime
} }
// 获取任务 // 获取任务
func (c *cluster) getTask() { func (c *Cluster) getTask() {
// 定时去Redis获取任务 // 定时去Redis获取任务
// zb := redis.ZRangeBy{
// Min: "0",
// Max: fmt.Sprintf("%+v", time.Now().UnixMilli()),
// }
// taskList, _ := c.redis.ZRangeByScore(c.ctx, c.zsetKey, &zb).Result()
// if len(taskList) == 0 {
// return
// }
// p := c.redis.Pipeline()
// for _, val := range taskList {
// // 添加到可执行队列
// p.LPush(c.ctx, c.listKey, val)
// // 删除有序集合
// p.ZRem(c.ctx, c.zsetKey, val)
// }
// _, err := p.Exec(c.ctx)
// // fmt.Println(err)
// _ = err
script := ` script := `
local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2]) local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])
for i,v in ipairs(token) do for i,v in ipairs(token) do
@@ -222,33 +288,61 @@ func (c *cluster) getTask() {
} }
// 监听任务 // 监听任务
func (c *cluster) watch() { func (c *Cluster) watch() {
// 执行任务 // 执行任务
for { go func() {
keys, err := c.redis.BLPop(c.ctx, time.Second*10, c.listKey).Result() for {
if err != nil { keys, err := c.redis.BLPop(c.ctx, time.Second*10, c.listKey).Result()
fmt.Println("watch err:", err) if err != nil {
continue fmt.Println("watch err:", err)
continue
}
_, ok := clusterWorkerList.Load(keys[1])
if !ok {
fmt.Println("watch timer:任务不存在", keys[1])
c.redis.SAdd(c.ctx, c.setKey, keys[1])
continue
}
go c.doTask(c.ctx, c.redis, keys[1])
} }
go doTask(c.ctx, c.redis, keys[1]) }()
}
go func() {
for {
taskId, err := c.redis.SPop(c.ctx, c.setKey).Result()
if err != nil {
fmt.Println("watch err:", err)
if err == redis.Nil {
// 已经是空了就不要浪费资源了
time.Sleep(time.Second)
}
continue
}
_, ok := clusterWorkerList.Load(taskId)
if !ok {
fmt.Println("watch timer:任务不存在", taskId)
c.redis.SAdd(c.ctx, c.setKey, taskId)
continue
}
go c.doTask(c.ctx, c.redis, taskId)
}
}()
} }
// 执行任务 // 执行任务
func doTask(ctx context.Context, red *redis.Client, taskId string) { func (c *Cluster) doTask(ctx context.Context, red *redis.Client, taskId string) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
defer func() { defer func() {
if err := recover(); err != nil { if err := recover(); err != nil {
fmt.Println("timer:定时器出错", err) c.logger.Errorf(ctx, "timer:定时器出错 err:%+v stack:%s", err, string(debug.Stack()))
log.Println("errStack", string(debug.Stack()))
} }
}() }()
val, ok := clusterWorkerList.Load(taskId) val, ok := clusterWorkerList.Load(taskId)
if !ok { if !ok {
fmt.Println("doTask timer:任务不存在", taskId) c.logger.Errorf(ctx, "doTask timer:任务不存在", taskId)
return return
} }
t := val.(timerStr) t := val.(timerStr)
@@ -257,13 +351,11 @@ func doTask(ctx context.Context, red *redis.Client, taskId string) {
lock := lockx.NewGlobalLock(ctx, red, taskId) lock := lockx.NewGlobalLock(ctx, red, taskId)
tB := lock.Lock() tB := lock.Lock()
if !tB { if !tB {
fmt.Println("doTask timer:获取锁失败", taskId) c.logger.Errorf(ctx, "doTask timer:获取锁失败", taskId)
return return
} }
defer lock.Unlock() defer lock.Unlock()
ctx = context.WithValue(ctx, extendParamKey, t.Extend)
// 执行任务 // 执行任务
t.Callback(ctx) t.Callback(ctx, t.ExtendData)
} }
+1 -1
View File
@@ -1,4 +1,4 @@
package timer package timerx
import ( import (
"fmt" "fmt"
+15 -56
View File
@@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"time" "time"
"code.yun.ink/open/timer" "code.yun.ink/pkg/timerx"
"github.com/go-redis/redis/v8" "github.com/go-redis/redis/v8"
) )
@@ -30,7 +30,7 @@ func main() {
func worker() { func worker() {
client := getRedis() client := getRedis()
w := timer.InitWorker(context.Background(), client, &Worker{}) w := timerx.InitOnce(context.Background(), client, "test", &Worker{})
w.Add("test", "test", 1*time.Second, map[string]interface{}{ w.Add("test", "test", 1*time.Second, map[string]interface{}{
"test": "test", "test": "test",
}) })
@@ -52,11 +52,11 @@ func worker() {
type Worker struct{} type Worker struct{}
func (w *Worker) Worker(uniqueKey string, jobType string,data map[string]interface{}) timer.WorkerCode { func (w *Worker) Worker(jobType string, uniqueKey string, data interface{}) (timerx.WorkerCode, time.Duration) {
fmt.Println("执行时间:", time.Now().Format("2006-01-02 15:04:05")) fmt.Println("执行时间:", time.Now().Format("2006-01-02 15:04:05"))
fmt.Println(uniqueKey, jobType) fmt.Println(uniqueKey, jobType)
fmt.Println(data) fmt.Println(data)
return timer.WorkerCodeAgain return timerx.WorkerCodeAgain, time.Second
} }
func getRedis() *redis.Client { func getRedis() *redis.Client {
@@ -76,63 +76,22 @@ func re() {
client := getRedis() client := getRedis()
ctx := context.Background() ctx := context.Background()
cl := timer.InitCluster(ctx, client) cl := timerx.InitCluster(ctx, client, "kkkk")
cl.AddTimer(ctx, "test1", 1*time.Millisecond, aa, timer.ExtendParams{ cl.Add(ctx, "test1", 1*time.Millisecond, aa, "data")
Params: map[string]interface{}{ cl.Add(ctx, "test2", 1*time.Millisecond, aa, "data")
"test": "text1", cl.Add(ctx, "test3", 1*time.Millisecond, aa, "data")
}, cl.Add(ctx, "test4", 1*time.Millisecond, aa, "data")
}) cl.Add(ctx, "test5", 1*time.Millisecond, aa, "data")
cl.AddTimer(ctx, "test2", 1*time.Millisecond, aa, timer.ExtendParams{ cl.Add(ctx, "test6", 1*time.Millisecond, aa, "data")
Params: map[string]interface{}{
"test": "text2",
},
})
cl.AddTimer(ctx, "test3", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text3",
},
})
cl.AddTimer(ctx, "test4", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text4",
},
})
cl.AddTimer(ctx, "test5", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text5",
},
})
cl.AddTimer(ctx, "test6", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text6",
},
})
cl.AddTimer(ctx, "test7", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text7",
},
})
cl.AddTimer(ctx, "test8", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text8",
},
})
cl.AddTimer(ctx, "test9", 1*time.Millisecond, aa, timer.ExtendParams{
Params: map[string]interface{}{
"test": "text9",
},
})
select {} select {}
} }
func aa(ctx context.Context) bool { func aa(ctx context.Context, data interface{}) error {
// fmt.Println(time.Now().Format(time.RFC3339)) fmt.Println("执行时间:", time.Now().Format("2006-01-02 15:04:05"))
// fmt.Println("gggggggggggggggggggggggggggg") fmt.Println(data)
a, err := timer.GetExtendParams(ctx)
fmt.Printf("%+v %+v \n\n", a, err)
time.Sleep(time.Second * 5) time.Sleep(time.Second * 5)
return true return nil
} }
func d() { func d() {
+2 -2
View File
@@ -1,10 +1,10 @@
module code.yun.ink/open/timer module code.yun.ink/pkg/timerx
go 1.19 go 1.19
require ( require (
code.yun.ink/pkg/lockx v1.0.0
github.com/go-redis/redis/v8 v8.11.5 github.com/go-redis/redis/v8 v8.11.5
github.com/gomodule/redigo v1.8.9
) )
require ( require (
+3 -12
View File
@@ -1,27 +1,18 @@
code.yun.ink/open/timer v1.0.1 h1:ZWecU5K0rFB15p8DZubozTEwo1vrO4mUCRwEoD1tbEQ=
code.yun.ink/pkg/lockx v1.0.0 h1:xoLyf05PrOAhLID2LbJsEXA8YYURJTK/7spEk/hu/Rs=
code.yun.ink/pkg/lockx v1.0.0/go.mod h1:0xUU5xD8fui0Kf7g4TnFmaxUDo59CH2WM+sitko2SLc=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws=
github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-120
View File
@@ -1,120 +0,0 @@
package lockx
import (
"context"
"fmt"
"log"
"time"
"github.com/go-redis/redis/v8"
)
// 全局锁
type globalLock struct {
redis *redis.Client
ctx context.Context
cancel context.CancelFunc
uniqueKey string
value string
}
func NewGlobalLock(ctx context.Context, red *redis.Client, uniqueKey string) *globalLock {
ctx, cancel := context.WithTimeout(ctx, time.Second*30)
return &globalLock{
redis: red,
ctx: ctx,
cancel: cancel,
uniqueKey: uniqueKey,
value: fmt.Sprintf("%d", time.Now().UnixNano()),
}
}
// 获取锁
func (g *globalLock) Lock() bool {
script := `
local token = redis.call('get',KEYS[1])
if token == false
then
return redis.call('set',KEYS[1],ARGV[1],'EX',ARGV[2])
end
return 'ERROR'
`
resp, err := g.redis.Eval(g.ctx, script, []string{g.uniqueKey}, g.value, 5).Result()
if resp != "OK" {
_ = err
log.Println("globalLock Lock", resp, err, g.uniqueKey, g.value)
}
if resp == "OK" {
g.refresh()
return true
}
return false
}
// 尝试获取锁
func (g *globalLock) Try(limitTimes int) bool {
for i := 0; i < limitTimes; i++ {
if g.Lock() {
return true
}
time.Sleep(time.Millisecond * 100)
}
return false
}
// 删除锁
func (g *globalLock) Unlock() bool {
script := `
local token = redis.call('get',KEYS[1])
if token == ARGV[1]
then
redis.call('del',KEYS[1])
return 'OK'
end
return 'ERROR'
`
resp, err := g.redis.Eval(g.ctx, script, []string{g.uniqueKey}, g.value).Result()
if resp != "OK" {
log.Println("globalLock Unlock", resp, err, g.uniqueKey, g.value)
}
g.cancel()
return false
}
// 刷新锁
func (g *globalLock) refresh() {
go func() {
t := time.NewTicker(time.Second)
for {
select {
case <-t.C:
g.refreshExec()
case <-g.ctx.Done():
t.Stop()
return
}
}
}()
}
func (g *globalLock) refreshExec() bool {
script := `
local token = redis.call('get',KEYS[1])
if token == ARGV[1]
then
redis.call('set',KEYS[1],ARGV[1],'EX',ARGV[2])
return 'OK'
end
return 'ERROR'
`
resp, err := g.redis.Eval(g.ctx, script, []string{g.uniqueKey}, g.value, 5).Result()
if resp != "OK" {
log.Println("globalLock refresh", resp, err, g.uniqueKey, g.value)
}
return resp == "OK"
}
-52
View File
@@ -1,52 +0,0 @@
package lockx_test
import (
"context"
"fmt"
"testing"
"code.yun.ink/open/timer/lockx"
"github.com/go-redis/redis/v8"
)
var Redis *redis.Client
// func TestMain(m *testing.M) {
// client := redis.NewClient(&redis.Options{
// Addr: "127.0.0.1" + ":" + "6379",
// Password: "", // no password set
// DB: 0, // use default DB
// })
// if client == nil {
// fmt.Println("redis init error")
// return
// }
// // fmt.Println("ffff")
// Redis = client
// }
func TestLockx(t *testing.T) {
client := redis.NewClient(&redis.Options{
Addr: "127.0.0.1" + ":" + "6379",
Password: "", // no password set
DB: 0, // use default DB
})
if client == nil {
fmt.Println("redis init error")
return
}
fmt.Println("begin")
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
lock := lockx.NewGlobalLock(ctx, client, "lockx:test")
if !lock.Lock() {
fmt.Println("lock error")
}
defer lock.Unlock()
fmt.Println("ssss")
}
+25
View File
@@ -0,0 +1,25 @@
package timerx
import (
"context"
"log"
)
type Logger interface {
Infof(ctx context.Context, format string, v ...interface{})
Errorf(ctx context.Context, format string, v ...interface{})
}
type defaultLogger struct{}
func NewLogger() *defaultLogger {
return &defaultLogger{}
}
func (l *defaultLogger) Infof(ctx context.Context, format string, v ...interface{}) {
log.Printf("[INFO] "+format, v...)
}
func (l *defaultLogger) Errorf(ctx context.Context, format string, v ...interface{}) {
log.Printf("[ERROR] "+format, v...)
}
+184
View File
@@ -0,0 +1,184 @@
package timerx
import (
"context"
"encoding/json"
"fmt"
"log"
"runtime/debug"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
)
// 功能描述
// 1. 任务可以多节点发布
// 2. 每个任务的执行在全局仅会执行一次
// 3. 任务执行失败支持快捷重新加入队列
// 单次的任务队列
type worker struct {
ctx context.Context
zsetKey string
listKey string
redis *redis.Client
worker Callback
}
type WorkerCode int
const (
WorkerCodeSuccess WorkerCode = 0 // 处理完成(不需要重入)
WorkerCodeAgain WorkerCode = -1 // 需要继续定时,默认原来的时间
)
// 需要考虑执行失败重新放入队列的情况
type Callback interface {
// 任务执行
// uniqueKey: 任务唯一标识
// jobType: 任务类型,用于区分任务
// data: 任务数据
Worker(jobType string, uniqueKey string, data interface{}) (WorkerCode, time.Duration)
}
var wo *worker = nil
var once sync.Once
type extendData struct {
Delay time.Duration
Data interface{}
}
// 初始化
func InitOnce(ctx context.Context, re *redis.Client, jobGlobalName string, jobCallback Callback) *worker {
once.Do(func() {
wo = &worker{
ctx: ctx,
zsetKey: "timer:once_zsetkey" + jobGlobalName,
listKey: "timer:once_listkey" + jobGlobalName,
redis: re,
worker: jobCallback,
}
go wo.getTask()
go wo.watch()
})
return wo
}
// 添加任务
// 重复插入就代表覆盖
func (w *worker) Add(jobType string, uniqueKey string, delayTime time.Duration, data interface{}) error {
if delayTime.Abs() != delayTime {
return fmt.Errorf("时间间隔不能为负数")
}
if delayTime == 0 {
return fmt.Errorf("时间间隔不能为0")
}
redisKey := fmt.Sprintf("%s[:]%s", jobType, uniqueKey)
ed := extendData{
Delay: delayTime,
Data: data,
}
b, _ := json.Marshal(ed)
_, err := w.redis.SetEX(w.ctx, redisKey, b, delayTime+time.Second*5).Result()
if err != nil {
return err
}
_, err = w.redis.ZAdd(w.ctx, w.zsetKey, &redis.Z{
Score: float64(time.Now().Add(delayTime).UnixMilli()),
Member: redisKey,
}).Result()
return err
}
// 删除任务
func (w *worker) Del(jobType string, uniqueKey string) error {
redisKey := fmt.Sprintf("%s[:]%s", jobType, uniqueKey)
w.redis.Del(w.ctx, redisKey).Result()
w.redis.ZRem(w.ctx, w.zsetKey, redisKey).Result()
return nil
}
// 获取任务
func (w *worker) getTask() {
timer := time.NewTicker(time.Millisecond * 200)
defer timer.Stop()
Loop:
for {
select {
case <-timer.C:
script := `
local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])
for i,v in ipairs(token) do
redis.call('zrem',KEYS[1],v)
redis.call('lpush',KEYS[2],v)
end
return "OK"
`
w.redis.Eval(w.ctx, script, []string{w.zsetKey, w.listKey}, 0, time.Now().UnixMilli()).Result()
// fmt.Println(i, err)
case <-w.ctx.Done():
break Loop
}
}
}
// 监听任务
func (w *worker) watch() {
for {
keys, err := w.redis.BLPop(w.ctx, time.Second*10, w.listKey).Result()
if err != nil {
fmt.Println("watch err:", err)
continue
}
go w.doTask(keys[1])
}
}
func (w *worker) doTask(key string) {
defer func() {
if err := recover(); err != nil {
fmt.Println("timer:定时器出错", err)
log.Println("errStack", string(debug.Stack()))
}
}()
s := strings.Split(key, "[:]")
// 读取数据
str, err := w.redis.Get(w.ctx, key).Result()
if err != nil {
fmt.Println("execJob err:", err)
return
}
ed := extendData{}
json.Unmarshal([]byte(str), &ed)
fmt.Println("开始时间:", time.Now().Format("2006-01-02 15:04:05"))
code, t := w.worker.Worker(s[0], s[1], ed.Data)
if code == WorkerCodeAgain {
// 重新放入队列
fmt.Println("重入时间:", time.Now().Format("2006-01-02 15:04:05"))
if t != 0 && t == t.Abs() {
ed.Delay = t
}
w.Add(s[0], s[1], ed.Delay, ed.Data)
}
}
+2
View File
@@ -0,0 +1,2 @@
package timerx
+27
View File
@@ -0,0 +1,27 @@
package timerx
type Options struct {
logger Logger
}
func defaultOptions() Options {
return Options{
logger: NewLogger(),
}
}
type Option func(*Options)
func newOptions(opts ...Option) Options {
o := defaultOptions()
for _, opt := range opts {
opt(&o)
}
return o
}
func SetLogger(log Logger) Option {
return func(o *Options) {
o.logger = log
}
}
+26 -46
View File
@@ -1,4 +1,4 @@
package timer package timerx
// 作者:黄新云 // 作者:黄新云
@@ -13,26 +13,25 @@ import (
) )
// 定时器 // 定时器
// 原理:每毫秒的时间触发 // 1. 这个定时器的作用范围是本机
// uuid -> timerStr // uuid -> timerStr
var timerMap = make(map[string]*timerStr) var timerMap = make(map[string]*timerStr)
var timerMapMux sync.Mutex var timerMapMux sync.Mutex
var timerCount int // 当前定时数目 var timerCount int // 当前定时数目
var onceLimit sync.Once // 实现单例 var onceLimit sync.Once // 实现单例
var nextTime = time.Now() // 下一次执行的时间
type ContextValueKey string // 定义context 传递的Key类型 type Single struct{}
const ( var sin *Single = nil
extendParamKey ContextValueKey = "extend_param"
)
// 定时器类 // 定时器类
func InitSingle(ctx context.Context) { func InitSingle(ctx context.Context) *Single {
onceLimit.Do(func() { onceLimit.Do(func() {
timer := time.NewTicker( time.Millisecond*200) sin = &Single{}
timer := time.NewTicker(time.Millisecond * 200)
go func(ctx context.Context) { go func(ctx context.Context) {
Loop: Loop:
for { for {
@@ -43,7 +42,7 @@ func InitSingle(ctx context.Context) {
continue continue
} }
// 迭代定时器 // 迭代定时器
iteratorTimer(ctx, t) sin.iterator(ctx, t)
// fmt.Println("timer: 执行") // fmt.Println("timer: 执行")
case <-ctx.Done(): case <-ctx.Done():
// 跳出循环 // 跳出循环
@@ -53,10 +52,17 @@ func InitSingle(ctx context.Context) {
log.Println("timer: initend") log.Println("timer: initend")
}(ctx) }(ctx)
}) })
return sin
} }
// 间隔定时器 // 间隔定时器
func AddTimer(space time.Duration, call callback, extend ExtendParams) (int, error) { // @param space 间隔时间
// @param call 回调函数
// @param extend 附加参数
// @return int 定时器索引
// @return error 错误
func (s *Single) Add(space time.Duration, call callback, extend interface{}) (int, error) {
timerMapMux.Lock() timerMapMux.Lock()
defer timerMapMux.Unlock() defer timerMapMux.Unlock()
@@ -74,8 +80,7 @@ func AddTimer(space time.Duration, call callback, extend ExtendParams) (int, err
NextTime: nowTime, // nowTime.Add(space), // 添加任务的时候就执行一次 NextTime: nowTime, // nowTime.Add(space), // 添加任务的时候就执行一次
SpaceTime: space, SpaceTime: space,
CanRunning: make(chan struct{}, 1), CanRunning: make(chan struct{}, 1),
UniqueKey: "", ExtendData: extend,
Extend: extend,
} }
timerMap[fmt.Sprintf("%d", timerCount)] = &t timerMap[fmt.Sprintf("%d", timerCount)] = &t
@@ -88,21 +93,15 @@ func AddTimer(space time.Duration, call callback, extend ExtendParams) (int, err
return timerCount, nil return timerCount, nil
} }
// 添加需要定时的规则 // 删除定时器
func AddToTimer(space time.Duration, call callback) int { func (s *Single) Del(index string) {
extend := ExtendParams{}
count, _ := AddTimer(space, call, extend)
return count
}
func DelToTimer(index string) {
timerMapMux.Lock() timerMapMux.Lock()
defer timerMapMux.Unlock() defer timerMapMux.Unlock()
delete(timerMap, index) delete(timerMap, index)
} }
// 迭代定时器列表 // 迭代定时器列表
func iteratorTimer(ctx context.Context, nowTime time.Time) { func (s *Single) iterator(ctx context.Context, nowTime time.Time) {
timerMapMux.Lock() timerMapMux.Lock()
defer timerMapMux.Unlock() defer timerMapMux.Unlock()
@@ -151,7 +150,7 @@ func iteratorTimer(ctx context.Context, nowTime time.Time) {
} }
}() }()
// fmt.Printf("timer: 准备执行 %v %v \n", k, v.Tag) // fmt.Printf("timer: 准备执行 %v %v \n", k, v.Tag)
timerAction(ctx, v.Callback, v.UniqueKey, v.Extend) s.doTask(ctx, v.Callback, v.ExtendData)
default: default:
// fmt.Printf("timer: 已在执行 %v %v \n", k, v.Tag) // fmt.Printf("timer: 已在执行 %v %v \n", k, v.Tag)
return return
@@ -174,33 +173,14 @@ func iteratorTimer(ctx context.Context, nowTime time.Time) {
// fmt.Println("timer: one finish") // fmt.Println("timer: one finish")
} }
// 定义各个回调函数
type callback func(ctx context.Context) bool
// 定时器操作类 // 定时器操作类
// 这里不应painc // 这里不应painc
func timerAction(ctx context.Context, call callback, uniqueKey string, extend ExtendParams) bool { func (s *Single) doTask(ctx context.Context, call callback, extend interface{}) error {
defer func() { defer func() {
if err := recover(); err != nil { if err := recover(); err != nil {
fmt.Println("timer:定时器出错", err) fmt.Println("timer:定时器出错", err)
log.Println("errStack", string(debug.Stack())) log.Println("errStack", string(debug.Stack()))
} }
}() }()
ctx, cancel := context.WithCancel(ctx) return call(ctx, extend)
defer cancel()
// 附加数据
ctx = context.WithValue(ctx, extendParamKey, extend)
return call(ctx)
}
// 快捷方法
func GetExtendParams(ctx context.Context) (*ExtendParams, error) {
val := ctx.Value(extendParamKey)
params, ok := val.(ExtendParams)
if !ok {
return nil, errors.New("没找到参数")
}
return &params, nil
} }
+10 -5
View File
@@ -1,15 +1,20 @@
package timer_test package timerx_test
import "testing" import (
"fmt"
"testing"
)
// 单元测试 // 单元测试
func TestHelloWorld(t *testing.T) { func TestHelloWorld(t *testing.T) {
// 日志 // 日志
t.Log("hello world") // t.Log("hello world")
s := "ddd" fmt.Println("hello world")
t.Logf("Log测试%s", s)
// s := "ddd"
// t.Logf("Log测试%s", s)
// t.Errorf("ErrorF %s", s) // t.Errorf("ErrorF %s", s)
// 标记错误(继续运行) // 标记错误(继续运行)
+33 -7
View File
@@ -1,6 +1,9 @@
package timer package timerx
import "time" import (
"context"
"time"
)
type timerStr struct { type timerStr struct {
Callback callback // 需要回调的方法 Callback callback // 需要回调的方法
@@ -8,11 +11,34 @@ type timerStr struct {
BeginTime time.Time // 初始化任务的时间 BeginTime time.Time // 初始化任务的时间
NextTime time.Time // [删]下一次执行的时间 NextTime time.Time // [删]下一次执行的时间
SpaceTime time.Duration // 任务间隔时间 SpaceTime time.Duration // 任务间隔时间
UniqueKey string // 全局唯一键 TaskId string // 任务ID 全局唯一键
Extend ExtendParams // 附加参数 ExtendData interface{} // 附加参数
JobType JobType // 任务类型
JobData *JobData // 任务时间数据
} }
// 扩展参数 type JobType string
type ExtendParams struct {
Params map[string]interface{} // 带出去的参数 const (
JobTypeEveryDay JobType = "every_day"
JobTypeEveryHour JobType = "every_hour"
JobTypeEveryMinute JobType = "every_minute"
JobTypeEverySecond JobType = "every_second"
JobTypeEveryMonth JobType = "every_month"
// 根据间隔时间执行
JobTypeInterval JobType = "interval"
)
type JobData struct {
Month *time.Month // 每年的第几个月
Weekday *time.Weekday // 每周的周几
Day *int // 每月的第几天
Hour *int // 每天的第几个小时
Minute *int // 每小时的第几分钟
Second *int // 每分钟的第几秒
} }
var nextTime = time.Now() // 下一次执行的时间
// 定义各个回调函数
type callback func(ctx context.Context, extendData interface{}) error
-159
View File
@@ -1,159 +0,0 @@
package timer
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"time"
"github.com/go-redis/redis/v8"
)
// worker is a one-shot, Redis-backed delayed task queue.
// Jobs are scored by due time in a sorted set (zsetKey), moved by a
// background goroutine into a ready list (listKey) when due, and
// consumed by a blocking-pop loop that dispatches to the user handler.
type worker struct {
	ctx     context.Context // lifetime of the background goroutines; NOTE(review): storing a ctx in a struct is discouraged in Go — confirm intent
	zsetKey string          // sorted set: member = "<uniqueKey>[:]<jobType>", score = due time in unix millis
	listKey string          // ready queue consumed by execTask
	redis   *redis.Client
	worker  WorkerInterface // user-supplied job handler
}
// WorkerCode is the result code returned by a WorkerInterface implementation.
type WorkerCode int

const (
	WorkerCodeSuccess WorkerCode = 0  // job finished; it is not requeued
	WorkerCodeAgain   WorkerCode = -1 // job is re-enqueued (see execTask) with its stored delay
)
// WorkerInterface is implemented by users of the queue to execute jobs.
// Implementations must account for failed executions that should be
// re-enqueued: return WorkerCodeAgain to have the job scheduled again.
type WorkerInterface interface {
	Worker(uniqueKey string, jobType string, data map[string]interface{}) WorkerCode
}
// Package-level singleton worker instance, created exactly once by
// InitWorker via the once guard below.
var wo *worker = nil
var once sync.Once

// extendData is the JSON payload stored in Redis for each scheduled job.
type extendData struct {
	Delay time.Duration          // original delay, reused when the job is requeued
	Data  map[string]interface{} // opaque user data handed back to Worker
}
// InitWorker lazily creates the process-wide worker singleton and starts
// its two background goroutines: getTask (moves due jobs to the ready
// list) and execTask (pops and executes them).
// NOTE(review): only the first call's arguments take effect — subsequent
// calls return the existing instance and silently ignore ctx/re/w.
func InitWorker(ctx context.Context, re *redis.Client, w WorkerInterface) *worker {
	once.Do(func() {
		wo = &worker{
			ctx:     ctx,
			zsetKey: "timer:job_zsetkey",
			listKey: "timer:job_listkey",
			redis:   re,
			worker:  w,
		}
		// Both goroutines share the worker's ctx for their Redis calls.
		go wo.getTask()
		go wo.execTask()
	})
	return wo
}
// Add schedules a one-shot job identified by uniqueKey+jobType.
// Re-adding the same key pair overwrites the previous schedule.
// The JSON payload is stored under "<uniqueKey>[:]<jobType>" with a TTL
// of delay+5s (padding so it survives until executed), and the key is
// scored into the sorted set by its absolute due time in unix millis.
// Returns an error for non-positive delays or any Redis failure.
func (w *worker) Add(uniqueKey string, jobType string, delayTime time.Duration, data map[string]interface{}) error {
	// Direct sign check replaces the original's convoluted
	// delayTime.Abs() != delayTime comparison (same semantics).
	if delayTime < 0 {
		return fmt.Errorf("时间间隔不能为负数")
	}
	if delayTime == 0 {
		return fmt.Errorf("时间间隔不能为0")
	}

	redisKey := fmt.Sprintf("%s[:]%s", uniqueKey, jobType)
	ed := extendData{
		Delay: delayTime,
		Data:  data,
	}
	// The original discarded this error; surface it so callers learn
	// about unmarshalable values inside data.
	b, err := json.Marshal(ed)
	if err != nil {
		return err
	}
	if _, err := w.redis.SetEX(w.ctx, redisKey, b, delayTime+time.Second*5).Result(); err != nil {
		return err
	}
	_, err = w.redis.ZAdd(w.ctx, w.zsetKey, &redis.Z{
		Score:  float64(time.Now().Add(delayTime).UnixMilli()),
		Member: redisKey,
	}).Result()
	return err
}
// Del removes a scheduled job: both its payload key and its entry in the
// due-time sorted set. The original discarded both Redis results and
// always returned nil; now the first error encountered is returned so
// callers can detect a failed removal (payload deletion takes priority).
func (w *worker) Del(uniqueKey string, jobType string) error {
	redisKey := fmt.Sprintf("%s[:]%s", uniqueKey, jobType)
	_, delErr := w.redis.Del(w.ctx, redisKey).Result()
	_, remErr := w.redis.ZRem(w.ctx, w.zsetKey, redisKey).Result()
	if delErr != nil {
		return delErr
	}
	return remErr
}
// getTask runs on a 200ms ticker and atomically moves every due job from
// the sorted set into the ready list via a single Lua script (zrem+lpush
// per member, so no job can be claimed twice). It exits when the worker
// context is canceled.
func (w *worker) getTask() {
	// The script never changes — build it once instead of on every tick
	// as the original did.
	const script = `
	local token = redis.call('zrangebyscore',KEYS[1],ARGV[1],ARGV[2])
	for i,v in ipairs(token) do
		redis.call('zrem',KEYS[1],v)
		redis.call('lpush',KEYS[2],v)
	end
	return "OK"
	`
	timer := time.NewTicker(time.Millisecond * 200)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			// Everything scored between 0 and "now" (unix millis) is due.
			if _, err := w.redis.Eval(w.ctx, script, []string{w.zsetKey, w.listKey}, 0, time.Now().UnixMilli()).Result(); err != nil {
				// Best-effort: log instead of silently dropping the error.
				fmt.Println("getTask err:", err)
			}
		case <-w.ctx.Done():
			return
		}
	}
}
// execTask blocks on the ready list (10s pop timeout) and dispatches
// each due job to the user handler in its own goroutine. A handler
// returning WorkerCodeAgain re-enqueues the job with its stored delay.
// Fix: the original looped forever — after context cancellation BLPop
// fails immediately, turning the loop into a busy spin; we now return
// once the worker context is done.
func (w *worker) execTask() {
	for {
		keys, err := w.redis.BLPop(w.ctx, time.Second*10, w.listKey).Result()
		if err != nil {
			if w.ctx.Err() != nil {
				return
			}
			fmt.Println("watch err:", err)
			continue
		}
		go func() {
			// keys[0] is the list name; keys[1] is the popped member
			// "<uniqueKey>[:]<jobType>".
			s := strings.Split(keys[1], "[:]")
			// Fetch the job payload stored by Add.
			str, err := w.redis.Get(w.ctx, keys[1]).Result()
			if err != nil {
				fmt.Println("execJob err:", err)
				return
			}
			ed := extendData{}
			// The original ignored this error and ran the handler with a
			// zero-valued payload on corrupt data.
			if err := json.Unmarshal([]byte(str), &ed); err != nil {
				fmt.Println("execJob unmarshal err:", err)
				return
			}
			fmt.Println("开始时间:", time.Now().Format("2006-01-02 15:04:05"))
			code := w.worker.Worker(s[0], s[1], ed.Data)
			if code == WorkerCodeAgain {
				// Handler asked for a retry: put the job back with the
				// same delay it was originally scheduled with.
				fmt.Println("重入时间:", time.Now().Format("2006-01-02 15:04:05"))
				w.Add(s[0], s[1], ed.Delay, ed.Data)
			}
		}()
	}
}