feat: use robot to migrate code

Signed-off-by: kubbot & kubecub <3293172751ysy@gmail.com>
This commit is contained in:
kubbot & kubecub
2023-06-30 09:45:02 +08:00
parent 2d41819008
commit 539e0fdfb6
529 changed files with 64588 additions and 54413 deletions
-176
View File
@@ -1,176 +0,0 @@
package batcher
import (
"Open_IM/pkg/common/log"
"context"
"errors"
"hash/crc32"
"sync"
"time"
)
var (
	// ErrorNotSetFunction is returned by Start when no Do callback was provided.
	ErrorNotSetFunction = errors.New("not set do function")
	// ErrFull is returned by Add when the target worker's channel buffer is
	// at capacity. BUG FIX: Add referenced ErrFull but it was never defined
	// anywhere in this file, which made the package fail to compile.
	ErrFull = errors.New("channel is full")
)
var (
	// DefaultSize is the number of messages aggregated before a flush.
	DefaultSize = 100
	// DefaultBuffer is the per-worker channel buffer length.
	DefaultBuffer = 100
	// DefaultWorker is the number of parallel merge goroutines.
	DefaultWorker = 5
	// DefaultInterval is the periodic flush interval.
	DefaultInterval = time.Second
)
// DoFuntion is the callback invoked with each flushed batch, keyed by
// message key. NOTE(review): the name looks like a typo of "DoFunction",
// but it is exported, so it is kept for backward compatibility.
type DoFuntion func(ctx context.Context, val map[string][]interface{})

// Option mutates a Config during construction (functional options pattern).
type Option func(c *Config)

// Config holds the tunable batching parameters; see the With* options.
type Config struct {
	size     int           //Number of message aggregations
	buffer   int           //The number of caches running in a single coroutine
	worker   int           //Number of coroutines processed in parallel
	interval time.Duration //Time of message aggregations
}
// newDefaultConfig returns a Config pre-populated with the package defaults.
func newDefaultConfig() *Config {
	cfg := new(Config)
	cfg.size = DefaultSize
	cfg.buffer = DefaultBuffer
	cfg.worker = DefaultWorker
	cfg.interval = DefaultInterval
	return cfg
}
// Batcher aggregates values by key across a fixed pool of worker
// goroutines and flushes each accumulated batch to the Do callback,
// either when a batch reaches config.size or on a periodic tick.
type Batcher struct {
	config Config
	// Do receives each flushed batch; it must be set before Start.
	Do func(ctx context.Context, val map[string][]interface{})
	// Sharding maps a key to a worker index; Start installs a CRC32
	// default when nil.
	Sharding func(key string) int
	chans    []chan *msg    // one buffered channel per worker
	wait     sync.WaitGroup // tracks running merge goroutines
}

// msg is the unit queued on a worker channel; a nil *msg is the
// shutdown sentinel sent by Close.
type msg struct {
	key string
	val interface{}
}
// NewBatcher builds a Batcher that will deliver batches to fn, applying
// any functional options over the package defaults. Call Start to begin
// processing and Close to shut down.
func NewBatcher(fn DoFuntion, opts ...Option) *Batcher {
	b := &Batcher{}
	b.Do = fn
	config := newDefaultConfig()
	for _, o := range opts {
		o(config)
	}
	// BUG FIX: the original built config but never stored it, so
	// b.config stayed zero-valued and no worker channels were created.
	b.config = *config
	b.chans = make([]chan *msg, b.config.worker)
	for i := 0; i < b.config.worker; i++ {
		b.chans[i] = make(chan *msg, b.config.buffer)
	}
	return b
}
// WithSize overrides the number of messages aggregated per flush.
func WithSize(s int) Option {
	return func(c *Config) { c.size = s }
}

// WithBuffer overrides the per-worker channel buffer length.
func WithBuffer(b int) Option {
	return func(c *Config) { c.buffer = b }
}

// WithWorker overrides the number of parallel merge goroutines.
func WithWorker(w int) Option {
	return func(c *Config) { c.worker = w }
}

// WithInterval overrides the periodic flush interval.
func WithInterval(i time.Duration) Option {
	return func(c *Config) { c.interval = i }
}
// Start validates the configuration and launches one merge goroutine per
// worker channel. It returns ErrorNotSetFunction when no Do callback has
// been provided.
func (b *Batcher) Start() error {
	if b.Do == nil {
		return ErrorNotSetFunction
	}
	if b.Sharding == nil {
		// Default sharding: CRC32 checksum of the key, modulo worker count.
		b.Sharding = func(key string) int {
			return int(crc32.ChecksumIEEE([]byte(key))) % b.config.worker
		}
	}
	b.wait.Add(len(b.chans))
	for i, ch := range b.chans {
		go b.merge(i, ch)
	}
	return nil
}
// Add routes (key, val) to its shard's channel without blocking.
// It returns ErrFull when that worker's buffer is at capacity.
func (b *Batcher) Add(key string, val interface{}) error {
	ch, m := b.add(key, val)
	select {
	case ch <- m:
		return nil
	default:
		return ErrFull
	}
}
// add picks the worker channel for key via the Sharding function and
// wraps the value into a *msg. It does not send; Add performs the send.
func (b *Batcher) add(key string, val interface{}) (chan *msg, *msg) {
	// BUG FIX: the original referenced b.opts.worker, but Batcher has no
	// "opts" field — the worker count lives in b.config.
	sharding := b.Sharding(key) % b.config.worker
	ch := b.chans[sharding]
	m := &msg{key: key, val: val}
	return ch, m
}
// merge is the per-worker loop: it drains ch, aggregating values by key
// into vals, and flushes (calls b.Do) when either the accumulated count
// reaches b.config.size or the ticker fires. A nil message is the
// shutdown sentinel from Close: it triggers a final flush, then return.
//
// BUG FIX: every b.opts.* reference below was a compile error — Batcher
// has no "opts" field; all configuration reads now go through b.config.
func (b *Batcher) merge(idx int, ch <-chan *msg) {
	defer b.wait.Done()
	var (
		msg        *msg
		count      int
		closed     bool
		lastTicker = true
		interval   = b.config.interval
		vals       = make(map[string][]interface{}, b.config.size)
	)
	// Stagger each worker's first tick so all workers do not flush at
	// the same instant; after the first tick the regular interval is used.
	if idx > 0 {
		interval = time.Duration(int64(idx) * (int64(b.config.interval) / int64(b.config.worker)))
	}
	ticker := time.NewTicker(interval)
	for {
		select {
		case msg = <-ch:
			if msg == nil {
				// Close sentinel: fall through to the final flush below.
				closed = true
				break
			}
			count++
			vals[msg.key] = append(vals[msg.key], msg.val)
			if count >= b.config.size {
				break // batch full: fall through to flush
			}
			continue
		case <-ticker.C:
			if lastTicker {
				// First (staggered) tick fired; switch to the regular interval.
				ticker.Stop()
				ticker = time.NewTicker(b.config.interval)
				lastTicker = false
			}
		}
		if len(vals) > 0 {
			ctx := context.Background()
			b.Do(ctx, vals)
			vals = make(map[string][]interface{}, b.config.size)
			count = 0
		}
		if closed {
			ticker.Stop()
			return
		}
	}
}
// Close sends the nil shutdown sentinel to every worker channel, then
// blocks until all merge goroutines have flushed and exited.
func (b *Batcher) Close() {
	for i := range b.chans {
		b.chans[i] <- nil
	}
	b.wait.Wait()
}
-184
View File
@@ -1,184 +0,0 @@
package retry
import (
"context"
"errors"
"fmt"
"runtime/debug"
"time"
)
var (
	// ErrorAbort is returned when the RetryChecker declines further retries.
	ErrorAbort = errors.New("stop retry")
	// ErrorTimeout is returned when the configured Timeout elapses first.
	ErrorTimeout = errors.New("retry timeout")
	// ErrorContextDeadlineExceed is returned when the caller's context is
	// cancelled before the retries finish.
	ErrorContextDeadlineExceed = errors.New("context deadline exceeded")
	// ErrorEmptyRetryFunc is returned when Do is called with a nil function.
	ErrorEmptyRetryFunc = errors.New("empty retry function")
	// ErrorTimeFormat is returned when the configured Timeout is not positive.
	ErrorTimeFormat = errors.New("time out err")
)
// RetriesFunc is the operation to retry; a nil return means success.
type RetriesFunc func() error

// Option mutates a Config during construction (functional options pattern).
type Option func(c *Config)

// HookFunc runs around each attempt; see Config.BeforeTry / AfterTry.
type HookFunc func()

// RetriesChecker inspects an attempt's error and reports whether another
// attempt should be made.
type RetriesChecker func(err error) (needRetry bool)

// Config gathers the retry policy knobs consumed by Do.
type Config struct {
	MaxRetryTimes int            // maximum number of attempts
	Timeout       time.Duration  // overall deadline for the whole sequence
	RetryChecker  RetriesChecker // decides whether an error is retryable
	Strategy      Strategy       // backoff between attempts
	RecoverPanic  bool           // wrap panics into errors instead of re-panicking
	BeforeTry     HookFunc       // runs before every attempt
	AfterTry      HookFunc       // runs after every attempt
}
var (
	// DefaultMaxRetryTimes bounds attempts when the caller sets nothing.
	DefaultMaxRetryTimes = 3
	// DefaultTimeout bounds the whole retry sequence by default.
	DefaultTimeout = time.Minute
	// DefaultInterval seeds the default linear backoff strategy.
	DefaultInterval = time.Second * 2
	// DefaultRetryChecker retries on every error except ErrorAbort.
	DefaultRetryChecker = func(err error) bool {
		return !errors.Is(err, ErrorAbort) // not abort error, should continue retry
	}
)
// newDefaultConfig assembles a Config carrying the package defaults:
// three attempts, a one-minute timeout, linear backoff, and no-op hooks.
func newDefaultConfig() *Config {
	cfg := &Config{
		MaxRetryTimes: DefaultMaxRetryTimes,
		RetryChecker:  DefaultRetryChecker,
		Timeout:       DefaultTimeout,
		Strategy:      NewLinear(DefaultInterval),
	}
	cfg.BeforeTry = func() {}
	cfg.AfterTry = func() {}
	return cfg
}
// WithTimeout sets the overall deadline for the whole retry sequence.
func WithTimeout(timeout time.Duration) Option {
	return func(c *Config) { c.Timeout = timeout }
}

// WithMaxRetryTimes sets the maximum number of attempts.
func WithMaxRetryTimes(times int) Option {
	return func(c *Config) { c.MaxRetryTimes = times }
}

// WithRecoverPanic converts panics in the retried function into errors
// instead of re-panicking in the caller.
func WithRecoverPanic() Option {
	return func(c *Config) { c.RecoverPanic = true }
}

// WithBeforeHook installs a hook that runs before every attempt.
func WithBeforeHook(hook HookFunc) Option {
	return func(c *Config) { c.BeforeTry = hook }
}

// WithAfterHook installs a hook that runs after every attempt.
func WithAfterHook(hook HookFunc) Option {
	return func(c *Config) { c.AfterTry = hook }
}

// WithRetryChecker installs a custom retryable-error predicate.
func WithRetryChecker(checker RetriesChecker) Option {
	return func(c *Config) { c.RetryChecker = checker }
}
// WithBackOffStrategy selects one of the built-in backoff strategies,
// seeded with the given base duration. An unknown value leaves the
// current strategy unchanged.
func WithBackOffStrategy(s BackoffStrategy, duration time.Duration) Option {
	return func(c *Config) {
		var strategy Strategy
		switch s {
		case StrategyConstant:
			strategy = NewConstant(duration)
		case StrategyLinear:
			strategy = NewLinear(duration)
		case StrategyFibonacci:
			strategy = NewFibonacci(duration)
		default:
			return // unknown strategy: keep whatever is configured
		}
		c.Strategy = strategy
	}
}

// WithCustomStrategy installs a caller-provided Strategy implementation.
func WithCustomStrategy(s Strategy) Option {
	return func(c *Config) { c.Strategy = s }
}
// Do runs fn under the retry policy described by opts and returns nil on
// success, or an error describing why retrying stopped:
//   - ErrorEmptyRetryFunc: fn is nil.
//   - ErrorTimeFormat: the configured Timeout is not positive.
//   - ErrorContextDeadlineExceed: ctx finished first.
//   - ErrorTimeout: the overall Timeout elapsed first.
//   - ErrorAbort: the RetryChecker declined to retry.
//   - a wrapped attempt error after MaxRetryTimes failures, or a wrapped
//     panic message when RecoverPanic is set (otherwise the panic is
//     re-raised in the caller's goroutine).
//
// NOTE(review): the worker goroutine is not cancelled when ctx or the
// timer fires — it keeps attempting in the background until its attempts
// run out (its channel sends are buffered, so it does terminate) —
// confirm this is acceptable for long retry schedules.
func Do(ctx context.Context, fn RetriesFunc, opts ...Option) error {
	if fn == nil {
		return ErrorEmptyRetryFunc
	}
	var (
		abort         = make(chan struct{}, 1) // caller choose to abort retry
		run           = make(chan error, 1)
		panicInfoChan = make(chan string, 1)
		timer         *time.Timer
		runErr        error
	)
	config := newDefaultConfig()
	for _, o := range opts {
		o(config)
	}
	if config.Timeout > 0 {
		timer = time.NewTimer(config.Timeout)
	} else {
		return ErrorTimeFormat
	}
	// BUG FIX: the timer was never stopped; without this, every call to Do
	// left a live timer running until its full Timeout elapsed.
	defer timer.Stop()
	go func() {
		var err error
		defer func() {
			// Forward panics from fn to the select below rather than
			// letting this goroutine die silently.
			if e := recover(); e != nil {
				panicInfoChan <- fmt.Sprintf("retry function panic has occured, err=%v, stack:%s", e, string(debug.Stack()))
			}
		}()
		for i := 0; i < config.MaxRetryTimes; i++ {
			config.BeforeTry()
			err = fn()
			config.AfterTry()
			if err == nil {
				run <- nil
				return
			}
			// check whether to retry
			if config.RetryChecker != nil {
				if !config.RetryChecker(err) {
					abort <- struct{}{}
					return
				}
			}
			if config.Strategy != nil {
				// Back off before the next attempt (1-based attempt number).
				<-time.After(config.Strategy.Sleep(i + 1))
			}
		}
		run <- err
	}()
	select {
	case <-ctx.Done():
		// context deadline exceed
		return ErrorContextDeadlineExceed
	case <-timer.C:
		// timeout
		return ErrorTimeout
	case <-abort:
		// caller abort
		return ErrorAbort
	case msg := <-panicInfoChan:
		// panic occurred: re-panic unless the caller opted into recovery
		if !config.RecoverPanic {
			panic(msg)
		}
		runErr = fmt.Errorf("panic occurred=%s", msg)
	case e := <-run:
		// normal run
		if e != nil {
			runErr = fmt.Errorf("retry failed, err=%w", e)
		}
	}
	return runErr
}
-56
View File
@@ -1,56 +0,0 @@
package retry
import "time"
// BackoffStrategy enumerates the built-in backoff flavours accepted by
// WithBackOffStrategy.
type BackoffStrategy int

const (
	StrategyConstant  BackoffStrategy = iota // fixed interval every attempt
	StrategyLinear                           // interval grows linearly with the attempt number
	StrategyFibonacci                        // interval grows along the Fibonacci sequence
)

// Strategy computes how long to sleep before the given (1-based) attempt.
type Strategy interface {
	Sleep(times int) time.Duration
}
type Constant struct {
startInterval time.Duration
}
func NewConstant(d time.Duration) *Constant {
return &Constant{startInterval: d}
}
type Linear struct {
startInterval time.Duration
}
func NewLinear(d time.Duration) *Linear {
return &Linear{startInterval: d}
}
type Fibonacci struct {
startInterval time.Duration
}
func NewFibonacci(d time.Duration) *Fibonacci {
return &Fibonacci{startInterval: d}
}
func (c *Constant) Sleep(_ int) time.Duration {
return c.startInterval
}
func (l *Linear) Sleep(times int) time.Duration {
return l.startInterval * time.Duration(times)
}
func (f *Fibonacci) Sleep(times int) time.Duration {
return f.startInterval * time.Duration(fibonacciNumber(times))
}
func fibonacciNumber(n int) int {
if n == 0 || n == 1 {
return n
}
return fibonacciNumber(n-1) + fibonacciNumber(n-2)
}
-28
View File
@@ -1,28 +0,0 @@
package tools
// SplitResult holds one chunk of the partitioned data.
type SplitResult struct {
	Item []string
}

// Splitter partitions a string slice into chunks of a fixed maximum size.
type Splitter struct {
	splitCount int      // maximum elements per chunk
	data       []string // the slice to partition
}

// NewSplitter builds a Splitter that cuts data into chunks of at most
// splitCount elements.
func NewSplitter(splitCount int, data []string) *Splitter {
	return &Splitter{splitCount: splitCount, data: data}
}

// GetSplitResult returns consecutive chunks of s.splitCount elements;
// the final chunk carries the remainder when len(s.data) is not an even
// multiple. The chunks alias s.data's backing array (no copies are made).
func (s *Splitter) GetSplitResult() (result []*SplitResult) {
	// BUG FIX: a zero or negative splitCount previously caused a
	// divide-by-zero panic; treat it as "nothing to split".
	if s.splitCount <= 0 {
		return nil
	}
	for start := 0; start < len(s.data); start += s.splitCount {
		end := start + s.splitCount
		if end > len(s.data) {
			end = len(s.data)
		}
		result = append(result, &SplitResult{Item: s.data[start:end]})
	}
	return result
}