Compare commits

..

1 Commit

Author SHA1 Message Date
mo3et 0496e1cdf6 Update CHANGELOG for release v3.8.3-patch.7 2025-07-29 09:07:27 +00:00
12 changed files with 49 additions and 69 deletions
+6 -4
View File
@@ -1,10 +1,12 @@
## [v3.8.3-patch.9](https://github.com/openimsdk/open-im-server/releases/tag/v3.8.3-patch.9) (2025-08-19)
## [v3.8.3-patch.7](https://github.com/openimsdk/open-im-server/releases/tag/v3.8.3-patch.7) (2025-07-29)
### New Features
* feat: enable redis aof-use-rdb-preamble && disable auto rdb [#3535](https://github.com/openimsdk/open-im-server/pull/3535)
* feat: add filtering for invalid messages and invalid conversations to… [#3483](https://github.com/openimsdk/open-im-server/pull/3483)
### Bug Fixes
* fix: fill in the most recent sendTime for a gap message to prevent th… [#3523](https://github.com/openimsdk/open-im-server/pull/3523)
* fix: correctly aggregate read seqs [#3482](https://github.com/openimsdk/open-im-server/pull/3482)
* fix: import friends send notification in v3.8.3-patch [#3488](https://github.com/openimsdk/open-im-server/pull/3488)
* fix: solve redis config db field in v3.8.3-patch [#3490](https://github.com/openimsdk/open-im-server/pull/3490)
**Full Changelog**: [v3.8.3-patch.8...v3.8.3-patch.9](https://github.com/openimsdk/open-im-server/compare/v3.8.3-patch.8...v3.8.3-patch.9)
**Full Changelog**: [v3.8.3-patch.6...v3.8.3-patch.7](https://github.com/openimsdk/open-im-server/compare/v3.8.3-patch.6...v3.8.3-patch.7)
+1 -6
View File
@@ -63,12 +63,7 @@ services:
restart: always
sysctls:
net.core.somaxconn: 1024
command: >
redis-server
--requirepass openIM123
--appendonly yes
--aof-use-rdb-preamble yes
--save ""
command: redis-server /usr/local/redis/config/redis.conf --requirepass openIM123 --appendonly yes
networks:
- openim
-2
View File
@@ -219,8 +219,6 @@ func (m *MessageApi) getSendMsgReq(c *gin.Context, req apistruct.SendMsg) (sendM
data = &apistruct.CustomElem{}
case constant.MarkdownText:
data = &apistruct.MarkdownTextElem{}
case constant.Quote:
data = &apistruct.QuoteElem{}
case constant.OANotification:
data = &apistruct.OANotificationElem{}
req.SessionType = constant.NotificationChatType
-5
View File
@@ -97,11 +97,6 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
case BestSpeed:
r.Use(gzip.Gzip(gzip.BestSpeed))
}
if config.Standalone() {
r.Use(func(c *gin.Context) {
c.Set(authverify.CtxAdminUserIDsKey, cfg.Share.IMAdminUser.UserIDs)
})
}
r.Use(api.GinLogger(), prommetricsGin(), gin.RecoveryWithWriter(gin.DefaultErrorWriter, mw.GinPanicErr), mw.CorsHandler(),
mw.GinParseOperationID(), GinParseToken(rpcli.NewAuthClient(authConn)), setGinIsAdmin(cfg.Share.IMAdminUser.UserIDs))
+1 -1
View File
@@ -24,7 +24,7 @@ var (
func NewMsgCache(cache database.Cache, msgDocDatabase database.Msg) cache.MsgCache {
initMemMsgCache.Do(func() {
memMsgCache = lru.NewLazyLRU[string, *model.MsgInfoModel](1024*8, time.Hour, time.Second*10, localcache.EmptyTarget{}, nil)
memMsgCache = lru.NewLayLRU[string, *model.MsgInfoModel](1024*8, time.Hour, time.Second*10, localcache.EmptyTarget{}, nil)
})
return &msgCache{
cache: cache,
+5 -4
View File
@@ -165,15 +165,16 @@ func (c *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, t
}
func (c *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error {
key := cachekey.GetTokenKey(userID, platformID)
if err := c.rdb.HDel(ctx, key, fields...).Err(); err != nil {
return errs.Wrap(err)
}
for _, f := range fields {
k := cachekey.GetTemporaryTokenKey(userID, platformID, f)
if err := c.rdb.Set(ctx, k, "", time.Minute*5).Err(); err != nil {
return errs.Wrap(err)
}
}
key := cachekey.GetTokenKey(userID, platformID)
if err := c.rdb.HDel(ctx, key, fields...).Err(); err != nil {
return errs.Wrap(err)
}
return nil
}
+6 -7
View File
@@ -5,11 +5,6 @@ import (
"fmt"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/protocol/constant"
@@ -19,6 +14,10 @@ import (
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
func NewMsgMongo(db *mongo.Database) (database.Msg, error) {
@@ -1155,7 +1154,7 @@ func (m *MsgMgo) findBeforeDocSendTime(ctx context.Context, docID string, limit
if err != nil {
return 0, 0, err
}
for i := len(res) - 1; i >= 0; i-- {
for i := len(res) - 1; i > 0; i-- {
v := res[i]
if v.Msgs != nil && v.Msgs.Msg != nil && v.Msgs.Msg.SendTime > 0 {
return v.Msgs.Msg.Seq, v.Msgs.Msg.SendTime, nil
@@ -1170,7 +1169,7 @@ func (m *MsgMgo) findBeforeSendTime(ctx context.Context, conversationID string,
limit := int64(-1)
if first {
first = false
limit = m.model.GetLimitForSingleDoc(seq)
limit = m.model.GetMsgIndex(seq)
}
docID := m.model.BuildDocIDByIndex(conversationID, i)
msgSeq, msgSendTime, err := m.findBeforeDocSendTime(ctx, docID, limit)
-4
View File
@@ -132,10 +132,6 @@ func (*MsgDocModel) GetMsgIndex(seq int64) int64 {
return (seq - 1) % singleGocMsgNum
}
func (*MsgDocModel) GetLimitForSingleDoc(seq int64) int64 {
return seq % singleGocMsgNum
}
func (*MsgDocModel) indexGen(conversationID string, seqSuffix int64) string {
return conversationID + ":" + strconv.FormatInt(seqSuffix, 10)
}
+1 -1
View File
@@ -49,7 +49,7 @@ func New[V any](opts ...Option) Cache[V] {
if opt.expirationEvict {
return lru.NewExpirationLRU[string, V](opt.localSlotSize, opt.localSuccessTTL, opt.localFailedTTL, opt.target, c.onEvict)
} else {
return lru.NewLazyLRU[string, V](opt.localSlotSize, opt.localSuccessTTL, opt.localFailedTTL, opt.target, c.onEvict)
return lru.NewLayLRU[string, V](opt.localSlotSize, opt.localSuccessTTL, opt.localFailedTTL, opt.target, c.onEvict)
}
}
if opt.localSlotNum == 1 {
+25 -31
View File
@@ -21,25 +21,25 @@ import (
"github.com/hashicorp/golang-lru/v2/simplelru"
)
type lazyLruItem[V any] struct {
type layLruItem[V any] struct {
lock sync.Mutex
expires int64
err error
value V
}
func NewLazyLRU[K comparable, V any](size int, successTTL, failedTTL time.Duration, target Target, onEvict EvictCallback[K, V]) *LazyLRU[K, V] {
var cb simplelru.EvictCallback[K, *lazyLruItem[V]]
func NewLayLRU[K comparable, V any](size int, successTTL, failedTTL time.Duration, target Target, onEvict EvictCallback[K, V]) *LayLRU[K, V] {
var cb simplelru.EvictCallback[K, *layLruItem[V]]
if onEvict != nil {
cb = func(key K, value *lazyLruItem[V]) {
cb = func(key K, value *layLruItem[V]) {
onEvict(key, value.value)
}
}
core, err := simplelru.NewLRU[K, *lazyLruItem[V]](size, cb)
core, err := simplelru.NewLRU[K, *layLruItem[V]](size, cb)
if err != nil {
panic(err)
}
return &LazyLRU[K, V]{
return &LayLRU[K, V]{
core: core,
successTTL: successTTL,
failedTTL: failedTTL,
@@ -47,15 +47,15 @@ func NewLazyLRU[K comparable, V any](size int, successTTL, failedTTL time.Durati
}
}
type LazyLRU[K comparable, V any] struct {
type LayLRU[K comparable, V any] struct {
lock sync.Mutex
core *simplelru.LRU[K, *lazyLruItem[V]]
core *simplelru.LRU[K, *layLruItem[V]]
successTTL time.Duration
failedTTL time.Duration
target Target
}
func (x *LazyLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
x.lock.Lock()
v, ok := x.core.Get(key)
if ok {
@@ -68,7 +68,7 @@ func (x *LazyLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return value, err
}
} else {
v = &lazyLruItem[V]{}
v = &layLruItem[V]{}
x.core.Add(key, v)
v.lock.Lock()
x.lock.Unlock()
@@ -88,15 +88,15 @@ func (x *LazyLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
return v.value, v.err
}
func (x *LazyLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
var (
err error
once sync.Once
)
res := make(map[K]V)
queries := make([]K, 0, len(keys))
queries := make([]K, 0)
setVs := make(map[K]*layLruItem[V])
for _, key := range keys {
x.lock.Lock()
v, ok := x.core.Get(key)
@@ -118,20 +118,14 @@ func (x *LazyLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)
}
queries = append(queries, key)
}
if len(queries) == 0 {
return res, err
}
values, fetchErr := fetch(queries)
if fetchErr != nil {
values, err1 := fetch(queries)
if err1 != nil {
once.Do(func() {
err = fetchErr
err = err1
})
}
for key, val := range values {
v := &lazyLruItem[V]{}
v := &layLruItem[V]{}
v.value = val
if err == nil {
@@ -141,7 +135,7 @@ func (x *LazyLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)
v.expires = time.Now().Add(x.failedTTL).UnixMilli()
x.target.IncrGetFailed()
}
setVs[key] = v
x.lock.Lock()
x.core.Add(key, v)
x.lock.Unlock()
@@ -151,29 +145,29 @@ func (x *LazyLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)
return res, err
}
//func (x *LazyLRU[K, V]) Has(key K) bool {
//func (x *LayLRU[K, V]) Has(key K) bool {
// x.lock.Lock()
// defer x.lock.Unlock()
// return x.core.Contains(key)
//}
func (x *LazyLRU[K, V]) Set(key K, value V) {
func (x *LayLRU[K, V]) Set(key K, value V) {
x.lock.Lock()
defer x.lock.Unlock()
x.core.Add(key, &lazyLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
}
func (x *LazyLRU[K, V]) SetHas(key K, value V) bool {
func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
x.lock.Lock()
defer x.lock.Unlock()
if x.core.Contains(key) {
x.core.Add(key, &lazyLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
return true
}
return false
}
func (x *LazyLRU[K, V]) Del(key K) bool {
func (x *LayLRU[K, V]) Del(key K) bool {
x.lock.Lock()
ok := x.core.Remove(key)
x.lock.Unlock()
@@ -185,6 +179,6 @@ func (x *LazyLRU[K, V]) Del(key K) bool {
return ok
}
func (x *LazyLRU[K, V]) Stop() {
func (x *LayLRU[K, V]) Stop() {
}
+3 -3
View File
@@ -35,7 +35,7 @@ type slotLRU[K comparable, V any] struct {
func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
var (
slotKeys = make(map[uint64][]K)
kVs = make(map[K]V)
vs = make(map[K]V)
)
for _, k := range keys {
@@ -49,10 +49,10 @@ func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)
return nil, err
}
for key, value := range batches {
kVs[key] = value
vs[key] = value
}
}
return kVs, nil
return vs, nil
}
func (x *slotLRU[K, V]) getIndex(k K) uint64 {
+1 -1
View File
@@ -64,7 +64,7 @@ func NewOnlineCache(client *rpcli.UserClient, group *GroupLocalCache, rdb redis.
case false:
log.ZDebug(ctx, "fullUserCache is false")
x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
return lru.NewLazyLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
})
x.CurrentPhase.Store(DoSubscribeOver)
x.Cond.Broadcast()