mirror of https://github.com/openimsdk/open-im-server.git
Fix err (#2608)
* refactor: refactor workflows contents.
* add tool workflows.
* update field.
* fix: remove chat error.
* Fix err.
* fix error.
* remove cn comment.
* update workflows files.
* update infra config.
* move workflows.
* feat: update bot.
* fix: solve incorrect outdated msg get.
* update get docIDs logic.
* update
* update skip logic.
* fix
* update.
* fix: delay deleteObject func.
* remove unused content.
* update log type.
* feat: implement request batch count limit.
* update
* update
* feat: add rocksTimeout
* feat: wrap logs
* feat: add logs
* feat: listen config
* feat: enable listen TIME_WAIT port
* feat: add logs
* feat: cache batch
* chore: enable fullUserCache
* feat: push rpc num
* feat: push err
* feat: with operationID
* feat: sleep
* feat: change 1s
* feat: change log
* feat: implement Getbatch in rpcCache.
* feat: print getOnline cost
* feat: change log
* feat: change kafka and push config
* feat: del interface
* feat: fix err
* feat: change config
* feat: go mod
* feat: change config
* feat: change config
* feat: add sleep in push
* feat: warn logs
* feat: logs
* feat: logs
* feat: change port
* feat: start config
* feat: remove port reuse
* feat: prometheus config
* feat: prometheus config
* feat: prometheus config
* feat: add long time send msg to grafana
* feat: init
* feat: init
* feat: implement offline push.
* feat: batch get user online
* feat: implement batch Push split
* update go mod
* Revert "feat: change port"
This reverts commit 06d5e944
* feat: change port
* feat: change config
* feat: implement kafka producer and consumer.
* update format.
* add PushMQ log.
* feat: get all online users and init push
* feat: lock in online cache
* feat: config
* fix: init online status
* fix: add logs
* fix: userIDs
* fix: add logs
* feat: update Handler logic.
* update MQ logic.
* update
* update
* fix: method name
* fix: update OfflinePushConsumerHandler.
* fix: prommetrics
* fix: add logs
* fix: ctx
* fix: log
* fix: config
* feat: change port
* fix: atomic online cache status
---------
Co-authored-by: Monet Lee <monet_lee@163.com>
@@ -58,7 +58,7 @@ func Start(ctx context.Context, index int, conf *Config) error {
 	)
 
 	hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
-		longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
+		longServer.online, _ = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
 		return nil
 	})
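The change above tracks a new rpccache.NewOnlineCache signature: a fullUserCache flag now sits before the subscriber callback, and the constructor also returns an error (the gateway discards it with `_`; the push consumer further down propagates it). A compiling sketch of the new shape, with stand-in types since the real ones live in the rpccache package, and parameter names that are assumptions rather than the upstream declaration:

package main

import (
	"errors"
	"fmt"
)

// OnlineCache and NewOnlineCache are stand-ins mirroring only the
// constructor shape visible at the two call sites in this commit.
type OnlineCache struct{ fullUserCache bool }

func NewOnlineCache(userClient, groupCache, rdb any, fullUserCache bool, onChange func(userID string, platformIDs []int32)) (*OnlineCache, error) {
	if rdb == nil {
		return nil, errors.New("nil redis client")
	}
	return &OnlineCache{fullUserCache: fullUserCache}, nil
}

func main() {
	// Gateway call site: fullUserCache=false, error discarded (online, _ = ...).
	online, _ := NewOnlineCache(nil, nil, struct{}{}, false, nil)
	fmt.Println(online.fullUserCache)
}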
@@ -265,7 +265,7 @@ func (ws *WsServer) registerClient(client *Client) {
 	if clientOK {
 		ws.clients.Set(client.UserID, client)
 		// There is already a connection to the platform
-		log.ZInfo(client.ctx, "repeat login", "userID", client.UserID, "platformID",
+		log.ZDebug(client.ctx, "repeat login", "userID", client.UserID, "platformID",
 			client.PlatformID, "old remote addr", getRemoteAdders(oldClients))
 		ws.onlineUserConnNum.Add(1)
 	} else {
@@ -293,7 +293,7 @@ func (ws *WsServer) registerClient(client *Client) {
 
 	wg.Wait()
 
-	log.ZInfo(
+	log.ZDebug(
 		client.ctx,
 		"user online",
 		"online user Num",
@@ -360,7 +360,7 @@ func (ws *WsServer) unregisterClient(client *Client) {
 	ws.onlineUserConnNum.Add(-1)
 	ws.subscription.DelClient(client)
 	//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
-	log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
+	log.ZDebug(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
 		ws.onlineUserNum.Load(), "online user conn Num",
 		ws.onlineUserConnNum.Load(),
 	)
@@ -111,7 +111,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	if err != nil {
 		return err
 	}
 
 	msgTransfer := &MsgTransfer{
 		historyCH:      historyCH,
 		historyMongoCH: historyMongoCH,
@@ -238,6 +238,7 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [
 }
 
 func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) {
+	log.ZInfo(ctx, "handle storage msg")
 	for _, storageMsg := range storageList {
 		log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String())
 	}
@@ -254,16 +255,20 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
 		log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList)
 		return
 	}
+	log.ZInfo(ctx, "BatchInsertChat2Cache end")
 
 	if isNewConversation {
 		switch msg.SessionType {
 		case constant.ReadGroupChatType:
-			log.ZInfo(ctx, "group chat first create conversation", "conversationID",
+			log.ZDebug(ctx, "group chat first create conversation", "conversationID",
 				conversationID)
 			userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID)
 			if err != nil {
 				log.ZWarn(ctx, "get group member ids error", err, "conversationID",
 					conversationID)
 			} else {
+				log.ZInfo(ctx, "GetGroupMemberIDs end")
+
 				if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx,
 					msg.GroupID, userIDs); err != nil {
 					log.ZWarn(ctx, "single chat first create conversation error", err,
@@ -282,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
 		}
 	}
 
-	log.ZDebug(ctx, "success incr to next topic")
+	log.ZInfo(ctx, "success incr to next topic")
 	err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
 	if err != nil {
 		log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID",
 			conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
 	}
+	log.ZInfo(ctx, "MsgToMongoMQ end")
+
 	och.toPushTopic(ctx, key, conversationID, storageList)
+	log.ZInfo(ctx, "toPushTopic end")
 	}
 }
@@ -319,7 +327,7 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
 func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
 	for _, v := range msgs {
 		log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
-		och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
+		_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
 	}
 }
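The `_, _, _ =` change above makes the discarded results of MsgToPushMQ explicit instead of silently ignoring a multi-value return. Judging by the three blanks, the producer returns something like (partition, offset, err); those names are an assumption here, not confirmed by the diff. A self-contained illustration of the idiom:

package main

import "fmt"

// publish stands in for a Kafka-style producer call returning
// (partition, offset, err) — hypothetical names mirroring the three
// values MsgToPushMQ appears to return.
func publish(msg string) (int32, int64, error) {
	fmt.Println("published:", msg)
	return 0, 42, nil
}

func main() {
	// Explicit discard: documents that all three results are
	// intentionally ignored at this call site.
	_, _, _ = publish("hello")
}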
@@ -344,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess
 
 func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
 	claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
-	log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset",
+	log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
 		claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
 	och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
 		session.MarkMessage(lastMessage, "")
@@ -57,7 +57,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
 		log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
 		return
 	}
-	log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
+	log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
 	err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
 	if err != nil {
 		log.ZError(
@@ -28,6 +28,6 @@ type Dummy struct {
 }
 
 func (d *Dummy) Push(ctx context.Context, userIDs []string, title, content string, opts *options.Opts) error {
-	log.ZInfo(ctx, "dummy push")
+	log.ZDebug(ctx, "dummy push")
 	return nil
 }
@@ -23,7 +23,6 @@ import (
 	"time"
 
 	"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
-
 	"github.com/openimsdk/open-im-server/v3/pkg/common/config"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
 	"github.com/openimsdk/tools/errs"
@@ -100,7 +99,6 @@ func (g *Client) Push(ctx context.Context, userIDs []string, title, content stri
 			if err = g.batchPush(ctx, token, userIDs[i:end], pushReq); err != nil {
 				log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
 			}
-
 		}
 	if err = g.batchPush(ctx, token, userIDs, pushReq); err != nil {
 		log.ZError(ctx, "batchPush failed", err, "index", index, "token", token, "req", pushReq)
@@ -63,7 +63,7 @@ func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context,
 	}
 }
 
-func (c *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
+func (o *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, content string, opts *options.Opts, err error) {
 	type AtTextElem struct {
 		Text       string   `json:"text,omitempty"`
 		AtUserList []string `json:"atUserList,omitempty"`
@@ -108,12 +108,12 @@ func (c *OfflinePushConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (ti
 	return
 }
 
-func (c *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
-	title, content, opts, err := c.getOfflinePushInfos(msg)
+func (o *OfflinePushConsumerHandler) offlinePushMsg(ctx context.Context, msg *sdkws.MsgData, offlinePushUserIDs []string) error {
+	title, content, opts, err := o.getOfflinePushInfos(msg)
 	if err != nil {
 		return err
 	}
-	err = c.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
+	err = o.offlinePusher.Push(ctx, offlinePushUserIDs, title, content, opts)
 	if err != nil {
 		prommetrics.MsgOfflinePushFailedCounter.Inc()
 		return err
@@ -27,12 +27,12 @@ func newEmptyOnlinePusher() *emptyOnlinePusher {
 
 func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData,
 	pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
-	log.ZWarn(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
+	log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
 	return nil, nil
 }
 
 func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData,
 	wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
-	log.ZWarn(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
+	log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
 	return nil
 }
@@ -27,6 +27,9 @@ import (
 	"github.com/openimsdk/tools/utils/timeutil"
 	"github.com/redis/go-redis/v9"
 	"google.golang.org/protobuf/proto"
+	"math/rand"
+	"strconv"
+	"time"
 )
 
 type ConsumerHandler struct {
@@ -55,6 +58,7 @@ func NewConsumerHandler(config *Config, database controller.PushDatabase, offlin
 	}
 
+	userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
 
 	consumerHandler.offlinePusher = offlinePusher
 	consumerHandler.onlinePusher = NewOnlinePusher(client, config)
 	consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
@@ -65,7 +69,10 @@ func NewConsumerHandler(config *Config, database controller.PushDatabase, offlin
 	consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
 	consumerHandler.config = config
 	consumerHandler.pushDatabase = database
-	consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
+	consumerHandler.onlineCache, err = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, config.RpcConfig.FullUserCache, nil)
+	if err != nil {
+		return nil, err
+	}
 	return &consumerHandler, nil
 }
@@ -108,6 +115,14 @@ func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
 func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
 
 func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	c.onlineCache.Lock.Lock()
+	for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
+		c.onlineCache.Cond.Wait()
+	}
+	c.onlineCache.Lock.Unlock()
+	ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
+	log.ZInfo(ctx, "begin consume messages")
+
 	for msg := range claim.Messages() {
 		ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
 		c.handleMs2PsChat(ctx, msg.Value)
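The gate added at the top of ConsumeClaim blocks Kafka consumption until the online cache has passed its DoSubscribeOver initialization phase. A self-contained sketch of the same sync.Cond pattern; the names below are illustrative, not the actual rpccache fields:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// phaseGate mimics the pattern in the diff: the initializer advances
// an atomic phase counter and broadcasts; consumers wait on the Cond
// until the phase they need has been reached.
type phaseGate struct {
	lock  sync.Mutex
	cond  *sync.Cond
	phase atomic.Int64
}

const doSubscribeOver = 2 // stand-in for rpccache.DoSubscribeOver

func main() {
	g := &phaseGate{}
	g.cond = sync.NewCond(&g.lock)

	// Initializer: advance through phases, waking waiters each time.
	go func() {
		for p := int64(1); p <= doSubscribeOver; p++ {
			time.Sleep(50 * time.Millisecond)
			g.lock.Lock()
			g.phase.Store(p)
			g.cond.Broadcast()
			g.lock.Unlock()
		}
	}()

	// Consumer: the same wait loop ConsumeClaim now runs before
	// touching claim.Messages().
	g.lock.Lock()
	for g.phase.Load() < doSubscribeOver {
		g.cond.Wait()
	}
	g.lock.Unlock()
	fmt.Println("cache ready, begin consuming")
}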
@@ -118,20 +133,27 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
 
 // Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
 func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
-	log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
+	log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
+	defer func(duration time.Time) {
+		t := time.Since(duration)
+		log.ZInfo(ctx, "Get msg from msg_transfer And push msg", "msg", msg.String(), "time cost", t)
+	}(time.Now())
 	if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
 		return err
 	}
+	log.ZInfo(ctx, "webhookBeforeOnlinePush end")
 
 	wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
 	if err != nil {
 		return err
 	}
 
-	log.ZDebug(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs)
+	log.ZInfo(ctx, "single and notification push result", "result", wsResults, "msg", msg, "push_to_userID", userIDs)
 
 	if !c.shouldPushOffline(ctx, msg) {
 		return nil
 	}
+	log.ZInfo(ctx, "shouldPushOffline end")
 
 	for _, v := range wsResults {
 		//message sender do not need offline push
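Push2User (and Push2Group below) now time themselves with a deferred closure that takes time.Now() as an argument: the argument is evaluated when the defer statement runs, so time.Since in the deferred body measures the whole call. A minimal standalone version of the idiom:

package main

import (
	"fmt"
	"time"
)

func doWork() {
	// time.Now() is evaluated at the defer statement, so the deferred
	// closure reports the full function duration when it finally runs.
	defer func(start time.Time) {
		fmt.Println("time cost:", time.Since(start))
	}(time.Now())

	time.Sleep(100 * time.Millisecond) // stand-in for the real push work
}

func main() { doWork() }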
@@ -150,7 +172,7 @@ func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *
 			offlinePushUserID, msg, nil); err != nil {
 			return err
 		}
-
+		log.ZInfo(ctx, "webhookBeforeOfflinePush end")
 		err = c.offlinePushMsg(ctx, msg, offlinePushUserID)
 		if err != nil {
 			log.ZWarn(ctx, "offlinePushMsg failed", err, "offlinePushUserID", offlinePushUserID, "msg", msg)
@@ -172,21 +194,11 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
 }
 
 func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
-	var (
-		onlineUserIDs  []string
-		offlineUserIDs []string
-	)
-	for _, userID := range pushToUserIDs {
-		online, err := c.onlineCache.GetUserOnline(ctx, userID)
-		if err != nil {
-			return nil, err
-		}
-		if online {
-			onlineUserIDs = append(onlineUserIDs, userID)
-		} else {
-			offlineUserIDs = append(offlineUserIDs, userID)
-		}
-	}
+	onlineUserIDs, offlineUserIDs, err := c.onlineCache.GetUsersOnline(ctx, pushToUserIDs)
+	if err != nil {
+		return nil, err
+	}
 
 	log.ZDebug(ctx, "GetConnsAndOnlinePush online cache", "sendID", msg.SendID, "recvID", msg.RecvID, "groupID", msg.GroupID, "sessionType", msg.SessionType, "clientMsgID", msg.ClientMsgID, "serverMsgID", msg.ServerMsgID, "offlineUserIDs", offlineUserIDs, "onlineUserIDs", onlineUserIDs)
 	var result []*msggateway.SingleMsgToUserResults
 	if len(onlineUserIDs) > 0 {
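The removed loop issued one GetUserOnline lookup per recipient; GetUsersOnline replaces it with a single batched call that partitions the input into online and offline sets. A self-contained sketch of that contract, with a map standing in for the Redis-backed cache:

package main

import (
	"context"
	"fmt"
)

// getUsersOnline mirrors the batched contract used in the diff:
// one call partitions all IDs into online and offline slices.
func getUsersOnline(_ context.Context, status map[string]bool, userIDs []string) (online, offline []string, err error) {
	for _, id := range userIDs {
		if status[id] {
			online = append(online, id)
		} else {
			offline = append(offline, id)
		}
	}
	return online, offline, nil
}

func main() {
	status := map[string]bool{"u1": true, "u3": true}
	online, offline, _ := getUsersOnline(context.Background(), status, []string{"u1", "u2", "u3"})
	fmt.Println(online, offline) // [u1 u3] [u2]
}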
@@ -205,35 +217,42 @@ func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.
 }
 
 func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
-	log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
+	log.ZInfo(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
+	defer func(duration time.Time) {
+		t := time.Since(duration)
+		log.ZInfo(ctx, "Get group msg from msg_transfer and push msg end", "msg", msg.String(), "groupID", groupID, "time cost", t)
+	}(time.Now())
 	var pushToUserIDs []string
 	if err = c.webhookBeforeGroupOnlinePush(ctx, &c.config.WebhooksConfig.BeforeGroupOnlinePush, groupID, msg,
 		&pushToUserIDs); err != nil {
 		return err
 	}
+	log.ZInfo(ctx, "webhookBeforeGroupOnlinePush end")
 
 	err = c.groupMessagesHandler(ctx, groupID, &pushToUserIDs, msg)
 	if err != nil {
 		return err
 	}
+	log.ZInfo(ctx, "groupMessagesHandler end")
 
 	wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
 	if err != nil {
 		return err
 	}
 
-	log.ZDebug(ctx, "group push result", "result", wsResults, "msg", msg)
+	log.ZInfo(ctx, "group push result", "result", wsResults, "msg", msg)
 
 	if !c.shouldPushOffline(ctx, msg) {
 		return nil
 	}
 	needOfflinePushUserIDs := c.onlinePusher.GetOnlinePushFailedUserIDs(ctx, msg, wsResults, &pushToUserIDs)
+	log.ZInfo(ctx, "GetOnlinePushFailedUserIDs end")
 
 	//filter some user, like don not disturb or don't need offline push etc.
 	needOfflinePushUserIDs, err = c.filterGroupMessageOfflinePush(ctx, groupID, msg, needOfflinePushUserIDs)
 	if err != nil {
 		return err
 	}
+	log.ZInfo(ctx, "filterGroupMessageOfflinePush end")
 
 	// Use offline push messaging
 	if len(needOfflinePushUserIDs) > 0 {
@@ -295,7 +314,7 @@ func (c *ConsumerHandler) groupMessagesHandler(ctx context.Context, groupID stri
 			if unmarshalNotificationElem(msg.Content, &tips) != nil {
 				return err
 			}
-			log.ZInfo(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs)
+			log.ZDebug(ctx, "GroupDismissedNotificationInfo****", "groupID", groupID, "num", len(*pushToUserIDs), "list", pushToUserIDs)
 			if len(c.config.Share.IMAdminUserID) > 0 {
 				ctx = mcontext.WithOpUserIDContext(ctx, c.config.Share.IMAdminUserID[0])
 			}
@@ -379,6 +398,7 @@ func (c *ConsumerHandler) getOfflinePushInfos(msg *sdkws.MsgData) (title, conten
 	}
 	return
 }
+
 
 func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context, groupID string, userIDs []string) error {
 	conversationID := msgprocessor.GetConversationIDBySessionType(constant.ReadGroupChatType, groupID)
 	maxSeq, err := c.msgRpcClient.GetConversationMaxSeq(ctx, conversationID)
@@ -387,6 +407,7 @@ func (c *ConsumerHandler) DeleteMemberAndSetConversationSeq(ctx context.Context,
 	}
 	return c.conversationRpcClient.SetConversationMaxSeq(ctx, userIDs, conversationID, maxSeq)
 }
+
 
 func unmarshalNotificationElem(bytes []byte, t any) error {
 	var notification sdkws.NotificationElem
 	if err := json.Unmarshal(bytes, &notification); err != nil {
@@ -67,7 +67,7 @@ func (m *msgServer) ClearMsg(ctx context.Context, req *msg.ClearMsgReq) (_ *msg.
 		return nil, err
 	}
 
-	log.ZInfo(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
+	log.ZDebug(ctx, "clearing message", "docNum", docNum, "msgNum", msgNum, "cost", time.Since(start))
 
 	return &msg.ClearMsgResp{}, nil
 }
@@ -2,6 +2,8 @@ package user
 
 import (
 	"context"
+
+	"github.com/openimsdk/tools/utils/datautil"
 
 	"github.com/openimsdk/protocol/constant"
 	pbuser "github.com/openimsdk/protocol/user"
 )
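datautil.If is used in the handler below as a generic conditional expression. A minimal re-implementation of what it presumably does; the real helper lives in github.com/openimsdk/tools/utils/datautil, so the semantics here are an assumption:

package main

import "fmt"

// If mirrors the assumed semantics of datautil.If: a generic
// ternary returning a when cond is true, else b.
func If[T any](cond bool, a, b T) T {
	if cond {
		return a
	}
	return b
}

func main() {
	plats := []int32{1, 5}
	// Same pattern as the handler: online iff at least one platform is connected.
	status := If(len(plats) > 0, "online", "offline")
	fmt.Println(status) // online
}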
@@ -80,3 +82,22 @@ func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUse
 	}
 	return &pbuser.SetUserOnlineStatusResp{}, nil
 }
+
+func (s *userServer) GetAllOnlineUsers(ctx context.Context, req *pbuser.GetAllOnlineUsersReq) (*pbuser.GetAllOnlineUsersResp, error) {
+	resMap, nextCursor, err := s.online.GetAllOnlineUsers(ctx, req.Cursor)
+	if err != nil {
+		return nil, err
+	}
+	resp := &pbuser.GetAllOnlineUsersResp{
+		StatusList: make([]*pbuser.OnlineStatus, 0, len(resMap)),
+		NextCursor: nextCursor,
+	}
+	for userID, plats := range resMap {
+		resp.StatusList = append(resp.StatusList, &pbuser.OnlineStatus{
+			UserID:      userID,
+			Status:      int32(datautil.If(len(plats) > 0, constant.Online, constant.Offline)),
+			PlatformIDs: plats,
+		})
+	}
+	return resp, nil
+}
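The new GetAllOnlineUsers RPC is cursor-paginated: each call returns a batch of statuses plus a NextCursor for the following call. A self-contained sketch of the client-side paging loop, assuming (as is conventional, but not confirmed by this diff) that a zero cursor both starts and terminates iteration:

package main

import "fmt"

// page mimics one GetAllOnlineUsers response: a batch of users plus
// the cursor for the next call. Zero NextCursor is assumed to mean
// "no more pages" — an assumption, not confirmed by the diff.
type page struct {
	users      []string
	nextCursor uint64
}

// fetch stands in for the GetAllOnlineUsers RPC over a fixed dataset.
func fetch(cursor uint64) page {
	data := map[uint64]page{
		0: {users: []string{"u1", "u2"}, nextCursor: 2},
		2: {users: []string{"u3"}, nextCursor: 0},
	}
	return data[cursor]
}

func main() {
	var cursor uint64
	for {
		p := fetch(cursor)
		fmt.Println("page:", p.users)
		if p.nextCursor == 0 {
			break // assumed terminal cursor
		}
		cursor = p.nextCursor
	}
}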
@@ -79,13 +79,13 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 		now := time.Now()
 		deltime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.RetainChatRecords))
 		ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deltime.UnixMilli()))
-		log.ZInfo(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
+		log.ZDebug(ctx, "clear chat records", "deltime", deltime, "timestamp", deltime.UnixMilli())
 
 		if _, err := msgClient.ClearMsg(ctx, &msg.ClearMsgReq{Timestamp: deltime.UnixMilli()}); err != nil {
 			log.ZError(ctx, "cron clear chat records failed", err, "deltime", deltime, "cont", time.Since(now))
 			return
 		}
-		log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
+		log.ZDebug(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
 	}
 	if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearMsgFunc); err != nil {
 		return errs.Wrap(err)
@@ -95,7 +95,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 	msgDestructFunc := func() {
 		now := time.Now()
 		ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), now.UnixMilli()))
-		log.ZInfo(ctx, "msg destruct cron start", "now", now)
+		log.ZDebug(ctx, "msg destruct cron start", "now", now)
 
 		conversations, err := conversationClient.GetConversationsNeedDestructMsgs(ctx, &pbconversation.GetConversationsNeedDestructMsgsReq{})
 		if err != nil {
@@ -108,7 +108,7 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 				return
 			}
 		}
-		log.ZInfo(ctx, "msg destruct cron task completed", "cont", time.Since(now))
+		log.ZDebug(ctx, "msg destruct cron task completed", "cont", time.Since(now))
 	}
 	if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, msgDestructFunc); err != nil {
 		return errs.Wrap(err)
@@ -119,18 +119,18 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
 	//	now := time.Now()
 	//	deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
 	//	ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
-	//	log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
+	//	log.ZDebug(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
 	//	if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
 	//		log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
 	//		return
 	//	}
-	//	log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
+	//	log.ZDebug(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
 	// }
 	// if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteObjectFunc); err != nil {
 	// 	return errs.Wrap(err)
 	// }
 
-	log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
+	log.ZDebug(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
 	crontab.Start()
 	<-ctx.Done()
 	return nil