* refactor: refactor workflow contents.

* add tool workflows.

* update field.

* fix: remove chat error.

* Fix err.

* fix error.

* remove Chinese comment.

* update workflows files.

* update infra config.

* move workflows.

* feat: update bot.

* fix: solve incorrect retrieval of outdated messages.

* update docIDs retrieval logic.

* update

* update skip logic.

* fix

* update.

* fix: delay deleteObject func.
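
One plausible reading of this fix, sketched below: run the object deletion on a timer so in-flight readers finish first. The delay value and the deleteObject signature are assumptions, not the project's real API.

    package s3

    import (
    	"log"
    	"time"
    )

    // delayDelete schedules deleteObject after a grace period instead of
    // deleting immediately, so callers still reading the object are not cut off.
    func delayDelete(name string, delay time.Duration, deleteObject func(string) error) {
    	time.AfterFunc(delay, func() {
    		if err := deleteObject(name); err != nil {
    			log.Printf("delayed delete of %s failed: %v", name, err)
    		}
    	})
    }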

* remove unused content.

* update log type.

* feat: implement request batch count limit.
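
A minimal sketch of a request batch count limit: reject a request whose batch exceeds a configured cap before any downstream work is fanned out. checkBatchLimit and the limit parameter are illustrative names, not the project's.

    package limiter

    import "fmt"

    // checkBatchLimit fails fast when a request carries more items than the
    // configured per-request cap.
    func checkBatchLimit(ids []string, limit int) error {
    	if len(ids) > limit {
    		return fmt.Errorf("batch count %d exceeds limit %d", len(ids), limit)
    	}
    	return nil
    }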

* update

* update

* feat: add rocksTimeout

* feat: wrap logs

* feat: add logs

* feat: listen config

* feat: enable listen TIME_WAIT port
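
Rebinding a listen address that still has sockets in TIME_WAIT is normally done by setting SO_REUSEADDR before bind. A minimal sketch using golang.org/x/sys/unix, which may differ from what this commit actually does:

    package netutil

    import (
    	"context"
    	"net"
    	"syscall"

    	"golang.org/x/sys/unix"
    )

    // listenReuse binds addr even while a previous socket on it lingers in
    // TIME_WAIT, by enabling SO_REUSEADDR before the bind happens.
    func listenReuse(addr string) (net.Listener, error) {
    	lc := net.ListenConfig{
    		Control: func(network, address string, c syscall.RawConn) error {
    			var sockErr error
    			if err := c.Control(func(fd uintptr) {
    				sockErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1)
    			}); err != nil {
    				return err
    			}
    			return sockErr
    		},
    	}
    	return lc.Listen(context.Background(), "tcp", addr)
    }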

* feat: add logs

* feat: cache batch

* chore: enable fullUserCache

* feat: push rpc num

* feat: push err

* feat: with operationID

* feat: sleep

* feat: change 1s

* feat: change log

* feat: implement GetBatch in rpcCache.
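
The usual shape of such a batch read, as a hedged sketch: serve hits from the cache, fetch only the misses in one RPC, then backfill. The map cache and fetchRPC callback stand in for the real rpcCache internals.

    package rpccache

    import "context"

    // getBatch resolves keys from the local cache first, fetches only the
    // misses over RPC, and writes the fetched values back for later calls.
    func getBatch(ctx context.Context, cache map[string]string, keys []string,
    	fetchRPC func(ctx context.Context, miss []string) (map[string]string, error),
    ) (map[string]string, error) {
    	res := make(map[string]string, len(keys))
    	var miss []string
    	for _, k := range keys {
    		if v, ok := cache[k]; ok {
    			res[k] = v
    		} else {
    			miss = append(miss, k)
    		}
    	}
    	if len(miss) == 0 {
    		return res, nil
    	}
    	fetched, err := fetchRPC(ctx, miss)
    	if err != nil {
    		return nil, err
    	}
    	for k, v := range fetched {
    		cache[k] = v // backfill so the next getBatch hits locally
    		res[k] = v
    	}
    	return res, nil
    }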

* feat: print getOnline cost

* feat: change log

* feat: change kafka and push config

* feat: del interface

* feat: fix err

* feat: change config

* feat: go mod

* feat: change config

* feat: change config

* feat: add sleep in push

* feat: warn logs

* feat: logs

* feat: logs

* feat: change port

* feat: start config

* feat: remove port reuse

* feat: prometheus config

* feat: prometheus config

* feat: prometheus config

* feat: add long time send msg to grafana
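
One common way such a signal reaches Grafana is a Prometheus histogram observed around the send path; the metric name and buckets below are assumptions, not the project's actual prommetrics definitions.

    package metrics

    import (
    	"time"

    	"github.com/prometheus/client_golang/prometheus"
    )

    var msgSendSeconds = prometheus.NewHistogram(prometheus.HistogramOpts{
    	Name:    "msg_send_duration_seconds",
    	Help:    "End-to-end duration of sending a message.",
    	Buckets: prometheus.ExponentialBuckets(0.01, 2, 12), // 10ms up to ~20s
    })

    func init() { prometheus.MustRegister(msgSendSeconds) }

    // observeSend records how long one send took; long sends land in the
    // histogram's upper buckets, which a Grafana panel can alert on.
    func observeSend(start time.Time) {
    	msgSendSeconds.Observe(time.Since(start).Seconds())
    }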

* feat: init

* feat: init

* feat: implement offline push.

* feat: batch get user online

* feat: implement batch Push split
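
A sketch of what a batch push split can look like: chop the target user list into fixed-size batches and push each batch concurrently. batchSize and the pushBatch callback are illustrative, not the project's identifiers.

    package push

    import (
    	"context"

    	"golang.org/x/sync/errgroup"
    )

    const batchSize = 50 // assumed; a real limit would come from config

    // pushSplit fans one push out over fixed-size batches of target users and
    // waits for all batches, returning the first error encountered.
    func pushSplit(ctx context.Context, userIDs []string,
    	pushBatch func(ctx context.Context, ids []string) error,
    ) error {
    	g, ctx := errgroup.WithContext(ctx)
    	for len(userIDs) > 0 {
    		n := batchSize
    		if n > len(userIDs) {
    			n = len(userIDs)
    		}
    		batch := userIDs[:n]
    		userIDs = userIDs[n:]
    		g.Go(func() error { return pushBatch(ctx, batch) })
    	}
    	return g.Wait()
    }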

* update go mod

* Revert "feat: change port"

This reverts commit 06d5e944

* feat: change port

* feat: change config

* feat: implement kafka producer and consumer.
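
The diff below consumes through a sarama consumer group, so a matching producer looks roughly like this; brokers and the topic name are placeholders, and the project wraps this in its own MQ layer.

    package main

    import (
    	"log"

    	"github.com/IBM/sarama"
    )

    func main() {
    	cfg := sarama.NewConfig()
    	cfg.Producer.Return.Successes = true // SyncProducer requires this

    	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer producer.Close()

    	// Keying by conversationID keeps one conversation on one partition,
    	// preserving per-conversation ordering for the consumer group.
    	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
    		Topic: "toPush",
    		Key:   sarama.StringEncoder("conversationID"),
    		Value: sarama.StringEncoder("payload"),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("stored at partition=%d offset=%d", partition, offset)
    }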

* update format.

* add PushMQ log.

* feat: get all online users and init push

* feat: lock in online cache

* feat: config

* fix: init online status

* fix: add logs

* fix: userIDs

* fix: add logs

* feat: update Handler logic.

* update MQ logic.

* update

* update

* fix: method name

* fix: update OfflinePushConsumerHandler.

* fix: prommetrics

* fix: add logs

* fix: ctx

* fix: log

* fix: config

* feat: change port

* fix: atomic online cache status
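
The rough shape of this fix: hold the online cache's status in a sync/atomic value instead of a plain field, so concurrent readers never observe a torn update. The type below is illustrative; the real cache tracks per-user state.

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type onlineCache struct {
    	ready atomic.Bool // flipped once the initial full sync completes
    }

    func (c *onlineCache) markReady()    { c.ready.Store(true) }
    func (c *onlineCache) isReady() bool { return c.ready.Load() }

    func main() {
    	var c onlineCache
    	fmt.Println(c.isReady()) // false: still syncing
    	c.markReady()
    	fmt.Println(c.isReady()) // true: safe to serve online status
    }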

---------

Co-authored-by: Monet Lee <monet_lee@163.com>
Author: icey-yu
Date: 2024-09-12 10:38:17 +08:00
Committed by: GitHub
Parent: b703449615
Commit: 0276c7df60
43 changed files with 2639 additions and 2105 deletions
@@ -111,7 +111,7 @@ func Start(ctx context.Context, index int, config *Config) error {
 	if err != nil {
 		return err
 	}
 	msgTransfer := &MsgTransfer{
 		historyCH:      historyCH,
 		historyMongoCH: historyMongoCH,
@@ -238,6 +238,7 @@ func (och *OnlineHistoryRedisConsumerHandler) categorizeMessageLists(totalMsgs [
 }
 
 func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key, conversationID string, storageList, notStorageList []*ContextMsg) {
+	log.ZInfo(ctx, "handle storage msg")
 	for _, storageMsg := range storageList {
 		log.ZDebug(ctx, "handle storage msg", "msg", storageMsg.message.String())
 	}
@@ -254,16 +255,20 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
 		log.ZError(ctx, "batch data insert to redis err", err, "storageMsgList", storageMessageList)
 		return
 	}
+	log.ZInfo(ctx, "BatchInsertChat2Cache end")
 	if isNewConversation {
 		switch msg.SessionType {
 		case constant.ReadGroupChatType:
-			log.ZInfo(ctx, "group chat first create conversation", "conversationID", conversationID)
+			log.ZDebug(ctx, "group chat first create conversation", "conversationID", conversationID)
 			userIDs, err := och.groupRpcClient.GetGroupMemberIDs(ctx, msg.GroupID)
 			if err != nil {
 				log.ZWarn(ctx, "get group member ids error", err, "conversationID", conversationID)
 			} else {
+				log.ZInfo(ctx, "GetGroupMemberIDs end")
 				if err := och.conversationRpcClient.GroupChatFirstCreateConversation(ctx, msg.GroupID, userIDs); err != nil {
 					log.ZWarn(ctx, "single chat first create conversation error", err,
@@ -282,13 +287,16 @@ func (och *OnlineHistoryRedisConsumerHandler) handleMsg(ctx context.Context, key
 			}
 		}
 
-		log.ZDebug(ctx, "success incr to next topic")
+		log.ZInfo(ctx, "success incr to next topic")
 		err = och.msgTransferDatabase.MsgToMongoMQ(ctx, key, conversationID, storageMessageList, lastSeq)
 		if err != nil {
 			log.ZError(ctx, "Msg To MongoDB MQ error", err, "conversationID", conversationID, "storageList", storageMessageList, "lastSeq", lastSeq)
 		}
+		log.ZInfo(ctx, "MsgToMongoMQ end")
 		och.toPushTopic(ctx, key, conversationID, storageList)
+		log.ZInfo(ctx, "toPushTopic end")
 	}
 }
@@ -319,7 +327,7 @@ func (och *OnlineHistoryRedisConsumerHandler) handleNotification(ctx context.Con
 
 func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
 	for _, v := range msgs {
 		log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
-		och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
+		_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
 	}
 }
@@ -344,7 +352,7 @@ func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSess
 
 func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
 	claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
-	log.ZInfo(context.Background(), "online new session msg come", "highWaterMarkOffset",
+	log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
 		claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
 	och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
 		session.MarkMessage(lastMessage, "")
@@ -57,7 +57,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
 		log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
 		return
 	}
-	log.ZInfo(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
+	log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())
 	err = mc.msgTransferDatabase.BatchInsertChat2DB(ctx, msgFromMQ.ConversationID, msgFromMQ.MsgData, msgFromMQ.LastSeq)
 	if err != nil {
 		log.ZError(