Files
open-im-server/pkg/common/db/controller/msg.go
T

725 lines
29 KiB
Go
Raw Normal View History

2023-02-10 11:03:03 +08:00
package controller
2023-02-10 15:46:29 +08:00
import (
2023-02-23 18:17:17 +08:00
"fmt"
2023-04-18 14:43:54 +08:00
"sync"
"time"
2023-03-16 10:46:06 +08:00
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/config"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/constant"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/cache"
unRelationTb "github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/table/unrelation"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/unrelation"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/kafka"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/log"
2023-03-21 12:28:21 +08:00
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/mcontext"
2023-03-16 10:46:06 +08:00
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/prome"
2023-02-15 15:52:32 +08:00
"github.com/gogo/protobuf/sortkeys"
2023-02-10 15:46:29 +08:00
"context"
2023-02-15 15:52:32 +08:00
"errors"
2023-04-18 14:43:54 +08:00
2023-03-16 10:46:06 +08:00
pbMsg "github.com/OpenIMSDK/Open-IM-Server/pkg/proto/msg"
"github.com/OpenIMSDK/Open-IM-Server/pkg/proto/sdkws"
"github.com/OpenIMSDK/Open-IM-Server/pkg/utils"
2023-02-15 15:52:32 +08:00
"github.com/go-redis/redis/v8"
"go.mongodb.org/mongo-driver/mongo"
"github.com/golang/protobuf/proto"
2023-02-10 15:46:29 +08:00
)
2023-02-24 10:47:36 +08:00
// MsgDatabase is the storage controller for chat messages. It spans three
// backends: redis (seq counters and hot message cache), mongo (message
// documents and reaction extension sets) and kafka (queues for push,
// modify and mongo persistence).
type MsgDatabase interface {
	// BatchInsertChat2DB batch-inserts messages into mongo.
	BatchInsertChat2DB(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq int64) error
	// DeleteMessageFromCache removes the given messages from the redis cache.
	DeleteMessageFromCache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ) error
	// BatchInsertChat2Cache increments the seq and batch-inserts messages into the cache.
	BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq int64) (int64, error)
	// NotificationBatchInsertChat2Cache increments the notification seq and batch-inserts into the cache.
	NotificationBatchInsertChat2Cache(ctx context.Context, sourceID string, msgs []*pbMsg.MsgDataToMQ) (int64, error)
	// DelMsgBySeqs deletes messages by seq and returns the seqs that did not exist.
	DelMsgBySeqs(ctx context.Context, userID string, seqs []int64) (totalUnExistSeqs []int64, err error)
	// GetMsgBySeqs fetches write-diffusion messages from mongo by seq list.
	GetMsgBySeqs(ctx context.Context, userID string, seqs []int64) (seqMsg []*sdkws.MsgData, err error)
	// GetSuperGroupMsgBySeqs fetches super-group messages stored in mongo by seq list.
	GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []int64) (seqMsg []*sdkws.MsgData, err error)
	// CleanUpUserMsg deletes all of a user's messages in redis/mongo and resets the seq.
	CleanUpUserMsg(ctx context.Context, userID string) error
	// DeleteUserSuperGroupMsgsAndSetMinSeq deletes super-group messages and resets group members' min seq.
	// remainTime is the retention period in seconds (expired messages are removed);
	// pass 0 to delete everything. This method does NOT touch the redis cache.
	DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userIDs []string, remainTime int64) error
	// DeleteUserMsgsAndSetMinSeq deletes a user's messages and resets the min seq.
	// remainTime is the retention period in seconds (expired messages are removed);
	// pass 0 to delete everything. This method does NOT touch the redis cache.
	DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error
	// GetUserMinMaxSeqInMongoAndCache returns the user's seq bounds in both mongo and redis.
	GetUserMinMaxSeqInMongoAndCache(ctx context.Context, userID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error)
	// GetSuperGroupMinMaxSeqInMongoAndCache returns the group's seq bounds in both mongo and redis.
	GetSuperGroupMinMaxSeqInMongoAndCache(ctx context.Context, groupID string) (minSeqMongo, maxSeqMongo, maxSeqCache int64, err error)
	// SetGroupUserMinSeq sets a group member's min seq directly in the cache.
	SetGroupUserMinSeq(ctx context.Context, groupID, userID string, minSeq int64) (err error)
	GetGroupUserMinSeq(ctx context.Context, groupID, userID string) (int64, error)
	// SetUserMinSeq sets a user's min seq directly in the cache.
	SetUserMinSeq(ctx context.Context, userID string, minSeq int64) (err error)

	// Message reaction extensions (redis + mongo extend-msg sets).
	JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error)
	SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error
	SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error)
	GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (*pbMsg.ExtendMsg, error)
	InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
	GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error)
	GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error)
	DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error
	DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensionList map[string]*sdkws.KeyValue) error
	// Send-status bookkeeping (redis).
	SetSendMsgStatus(ctx context.Context, id string, status int32) error
	GetSendMsgStatus(ctx context.Context, id string) (int32, error)
	GetUserMaxSeq(ctx context.Context, userID string) (int64, error)
	GetUserMinSeq(ctx context.Context, userID string) (int64, error)
	GetGroupMaxSeq(ctx context.Context, groupID string) (int64, error)
	GetGroupMinSeq(ctx context.Context, groupID string) (int64, error)
	// Kafka publishing.
	MsgToMQ(ctx context.Context, key string, msg2mq *pbMsg.MsgDataToMQ) error
	MsgToModifyMQ(ctx context.Context, aggregationID string, messages []*pbMsg.MsgDataToMQ) error
	MsgToPushMQ(ctx context.Context, sourceID string, msg2mq *pbMsg.MsgDataToMQ) (int32, int64, error)
	MsgToMongoMQ(ctx context.Context, aggregationID string, messages []*pbMsg.MsgDataToMQ, lastSeq int64) error
}
2023-02-23 18:17:17 +08:00
2023-03-03 17:42:26 +08:00
// NewMsgDatabase assembles a MsgDatabase from the given mongo message-doc
// model and cache model, creating the four kafka producers from the global
// config.
// NOTE(review): extendMsgDatabase is not initialized here, so the
// extend-msg methods of the returned value operate on a nil interface —
// confirm how callers obtain a fully wired instance.
func NewMsgDatabase(msgDocModel unRelationTb.MsgDocModelInterface, cacheModel cache.Model) MsgDatabase {
	return &msgDatabase{
		msgDocDatabase:   msgDocModel,
		cache:            cacheModel,
		producer:         kafka.NewKafkaProducer(config.Config.Kafka.Ws2mschat.Addr, config.Config.Kafka.Ws2mschat.Topic),
		producerToMongo:  kafka.NewKafkaProducer(config.Config.Kafka.MsgToMongo.Addr, config.Config.Kafka.MsgToMongo.Topic),
		producerToPush:   kafka.NewKafkaProducer(config.Config.Kafka.Ms2pschat.Addr, config.Config.Kafka.Ms2pschat.Topic),
		producerToModify: kafka.NewKafkaProducer(config.Config.Kafka.MsgToModify.Addr, config.Config.Kafka.MsgToModify.Topic),
	}
}
func InitMsgDatabase(rdb redis.UniversalClient, database *mongo.Database) MsgDatabase {
cacheModel := cache.NewCacheModel(rdb)
msgDocModel := unrelation.NewMsgMongoDriver(database)
msgDatabase := NewMsgDatabase(msgDocModel, cacheModel)
return msgDatabase
2023-02-23 18:17:17 +08:00
}
2023-02-24 10:47:36 +08:00
// msgDatabase is the concrete MsgDatabase backed by mongo (message docs and
// reaction extension sets), redis (cache.Model) and four kafka producers.
type msgDatabase struct {
	msgDocDatabase    unRelationTb.MsgDocModelInterface       // mongo message documents
	extendMsgDatabase unRelationTb.ExtendMsgSetModelInterface // mongo reaction extension sets
	cache             cache.Model                             // redis: seqs + hot message cache
	producer          *kafka.Producer                         // Ws2mschat topic
	producerToMongo   *kafka.Producer                         // MsgToMongo (persistence) topic
	producerToModify  *kafka.Producer                         // MsgToModify topic
	producerToPush    *kafka.Producer                         // Ms2pschat (push) topic
	// stateless model helpers (doc-id / seq arithmetic, pb<->model conversion)
	msg               unRelationTb.MsgDocModel
	extendMsgSetModel unRelationTb.ExtendMsgSetModel
}
2023-03-03 17:42:26 +08:00
// JudgeMessageReactionExist reports whether a reaction extension record
// exists in the cache for clientMsgID in the given session type.
func (db *msgDatabase) JudgeMessageReactionExist(ctx context.Context, clientMsgID string, sessionType int32) (bool, error) {
	return db.cache.JudgeMessageReactionExist(ctx, clientMsgID, sessionType)
}
2023-02-24 10:47:36 +08:00
// SetMessageTypeKeyValue stores one typed key/value reaction entry for a
// message in the cache.
func (db *msgDatabase) SetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey, value string) error {
	return db.cache.SetMessageTypeKeyValue(ctx, clientMsgID, sessionType, typeKey, value)
}
2023-02-24 10:47:36 +08:00
// SetMessageReactionExpire sets the TTL on a message's cached reaction
// record; the bool result is forwarded from the cache layer.
func (db *msgDatabase) SetMessageReactionExpire(ctx context.Context, clientMsgID string, sessionType int32, expiration time.Duration) (bool, error) {
	return db.cache.SetMessageReactionExpire(ctx, clientMsgID, sessionType, expiration)
}
2023-02-24 10:47:36 +08:00
// GetMessageTypeKeyValue reads one typed key/value reaction entry for a
// message from the cache.
func (db *msgDatabase) GetMessageTypeKeyValue(ctx context.Context, clientMsgID string, sessionType int32, typeKey string) (string, error) {
	return db.cache.GetMessageTypeKeyValue(ctx, clientMsgID, sessionType, typeKey)
}
2023-02-24 10:47:36 +08:00
// GetOneMessageAllReactionList returns every cached reaction key/value pair
// attached to a single message.
func (db *msgDatabase) GetOneMessageAllReactionList(ctx context.Context, clientMsgID string, sessionType int32) (map[string]string, error) {
	return db.cache.GetOneMessageAllReactionList(ctx, clientMsgID, sessionType)
}
2023-02-24 10:47:36 +08:00
// DeleteOneMessageKey removes one reaction sub-key from a message's cached
// reaction record.
func (db *msgDatabase) DeleteOneMessageKey(ctx context.Context, clientMsgID string, sessionType int32, subKey string) error {
	return db.cache.DeleteOneMessageKey(ctx, clientMsgID, sessionType, subKey)
}
2023-03-03 17:42:26 +08:00
// InsertOrUpdateReactionExtendMsgSet upserts reaction extensions for a
// message into the mongo extend-msg set, converting the pb key/values to
// their storage model first.
func (db *msgDatabase) InsertOrUpdateReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensions map[string]*sdkws.KeyValue) error {
	return db.extendMsgDatabase.InsertOrUpdateReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, db.extendMsgSetModel.Pb2Model(reactionExtensions))
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) GetExtendMsg(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, maxMsgUpdateTime int64) (*pbMsg.ExtendMsg, error) {
2023-03-06 16:23:16 +08:00
extendMsgSet, err := db.extendMsgDatabase.GetExtendMsgSet(ctx, sourceID, sessionType, maxMsgUpdateTime)
2023-02-23 18:17:17 +08:00
if err != nil {
return nil, err
}
extendMsg, ok := extendMsgSet.ExtendMsgs[clientMsgID]
if !ok {
return nil, errors.New(fmt.Sprintf("cant find client msg id: %s", clientMsgID))
}
reactionExtensionList := make(map[string]*pbMsg.KeyValueResp)
for key, model := range extendMsg.ReactionExtensionList {
reactionExtensionList[key] = &pbMsg.KeyValueResp{
KeyValue: &sdkws.KeyValue{
TypeKey: model.TypeKey,
Value: model.Value,
LatestUpdateTime: model.LatestUpdateTime,
},
}
}
return &pbMsg.ExtendMsg{
2023-03-03 17:42:26 +08:00
ReactionExtensions: reactionExtensionList,
ClientMsgID: extendMsg.ClientMsgID,
MsgFirstModifyTime: extendMsg.MsgFirstModifyTime,
AttachedInfo: extendMsg.AttachedInfo,
Ex: extendMsg.Ex,
2023-02-23 18:17:17 +08:00
}, nil
2023-02-23 11:57:54 +08:00
}
2023-03-03 17:42:26 +08:00
// DeleteReactionExtendMsgSet removes the given reaction extensions for a
// message from the mongo extend-msg set.
func (db *msgDatabase) DeleteReactionExtendMsgSet(ctx context.Context, sourceID string, sessionType int32, clientMsgID string, msgFirstModifyTime int64, reactionExtensions map[string]*sdkws.KeyValue) error {
	return db.extendMsgDatabase.DeleteReactionExtendMsgSet(ctx, sourceID, sessionType, clientMsgID, msgFirstModifyTime, db.extendMsgSetModel.Pb2Model(reactionExtensions))
}
2023-02-24 10:47:36 +08:00
// SetSendMsgStatus records the send status of message id in the cache.
func (db *msgDatabase) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
	return db.cache.SetSendMsgStatus(ctx, id, status)
}
2023-02-24 10:47:36 +08:00
// GetSendMsgStatus reads the send status of message id from the cache.
func (db *msgDatabase) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
	return db.cache.GetSendMsgStatus(ctx, id)
}
2023-03-08 16:35:18 +08:00
// MsgToMQ publishes one message to the main (Ws2mschat) kafka topic, keyed
// by key; partition/offset are discarded.
func (db *msgDatabase) MsgToMQ(ctx context.Context, key string, msg2mq *pbMsg.MsgDataToMQ) error {
	_, _, err := db.producer.SendMessage(ctx, key, msg2mq)
	return err
}
2023-04-26 18:57:41 +08:00
func (db *msgDatabase) MsgToModifyMQ(ctx context.Context, aggregationID string, messages []*pbMsg.MsgDataToMQ) error {
2023-03-13 15:39:47 +08:00
if len(messages) > 0 {
2023-04-26 18:57:41 +08:00
_, _, err := db.producerToModify.SendMessage(ctx, aggregationID, &pbMsg.MsgDataToModifyByMQ{AggregationID: aggregationID, Messages: messages})
2023-03-13 15:39:47 +08:00
return err
}
return nil
}
2023-03-22 18:35:21 +08:00
func (db *msgDatabase) MsgToPushMQ(ctx context.Context, key string, msg2mq *pbMsg.MsgDataToMQ) (int32, int64, error) {
2023-03-13 15:39:47 +08:00
mqPushMsg := pbMsg.PushMsgDataToMQ{MsgData: msg2mq.MsgData, SourceID: key}
2023-04-26 18:57:41 +08:00
partition, offset, err := db.producerToPush.SendMessage(ctx, key, &mqPushMsg)
if err != nil {
log.ZError(ctx, "MsgToPushMQ", err, "key", key, "msg2mq", msg2mq)
}
return partition, offset, err
2023-03-13 15:39:47 +08:00
}
2023-04-26 18:57:41 +08:00
func (db *msgDatabase) MsgToMongoMQ(ctx context.Context, aggregationID string, messages []*pbMsg.MsgDataToMQ, lastSeq int64) error {
2023-03-13 15:39:47 +08:00
if len(messages) > 0 {
2023-04-26 18:57:41 +08:00
_, _, err := db.producerToModify.SendMessage(ctx, aggregationID, &pbMsg.MsgDataToMongoByMQ{LastSeq: lastSeq, AggregationID: aggregationID, Messages: messages})
2023-03-13 15:39:47 +08:00
return err
}
return nil
}
2023-02-24 10:47:36 +08:00
// GetUserMaxSeq returns the user's current max seq from the cache.
func (db *msgDatabase) GetUserMaxSeq(ctx context.Context, userID string) (int64, error) {
	return db.cache.GetUserMaxSeq(ctx, userID)
}
2023-02-24 10:47:36 +08:00
// GetUserMinSeq returns the user's current min seq from the cache.
func (db *msgDatabase) GetUserMinSeq(ctx context.Context, userID string) (int64, error) {
	return db.cache.GetUserMinSeq(ctx, userID)
}
2023-02-24 10:47:36 +08:00
// GetGroupMaxSeq returns the group's current max seq from the cache.
func (db *msgDatabase) GetGroupMaxSeq(ctx context.Context, groupID string) (int64, error) {
	return db.cache.GetGroupMaxSeq(ctx, groupID)
}
2023-02-24 10:47:36 +08:00
// GetGroupMinSeq returns the group's current min seq from the cache.
func (db *msgDatabase) GetGroupMinSeq(ctx context.Context, groupID string) (int64, error) {
	return db.cache.GetGroupMinSeq(ctx, groupID)
}
2023-02-24 10:47:36 +08:00
// BatchInsertChat2DB writes msgList into mongo under sourceID, assigning
// consecutive seqs starting at currentMaxSeq+1. Because each mongo doc
// holds at most GetSingleGocMsgNum messages, the batch is split between the
// doc that currentMaxSeq falls into and (at most) one overflow doc, so a
// single call accepts at most GetSingleGocMsgNum messages.
// Note: this mutates msgList by setting each message's Seq.
func (db *msgDatabase) BatchInsertChat2DB(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq int64) error {
	if int64(len(msgList)) > db.msg.GetSingleGocMsgNum() {
		return errors.New("too large")
	}
	// remain = free slots left in the doc that currentMaxSeq falls into.
	var remain int64
	blk0 := db.msg.GetSingleGocMsgNum() - 1
	if currentMaxSeq < db.msg.GetSingleGocMsgNum() {
		// Still inside the very first doc.
		remain = blk0 - currentMaxSeq
	} else {
		excludeBlk0 := currentMaxSeq - blk0
		// e.g. (5000-1)%5000 == 4999
		remain = (db.msg.GetSingleGocMsgNum() - (excludeBlk0 % db.msg.GetSingleGocMsgNum())) % db.msg.GetSingleGocMsgNum()
	}
	var insertCounter int64
	msgsToMongo := make([]unRelationTb.MsgInfoModel, 0)
	msgsToMongoNext := make([]unRelationTb.MsgInfoModel, 0)
	docID := ""
	docIDNext := ""
	var err error
	for _, m := range msgList {
		currentMaxSeq++
		sMsg := unRelationTb.MsgInfoModel{}
		sMsg.SendTime = m.MsgData.SendTime
		m.MsgData.Seq = currentMaxSeq
		if sMsg.Msg, err = proto.Marshal(m.MsgData); err != nil {
			return utils.Wrap(err, "")
		}
		if insertCounter < remain {
			// Still fits in the current doc.
			msgsToMongo = append(msgsToMongo, sMsg)
			insertCounter++
			docID = db.msg.GetDocID(sourceID, currentMaxSeq)
		} else {
			// Overflow goes into the next doc.
			msgsToMongoNext = append(msgsToMongoNext, sMsg)
			docIDNext = db.msg.GetDocID(sourceID, currentMaxSeq)
		}
	}
	if docID != "" {
		// Append to the existing doc; if it does not exist yet, create it.
		err = db.msgDocDatabase.PushMsgsToDoc(ctx, docID, msgsToMongo)
		if err != nil {
			if err == mongo.ErrNoDocuments {
				doc := &unRelationTb.MsgDocModel{}
				doc.DocID = docID
				doc.Msg = msgsToMongo
				if err = db.msgDocDatabase.Create(ctx, doc); err != nil {
					prome.Inc(prome.MsgInsertMongoFailedCounter)
					return utils.Wrap(err, "")
				}
				prome.Inc(prome.MsgInsertMongoSuccessCounter)
			} else {
				prome.Inc(prome.MsgInsertMongoFailedCounter)
				return utils.Wrap(err, "")
			}
		} else {
			prome.Inc(prome.MsgInsertMongoSuccessCounter)
		}
	}
	if docIDNext != "" {
		// The overflow doc is always brand new.
		nextDoc := &unRelationTb.MsgDocModel{}
		nextDoc.DocID = docIDNext
		nextDoc.Msg = msgsToMongoNext
		if err = db.msgDocDatabase.Create(ctx, nextDoc); err != nil {
			prome.Inc(prome.MsgInsertMongoFailedCounter)
			return utils.Wrap(err, "")
		}
		prome.Inc(prome.MsgInsertMongoSuccessCounter)
	}
	return nil
}
2023-02-24 10:47:36 +08:00
// DeleteMessageFromCache removes the given messages from the user's redis
// message cache.
func (db *msgDatabase) DeleteMessageFromCache(ctx context.Context, userID string, msgs []*pbMsg.MsgDataToMQ) error {
	return db.cache.DeleteMessageFromCache(ctx, userID, msgs)
}
2023-04-26 18:57:41 +08:00
// NotificationBatchInsertChat2Cache is the notification counterpart of
// BatchInsertChat2Cache.
// NOTE(review): currently a stub — it always returns (0, nil) without
// touching the cache or allocating seqs; confirm whether this is pending
// implementation.
func (db *msgDatabase) NotificationBatchInsertChat2Cache(ctx context.Context, sourceID string, msgs []*pbMsg.MsgDataToMQ) (int64, error) {
	return 0, nil
}
2023-04-28 18:33:33 +08:00
// BatchInsertChat2Cache assigns consecutive seqs (starting at
// currentMaxSeq+1) to msgList, writes the messages to the redis cache, and
// persists the new max seq for the user or super group.
// It returns the max seq as it was BEFORE this batch (lastMaxSeq is
// captured before the loop increments currentMaxSeq).
// NOTE(review): confirm callers expect the pre-batch seq here rather than
// the batch's final seq.
func (db *msgDatabase) BatchInsertChat2Cache(ctx context.Context, sourceID string, msgList []*pbMsg.MsgDataToMQ, currentMaxSeq int64) (int64, error) {
	lenList := len(msgList)
	if int64(lenList) > db.msg.GetSingleGocMsgNum() {
		return 0, errors.New("too large")
	}
	if lenList < 1 {
		return 0, errors.New("too short as 0")
	}
	// judge sessionType to get seq
	lastMaxSeq := currentMaxSeq
	for _, m := range msgList {
		currentMaxSeq++
		m.MsgData.Seq = currentMaxSeq
	}
	// Cache write is best-effort: failures are counted but do not abort.
	failedNum, err := db.cache.SetMessageToCache(ctx, sourceID, msgList)
	if err != nil {
		prome.Add(prome.MsgInsertRedisFailedCounter, failedNum)
	} else {
		prome.Inc(prome.MsgInsertRedisSuccessCounter)
	}
	// Super groups track the max seq per group; everything else per user.
	if msgList[0].MsgData.SessionType == constant.SuperGroupChatType {
		err = db.cache.SetGroupMaxSeq(ctx, sourceID, currentMaxSeq)
	} else {
		err = db.cache.SetUserMaxSeq(ctx, sourceID, currentMaxSeq)
	}
	if err != nil {
		prome.Inc(prome.SeqSetFailedCounter)
	} else {
		prome.Inc(prome.SeqSetSuccessCounter)
	}
	return lastMaxSeq, utils.Wrap(err, "")
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) DelMsgBySeqs(ctx context.Context, userID string, seqs []int64) (totalUnExistSeqs []int64, err error) {
2023-02-16 15:20:59 +08:00
sortkeys.Int64s(seqs)
2023-02-15 15:52:32 +08:00
docIDSeqsMap := db.msg.GetDocIDSeqsMap(userID, seqs)
lock := sync.Mutex{}
var wg sync.WaitGroup
wg.Add(len(docIDSeqsMap))
for k, v := range docIDSeqsMap {
2023-02-16 15:20:59 +08:00
go func(docID string, seqs []int64) {
2023-02-15 15:52:32 +08:00
defer wg.Done()
unExistSeqList, err := db.DelMsgBySeqsInOneDoc(ctx, docID, seqs)
if err != nil {
return
}
lock.Lock()
totalUnExistSeqs = append(totalUnExistSeqs, unExistSeqList...)
lock.Unlock()
}(k, v)
}
return totalUnExistSeqs, nil
}
2023-02-24 10:47:36 +08:00
// DelMsgBySeqsInOneDoc soft-deletes the given seqs inside one mongo doc
// (status flipped to MsgDeleted in place) and returns the seqs that were
// not present in that doc.
func (db *msgDatabase) DelMsgBySeqsInOneDoc(ctx context.Context, docID string, seqs []int64) (unExistSeqs []int64, err error) {
	seqMsgs, indexes, unExistSeqs, err := db.GetMsgAndIndexBySeqsInOneDoc(ctx, docID, seqs)
	if err != nil {
		return nil, err
	}
	for i, v := range seqMsgs {
		if err = db.msgDocDatabase.UpdateMsgStatusByIndexInOneDoc(ctx, docID, v, indexes[i], constant.MsgDeleted); err != nil {
			return nil, err
		}
	}
	return unExistSeqs, nil
}
2023-02-24 10:47:36 +08:00
// GetMsgAndIndexBySeqsInOneDoc scans a single mongo doc for the requested
// seqs and returns the decoded messages, their indexes within the doc's
// Msg slice, and the seqs that were not present in the doc.
func (db *msgDatabase) GetMsgAndIndexBySeqsInOneDoc(ctx context.Context, docID string, seqs []int64) (seqMsgs []*sdkws.MsgData, indexes []int, unExistSeqs []int64, err error) {
	doc, err := db.msgDocDatabase.FindOneByDocID(ctx, docID)
	if err != nil {
		return nil, nil, nil, err
	}
	singleCount := 0
	var hasSeqList []int64
	for i := 0; i < len(doc.Msg); i++ {
		msgPb, err := db.unmarshalMsg(&doc.Msg[i])
		if err != nil {
			return nil, nil, nil, err
		}
		if utils.Contain(msgPb.Seq, seqs...) {
			indexes = append(indexes, i)
			seqMsgs = append(seqMsgs, msgPb)
			hasSeqList = append(hasSeqList, msgPb.Seq)
			singleCount++
			// Stop early once every requested seq has been found.
			if singleCount == len(seqs) {
				break
			}
		}
	}
	// Anything not collected above did not exist in this doc.
	for _, i := range seqs {
		if utils.Contain(i, hasSeqList...) {
			continue
		}
		unExistSeqs = append(unExistSeqs, i)
	}
	return seqMsgs, indexes, unExistSeqs, nil
}
2023-02-24 10:47:36 +08:00
// GetNewestMsg returns the most recent message stored in mongo for sourceID.
func (db *msgDatabase) GetNewestMsg(ctx context.Context, sourceID string) (msgPb *sdkws.MsgData, err error) {
	msgInfo, err := db.msgDocDatabase.GetNewestMsg(ctx, sourceID)
	if err != nil {
		return nil, err
	}
	return db.unmarshalMsg(msgInfo)
}
2023-02-24 10:47:36 +08:00
// GetOldestMsg returns the oldest message stored in mongo for sourceID.
func (db *msgDatabase) GetOldestMsg(ctx context.Context, sourceID string) (msgPb *sdkws.MsgData, err error) {
	msgInfo, err := db.msgDocDatabase.GetOldestMsg(ctx, sourceID)
	if err != nil {
		return nil, err
	}
	return db.unmarshalMsg(msgInfo)
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) unmarshalMsg(msgInfo *unRelationTb.MsgInfoModel) (msgPb *sdkws.MsgData, err error) {
2023-02-15 15:52:32 +08:00
msgPb = &sdkws.MsgData{}
err = proto.Unmarshal(msgInfo.Msg, msgPb)
if err != nil {
return nil, utils.Wrap(err, "")
}
return msgPb, nil
}
2023-02-24 11:37:40 +08:00
// getMsgBySeqs loads messages by seq from mongo for sourceID. Seqs that
// cannot be found are backfilled with generated "exception" placeholder
// messages, so the caller gets one entry per requested seq. diffusionType
// selects the placeholder flavor: WriteDiffusion for per-user storage,
// ReadDiffusion for super-group storage.
func (db *msgDatabase) getMsgBySeqs(ctx context.Context, sourceID string, seqs []int64, diffusionType int) (seqMsgs []*sdkws.MsgData, err error) {
	var hasSeqs []int64
	singleCount := 0
	m := db.msg.GetDocIDSeqsMap(sourceID, seqs)
	for docID, value := range m {
		doc, err := db.msgDocDatabase.FindOneByDocID(ctx, docID)
		if err != nil {
			// Missing doc: its seqs fall through to the exception list below.
			continue
		}
		singleCount = 0
		for i := 0; i < len(doc.Msg); i++ {
			msgPb, err := db.unmarshalMsg(&doc.Msg[i])
			if err != nil {
				return nil, err
			}
			if utils.Contain(msgPb.Seq, value...) {
				seqMsgs = append(seqMsgs, msgPb)
				hasSeqs = append(hasSeqs, msgPb.Seq)
				singleCount++
				// Stop scanning this doc once all its seqs are found.
				if singleCount == len(value) {
					break
				}
			}
		}
	}
	if len(hasSeqs) != len(seqs) {
		// Generate placeholders for every seq not found above.
		var diff []int64
		var exceptionMsg []*sdkws.MsgData
		diff = utils.Difference(hasSeqs, seqs)
		if diffusionType == constant.WriteDiffusion {
			exceptionMsg = db.msg.GenExceptionMessageBySeqs(diff)
		} else if diffusionType == constant.ReadDiffusion {
			exceptionMsg = db.msg.GenExceptionSuperGroupMessageBySeqs(diff, sourceID)
		}
		seqMsgs = append(seqMsgs, exceptionMsg...)
	}
	return seqMsgs, nil
}
2023-02-24 11:37:40 +08:00
func (db *msgDatabase) GetMsgBySeqs(ctx context.Context, userID string, seqs []int64) (successMsgs []*sdkws.MsgData, err error) {
2023-02-23 19:20:58 +08:00
successMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, userID, seqs)
2023-02-15 17:00:43 +08:00
if err != nil {
if err != redis.Nil {
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
2023-03-21 12:28:21 +08:00
log.Error(mcontext.GetOperationID(ctx), "get message from redis exception", err.Error(), failedSeqs)
2023-02-15 17:00:43 +08:00
}
}
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromRedisSuccessCounter, len(successMsgs))
2023-02-15 17:00:43 +08:00
if len(failedSeqs) > 0 {
mongoMsgs, err := db.getMsgBySeqs(ctx, userID, seqs, constant.WriteDiffusion)
if err != nil {
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromMongoFailedCounter, len(failedSeqs))
2023-02-15 17:00:43 +08:00
return nil, err
}
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromMongoSuccessCounter, len(mongoMsgs))
2023-02-15 17:00:43 +08:00
successMsgs = append(successMsgs, mongoMsgs...)
}
return successMsgs, nil
2023-02-10 19:45:24 +08:00
}
2023-02-24 11:37:40 +08:00
func (db *msgDatabase) GetSuperGroupMsgBySeqs(ctx context.Context, groupID string, seqs []int64) (successMsgs []*sdkws.MsgData, err error) {
2023-02-23 19:20:58 +08:00
successMsgs, failedSeqs, err := db.cache.GetMessagesBySeq(ctx, groupID, seqs)
2023-02-15 15:52:32 +08:00
if err != nil {
2023-02-15 17:00:43 +08:00
if err != redis.Nil {
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromRedisFailedCounter, len(failedSeqs))
2023-03-21 12:28:21 +08:00
log.Error(mcontext.GetOperationID(ctx), "get message from redis exception", err.Error(), failedSeqs)
2023-02-15 17:00:43 +08:00
}
2023-02-15 15:52:32 +08:00
}
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromRedisSuccessCounter, len(successMsgs))
2023-02-15 17:00:43 +08:00
if len(failedSeqs) > 0 {
mongoMsgs, err := db.getMsgBySeqs(ctx, groupID, seqs, constant.ReadDiffusion)
if err != nil {
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromMongoFailedCounter, len(failedSeqs))
2023-02-15 17:00:43 +08:00
return nil, err
}
2023-02-24 11:13:16 +08:00
prome.Add(prome.MsgPullFromMongoSuccessCounter, len(mongoMsgs))
2023-02-15 17:00:43 +08:00
successMsgs = append(successMsgs, mongoMsgs...)
2023-02-15 15:52:32 +08:00
}
2023-02-15 17:00:43 +08:00
return successMsgs, nil
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) CleanUpUserMsg(ctx context.Context, userID string) error {
2023-02-15 17:00:43 +08:00
err := db.DeleteUserMsgsAndSetMinSeq(ctx, userID, 0)
2023-02-15 15:52:32 +08:00
if err != nil {
return err
}
2023-02-16 15:20:59 +08:00
err = db.cache.CleanUpOneUserAllMsg(ctx, userID)
2023-02-15 15:52:32 +08:00
return utils.Wrap(err, "")
2023-02-10 19:45:24 +08:00
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) DeleteUserSuperGroupMsgsAndSetMinSeq(ctx context.Context, groupID string, userIDs []string, remainTime int64) error {
2023-02-15 15:52:32 +08:00
var delStruct delMsgRecursionStruct
minSeq, err := db.deleteMsgRecursion(ctx, groupID, unRelationTb.OldestList, &delStruct, remainTime)
if err != nil {
//log.NewError(operationID, utils.GetSelfFuncName(), groupID, "deleteMsg failed")
}
if minSeq == 0 {
return nil
}
//log.NewDebug(operationID, utils.GetSelfFuncName(), "delMsgIDList:", delStruct, "minSeq", minSeq)
for _, userID := range userIDs {
2023-02-16 15:20:59 +08:00
userMinSeq, err := db.cache.GetGroupUserMinSeq(ctx, groupID, userID)
2023-02-15 15:52:32 +08:00
if err != nil && err != redis.Nil {
//log.NewError(operationID, utils.GetSelfFuncName(), "GetGroupUserMinSeq failed", groupID, userID, err.Error())
continue
}
2023-02-16 15:20:59 +08:00
if userMinSeq > minSeq {
err = db.cache.SetGroupUserMinSeq(ctx, groupID, userID, userMinSeq)
2023-02-15 15:52:32 +08:00
} else {
2023-02-16 15:20:59 +08:00
err = db.cache.SetGroupUserMinSeq(ctx, groupID, userID, minSeq)
2023-02-15 15:52:32 +08:00
}
if err != nil {
//log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), groupID, userID, userMinSeq, minSeq)
}
}
return nil
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) DeleteUserMsgsAndSetMinSeq(ctx context.Context, userID string, remainTime int64) error {
2023-02-15 15:52:32 +08:00
var delStruct delMsgRecursionStruct
minSeq, err := db.deleteMsgRecursion(ctx, userID, unRelationTb.OldestList, &delStruct, remainTime)
if err != nil {
return utils.Wrap(err, "")
}
if minSeq == 0 {
return nil
}
2023-02-16 15:20:59 +08:00
return db.cache.SetUserMinSeq(ctx, userID, minSeq)
2023-02-15 15:52:32 +08:00
}
// delMsgRecursionStruct carries accumulated state across the levels of
// deleteMsgRecursion.
type delMsgRecursionStruct struct {
	minSeq    int64    // seq of the last message in the newest fully-deleted doc
	delDocIDs []string // doc IDs queued for physical deletion
}
2023-02-16 15:20:59 +08:00
// getSetMinSeq returns the min seq accumulated so far during the recursion.
func (d *delMsgRecursionStruct) getSetMinSeq() int64 {
	return d.minSeq
}
2023-02-10 19:45:24 +08:00
2023-02-15 15:52:32 +08:00
// index 0....19(del) 20...69
// seq 70
// set minSeq 21
// deleteMsgRecursion walks the msg docs of sourceID starting at index,
// collecting fully-expired docs into delStruct for one batch physical delete,
// and returns the min seq that the caller should persist (0 plus a non-nil
// error on failure). remainTime is the retention period in seconds; send
// times are in milliseconds, hence the *1000 below.
func (db *msgDatabase) deleteMsgRecursion(ctx context.Context, sourceID string, index int64, delStruct *delMsgRecursionStruct, remainTime int64) (int64, error) {
	// find from oldest list
	msgs, err := db.msgDocDatabase.GetMsgsByIndex(ctx, sourceID, index)
	if err != nil || msgs.DocID == "" {
		if err != nil {
			if err == unrelation.ErrMsgListNotExist {
				log.NewDebug(mcontext.GetOperationID(ctx), utils.GetSelfFuncName(), "ID:", sourceID, "index:", index, err.Error())
			} else {
				//log.NewError(operationID, utils.GetSelfFuncName(), "GetUserMsgListByIndex failed", err.Error(), index, ID)
			}
		}
		// Lookup failed or we ran past the last doc: physically delete the
		// docs collected so far and end the recursion, returning the seq to
		// use as the new min (last deleted seq + 1).
		err = db.msgDocDatabase.Delete(ctx, delStruct.delDocIDs)
		if err != nil {
			return 0, err
		}
		return delStruct.getSetMinSeq() + 1, nil
	}
	//log.NewDebug(operationID, "ID:", sourceID, "index:", index, "uid:", msgs.UID, "len:", len(msgs.Msg))
	if int64(len(msgs.Msg)) > db.msg.GetSingleGocMsgNum() {
		log.ZWarn(ctx, "msgs too large", nil, "lenth", len(msgs.Msg), "docID:", msgs.DocID)
	}
	// NOTE(review): msgs.Msg[len(msgs.Msg)-1] panics if a doc exists with an
	// empty Msg slice — confirm GetMsgsByIndex can never return one.
	if msgs.Msg[len(msgs.Msg)-1].SendTime+(remainTime*1000) < utils.GetCurrentTimestampByMill() && msgs.IsFull() {
		// The doc is full and even its newest message has expired: schedule
		// the whole doc for physical deletion and remember its last seq.
		delStruct.delDocIDs = append(delStruct.delDocIDs, msgs.DocID)
		lastMsgPb := &sdkws.MsgData{}
		err = proto.Unmarshal(msgs.Msg[len(msgs.Msg)-1].Msg, lastMsgPb)
		if err != nil {
			//log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
			return 0, utils.Wrap(err, "proto.Unmarshal failed")
		}
		delStruct.minSeq = lastMsgPb.Seq
	} else {
		// Mixed doc: rewrite each expired message's status to MsgDeleted.
		var hasMarkDelFlag bool
		for _, msg := range msgs.Msg {
			msgPb := &sdkws.MsgData{}
			err = proto.Unmarshal(msg.Msg, msgPb)
			if err != nil {
				//log.NewError(operationID, utils.GetSelfFuncName(), err.Error(), len(msgs.Msg)-1, msgs.UID)
				return 0, utils.Wrap(err, "proto.Unmarshal failed")
			}
			if utils.GetCurrentTimestampByMill() > msg.SendTime+(remainTime*1000) {
				msgPb.Status = constant.MsgDeleted
				bytes, _ := proto.Marshal(msgPb)
				msg.Msg = bytes
				msg.SendTime = 0
				hasMarkDelFlag = true
			} else {
				// This message must be kept, so its seq becomes the new min.
				// Flush the pending physical deletions, persist the delete
				// marks made above (if any), and end the recursion here.
				if err := db.msgDocDatabase.Delete(ctx, delStruct.delDocIDs); err != nil {
					return 0, err
				}
				if hasMarkDelFlag {
					if err := db.msgDocDatabase.UpdateOneDoc(ctx, msgs); err != nil {
						return delStruct.getSetMinSeq(), utils.Wrap(err, "")
					}
				}
				return msgPb.Seq, nil
			}
		}
		// NOTE(review): if every message in a non-full doc is expired, the
		// loop falls through with hasMarkDelFlag set but UpdateOneDoc is never
		// called, so the delete marks are not persisted — confirm intended.
	}
	// Continue the recursion with the next doc (index+1).
	seq, err := db.deleteMsgRecursion(ctx, sourceID, index+1, delStruct, remainTime)
	return seq, utils.Wrap(err, "deleteMsg failed")
}
2023-02-16 15:20:59 +08:00
2023-02-24 11:15:40 +08:00
func (db *msgDatabase) GetUserMinMaxSeqInMongoAndCache(ctx context.Context, userID string) (minSeqMongo, maxSeqMongo, minSeqCache, maxSeqCache int64, err error) {
2023-02-16 15:20:59 +08:00
minSeqMongo, maxSeqMongo, err = db.GetMinMaxSeqMongo(ctx, userID)
if err != nil {
return 0, 0, 0, 0, err
}
// from cache
minSeqCache, err = db.cache.GetUserMinSeq(ctx, userID)
if err != nil {
return 0, 0, 0, 0, err
}
maxSeqCache, err = db.cache.GetUserMaxSeq(ctx, userID)
if err != nil {
return 0, 0, 0, 0, err
}
return
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) GetSuperGroupMinMaxSeqInMongoAndCache(ctx context.Context, groupID string) (minSeqMongo, maxSeqMongo, maxSeqCache int64, err error) {
2023-02-16 15:20:59 +08:00
minSeqMongo, maxSeqMongo, err = db.GetMinMaxSeqMongo(ctx, groupID)
if err != nil {
return 0, 0, 0, err
}
maxSeqCache, err = db.cache.GetGroupMaxSeq(ctx, groupID)
if err != nil {
return 0, 0, 0, err
}
return
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) GetMinMaxSeqMongo(ctx context.Context, sourceID string) (minSeqMongo, maxSeqMongo int64, err error) {
2023-03-06 16:23:16 +08:00
oldestMsgMongo, err := db.msgDocDatabase.GetOldestMsg(ctx, sourceID)
2023-02-16 15:20:59 +08:00
if err != nil {
return 0, 0, err
}
msgPb, err := db.unmarshalMsg(oldestMsgMongo)
if err != nil {
return 0, 0, err
}
minSeqMongo = msgPb.Seq
2023-03-06 16:23:16 +08:00
newestMsgMongo, err := db.msgDocDatabase.GetNewestMsg(ctx, sourceID)
2023-02-16 15:20:59 +08:00
if err != nil {
return 0, 0, err
}
msgPb, err = db.unmarshalMsg(newestMsgMongo)
if err != nil {
return 0, 0, err
}
maxSeqMongo = msgPb.Seq
return
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) SetGroupUserMinSeq(ctx context.Context, groupID, userID string, minSeq int64) (err error) {
2023-02-16 15:20:59 +08:00
return db.cache.SetGroupUserMinSeq(ctx, groupID, userID, minSeq)
}
2023-02-24 10:47:36 +08:00
func (db *msgDatabase) SetUserMinSeq(ctx context.Context, userID string, minSeq int64) (err error) {
2023-02-16 15:20:59 +08:00
return db.cache.SetUserMinSeq(ctx, userID, minSeq)
}
2023-02-24 11:01:33 +08:00
// GetGroupUserMinSeq reads the min seq of userID within groupID from the
// seq cache.
func (db *msgDatabase) GetGroupUserMinSeq(ctx context.Context, groupID, userID string) (int64, error) {
	minSeq, err := db.cache.GetGroupUserMinSeq(ctx, groupID, userID)
	return minSeq, err
}