feat: new features merged (#2409)

* fix: GroupApplicationAcceptedNotification

* fix: GroupApplicationAcceptedNotification

* fix: NotificationUserInfoUpdate

* cicd: robot automated change

* fix: component

* fix: getConversationInfo

* feat: cron task

* fix: minio config URL recognition error

* new mongo

* friend incr sync

* mage

* optimize version log

* sync

* group sync

* sync option

* refactor: replace `friend` package with `relation`.

* refactor: update latest commit to relation.

* sync option

* sync

* go.mod

* seq

* update: go mod

* refactor: change incremental to full

* feat: get full friend user ids

* feat: api and config

* seq

* group version

* merge

* seq

* fix: sort by ID to avoid unstable friend sort.

* group

* fix: sort by ID to avoid unstable friend sort.

* user version

* seq

* seq user

* user online

* implement minio expire delete.

* user online

* config

* fix

* implement minio expire delete logic.

* online cache

* feat: implement scheduled deletion of outdated objects in minio.

* update gomake version

* implement FindExpires pagination.

* remove unnecessary incr.

* fix incorrect args call.

* online push

* resolving conflicts

* test

* api prommetrics

* rpc prommetrics

* online status

* sub

* conversation version incremental

* merge seq

* merge online

* merge seq

* GetOwnerConversation

* fix: change incremental syncer router name.

* rockscache batch get

* rockscache seq batch get

* fix: GetMsgDocModelByIndex bug

* update go.mod

* merge

* feat: prometheus

---------

Co-authored-by: withchao <withchao@users.noreply.github.com>
Co-authored-by: Monet Lee <monet_lee@163.com>
Co-authored-by: OpenIM-Gordon <46924906+FGadvancer@users.noreply.github.com>
Co-authored-by: icey-yu <1186114839@qq.com>
Author: chao
Date: 2024-07-16 10:46:21 +08:00
Committed by: GitHub
Parent: 5f52fa19bd
Commit: 4aaf496086
97 changed files with 3743 additions and 1587 deletions
-1
@@ -58,7 +58,6 @@ func (o *FriendApi) GetFriendList(c *gin.Context) {
func (o *FriendApi) GetDesignatedFriends(c *gin.Context) {
a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c)
//a2r.Call(relation.FriendClient.GetDesignatedFriends, o.Client, c, a2r.NewNilReplaceOption(relation.FriendClient.GetDesignatedFriends))
}
func (o *FriendApi) SetFriendRemark(c *gin.Context) {
+2 -5
@@ -29,7 +29,6 @@ import (
"time"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
ginprom "github.com/openimsdk/open-im-server/v3/pkg/common/ginprometheus"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
@@ -72,10 +71,8 @@ func Start(ctx context.Context, index int, config *Config) error {
netDone <- struct{}{}
return
}
p := ginprom.NewPrometheus("app", prommetrics.GetGinCusMetrics("Api"))
p.SetListenAddress(fmt.Sprintf(":%d", prometheusPort))
if err = p.Use(router); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, fmt.Sprintf("prometheus start err: %d", prometheusPort))
if err := prommetrics.ApiInit(prometheusPort); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
netDone <- struct{}{}
}
}()
+19 -4
@@ -2,15 +2,13 @@ package api
import (
"fmt"
"net/http"
"strings"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
"github.com/go-playground/validator/v10"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
@@ -18,8 +16,25 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"net/http"
"strings"
)
func prommetricsGin() gin.HandlerFunc {
return func(c *gin.Context) {
c.Next()
path := c.FullPath()
if c.Writer.Status() == http.StatusNotFound {
prommetrics.HttpCall("<404>", c.Request.Method, c.Writer.Status())
} else {
prommetrics.HttpCall(path, c.Request.Method, c.Writer.Status())
}
if resp := apiresp.GetGinApiResponse(c); resp != nil {
prommetrics.APICall(path, c.Request.Method, resp.ErrCode)
}
}
}
func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.Engine {
disCov.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
@@ -38,7 +53,7 @@ func newGinRouter(disCov discovery.SvcDiscoveryRegistry, config *Config) *gin.En
authRpc := rpcclient.NewAuth(disCov, config.Share.RpcRegisterName.Auth)
thirdRpc := rpcclient.NewThird(disCov, config.Share.RpcRegisterName.Third, config.API.Prometheus.GrafanaURL)
r.Use(gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
r.Use(prommetricsGin(), gin.Recovery(), mw.CorsHandler(), mw.GinParseOperationID(), GinParseToken(authRpc))
u := NewUserApi(*userRpc)
m := NewMessageApi(messageRpc, userRpc, config.Share.IMAdminUserID)
userRouterGroup := r.Group("/user")
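The middleware above collects metrics after the handler chain: c.Next() returns once the request is handled, so the matched route and response status are available to record, and unmatched routes are grouped under a single "<404>" label to keep label cardinality bounded. A minimal sketch of that after-handler pattern, with a hypothetical recordCall standing in for the prommetrics counters:

package main

import (
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
)

// afterHandlerMetrics mirrors the shape of prommetricsGin: run the handler
// chain first, then observe the matched route and response status.
// recordCall is a hypothetical stand-in for the prommetrics counters.
func afterHandlerMetrics(recordCall func(path, method string, status int)) gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next() // handle the request before reading the response state
		path := c.FullPath()
		if c.Writer.Status() == http.StatusNotFound {
			path = "<404>" // group all unmatched routes under one label
		}
		recordCall(path, c.Request.Method, c.Writer.Status())
	}
}

func main() {
	r := gin.New()
	r.Use(afterHandlerMetrics(func(path, method string, status int) {
		log.Printf("http_call path=%s method=%s status=%d", path, method, status)
	}))
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	_ = r.Run(":8080")
}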
+4
@@ -75,6 +75,8 @@ type Client struct {
token string
hbCtx context.Context
hbCancel context.CancelFunc
subLock sync.Mutex
subUserIDs map[string]struct{}
}
// ResetClient updates the client's state with new connection and context information.
@@ -216,6 +218,8 @@ func (c *Client) handleMessage(message []byte) error {
resp, messageErr = c.longConnServer.UserLogout(ctx, binaryReq)
case WsSetBackgroundStatus:
resp, messageErr = c.setAppBackgroundStatus(ctx, binaryReq)
case WsSubUserOnlineStatus:
resp, messageErr = c.longConnServer.SubUserOnlineStatus(ctx, c, binaryReq)
default:
return fmt.Errorf(
"ReqIdentifier failed,sendID:%s,msgIncr:%s,reqIdentifier:%d",
+7 -2
@@ -16,10 +16,10 @@ package msggateway
import (
"crypto/rand"
"github.com/stretchr/testify/assert"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"unsafe"
)
func mockRandom() []byte {
@@ -132,3 +132,8 @@ func BenchmarkDecompressWithSyncPool(b *testing.B) {
assert.Equal(b, nil, err)
}
}
func TestName(t *testing.T) {
t.Log(unsafe.Sizeof(Client{}))
}
+1
@@ -43,6 +43,7 @@ const (
WSKickOnlineMsg = 2002
WsLogoutMsg = 2003
WsSetBackgroundStatus = 2004
WsSubUserOnlineStatus = 2005
WSDataError = 3001
)
+9 -1
@@ -19,6 +19,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
"github.com/openimsdk/tools/discovery"
@@ -31,6 +32,10 @@ import (
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
s.LongConnServer.SetDiscoveryRegistry(disCov, config)
msggateway.RegisterMsgGatewayServer(server, s)
s.userRcp = rpcclient.NewUserRpcClient(disCov, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
if s.ready != nil {
return s.ready(s)
}
return nil
}
@@ -50,18 +55,21 @@ type Server struct {
LongConnServer LongConnServer
config *Config
pushTerminal map[int]struct{}
ready func(srv *Server) error
userRcp rpcclient.UserRpcClient
}
func (s *Server) SetLongConnServer(LongConnServer LongConnServer) {
s.LongConnServer = LongConnServer
}
func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config) *Server {
func NewServer(rpcPort int, longConnServer LongConnServer, conf *Config, ready func(srv *Server) error) *Server {
s := &Server{
rpcPort: rpcPort,
LongConnServer: longConnServer,
pushTerminal: make(map[int]struct{}),
config: conf,
ready: ready,
}
s.pushTerminal[constant.IOSPlatformID] = struct{}{}
s.pushTerminal[constant.AndroidPlatformID] = struct{}{}
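NewServer now takes a ready callback so the hub server can be constructed before its RPC dependencies exist; InitServer invokes the hook once the user client is wired up. A minimal sketch of that flow, with simplified types standing in for msggateway.Server and rpcclient.UserRpcClient:

package main

import "fmt"

// userClient is a stand-in for the user RPC client wired up during init.
type userClient struct{ name string }

type server struct {
	ready      func(*server) error
	userClient *userClient
}

func newServer(ready func(*server) error) *server { return &server{ready: ready} }

func (s *server) initServer() error {
	s.userClient = &userClient{name: "user-rpc"} // dependency becomes available here
	if s.ready != nil {
		return s.ready(s) // caller finishes wiring, e.g. builds the online cache
	}
	return nil
}

func main() {
	srv := newServer(func(s *server) error {
		fmt.Println("ready: building online cache with", s.userClient.name)
		return nil
	})
	if err := srv.initServer(); err != nil {
		fmt.Println(err)
	}
}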
+15 -5
@@ -17,6 +17,8 @@ package msggateway
import (
"context"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
"time"
@@ -26,6 +28,7 @@ import (
type Config struct {
MsgGateway config.MsgGateway
Share config.Share
RedisConfig config.Redis
WebhooksConfig config.Webhooks
Discovery config.Discovery
}
@@ -42,18 +45,25 @@ func Start(ctx context.Context, index int, conf *Config) error {
if err != nil {
return err
}
longServer, err := NewWsServer(
rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
if err != nil {
return err
}
longServer := NewWsServer(
conf,
WithPort(wsPort),
WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
)
if err != nil {
return err
}
hubServer := NewServer(rpcPort, longServer, conf)
hubServer := NewServer(rpcPort, longServer, conf, func(srv *Server) error {
longServer.online = rpccache.NewOnlineCache(srv.userRcp, nil, rdb, longServer.subscriberUserOnlineStatusChanges)
return nil
})
go longServer.ChangeOnlineStatus(4)
netDone := make(chan error)
go func() {
err = hubServer.Start(ctx, index, conf)
+112
@@ -0,0 +1,112 @@
package msggateway
import (
"context"
"crypto/md5"
"encoding/binary"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
pbuser "github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/datautil"
"math/rand"
"strconv"
"time"
)
func (ws *WsServer) ChangeOnlineStatus(concurrent int) {
if concurrent < 1 {
concurrent = 1
}
const renewalTime = cachekey.OnlineExpire / 3
//const renewalTime = time.Second * 10
renewalTicker := time.NewTicker(renewalTime)
requestChs := make([]chan *pbuser.SetUserOnlineStatusReq, concurrent)
changeStatus := make([][]UserState, concurrent)
for i := 0; i < concurrent; i++ {
requestChs[i] = make(chan *pbuser.SetUserOnlineStatusReq, 64)
changeStatus[i] = make([]UserState, 0, 100)
}
mergeTicker := time.NewTicker(time.Second)
local2pb := func(u UserState) *pbuser.UserOnlineStatus {
return &pbuser.UserOnlineStatus{
UserID: u.UserID,
Online: u.Online,
Offline: u.Offline,
}
}
rNum := rand.Uint64()
pushUserState := func(us ...UserState) {
for _, u := range us {
sum := md5.Sum([]byte(u.UserID))
i := (binary.BigEndian.Uint64(sum[:]) + rNum) % uint64(concurrent)
changeStatus[i] = append(changeStatus[i], u)
status := changeStatus[i]
if len(status) == cap(status) {
req := &pbuser.SetUserOnlineStatusReq{
Status: datautil.Slice(status, local2pb),
}
changeStatus[i] = status[:0]
select {
case requestChs[i] <- req:
default:
log.ZError(context.Background(), "user online processing is too slow", nil)
}
}
}
}
pushAllUserState := func() {
for i, status := range changeStatus {
if len(status) == 0 {
continue
}
req := &pbuser.SetUserOnlineStatusReq{
Status: datautil.Slice(status, local2pb),
}
changeStatus[i] = status[:0]
select {
case requestChs[i] <- req:
default:
log.ZError(context.Background(), "user online processing is too slow", nil)
}
}
}
opIdCtx := mcontext.SetOperationID(context.Background(), "r"+strconv.FormatUint(rNum, 10))
doRequest := func(req *pbuser.SetUserOnlineStatusReq) {
ctx, cancel := context.WithTimeout(opIdCtx, time.Second*5)
defer cancel()
if _, err := ws.userClient.Client.SetUserOnlineStatus(ctx, req); err != nil {
log.ZError(ctx, "update user online status", err)
}
}
for i := 0; i < concurrent; i++ {
go func(ch <-chan *pbuser.SetUserOnlineStatusReq) {
for req := range ch {
doRequest(req)
}
}(requestChs[i])
}
for {
select {
case <-mergeTicker.C:
pushAllUserState()
case now := <-renewalTicker.C:
deadline := now.Add(-cachekey.OnlineExpire / 3)
users := ws.clients.GetAllUserStatus(deadline, now)
log.ZDebug(context.Background(), "renewal ticker", "deadline", deadline, "nowtime", now, "num", len(users))
pushUserState(users...)
case state := <-ws.clients.UserState():
log.ZDebug(context.Background(), "OnlineCache user online change", "userID", state.UserID, "online", state.Online, "offline", state.Offline)
pushUserState(state)
}
}
}
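ChangeOnlineStatus shards status updates across the worker channels by hashing the userID, so all updates for one user flow through the same goroutine and keep their order. A minimal sketch of the shard selection; the per-process random offset rNum used above is omitted for brevity:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// shardFor hashes the userID and reduces it modulo the worker count, so
// every update for a given user always maps to the same worker index.
func shardFor(userID string, workers int) int {
	sum := md5.Sum([]byte(userID))
	return int(binary.BigEndian.Uint64(sum[:]) % uint64(workers))
}

func main() {
	for _, id := range []string{"alice", "bob", "carol"} {
		fmt.Printf("%s -> worker %d\n", id, shardFor(id, 4))
	}
}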
+181
@@ -0,0 +1,181 @@
package msggateway
import (
"context"
"encoding/json"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/idutil"
"google.golang.org/protobuf/proto"
"sync"
"time"
)
func (ws *WsServer) subscriberUserOnlineStatusChanges(ctx context.Context, userID string, platformIDs []int32) {
if ws.clients.RecvSubChange(userID, platformIDs) {
log.ZDebug(ctx, "gateway receive subscription message and go back online", "userID", userID, "platformIDs", platformIDs)
} else {
log.ZDebug(ctx, "gateway ignore user online status changes", "userID", userID, "platformIDs", platformIDs)
}
ws.pushUserIDOnlineStatus(ctx, userID, platformIDs)
}
func (ws *WsServer) SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error) {
var sub sdkws.SubUserOnlineStatus
if err := proto.Unmarshal(data.Data, &sub); err != nil {
return nil, err
}
ws.subscription.Sub(client, sub.SubscribeUserID, sub.UnsubscribeUserID)
var resp sdkws.SubUserOnlineStatusTips
if len(sub.SubscribeUserID) > 0 {
resp.Subscribers = make([]*sdkws.SubUserOnlineStatusElem, 0, len(sub.SubscribeUserID))
for _, userID := range sub.SubscribeUserID {
platformIDs, err := ws.online.GetUserOnlinePlatform(ctx, userID)
if err != nil {
return nil, err
}
resp.Subscribers = append(resp.Subscribers, &sdkws.SubUserOnlineStatusElem{
UserID: userID,
OnlinePlatformIDs: platformIDs,
})
}
}
return proto.Marshal(&resp)
}
type subClient struct {
clients map[string]*Client
}
func newSubscription() *Subscription {
return &Subscription{
userIDs: make(map[string]*subClient),
}
}
type Subscription struct {
lock sync.RWMutex
userIDs map[string]*subClient
}
func (s *Subscription) GetClient(userID string) []*Client {
s.lock.RLock()
defer s.lock.RUnlock()
cs, ok := s.userIDs[userID]
if !ok {
return nil
}
clients := make([]*Client, 0, len(cs.clients))
for _, client := range cs.clients {
clients = append(clients, client)
}
return clients
}
func (s *Subscription) DelClient(client *Client) {
client.subLock.Lock()
userIDs := datautil.Keys(client.subUserIDs)
for _, userID := range userIDs {
delete(client.subUserIDs, userID)
}
client.subLock.Unlock()
if len(userIDs) == 0 {
return
}
addr := client.ctx.GetRemoteAddr()
s.lock.Lock()
defer s.lock.Unlock()
for _, userID := range userIDs {
sub, ok := s.userIDs[userID]
if !ok {
continue
}
delete(sub.clients, addr)
if len(sub.clients) == 0 {
delete(s.userIDs, userID)
}
}
}
func (s *Subscription) Sub(client *Client, addUserIDs, delUserIDs []string) {
if len(addUserIDs)+len(delUserIDs) == 0 {
return
}
var (
del = make(map[string]struct{})
add = make(map[string]struct{})
)
client.subLock.Lock()
for _, userID := range delUserIDs {
if _, ok := client.subUserIDs[userID]; !ok {
continue
}
del[userID] = struct{}{}
delete(client.subUserIDs, userID)
}
for _, userID := range addUserIDs {
delete(del, userID)
if _, ok := client.subUserIDs[userID]; ok {
continue
}
client.subUserIDs[userID] = struct{}{}
add[userID] = struct{}{}
}
client.subLock.Unlock()
if len(del)+len(add) == 0 {
return
}
addr := client.ctx.GetRemoteAddr()
s.lock.Lock()
defer s.lock.Unlock()
for userID := range del {
sub, ok := s.userIDs[userID]
if !ok {
continue
}
delete(sub.clients, addr)
if len(sub.clients) == 0 {
delete(s.userIDs, userID)
}
}
for userID := range add {
sub, ok := s.userIDs[userID]
if !ok {
sub = &subClient{clients: make(map[string]*Client)}
s.userIDs[userID] = sub
}
sub.clients[addr] = client
}
}
func (ws *WsServer) pushUserIDOnlineStatus(ctx context.Context, userID string, platformIDs []int32) {
clients := ws.subscription.GetClient(userID)
if len(clients) == 0 {
return
}
msgContent, err := json.Marshal(platformIDs)
if err != nil {
log.ZError(ctx, "pushUserIDOnlineStatus json.Marshal", err)
return
}
now := time.Now().UnixMilli()
msgID := idutil.GetMsgIDByMD5(userID)
msg := &sdkws.MsgData{
SendID: userID,
ClientMsgID: msgID,
ServerMsgID: msgID,
SenderPlatformID: constant.AdminPlatformID,
SessionType: constant.NotificationChatType,
ContentType: constant.UserSubscribeOnlineStatusNotification,
Content: msgContent,
SendTime: now,
CreateTime: now,
}
for _, client := range clients {
msg.RecvID = client.UserID
if err := client.PushMessage(ctx, msg); err != nil {
log.ZError(ctx, "UserSubscribeOnlineStatusNotification push failed", err, "userID", client.UserID, "platformID", client.PlatformID, "changeUserID", userID, "content", msgContent)
}
}
}
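Subscription.Sub computes add/remove deltas against the client's current subscription set before touching the shared index; an unsubscribe only counts if the user was actually subscribed, and an ID appearing in both lists is treated as a subscribe. A minimal standalone sketch of that reconciliation, assuming the del-then-add order shown above:

package main

import "fmt"

// reconcile mirrors the delta computation done under the client's lock:
// process unsubscribes first, then let subscribes in the same request
// cancel a pending unsubscribe for the same ID.
func reconcile(current map[string]struct{}, addIDs, delIDs []string) (add, del map[string]struct{}) {
	add = make(map[string]struct{})
	del = make(map[string]struct{})
	for _, id := range delIDs {
		if _, ok := current[id]; ok {
			del[id] = struct{}{}
			delete(current, id)
		}
	}
	for _, id := range addIDs {
		delete(del, id) // subscribe wins over unsubscribe in the same request
		if _, ok := current[id]; !ok {
			current[id] = struct{}{}
			add[id] = struct{}{}
		}
	}
	return add, del
}

func main() {
	current := map[string]struct{}{"u1": {}, "u2": {}}
	add, del := reconcile(current, []string{"u3", "u2"}, []string{"u1", "u3"})
	fmt.Println(len(add), len(del)) // 1 1: u3 added, u1 removed
}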
+160 -110
@@ -1,135 +1,185 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package msggateway
import (
"context"
"sync"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"sync"
"time"
)
type UserMap struct {
m sync.Map
type UserMap interface {
GetAll(userID string) ([]*Client, bool)
Get(userID string, platformID int) ([]*Client, bool, bool)
Set(userID string, v *Client)
DeleteClients(userID string, clients []*Client) (isDeleteUser bool)
UserState() <-chan UserState
GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState
RecvSubChange(userID string, platformIDs []int32) bool
}
func newUserMap() *UserMap {
return &UserMap{}
type UserState struct {
UserID string
Online []int32
Offline []int32
}
func (u *UserMap) GetAll(key string) ([]*Client, bool) {
allClients, ok := u.m.Load(key)
type UserPlatform struct {
Time time.Time
Clients []*Client
}
func (u *UserPlatform) PlatformIDs() []int32 {
if len(u.Clients) == 0 {
return nil
}
platformIDs := make([]int32, 0, len(u.Clients))
for _, client := range u.Clients {
platformIDs = append(platformIDs, int32(client.PlatformID))
}
return platformIDs
}
func (u *UserPlatform) PlatformIDSet() map[int32]struct{} {
if len(u.Clients) == 0 {
return nil
}
platformIDs := make(map[int32]struct{})
for _, client := range u.Clients {
platformIDs[int32(client.PlatformID)] = struct{}{}
}
return platformIDs
}
func newUserMap() UserMap {
return &userMap{
data: make(map[string]*UserPlatform),
ch: make(chan UserState, 10000),
}
}
type userMap struct {
lock sync.RWMutex
data map[string]*UserPlatform
ch chan UserState
}
func (u *userMap) RecvSubChange(userID string, platformIDs []int32) bool {
u.lock.RLock()
defer u.lock.RUnlock()
result, ok := u.data[userID]
if !ok {
return false
}
localPlatformIDs := result.PlatformIDSet()
for _, platformID := range platformIDs {
delete(localPlatformIDs, platformID)
}
if len(localPlatformIDs) == 0 {
return false
}
u.push(userID, result, nil)
return true
}
func (u *userMap) push(userID string, userPlatform *UserPlatform, offline []int32) bool {
select {
case u.ch <- UserState{UserID: userID, Online: userPlatform.PlatformIDs(), Offline: offline}:
userPlatform.Time = time.Now()
return true
default:
return false
}
}
func (u *userMap) GetAll(userID string) ([]*Client, bool) {
u.lock.RLock()
defer u.lock.RUnlock()
result, ok := u.data[userID]
if !ok {
return nil, false
}
return result.Clients, true
}
func (u *userMap) Get(userID string, platformID int) ([]*Client, bool, bool) {
u.lock.RLock()
defer u.lock.RUnlock()
result, ok := u.data[userID]
if !ok {
return nil, false, false
}
var clients []*Client
for _, client := range result.Clients {
if client.PlatformID == platformID {
clients = append(clients, client)
}
}
return clients, true, len(clients) > 0
}
func (u *userMap) Set(userID string, client *Client) {
u.lock.Lock()
defer u.lock.Unlock()
result, ok := u.data[userID]
if ok {
return allClients.([]*Client), ok
}
return nil, ok
}
func (u *UserMap) Get(key string, platformID int) ([]*Client, bool, bool) {
allClients, userExisted := u.m.Load(key)
if userExisted {
var clients []*Client
for _, client := range allClients.([]*Client) {
if client.PlatformID == platformID {
clients = append(clients, client)
}
}
if len(clients) > 0 {
return clients, userExisted, true
}
return clients, userExisted, false
}
return nil, userExisted, false
}
// Set adds a client to the map.
func (u *UserMap) Set(key string, v *Client) {
allClients, existed := u.m.Load(key)
if existed {
log.ZDebug(context.Background(), "Set existed", "user_id", key, "client_user_id", v.UserID)
oldClients := allClients.([]*Client)
oldClients = append(oldClients, v)
u.m.Store(key, oldClients)
result.Clients = append(result.Clients, client)
} else {
log.ZDebug(context.Background(), "Set not existed", "user_id", key, "client_user_id", v.UserID)
var clients []*Client
clients = append(clients, v)
u.m.Store(key, clients)
result = &UserPlatform{
Clients: []*Client{client},
}
u.data[userID] = result
}
u.push(client.UserID, result, nil)
}
func (u *UserMap) delete(key string, connRemoteAddr string) (isDeleteUser bool) {
// Attempt to load the clients associated with the key.
allClients, existed := u.m.Load(key)
if !existed {
// Return false immediately if the key does not exist.
func (u *userMap) DeleteClients(userID string, clients []*Client) (isDeleteUser bool) {
if len(clients) == 0 {
return false
}
// Convert allClients to a slice of *Client.
oldClients := allClients.([]*Client)
var remainingClients []*Client
for _, client := range oldClients {
// Keep clients that do not match the connRemoteAddr.
if client.ctx.GetRemoteAddr() != connRemoteAddr {
remainingClients = append(remainingClients, client)
}
u.lock.Lock()
defer u.lock.Unlock()
result, ok := u.data[userID]
if !ok {
return false
}
// If no clients remain after filtering, delete the key from the map.
if len(remainingClients) == 0 {
u.m.Delete(key)
return true
}
// Otherwise, update the key with the remaining clients.
u.m.Store(key, remainingClients)
return false
}
func (u *UserMap) deleteClients(key string, clients []*Client) (isDeleteUser bool) {
m := datautil.SliceToMapAny(clients, func(c *Client) (string, struct{}) {
return c.ctx.GetRemoteAddr(), struct{}{}
offline := make([]int32, 0, len(clients))
deleteAddr := datautil.SliceSetAny(clients, func(client *Client) string {
return client.ctx.GetRemoteAddr()
})
allClients, existed := u.m.Load(key)
if !existed {
// If the key doesn't exist, return false.
return false
}
// Filter out clients that are in the deleteMap.
oldClients := allClients.([]*Client)
var remainingClients []*Client
for _, client := range oldClients {
if _, shouldBeDeleted := m[client.ctx.GetRemoteAddr()]; !shouldBeDeleted {
remainingClients = append(remainingClients, client)
tmp := result.Clients
result.Clients = result.Clients[:0]
for _, client := range tmp {
if _, delCli := deleteAddr[client.ctx.GetRemoteAddr()]; delCli {
offline = append(offline, int32(client.PlatformID))
} else {
result.Clients = append(result.Clients, client)
}
}
// Update or delete the key based on the remaining clients.
if len(remainingClients) == 0 {
u.m.Delete(key)
return true
defer u.push(userID, result, offline)
if len(result.Clients) > 0 {
return false
}
u.m.Store(key, remainingClients)
return false
delete(u.data, userID)
return true
}
func (u *UserMap) DeleteAll(key string) {
u.m.Delete(key)
func (u *userMap) GetAllUserStatus(deadline time.Time, nowtime time.Time) []UserState {
u.lock.RLock()
defer u.lock.RUnlock()
result := make([]UserState, 0, len(u.data))
for userID, userPlatform := range u.data {
if userPlatform.Time.Before(deadline) {
continue
}
userPlatform.Time = nowtime
online := make([]int32, 0, len(userPlatform.Clients))
for _, client := range userPlatform.Clients {
online = append(online, int32(client.PlatformID))
}
result = append(result, UserState{UserID: userID, Online: online})
}
return result
}
func (u *userMap) UserState() <-chan UserState {
return u.ch
}
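userMap.push relies on a buffered channel plus select/default, so publishing a state change never blocks the connection path; when the consumer lags, events are dropped instead of stalling the websocket handlers. A minimal sketch of that non-blocking send:

package main

import "fmt"

// trySend is the non-blocking send at the heart of userMap.push: if the
// buffer is full, drop the event rather than block the caller.
func trySend(ch chan string, v string) bool {
	select {
	case ch <- v:
		return true
	default:
		return false // buffer full: drop rather than block
	}
}

func main() {
	ch := make(chan string, 1)
	fmt.Println(trySend(ch, "online"))  // true
	fmt.Println(trySend(ch, "offline")) // false: buffer of 1 already full
}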
+34 -24
@@ -18,6 +18,7 @@ import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
pbAuth "github.com/openimsdk/protocol/auth"
"github.com/openimsdk/tools/mcontext"
"net/http"
@@ -48,6 +49,7 @@ type LongConnServer interface {
KickUserConn(client *Client) error
UnRegister(c *Client)
SetKickHandlerInfo(i *kickHandler)
SubUserOnlineStatus(ctx context.Context, client *Client, data *Req) ([]byte, error)
Compressor
Encoder
MessageHandler
@@ -60,7 +62,9 @@ type WsServer struct {
registerChan chan *Client
unregisterChan chan *Client
kickHandlerChan chan *kickHandler
clients *UserMap
clients UserMap
online *rpccache.OnlineCache
subscription *Subscription
clientPool sync.Pool
onlineUserNum atomic.Int64
onlineUserConnNum atomic.Int64
@@ -90,18 +94,18 @@ func (ws *WsServer) SetDiscoveryRegistry(disCov discovery.SvcDiscoveryRegistry,
ws.disCov = disCov
}
func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
if err != nil {
log.ZWarn(ctx, "SetUserStatus err", err)
}
switch status {
case constant.Online:
ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
case constant.Offline:
ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
}
}
//func (ws *WsServer) SetUserOnlineStatus(ctx context.Context, client *Client, status int32) {
// err := ws.userClient.SetUserStatus(ctx, client.UserID, status, client.PlatformID)
// if err != nil {
// log.ZWarn(ctx, "SetUserStatus err", err)
// }
// switch status {
// case constant.Online:
// ws.webhookAfterUserOnline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOnline, client.UserID, client.PlatformID, client.IsBackground, client.ctx.GetConnID())
// case constant.Offline:
// ws.webhookAfterUserOffline(ctx, &ws.msgGatewayConfig.WebhooksConfig.AfterUserOffline, client.UserID, client.PlatformID, client.ctx.GetConnID())
// }
//}
func (ws *WsServer) UnRegister(c *Client) {
ws.unregisterChan <- c
@@ -119,11 +123,13 @@ func (ws *WsServer) GetUserPlatformCons(userID string, platform int) ([]*Client,
return ws.clients.Get(userID, platform)
}
func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
var config configs
for _, o := range opts {
o(&config)
}
//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
v := validator.New()
return &WsServer{
msgGatewayConfig: msgGatewayConfig,
@@ -141,10 +147,11 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) (*WsServer, error) {
kickHandlerChan: make(chan *kickHandler, 1000),
validate: v,
clients: newUserMap(),
subscription: newSubscription(),
Compressor: NewGzipCompressor(),
Encoder: NewGobEncoder(),
webhookClient: webhook.NewWebhookClient(msgGatewayConfig.WebhooksConfig.URL),
}, nil
}
}
func (ws *WsServer) Run(done chan error) error {
@@ -278,11 +285,11 @@ func (ws *WsServer) registerClient(client *Client) {
}()
}
wg.Add(1)
go func() {
defer wg.Done()
ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
}()
//wg.Add(1)
//go func() {
// defer wg.Done()
// ws.SetUserOnlineStatus(client.ctx, client, constant.Online)
//}()
wg.Wait()
@@ -309,7 +316,7 @@ func getRemoteAdders(client []*Client) string {
}
func (ws *WsServer) KickUserConn(client *Client) error {
ws.clients.deleteClients(client.UserID, []*Client{client})
ws.clients.DeleteClients(client.UserID, []*Client{client})
return client.KickOnlineMessage()
}
@@ -325,7 +332,7 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
if !clientOK {
return
}
ws.clients.deleteClients(newClient.UserID, oldClients)
ws.clients.DeleteClients(newClient.UserID, oldClients)
for _, c := range oldClients {
err := c.KickOnlineMessage()
if err != nil {
@@ -345,13 +352,16 @@ func (ws *WsServer) multiTerminalLoginChecker(clientOK bool, oldClients []*Clien
func (ws *WsServer) unregisterClient(client *Client) {
defer ws.clientPool.Put(client)
isDeleteUser := ws.clients.delete(client.UserID, client.ctx.GetRemoteAddr())
isDeleteUser := ws.clients.DeleteClients(client.UserID, []*Client{client})
if isDeleteUser {
ws.onlineUserNum.Add(-1)
prommetrics.OnlineUserGauge.Dec()
}
ws.onlineUserConnNum.Add(-1)
ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
client.subLock.Lock()
clear(client.subUserIDs)
client.subLock.Unlock()
//ws.SetUserOnlineStatus(client.ctx, client, constant.Offline)
log.ZInfo(client.ctx, "user offline", "close reason", client.closedErr, "online user Num",
ws.onlineUserNum.Load(), "online user conn Num",
ws.onlineUserConnNum.Load(),
+14 -14
@@ -17,6 +17,7 @@ package msgtransfer
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
@@ -29,16 +30,12 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
@@ -82,12 +79,21 @@ func Start(ctx context.Context, index int, config *Config) error {
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
msgModel := redis.NewMsgCache(rdb)
seqModel := redis.NewSeqCache(rdb)
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil {
return err
}
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
}
seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
if err != nil {
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
@@ -130,14 +136,8 @@ func (m *MsgTransfer) Start(index int, config *Config) error {
netDone <- struct{}{}
return
}
proreg := prometheus.NewRegistry()
proreg.MustRegister(
collectors.NewGoCollector(),
)
proreg.MustRegister(prommetrics.GetGrpcCusMetrics("Transfer", &config.Share)...)
http.Handle("/metrics", promhttp.HandlerFor(proreg, promhttp.HandlerOpts{Registry: proreg}))
err = http.ListenAndServe(fmt.Sprintf(":%d", prometheusPort), nil)
if err != nil && err != http.ErrServerClosed {
if err := prommetrics.TransferInit(prometheusPort); err != nil && err != http.ErrServerClosed {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{}
}
+40 -5
@@ -28,6 +28,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
pbchat "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/msggateway"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
@@ -45,6 +46,7 @@ type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup
offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher
onlineCache *rpccache.OnlineCache
groupLocalCache *rpccache.GroupLocalCache
conversationLocalCache *rpccache.ConversationLocalCache
msgRpcClient rpcclient.MessageRpcClient
@@ -63,16 +65,17 @@ func NewConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher,
if err != nil {
return nil, err
}
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.groupRpcClient = rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.msgRpcClient = rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
consumerHandler.conversationRpcClient = rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient,
&config.LocalCacheConfig, rdb)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationRpcClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
consumerHandler.config = config
consumerHandler.onlineCache = rpccache.NewOnlineCache(userRpcClient, consumerHandler.groupLocalCache, rdb, nil)
return &consumerHandler, nil
}
@@ -125,12 +128,12 @@ func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim s
}
// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) error {
func (c *ConsumerHandler) Push2User(ctx context.Context, userIDs []string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get msg from msg_transfer And push msg", "userIDs", userIDs, "msg", msg.String())
if err := c.webhookBeforeOnlinePush(ctx, &c.config.WebhooksConfig.BeforeOnlinePush, userIDs, msg); err != nil {
return err
}
wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, userIDs)
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, userIDs)
if err != nil {
return err
}
@@ -179,6 +182,38 @@ func (c *ConsumerHandler) shouldPushOffline(_ context.Context, msg *sdkws.MsgDat
return true
}
func (c *ConsumerHandler) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) ([]*msggateway.SingleMsgToUserResults, error) {
var (
onlineUserIDs []string
offlineUserIDs []string
)
for _, userID := range pushToUserIDs {
online, err := c.onlineCache.GetUserOnline(ctx, userID)
if err != nil {
return nil, err
}
if online {
onlineUserIDs = append(onlineUserIDs, userID)
} else {
offlineUserIDs = append(offlineUserIDs, userID)
}
}
var result []*msggateway.SingleMsgToUserResults
if len(onlineUserIDs) > 0 {
var err error
result, err = c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return nil, err
}
}
for _, userID := range offlineUserIDs {
result = append(result, &msggateway.SingleMsgToUserResults{
UserID: userID,
})
}
return result, nil
}
func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *sdkws.MsgData) (err error) {
log.ZDebug(ctx, "Get group msg from msg_transfer and push msg", "msg", msg.String(), "groupID", groupID)
var pushToUserIDs []string
@@ -192,7 +227,7 @@ func (c *ConsumerHandler) Push2Group(ctx context.Context, groupID string, msg *s
return err
}
wsResults, err := c.onlinePusher.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
wsResults, err := c.GetConnsAndOnlinePush(ctx, msg, pushToUserIDs)
if err != nil {
return err
}
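The new GetConnsAndOnlinePush first partitions recipients using the online cache, then only queries the gateways for users with at least one live connection; offline users get empty results appended directly. A minimal sketch of the partitioning step, with isOnline standing in for onlineCache.GetUserOnline (error handling omitted):

package main

import "fmt"

// splitByOnline consults the online cache per recipient so the gateway
// fan-out can be limited to users who are actually connected.
func splitByOnline(userIDs []string, isOnline func(string) bool) (online, offline []string) {
	for _, id := range userIDs {
		if isOnline(id) {
			online = append(online, id)
		} else {
			offline = append(offline, id)
		}
	}
	return online, offline
}

func main() {
	online, offline := splitByOnline(
		[]string{"u1", "u2", "u3"},
		func(id string) bool { return id == "u2" },
	)
	fmt.Println("online:", online, "offline:", offline)
}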
+11 -2
@@ -86,12 +86,21 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
msgModel := redis.NewMsgCache(rdb)
seqModel := redis.NewSeqCache(rdb)
conversationClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)
userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
groupRpcClient := rpcclient.NewGroupRpcClient(client, config.Share.RpcRegisterName.Group)
friendRpcClient := rpcclient.NewFriendRpcClient(client, config.Share.RpcRegisterName.Friend)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqModel, &config.KafkaConfig)
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
}
seqConversationCache := redis.NewSeqConversationCacheRedis(rdb, seqConversation)
seqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
if err != nil {
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
+51 -6
@@ -19,13 +19,17 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"path"
"strconv"
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"go.mongodb.org/mongo-driver/mongo"
"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -283,6 +287,52 @@ func (t *thirdServer) apiAddress(prefix, name string) string {
return prefix + name
}
func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
var conf config.Third
expireTime := time.UnixMilli(req.ExpireTime)
findPagination := &sdkws.RequestPagination{
PageNumber: 1,
ShowNumber: 1000,
}
for {
total, models, err := t.s3dataBase.FindByExpires(ctx, expireTime, findPagination)
if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
return nil, errs.Wrap(err)
}
needDelObjectKeys := make([]string, 0)
for _, model := range models {
needDelObjectKeys = append(needDelObjectKeys, model.Key)
}
needDelObjectKeys = datautil.Distinct(needDelObjectKeys)
for _, key := range needDelObjectKeys {
count, err := t.s3dataBase.FindNotDelByS3(ctx, key, expireTime)
if err != nil && errs.Unwrap(err) != mongo.ErrNoDocuments {
return nil, errs.Wrap(err)
}
if int(count) < 1 && t.minio != nil {
thumbnailKey, err := t.getMinioImageThumbnailKey(ctx, key)
if err != nil {
return nil, errs.Wrap(err)
}
t.s3dataBase.DeleteObject(ctx, thumbnailKey)
t.s3dataBase.DelS3Key(ctx, conf.Object.Enable, needDelObjectKeys...)
t.s3dataBase.DeleteObject(ctx, key)
}
}
for _, model := range models {
err := t.s3dataBase.DeleteSpecifiedData(ctx, model.Engine, model.Name)
if err != nil {
return nil, errs.Wrap(err)
}
}
if total < int64(findPagination.ShowNumber) {
break
}
}
return &third.DeleteOutdatedDataResp{}, nil
}
type FormDataMate struct {
Name string `json:"name"`
Size int64 `json:"size"`
@@ -290,8 +340,3 @@ type FormDataMate struct {
Group string `json:"group"`
Key string `json:"key"`
}
func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
//TODO implement me
panic("implement me")
}
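DeleteOutdatedData drains expired objects page by page: each pass re-reads the first page (the previous rows were just deleted) and the loop stops once a page comes back smaller than ShowNumber. A minimal sketch of that loop shape, with findExpired and deleteOne as hypothetical stand-ins for the s3dataBase calls:

package main

import "fmt"

// purgeExpired fetches a fixed-size page of expired keys, deletes them, and
// stops once a page returns fewer rows than requested.
func purgeExpired(findExpired func(size int) []string, deleteOne func(string) error, size int) error {
	for {
		keys := findExpired(size) // always the first page: previous rows were deleted
		for _, k := range keys {
			if err := deleteOne(k); err != nil {
				return err
			}
		}
		if len(keys) < size {
			return nil // short page means nothing left to purge
		}
	}
}

func main() {
	store := []string{"obj1", "obj2", "obj3"}
	find := func(size int) []string {
		if size > len(store) {
			size = len(store)
		}
		return store[:size]
	}
	del := func(k string) error {
		store = store[1:]
		fmt.Println("deleted", k)
		return nil
	}
	fmt.Println(purgeExpired(find, del, 2))
}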
+13 -3
@@ -31,9 +31,9 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cos"
"github.com/openimsdk/tools/s3/kodo"
"github.com/openimsdk/tools/s3/minio"
"github.com/openimsdk/tools/s3/oss"
"github.com/openimsdk/tools/s3/kodo"
"google.golang.org/grpc"
)
@@ -43,6 +43,7 @@ type thirdServer struct {
userRpcClient rpcclient.UserRpcClient
defaultExpire time.Duration
config *Config
minio *minio.Minio
}
type Config struct {
@@ -75,10 +76,14 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
}
// Select the oss method according to the profile policy
enable := config.RpcConfig.Object.Enable
var o s3.Interface
var (
o s3.Interface
minioCli *minio.Minio
)
switch enable {
case "minio":
o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
minioCli, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
o = minioCli
case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss":
@@ -98,10 +103,15 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7,
config: config,
minio: minioCli,
})
return nil
}
func (t *thirdServer) getMinioImageThumbnailKey(ctx context.Context, name string) (string, error) {
return t.minio.GetImageThumbnailKey(ctx, name)
}
func (t *thirdServer) FcmUpdateToken(ctx context.Context, req *third.FcmUpdateTokenReq) (resp *third.FcmUpdateTokenResp, err error) {
err = t.thirdDatabase.FcmUpdateToken(ctx, req.Account, int(req.PlatformID), req.FcmToken, req.ExpireTime)
if err != nil {
+122
@@ -0,0 +1,122 @@
package user
import (
"context"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
pbuser "github.com/openimsdk/protocol/user"
)
func (s *userServer) getUserOnlineStatus(ctx context.Context, userID string) (*pbuser.OnlineStatus, error) {
platformIDs, err := s.online.GetOnline(ctx, userID)
if err != nil {
return nil, err
}
status := pbuser.OnlineStatus{
UserID: userID,
PlatformIDs: platformIDs,
}
if len(platformIDs) > 0 {
status.Status = constant.Online
} else {
status.Status = constant.Offline
}
return &status, nil
}
func (s *userServer) getUsersOnlineStatus(ctx context.Context, userIDs []string) ([]*pbuser.OnlineStatus, error) {
res := make([]*pbuser.OnlineStatus, 0, len(userIDs))
for _, userID := range userIDs {
status, err := s.getUserOnlineStatus(ctx, userID)
if err != nil {
return nil, err
}
res = append(res, status)
}
return res, nil
}
// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (*pbuser.SubscribeOrCancelUsersStatusResp, error) {
if req.Genre == constant.SubscriberUser {
err := s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
var status []*pbuser.OnlineStatus
status, err = s.getUsersOnlineStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
} else if req.Genre == constant.Unsubscribe {
err := s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
}
return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}
// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (*pbuser.GetUserStatusResp, error) {
res, err := s.getUsersOnlineStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.GetUserStatusResp{StatusList: res}, nil
}
// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (*pbuser.SetUserStatusResp, error) {
var (
online []int32
offline []int32
)
switch req.Status {
case constant.Online:
online = []int32{req.PlatformID}
case constant.Offline:
offline = []int32{req.PlatformID}
}
if err := s.online.SetUserOnline(ctx, req.UserID, online, offline); err != nil {
return nil, err
}
list, err := s.db.GetSubscribedList(ctx, req.UserID)
if err != nil {
return nil, err
}
for _, userID := range list {
tips := &sdkws.UserStatusChangeTips{
FromUserID: req.UserID,
ToUserID: userID,
Status: req.Status,
PlatformID: req.PlatformID,
}
s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
}
return &pbuser.SetUserStatusResp{}, nil
}
// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context, req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
if err != nil {
return nil, err
}
onlineStatusList, err := s.getUsersOnlineStatus(ctx, userList)
if err != nil {
return nil, err
}
return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}
func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
for _, status := range req.Status {
if err := s.online.SetUserOnline(ctx, status.UserID, status.Online, status.Offline); err != nil {
return nil, err
}
}
return &pbuser.SetUserOnlineStatusResp{}, nil
}
+3 -75
@@ -19,6 +19,7 @@ import (
"errors"
"github.com/openimsdk/open-im-server/v3/internal/rpc/friend"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@@ -50,6 +51,7 @@ import (
)
type userServer struct {
online cache.OnlineCache
db controller.UserDatabase
friendNotificationSender *friend.FriendNotificationSender
userNotificationSender *UserNotificationSender
@@ -98,6 +100,7 @@ func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegi
msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
localcache.InitLocalCache(&config.LocalCacheConfig)
u := &userServer{
online: redis.NewUserOnline(rdb),
db: database,
RegisterCenter: client,
friendRpcClient: &friendRpcClient,
@@ -329,76 +332,6 @@ func (s *userServer) GetAllUserID(ctx context.Context, req *pbuser.GetAllUserIDR
return &pbuser.GetAllUserIDResp{Total: int32(total), UserIDs: userIDs}, nil
}
// SubscribeOrCancelUsersStatus Subscribe online or cancel online users.
func (s *userServer) SubscribeOrCancelUsersStatus(ctx context.Context, req *pbuser.SubscribeOrCancelUsersStatusReq) (resp *pbuser.SubscribeOrCancelUsersStatusResp, err error) {
if req.Genre == constant.SubscriberUser {
err = s.db.SubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
var status []*pbuser.OnlineStatus
status, err = s.db.GetUserStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.SubscribeOrCancelUsersStatusResp{StatusList: status}, nil
} else if req.Genre == constant.Unsubscribe {
err = s.db.UnsubscribeUsersStatus(ctx, req.UserID, req.UserIDs)
if err != nil {
return nil, err
}
}
return &pbuser.SubscribeOrCancelUsersStatusResp{}, nil
}
// GetUserStatus Get the online status of the user.
func (s *userServer) GetUserStatus(ctx context.Context, req *pbuser.GetUserStatusReq) (resp *pbuser.GetUserStatusResp,
err error) {
onlineStatusList, err := s.db.GetUserStatus(ctx, req.UserIDs)
if err != nil {
return nil, err
}
return &pbuser.GetUserStatusResp{StatusList: onlineStatusList}, nil
}
// SetUserStatus Synchronize user's online status.
func (s *userServer) SetUserStatus(ctx context.Context, req *pbuser.SetUserStatusReq) (resp *pbuser.SetUserStatusResp,
err error) {
err = s.db.SetUserStatus(ctx, req.UserID, req.Status, req.PlatformID)
if err != nil {
return nil, err
}
list, err := s.db.GetSubscribedList(ctx, req.UserID)
if err != nil {
return nil, err
}
for _, userID := range list {
tips := &sdkws.UserStatusChangeTips{
FromUserID: req.UserID,
ToUserID: userID,
Status: req.Status,
PlatformID: req.PlatformID,
}
s.userNotificationSender.UserStatusChangeNotification(ctx, tips)
}
return &pbuser.SetUserStatusResp{}, nil
}
// GetSubscribeUsersStatus Get the online status of subscribers.
func (s *userServer) GetSubscribeUsersStatus(ctx context.Context,
req *pbuser.GetSubscribeUsersStatusReq) (*pbuser.GetSubscribeUsersStatusResp, error) {
userList, err := s.db.GetAllSubscribeList(ctx, req.UserID)
if err != nil {
return nil, err
}
onlineStatusList, err := s.db.GetUserStatus(ctx, userList)
if err != nil {
return nil, err
}
return &pbuser.GetSubscribeUsersStatusResp{StatusList: onlineStatusList}, nil
}
// ProcessUserCommandAdd user general function add.
func (s *userServer) ProcessUserCommandAdd(ctx context.Context, req *pbuser.ProcessUserCommandAddReq) (*pbuser.ProcessUserCommandAddResp, error) {
err := authverify.CheckAccessV3(ctx, req.UserID, s.config.Share.IMAdminUserID)
@@ -736,8 +669,3 @@ func (s *userServer) SortQuery(ctx context.Context, req *pbuser.SortQueryReq) (*
}
return &pbuser.SortQueryResp{Users: convert.UsersDB2Pb(users)}, nil
}
func (s *userServer) SetUserOnlineStatus(ctx context.Context, req *pbuser.SetUserOnlineStatusReq) (*pbuser.SetUserOnlineStatusResp, error) {
//TODO implement me
panic("implement me")
}
+25 -3
@@ -20,6 +20,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discoveryregister"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mw"
"google.golang.org/grpc"
@@ -39,7 +40,7 @@ type CronTaskConfig struct {
}
func Start(ctx context.Context, config *CronTaskConfig) error {
log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime, "msgDestructTime", config.CronTask.RetainChatRecords)
log.CInfo(ctx, "CRON-TASK server is initializing", "chatRecordsClearTime", config.CronTask.CronExecuteTime, "msgDestructTime", config.CronTask.RetainChatRecords)
if config.CronTask.RetainChatRecords < 1 {
return errs.New("msg destruct time must be greater than 1").Wrap()
}
@@ -66,10 +67,31 @@ func Start(ctx context.Context, config *CronTaskConfig) error {
}
log.ZInfo(ctx, "cron clear chat records success", "deltime", deltime, "cont", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.ChatRecordsClearTime, clearFunc); err != nil {
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, clearFunc); err != nil {
return errs.Wrap(err)
}
log.ZInfo(ctx, "start cron task", "chatRecordsClearTime", config.CronTask.ChatRecordsClearTime)
tConn, err := client.GetConn(ctx, config.Share.RpcRegisterName.Third)
if err != nil {
return err
}
thirdClient := third.NewThirdClient(tConn)
deleteFunc := func() {
now := time.Now()
deleteTime := now.Add(-time.Hour * 24 * time.Duration(config.CronTask.FileExpireTime))
ctx := mcontext.SetOperationID(ctx, fmt.Sprintf("cron_%d_%d", os.Getpid(), deleteTime.UnixMilli()))
log.ZInfo(ctx, "deleteoutDatedData ", "deletetime", deleteTime, "timestamp", deleteTime.UnixMilli())
if _, err := thirdClient.DeleteOutdatedData(ctx, &third.DeleteOutdatedDataReq{ExpireTime: deleteTime.UnixMilli()}); err != nil {
log.ZError(ctx, "cron deleteoutDatedData failed", err, "deleteTime", deleteTime, "cont", time.Since(now))
return
}
log.ZInfo(ctx, "cron deleteoutDatedData success", "deltime", deleteTime, "cont", time.Since(now))
}
if _, err := crontab.AddFunc(config.CronTask.CronExecuteTime, deleteFunc); err != nil {
return errs.Wrap(err)
}
log.ZInfo(ctx, "start cron task", "CronExecuteTime", config.CronTask.CronExecuteTime)
crontab.Start()
<-ctx.Done()
return nil
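Both cron jobs are registered against the same CronExecuteTime spec and run on one scheduler. A minimal standalone sketch, assuming the robfig/cron v3 API that the crontab.AddFunc/Start calls suggest; "0 2 * * *" (02:00 daily) is only an example spec:

package main

import (
	"log"

	"github.com/robfig/cron/v3"
)

func main() {
	c := cron.New()
	// Register both jobs on the shared spec, as the diff does with
	// config.CronTask.CronExecuteTime.
	if _, err := c.AddFunc("0 2 * * *", func() { log.Println("clear expired chat records") }); err != nil {
		log.Fatal(err)
	}
	if _, err := c.AddFunc("0 2 * * *", func() { log.Println("delete outdated objects") }); err != nil {
		log.Fatal(err)
	}
	c.Start() // fires jobs on schedule until Stop is called
	select {} // block forever, mirroring <-ctx.Done() above
}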