Mirror of https://github.com/openimsdk/open-im-server.git, synced 2026-04-28 14:29:19 +08:00
feat: optimize code and support running in single process mode (#3142)
* pb
* fix: Modifying other fields while setting IsPrivateChat does not take effect
* fix: quote message error revoke
* refactoring scheduled tasks
* refactoring scheduled tasks
* refactoring scheduled tasks
* refactoring scheduled tasks
* refactoring scheduled tasks
* refactoring scheduled tasks
* upgrading pkg tools
* fix
* fix
* optimize log output
* feat: support GetLastMessage
* feat: support GetLastMessage
* feat: s3 switch
* feat: s3 switch
* fix: GetUsersOnline
* feat: SendBusinessNotification supported configuration parameters
* feat: SendBusinessNotification supported configuration parameters
* feat: SendBusinessNotification supported configuration parameters
* feat: seq conversion failed without exiting
* monolithic
* fix: DeleteDoc crash
* fix: DeleteDoc crash
* fix: monolithic
* fix: monolithic
* fix: fill send time
* fix: fill send time
* fix: crash caused by withdrawing messages from users who have left the group
* fix: mq
* fix: mq
* fix: user msg timestamp
* fix: mq
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* seq read config
* seq read config
* 1
* 1
* fix: the source message of the reference is withdrawn, and the referenced message is deleted
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
* 1
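The heart of the single-process change shows up in the signatures changed below: each subsystem's Start now receives a shared discovery connection (discovery.Conn) and a shared grpc.ServiceRegistrar instead of building its own registry, listener, and signal handling. The snippet below is only an illustrative, self-contained sketch of that wiring pattern; the Conn and ServiceRegistrar interfaces and the fake services are placeholders, not the project's real packages or its actual main.

```go
// Sketch only: mimics the pattern where one process creates a single shared
// discovery connection and one gRPC registrar, then starts every service with
// them. All types below are stand-ins, not open-im-server APIs.
package main

import (
	"context"
	"fmt"
)

// Stand-ins for discovery.Conn and grpc.ServiceRegistrar used in this diff.
type Conn interface{ Target() string }
type ServiceRegistrar interface{ RegisterService(name string) }

type fakeConn struct{}

func (fakeConn) Target() string { return "etcd://127.0.0.1:2379" }

type fakeServer struct{}

func (fakeServer) RegisterService(name string) { fmt.Println("registered", name) }

// Every subsystem follows the same new shape: Start(ctx, sharedClient, sharedServer).
type startFunc func(ctx context.Context, client Conn, server ServiceRegistrar) error

func apiStart(ctx context.Context, c Conn, s ServiceRegistrar) error      { s.RegisterService("api"); return nil }
func gatewayStart(ctx context.Context, c Conn, s ServiceRegistrar) error  { s.RegisterService("msggateway"); return nil }
func transferStart(ctx context.Context, c Conn, s ServiceRegistrar) error { s.RegisterService("msgtransfer"); return nil }

func main() {
	ctx := context.Background()
	var client Conn = fakeConn{}            // one discovery connection per process
	var server ServiceRegistrar = fakeServer{} // one gRPC registrar per process
	for _, start := range []startFunc{apiStart, gatewayStart, transferStart} {
		if err := start(ctx, client, server); err != nil {
			panic(err)
		}
	}
}
```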
@@ -30,16 +30,14 @@ type ConfigManager struct {
client *clientv3.Client
configPath string
runtimeEnv string
}
func NewConfigManager(IMAdminUserID []string, cfg *config.AllConfig, client *clientv3.Client, configPath string, runtimeEnv string) *ConfigManager {
func NewConfigManager(IMAdminUserID []string, cfg *config.AllConfig, client *clientv3.Client, configPath string) *ConfigManager {
cm := &ConfigManager{
imAdminUserID: IMAdminUserID,
config: cfg,
client: client,
configPath: configPath,
runtimeEnv: runtimeEnv,
}
return cm
}

@@ -73,7 +71,7 @@ func (cm *ConfigManager) GetConfig(c *gin.Context) {
func (cm *ConfigManager) GetConfigList(c *gin.Context) {
var resp apistruct.GetConfigListResp
resp.ConfigNames = cm.config.GetConfigNames()
resp.Environment = runtimeenv.PrintRuntimeEnvironment()
resp.Environment = runtimeenv.RuntimeEnvironment()
resp.Version = version.Version
apiresp.GinSuccess(c, resp)

@@ -209,13 +207,7 @@ func (cm *ConfigManager) resetConfig(c *gin.Context, checkChange bool, ops ...cl
changedKeys := make([]string, 0, len(configMap))
for k, v := range configMap {
err := config.Load(
cm.configPath,
k,
config.EnvPrefixMap[k],
cm.runtimeEnv,
v.new,
)
err := config.Load(cm.configPath, k, config.EnvPrefixMap[k], v.new)
if err != nil {
log.ZError(c, "load config failed", err)
continue
+48 -120

@@ -20,157 +20,85 @@ import (
"fmt"
"net"
"net/http"
"os"
"os/signal"
"strconv"
"syscall"
"time"
conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/network"
"github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
type Config struct {
*conf.AllConfig
conf.AllConfig
RuntimeEnv string
ConfigPath string
ConfigPath conf.Path
Index conf.Index
}
func Start(ctx context.Context, index int, config *Config) error {
apiPort, err := datautil.GetElemByIndex(config.API.Api.Ports, index)
func Start(ctx context.Context, config *Config, client discovery.Conn, service grpc.ServiceRegistrar) error {
apiPort, err := datautil.GetElemByIndex(config.API.Api.Ports, int(config.Index))
if err != nil {
return err
}
config.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
client, err := kdisc.NewDiscoveryRegister(&config.Discovery, config.RuntimeEnv, []string{
config.Discovery.RpcService.MessageGateway,
})
if err != nil {
return errs.WrapMsg(err, "failed to register discovery service")
}
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
var (
netDone = make(chan struct{}, 1)
netErr error
prometheusPort int
)
registerIP, err := network.GetRpcRegisterIP("")
if err != nil {
return err
}
getAutoPort := func() (net.Listener, int, error) {
registerAddr := net.JoinHostPort(registerIP, "0")
listener, err := net.Listen("tcp", registerAddr)
if err != nil {
return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
}
_, portStr, _ := net.SplitHostPort(listener.Addr().String())
port, _ := strconv.Atoi(portStr)
return listener, port, nil
}
if config.API.Prometheus.AutoSetPorts && config.Discovery.Enable != conf.ETCD {
return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
}
router, err := newGinRouter(ctx, client, config)
if err != nil {
return err
}
if config.API.Prometheus.Enable {
var (
listener net.Listener
)
if config.API.Prometheus.AutoSetPorts {
listener, prometheusPort, err = getAutoPort()
if err != nil {
return err
}
etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
_, err = etcdClient.Put(ctx, prommetrics.BuildDiscoveryKey(prommetrics.APIKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
if err != nil {
return errs.WrapMsg(err, "etcd put err")
}
} else {
prometheusPort, err = datautil.GetElemByIndex(config.API.Prometheus.Ports, index)
if err != nil {
return err
}
listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
if err != nil {
return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
}
apiCtx, apiCancel := context.WithCancelCause(context.Background())
done := make(chan struct{})
go func() {
httpServer := &http.Server{
Handler: router,
Addr: net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort)),
}
go func() {
if err := prommetrics.ApiInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, fmt.Sprintf("api prometheus start err: %d", prometheusPort))
netDone <- struct{}{}
defer close(done)
select {
case <-ctx.Done():
apiCancel(fmt.Errorf("recv ctx %w", context.Cause(ctx)))
case <-apiCtx.Done():
}
log.ZDebug(ctx, "api server is shutting down")
if err := httpServer.Shutdown(context.Background()); err != nil {
log.ZWarn(ctx, "api server shutdown err", err)
}
}()
}
address := net.JoinHostPort(network.GetListenIP(config.API.Api.ListenIP), strconv.Itoa(apiPort))
server := http.Server{Addr: address, Handler: router}
log.CInfo(ctx, "API server is initializing", "runtimeEnv", config.RuntimeEnv, "address", address, "apiPort", apiPort, "prometheusPort", prometheusPort)
go func() {
err = server.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, fmt.Sprintf("api start err: %s", server.Addr))
netDone <- struct{}{}
log.CInfo(ctx, "api server is init", "runtimeEnv", runtimeenv.RuntimeEnvironment(), "address", httpServer.Addr, "apiPort", apiPort)
err := httpServer.ListenAndServe()
if err == nil {
err = errors.New("api done")
}
apiCancel(err)
}()
if config.Discovery.Enable == conf.ETCD {
cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames())
cm.Watch(ctx)
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGTERM)
shutdown := func() error {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
err := server.Shutdown(ctx)
if err != nil {
return errs.WrapMsg(err, "shutdown err")
}
return nil
}
disetcd.RegisterShutDown(shutdown)
//if config.Discovery.Enable == conf.ETCD {
// cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), config.GetConfigNames())
// cm.Watch(ctx)
//}
//sigs := make(chan os.Signal, 1)
//signal.Notify(sigs, syscall.SIGTERM)
//select {
//case val := <-sigs:
// log.ZDebug(ctx, "recv exit", "signal", val.String())
// cancel(fmt.Errorf("signal %s", val.String()))
//case <-ctx.Done():
//}
<-apiCtx.Done()
exitCause := context.Cause(ctx)
log.ZWarn(ctx, "api server exit", exitCause)
timer := time.NewTimer(time.Second * 15)
defer timer.Stop()
select {
case <-sigs:
program.SIGTERMExit()
if err := shutdown(); err != nil {
return err
}
case <-netDone:
close(netDone)
return netErr
case <-timer.C:
log.ZWarn(ctx, "api server graceful stop timeout", nil)
case <-done:
log.ZDebug(ctx, "api server graceful stop done")
}
return nil
return exitCause
}
@@ -2,6 +2,7 @@ package api
import (
"encoding/json"
"errors"
"net/http"
"github.com/gin-gonic/gin"

@@ -11,16 +12,16 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
clientv3 "go.etcd.io/etcd/client/v3"
)
type PrometheusDiscoveryApi struct {
config *Config
client *clientv3.Client
kv discovery.KeyValue
}
func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegistry) *PrometheusDiscoveryApi {
func NewPrometheusDiscoveryApi(config *Config, client discovery.Conn) *PrometheusDiscoveryApi {
api := &PrometheusDiscoveryApi{
config: config,
}

@@ -30,43 +31,26 @@ func NewPrometheusDiscoveryApi(config *Config, client discovery.SvcDiscoveryRegi
return api
}
func (p *PrometheusDiscoveryApi) Enable(c *gin.Context) {
if p.config.Discovery.Enable != conf.ETCD {
c.JSON(http.StatusOK, []struct{}{})
c.Abort()
}
}
func (p *PrometheusDiscoveryApi) discovery(c *gin.Context, key string) {
eResp, err := p.client.Get(c, prommetrics.BuildDiscoveryKey(key))
value, err := p.kv.GetKey(c, prommetrics.BuildDiscoveryKey(key))
if err != nil {
// Log and respond with an error if preparation fails.
apiresp.GinError(c, errs.WrapMsg(err, "etcd get err"))
if errors.Is(err, discovery.ErrNotSupportedKeyValue) {
c.JSON(http.StatusOK, []struct{}{})
return
}
apiresp.GinError(c, errs.WrapMsg(err, "get key value"))
return
}
if len(eResp.Kvs) == 0 {
c.JSON(http.StatusOK, []*prommetrics.Target{})
if len(value) == 0 {
c.JSON(http.StatusOK, []*prommetrics.RespTarget{})
return
}
var (
resp = &prommetrics.RespTarget{
Targets: make([]string, 0, len(eResp.Kvs)),
}
)
for i := range eResp.Kvs {
var target prommetrics.Target
err = json.Unmarshal(eResp.Kvs[i].Value, &target)
if err != nil {
log.ZError(c, "prometheus unmarshal err", errs.Wrap(err))
}
resp.Targets = append(resp.Targets, target.Target)
if resp.Labels == nil {
resp.Labels = target.Labels
}
var resp prommetrics.RespTarget
if err := json.Unmarshal(value, &resp); err != nil {
apiresp.GinError(c, errs.WrapMsg(err, "json unmarshal err"))
return
}
c.JSON(200, []*prommetrics.RespTarget{resp})
c.JSON(http.StatusOK, []*prommetrics.RespTarget{&resp})
}
func (p *PrometheusDiscoveryApi) Api(c *gin.Context) {

@@ -52,7 +52,7 @@ func prommetricsGin() gin.HandlerFunc {
}
}
func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cfg *Config) (*gin.Engine, error) {
func newGinRouter(ctx context.Context, client discovery.Conn, cfg *Config) (*gin.Engine, error) {
authConn, err := client.GetConn(ctx, cfg.Discovery.RpcService.Auth)
if err != nil {
return nil, err

@@ -283,7 +283,7 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
}
{
pd := NewPrometheusDiscoveryApi(cfg, client)
proDiscoveryGroup := r.Group("/prometheus_discovery", pd.Enable)
proDiscoveryGroup := r.Group("/prometheus_discovery")
proDiscoveryGroup.GET("/api", pd.Api)
proDiscoveryGroup.GET("/user", pd.User)
proDiscoveryGroup.GET("/group", pd.Group)

@@ -301,9 +301,8 @@ func newGinRouter(ctx context.Context, client discovery.SvcDiscoveryRegistry, cf
if cfg.Discovery.Enable == config.ETCD {
etcdClient = client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
}
cm := NewConfigManager(cfg.Share.IMAdminUserID, cfg.AllConfig, etcdClient, cfg.ConfigPath, cfg.RuntimeEnv)
cm := NewConfigManager(cfg.Share.IMAdminUserID, &cfg.AllConfig, etcdClient, string(cfg.ConfigPath))
{
configGroup := r.Group("/config", cm.CheckAdmin)
configGroup.POST("/get_config_list", cm.GetConfigList)
configGroup.POST("/get_config", cm.GetConfig)

@@ -29,11 +29,11 @@ import (
type UserApi struct {
Client user.UserClient
discov discovery.SvcDiscoveryRegistry
discov discovery.Conn
config config.RpcService
}
func NewUserApi(client user.UserClient, discov discovery.SvcDiscoveryRegistry, config config.RpcService) UserApi {
func NewUserApi(client user.UserClient, discov discovery.Conn, config config.RpcService) UserApi {
return UserApi{Client: client, discov: discov, config: config}
}
@@ -22,7 +22,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/startrpc"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
"github.com/openimsdk/protocol/sdkws"

@@ -35,7 +34,7 @@ import (
"google.golang.org/grpc"
)
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
func (s *Server) InitServer(ctx context.Context, config *Config, disCov discovery.Conn, server grpc.ServiceRegistrar) error {
userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err

@@ -51,26 +50,26 @@ func (s *Server) InitServer(ctx context.Context, config *Config, disCov discover
return nil
}
func (s *Server) Start(ctx context.Context, index int, conf *Config) error {
return startrpc.Start(ctx, &conf.Discovery, &conf.MsgGateway.Prometheus, conf.MsgGateway.ListenIP,
conf.MsgGateway.RPC.RegisterIP,
conf.MsgGateway.RPC.AutoSetPorts, conf.MsgGateway.RPC.Ports, index,
conf.Discovery.RpcService.MessageGateway,
nil,
conf,
[]string{
conf.Share.GetConfigFileName(),
conf.Discovery.GetConfigFileName(),
conf.MsgGateway.GetConfigFileName(),
conf.WebhooksConfig.GetConfigFileName(),
conf.RedisConfig.GetConfigFileName(),
},
[]string{
conf.Discovery.RpcService.MessageGateway,
},
s.InitServer,
)
}
//func (s *Server) Start(ctx context.Context, index int, conf *Config) error {
// return startrpc.Start(ctx, &conf.Discovery, &conf.MsgGateway.Prometheus, conf.MsgGateway.ListenIP,
// conf.MsgGateway.RPC.RegisterIP,
// conf.MsgGateway.RPC.AutoSetPorts, conf.MsgGateway.RPC.Ports, index,
// conf.Discovery.RpcService.MessageGateway,
// nil,
// conf,
// []string{
// conf.Share.GetConfigFileName(),
// conf.Discovery.GetConfigFileName(),
// conf.MsgGateway.GetConfigFileName(),
// conf.WebhooksConfig.GetConfigFileName(),
// conf.RedisConfig.GetConfigFileName(),
// },
// []string{
// conf.Discovery.RpcService.MessageGateway,
// },
// s.InitServer,
// )
//}
type Server struct {
msggateway.UnimplementedMsgGatewayServer
+54 -15

@@ -19,10 +19,12 @@ import (
"time"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc"
"github.com/openimsdk/tools/log"
)

@@ -33,26 +35,25 @@ type Config struct {
RedisConfig config.Redis
WebhooksConfig config.Webhooks
Discovery config.Discovery
RuntimeEnv string
Index config.Index
}
// Start run ws server.
func Start(ctx context.Context, index int, conf *Config) error {
conf.RuntimeEnv = runtimeenv.PrintRuntimeEnvironment()
log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", conf.RuntimeEnv,
func Start(ctx context.Context, conf *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", runtimeenv.RuntimeEnvironment(),
"rpcPorts", conf.MsgGateway.RPC.Ports,
"wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)
wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, int(conf.Index))
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
dbb := dbbuild.NewBuilder(nil, &conf.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
longServer := NewWsServer(
conf,
WithPort(wsPort),

@@ -67,12 +68,50 @@ func Start(ctx context.Context, index int, conf *Config) error {
return err
})
if err := hubServer.InitServer(ctx, conf, client, server); err != nil {
return err
}
go longServer.ChangeOnlineStatus(4)
netDone := make(chan error)
go func() {
err = hubServer.Start(ctx, index, conf)
netDone <- err
}()
return hubServer.LongConnServer.Run(netDone)
return hubServer.LongConnServer.Run(ctx)
}
//
//// Start run ws server.
//func Start(ctx context.Context, index int, conf *Config) error {
// log.CInfo(ctx, "MSG-GATEWAY server is initializing", "runtimeEnv", runtimeenv.RuntimeEnvironment(),
// "rpcPorts", conf.MsgGateway.RPC.Ports,
// "wsPort", conf.MsgGateway.LongConnSvr.Ports, "prometheusPorts", conf.MsgGateway.Prometheus.Ports)
// wsPort, err := datautil.GetElemByIndex(conf.MsgGateway.LongConnSvr.Ports, index)
// if err != nil {
// return err
// }
//
// rdb, err := redisutil.NewRedisClient(ctx, conf.RedisConfig.Build())
// if err != nil {
// return err
// }
// longServer := NewWsServer(
// conf,
// WithPort(wsPort),
// WithMaxConnNum(int64(conf.MsgGateway.LongConnSvr.WebsocketMaxConnNum)),
// WithHandshakeTimeout(time.Duration(conf.MsgGateway.LongConnSvr.WebsocketTimeout)*time.Second),
// WithMessageMaxMsgLength(conf.MsgGateway.LongConnSvr.WebsocketMaxMsgLen),
// )
//
// hubServer := NewServer(longServer, conf, func(srv *Server) error {
// var err error
// longServer.online, err = rpccache.NewOnlineCache(srv.userClient, nil, rdb, false, longServer.subscriberUserOnlineStatusChanges)
// return err
// })
//
// go longServer.ChangeOnlineStatus(4)
//
// netDone := make(chan error)
// go func() {
// err = hubServer.Start(ctx, index, conf)
// netDone <- err
// }()
// return hubServer.LongConnServer.Run(netDone)
//}

@@ -2,7 +2,6 @@ package msggateway
import (
"context"
"errors"
"fmt"
"net/http"
"sync"

@@ -11,7 +10,6 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
pbAuth "github.com/openimsdk/protocol/auth"

@@ -23,19 +21,18 @@ import (
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/stringutil"
"golang.org/x/sync/errgroup"
)
type LongConnServer interface {
Run(done chan error) error
Run(ctx context.Context) error
wsHandler(w http.ResponseWriter, r *http.Request)
GetUserAllCons(userID string) ([]*Client, bool)
GetUserPlatformCons(userID string, platform int) ([]*Client, bool, bool)
Validate(s any) error
SetDiscoveryRegistry(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config) error
SetDiscoveryRegistry(ctx context.Context, client discovery.Conn, config *Config) error
KickUserConn(client *Client) error
UnRegister(c *Client)
SetKickHandlerInfo(i *kickHandler)

@@ -60,7 +57,7 @@ type WsServer struct {
handshakeTimeout time.Duration
writeBufferSize int
validate *validator.Validate
disCov discovery.SvcDiscoveryRegistry
disCov discovery.Conn
Compressor
//Encoder
MessageHandler

@@ -75,7 +72,7 @@ type kickHandler struct {
newClient *Client
}
func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.SvcDiscoveryRegistry, config *Config) error {
func (ws *WsServer) SetDiscoveryRegistry(ctx context.Context, disCov discovery.Conn, config *Config) error {
userConn, err := disCov.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err

@@ -158,19 +155,14 @@ func NewWsServer(msgGatewayConfig *Config, opts ...Option) *WsServer {
}
}
func (ws *WsServer) Run(done chan error) error {
var (
client *Client
netErr error
shutdownDone = make(chan struct{}, 1)
)
server := http.Server{Addr: ":" + stringutil.IntToString(ws.port), Handler: nil}
func (ws *WsServer) Run(ctx context.Context) error {
var client *Client
ctx, cancel := context.WithCancelCause(ctx)
go func() {
for {
select {
case <-shutdownDone:
case <-ctx.Done():
return
case client = <-ws.registerChan:
ws.registerClient(client)

@@ -181,57 +173,56 @@ func (ws *WsServer) Run(done chan error) error {
}
}
}()
netDone := make(chan struct{}, 1)
go func() {
http.HandleFunc("/", ws.wsHandler)
err := server.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, "ws start err", server.Addr)
netDone <- struct{}{}
}
}()
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
shutDown := func() error {
sErr := server.Shutdown(ctx)
if sErr != nil {
return errs.WrapMsg(sErr, "shutdown err")
}
close(shutdownDone)
return nil
}
etcd.RegisterShutDown(shutDown)
defer cancel()
var err error
select {
case err = <-done:
if err := shutDown(); err != nil {
return err
}
if err != nil {
return err
}
case <-netDone:
}
return netErr
done := make(chan struct{})
go func() {
wsServer := http.Server{Addr: fmt.Sprintf(":%d", ws.port), Handler: nil}
http.HandleFunc("/", ws.wsHandler)
go func() {
defer close(done)
<-ctx.Done()
_ = wsServer.Shutdown(context.Background())
}()
err := wsServer.ListenAndServe()
if err == nil {
err = fmt.Errorf("http server closed")
}
cancel(fmt.Errorf("msg gateway %w", err))
}()
<-ctx.Done()
timeout := time.NewTimer(time.Second * 15)
defer timeout.Stop()
select {
case <-timeout.C:
log.ZWarn(ctx, "msg gateway graceful stop timeout", nil)
case <-done:
log.ZDebug(ctx, "msg gateway graceful stop done")
}
return context.Cause(ctx)
}
var concurrentRequest = 3
const concurrentRequest = 3
func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *Client) error {
conns, err := ws.disCov.GetConns(ctx, ws.msgGatewayConfig.Discovery.RpcService.MessageGateway)
if err != nil {
return err
}
if len(conns) == 0 || (len(conns) == 1 && ws.disCov.IsSelfNode(conns[0])) {
return nil
}
wg := errgroup.Group{}
wg.SetLimit(concurrentRequest)
// Online push user online message to other node
for _, v := range conns {
v := v
log.ZDebug(ctx, " sendUserOnlineInfoToOtherNode conn ", "target", v.Target())
if v.Target() == ws.disCov.GetSelfConnTarget() {
log.ZDebug(ctx, "Filter out this node", "node", v.Target())
log.ZDebug(ctx, "sendUserOnlineInfoToOtherNode conn")
if ws.disCov.IsSelfNode(v) {
log.ZDebug(ctx, "Filter out this node")
continue
}

@@ -242,7 +233,7 @@ func (ws *WsServer) sendUserOnlineInfoToOtherNode(ctx context.Context, client *C
PlatformID: int32(client.PlatformID), Token: client.token,
})
if err != nil {
log.ZWarn(ctx, "MultiTerminalLoginCheck err", err, "node", v.Target())
log.ZWarn(ctx, "MultiTerminalLoginCheck err", err)
}
return nil
})
+93 -154

@@ -16,51 +16,35 @@ package msgtransfer
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"os/signal"
"strconv"
"syscall"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/etcd"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/network"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/mq"
"github.com/openimsdk/tools/utils/runtimeenv"
conf "github.com/openimsdk/open-im-server/v3/pkg/common/config"
discRegister "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/system/program"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
type MsgTransfer struct {
historyConsumer mq.Consumer
historyMongoConsumer mq.Consumer
// This consumer aggregated messages, subscribed to the topic:toRedis,
// the message is stored in redis, Incr Redis, and then the message is sent to toPush topic for push,
// and the message is sent to toMongo topic for persistence
historyCH *OnlineHistoryRedisConsumerHandler
historyHandler *OnlineHistoryRedisConsumerHandler
//This consumer handle message to mongo
historyMongoCH *OnlineHistoryMongoConsumerHandler
ctx context.Context
cancel context.CancelFunc
runTimeEnv string
historyMongoHandler *OnlineHistoryMongoConsumerHandler
ctx context.Context
//cancel context.CancelFunc
}
type Config struct {

@@ -71,48 +55,59 @@ type Config struct {
Share conf.Share
WebhooksConfig conf.Webhooks
Discovery conf.Discovery
Index conf.Index
}
func Start(ctx context.Context, index int, config *Config) error {
runTimeEnv := runtimeenv.PrintRuntimeEnvironment()
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
builder := mqbuild.NewBuilder(&config.KafkaConfig)
log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runTimeEnv, "prometheusPorts",
config.MsgTransfer.Prometheus.Ports, "index", index)
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
log.CInfo(ctx, "MSG-TRANSFER server is initializing", "runTimeEnv", runtimeenv.RuntimeEnvironment(), "prometheusPorts",
config.MsgTransfer.Prometheus.Ports, "index", config.Index)
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
client, err := discRegister.NewDiscoveryRegister(&config.Discovery, runTimeEnv, nil)
//if config.Discovery.Enable == conf.ETCD {
// cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), []string{
// config.MsgTransfer.GetConfigFileName(),
// config.RedisConfig.GetConfigFileName(),
// config.MongodbConfig.GetConfigFileName(),
// config.KafkaConfig.GetConfigFileName(),
// config.Share.GetConfigFileName(),
// config.WebhooksConfig.GetConfigFileName(),
// config.Discovery.GetConfigFileName(),
// conf.LogConfigFileName,
// })
// cm.Watch(ctx)
//}
mongoProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToMongoTopic)
if err != nil {
return err
}
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")))
if config.Discovery.Enable == conf.ETCD {
cm := disetcd.NewConfigManager(client.(*etcd.SvcDiscoveryRegistryImpl).GetClient(), []string{
config.MsgTransfer.GetConfigFileName(),
config.RedisConfig.GetConfigFileName(),
config.MongodbConfig.GetConfigFileName(),
config.KafkaConfig.GetConfigFileName(),
config.Share.GetConfigFileName(),
config.WebhooksConfig.GetConfigFileName(),
config.Discovery.GetConfigFileName(),
conf.LogConfigFileName,
})
cm.Watch(ctx)
pushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToPushTopic)
if err != nil {
return err
}
msgDocModel, err := mgo.NewMsgMongo(mgocli.GetDB())
if err != nil {
return err
}
msgModel := redis.NewMsgCache(rdb, msgDocModel)
var msgModel cache.MsgCache
if rdb == nil {
cm, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
msgModel = mcache.NewMsgCache(cm, msgDocModel)
} else {
msgModel = redis.NewMsgCache(rdb, msgDocModel)
}
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err

@@ -123,124 +118,68 @@ func Start(ctx context.Context, index int, config *Config) error {
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
msgTransferDatabase, err := controller.NewMsgTransferDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, mongoProducer, pushProducer)
if err != nil {
return err
}
historyCH, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase)
historyConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToRedisTopic)
if err != nil {
return err
}
historyMongoCH, err := NewOnlineHistoryMongoConsumerHandler(&config.KafkaConfig, msgTransferDatabase)
historyMongoConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToMongoTopic)
if err != nil {
return err
}
historyHandler, err := NewOnlineHistoryRedisConsumerHandler(ctx, client, config, msgTransferDatabase)
if err != nil {
return err
}
historyMongoHandler := NewOnlineHistoryMongoConsumerHandler(msgTransferDatabase)
msgTransfer := &MsgTransfer{
historyCH: historyCH,
historyMongoCH: historyMongoCH,
runTimeEnv: runTimeEnv,
historyConsumer: historyConsumer,
historyMongoConsumer: historyMongoConsumer,
historyHandler: historyHandler,
historyMongoHandler: historyMongoHandler,
}
return msgTransfer.Start(index, config, client)
return msgTransfer.Start(ctx)
}
func (m *MsgTransfer) Start(index int, config *Config, client discovery.SvcDiscoveryRegistry) error {
m.ctx, m.cancel = context.WithCancel(context.Background())
var (
netDone = make(chan struct{}, 1)
netErr error
)
func (m *MsgTransfer) Start(ctx context.Context) error {
var cancel context.CancelCauseFunc
m.ctx, cancel = context.WithCancelCause(ctx)
go m.historyCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyCH)
go m.historyMongoCH.historyConsumerGroup.RegisterHandleAndConsumer(m.ctx, m.historyMongoCH)
go m.historyCH.HandleUserHasReadSeqMessages(m.ctx)
err := m.historyCH.redisMessageBatches.Start()
go func() {
for {
if err := m.historyConsumer.Subscribe(m.ctx, m.historyHandler.HandlerRedisMessage); err != nil {
cancel(fmt.Errorf("history consumer %w", err))
log.ZError(m.ctx, "historyConsumer err", err)
return
}
}
}()
go func() {
fn := func(ctx context.Context, key string, value []byte) error {
m.historyMongoHandler.HandleChatWs2Mongo(ctx, key, value)
return nil
}
for {
if err := m.historyMongoConsumer.Subscribe(m.ctx, fn); err != nil {
cancel(fmt.Errorf("history mongo consumer %w", err))
log.ZError(m.ctx, "historyMongoConsumer err", err)
return
}
}
}()
go m.historyHandler.HandleUserHasReadSeqMessages(m.ctx)
err := m.historyHandler.redisMessageBatches.Start()
if err != nil {
return err
}
registerIP, err := network.GetRpcRegisterIP("")
if err != nil {
return err
}
getAutoPort := func() (net.Listener, int, error) {
registerAddr := net.JoinHostPort(registerIP, "0")
listener, err := net.Listen("tcp", registerAddr)
if err != nil {
return nil, 0, errs.WrapMsg(err, "listen err", "registerAddr", registerAddr)
}
_, portStr, _ := net.SplitHostPort(listener.Addr().String())
port, _ := strconv.Atoi(portStr)
return listener, port, nil
}
if config.MsgTransfer.Prometheus.AutoSetPorts && config.Discovery.Enable != conf.ETCD {
return errs.New("only etcd support autoSetPorts", "RegisterName", "api").Wrap()
}
if config.MsgTransfer.Prometheus.Enable {
var (
listener net.Listener
prometheusPort int
)
if config.MsgTransfer.Prometheus.AutoSetPorts {
listener, prometheusPort, err = getAutoPort()
if err != nil {
return err
}
etcdClient := client.(*etcd.SvcDiscoveryRegistryImpl).GetClient()
_, err = etcdClient.Put(context.TODO(), prommetrics.BuildDiscoveryKey(prommetrics.MessageTransferKeyName), jsonutil.StructToJsonString(prommetrics.BuildDefaultTarget(registerIP, prometheusPort)))
if err != nil {
return errs.WrapMsg(err, "etcd put err")
}
} else {
prometheusPort, err = datautil.GetElemByIndex(config.MsgTransfer.Prometheus.Ports, index)
if err != nil {
return err
}
listener, err = net.Listen("tcp", fmt.Sprintf(":%d", prometheusPort))
if err != nil {
return errs.WrapMsg(err, "listen err", "addr", fmt.Sprintf(":%d", prometheusPort))
}
}
go func() {
defer func() {
if r := recover(); r != nil {
log.ZPanic(m.ctx, "MsgTransfer Start Panic", errs.ErrPanic(r))
}
}()
if err := prommetrics.TransferInit(listener); err != nil && !errors.Is(err, http.ErrServerClosed) {
netErr = errs.WrapMsg(err, "prometheus start error", "prometheusPort", prometheusPort)
netDone <- struct{}{}
}
}()
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGTERM)
select {
case <-sigs:
program.SIGTERMExit()
// graceful close kafka client.
m.cancel()
m.historyCH.redisMessageBatches.Close()
m.historyCH.Close()
m.historyCH.historyConsumerGroup.Close()
m.historyMongoCH.historyConsumerGroup.Close()
return nil
case <-netDone:
m.cancel()
m.historyCH.redisMessageBatches.Close()
m.historyCH.Close()
m.historyCH.historyConsumerGroup.Close()
m.historyMongoCH.historyConsumerGroup.Close()
close(netDone)
return netErr
}
<-m.ctx.Done()
return context.Cause(m.ctx)
}
@@ -18,14 +18,13 @@ import (
"context"
"encoding/json"
"errors"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/discovery"
"strconv"
"strings"
"sync"
"time"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/discovery"
"github.com/go-redis/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"

@@ -37,7 +36,6 @@ import (
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/stringutil"
"google.golang.org/protobuf/proto"
)

@@ -64,9 +62,7 @@ type userHasReadSeq struct {
}
type OnlineHistoryRedisConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup
redisMessageBatches *batcher.Batcher[sarama.ConsumerMessage]
redisMessageBatches *batcher.Batcher[ConsumerMessage]
msgTransferDatabase controller.MsgTransferDatabase
conversationUserHasReadChan chan *userHasReadSeq

@@ -76,12 +72,13 @@ type OnlineHistoryRedisConsumerHandler struct {
conversationClient *rpcli.ConversationClient
}
func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.SvcDiscoveryRegistry, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) {
kafkaConf := config.KafkaConfig
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToRedisGroupID, []string{kafkaConf.ToRedisTopic}, false)
if err != nil {
return nil, err
}
type ConsumerMessage struct {
Ctx context.Context
Key string
Value []byte
}
func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.Conn, config *Config, database controller.MsgTransferDatabase) (*OnlineHistoryRedisConsumerHandler, error) {
groupConn, err := client.GetConn(ctx, config.Discovery.RpcService.Group)
if err != nil {
return nil, err

@@ -97,7 +94,7 @@ func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.
och.conversationClient = rpcli.NewConversationClient(conversationConn)
och.wg.Add(1)
b := batcher.New[sarama.ConsumerMessage](
b := batcher.New[ConsumerMessage](
batcher.WithSize(size),
batcher.WithWorker(worker),
batcher.WithInterval(interval),

@@ -109,16 +106,15 @@ func NewOnlineHistoryRedisConsumerHandler(ctx context.Context, client discovery.
hashCode := stringutil.GetHashCode(key)
return int(hashCode) % och.redisMessageBatches.Worker()
}
b.Key = func(consumerMessage *sarama.ConsumerMessage) string {
return string(consumerMessage.Key)
b.Key = func(consumerMessage *ConsumerMessage) string {
return consumerMessage.Key
}
b.Do = och.do
och.redisMessageBatches = b
och.historyConsumerGroup = historyConsumerGroup
return &och, nil
}
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[sarama.ConsumerMessage]) {
func (och *OnlineHistoryRedisConsumerHandler) do(ctx context.Context, channelID int, val *batcher.Msg[ConsumerMessage]) {
ctx = mcontext.WithTriggerIDContext(ctx, val.TriggerID())
ctxMessages := och.parseConsumerMessages(ctx, val.Val())
ctx = withAggregationCtx(ctx, ctxMessages)

@@ -189,7 +185,7 @@ func (och *OnlineHistoryRedisConsumerHandler) doSetReadSeq(ctx context.Context,
}
func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*sarama.ConsumerMessage) []*ContextMsg {
func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.Context, consumerMessages []*ConsumerMessage) []*ContextMsg {
var ctxMessages []*ContextMsg
for i := 0; i < len(consumerMessages); i++ {
ctxMsg := &ContextMsg{}

@@ -199,16 +195,9 @@ func (och *OnlineHistoryRedisConsumerHandler) parseConsumerMessages(ctx context.
log.ZWarn(ctx, "msg_transfer Unmarshal msg err", err, string(consumerMessages[i].Value))
continue
}
var arr []string
for i, header := range consumerMessages[i].Headers {
arr = append(arr, strconv.Itoa(i), string(header.Key), string(header.Value))
}
log.ZDebug(ctx, "consumer.kafka.GetContextWithMQHeader", "len", len(consumerMessages[i].Headers),
"header", strings.Join(arr, ", "))
ctxMsg.ctx = kafka.GetContextWithMQHeader(consumerMessages[i].Headers)
ctxMsg.ctx = consumerMessages[i].Ctx
ctxMsg.message = msgFromMQ
log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key",
string(consumerMessages[i].Key))
log.ZDebug(ctx, "message parse finish", "message", msgFromMQ, "key", consumerMessages[i].Key)
ctxMessages = append(ctxMessages, ctxMsg)
}
return ctxMessages

@@ -383,7 +372,9 @@ func (och *OnlineHistoryRedisConsumerHandler) Close() {
func (och *OnlineHistoryRedisConsumerHandler) toPushTopic(ctx context.Context, key, conversationID string, msgs []*ContextMsg) {
for _, v := range msgs {
log.ZDebug(ctx, "push msg to topic", "msg", v.message.String())
_, _, _ = och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message)
if err := och.msgTransferDatabase.MsgToPushMQ(v.ctx, key, conversationID, v.message); err != nil {
log.ZError(ctx, "msg to push topic error", err, "msg", v.message.String())
}
}
}

@@ -401,35 +392,10 @@ func withAggregationCtx(ctx context.Context, values []*ContextMsg) context.Conte
return mcontext.SetOperationID(ctx, allMessageOperationID)
}
func (och *OnlineHistoryRedisConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (och *OnlineHistoryRedisConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error {
func (och *OnlineHistoryRedisConsumerHandler) HandlerRedisMessage(ctx context.Context, key string, value []byte) error { // a instance in the consumer group
err := och.redisMessageBatches.Put(ctx, &ConsumerMessage{Ctx: ctx, Key: key, Value: value})
if err != nil {
log.ZWarn(ctx, "put msg to error", err, "key", key, "value", value)
}
return nil
}
func (och *OnlineHistoryRedisConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession,
claim sarama.ConsumerGroupClaim) error { // a instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
och.redisMessageBatches.OnComplete = func(lastMessage *sarama.ConsumerMessage, totalCount int) {
session.MarkMessage(lastMessage, "")
session.Commit()
}
for {
select {
case msg, ok := <-claim.Messages():
if !ok {
return nil
}
if len(msg.Value) == 0 {
continue
}
err := och.redisMessageBatches.Put(context.Background(), msg)
if err != nil {
log.ZWarn(context.Background(), "put msg to error", err, "msg", msg)
}
case <-session.Context().Done():
return nil
}
}
}
@@ -17,36 +17,24 @@ package msgtransfer
import (
"context"
"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
pbmsg "github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mq/kafka"
"google.golang.org/protobuf/proto"
)
type OnlineHistoryMongoConsumerHandler struct {
historyConsumerGroup *kafka.MConsumerGroup
msgTransferDatabase controller.MsgTransferDatabase
msgTransferDatabase controller.MsgTransferDatabase
}
func NewOnlineHistoryMongoConsumerHandler(kafkaConf *config.Kafka, database controller.MsgTransferDatabase) (*OnlineHistoryMongoConsumerHandler, error) {
historyConsumerGroup, err := kafka.NewMConsumerGroup(kafkaConf.Build(), kafkaConf.ToMongoGroupID, []string{kafkaConf.ToMongoTopic}, true)
if err != nil {
return nil, err
func NewOnlineHistoryMongoConsumerHandler(database controller.MsgTransferDatabase) *OnlineHistoryMongoConsumerHandler {
return &OnlineHistoryMongoConsumerHandler{
msgTransferDatabase: database,
}
mc := &OnlineHistoryMongoConsumerHandler{
historyConsumerGroup: historyConsumerGroup,
msgTransferDatabase: database,
}
return mc, nil
}
func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Context, cMsg *sarama.ConsumerMessage, key string, session sarama.ConsumerGroupSession) {
msg := cMsg.Value
func (mc *OnlineHistoryMongoConsumerHandler) HandleChatWs2Mongo(ctx context.Context, key string, msg []byte) {
msgFromMQ := pbmsg.MsgDataToMongoByMQ{}
err := proto.Unmarshal(msg, &msgFromMQ)
if err != nil {

@@ -54,7 +42,7 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
return
}
if len(msgFromMQ.MsgData) == 0 {
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "cMsg", cMsg)
log.ZError(ctx, "msgFromMQ.MsgData is empty", nil, "key", key, "msg", msg)
return
}
log.ZDebug(ctx, "mongo consumer recv msg", "msgs", msgFromMQ.String())

@@ -82,22 +70,3 @@ func (mc *OnlineHistoryMongoConsumerHandler) handleChatWs2Mongo(ctx context.Cont
// msgFromMQ.MsgData, "conversationID", msgFromMQ.ConversationID)
//}
}
func (*OnlineHistoryMongoConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error { return nil }
func (*OnlineHistoryMongoConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error { return nil }
func (mc *OnlineHistoryMongoConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // an instance in the consumer group
log.ZDebug(context.Background(), "online new session msg come", "highWaterMarkOffset",
claim.HighWaterMarkOffset(), "topic", claim.Topic(), "partition", claim.Partition())
for msg := range claim.Messages() {
ctx := mc.historyConsumerGroup.GetContextFromMsg(msg)
if len(msg.Value) != 0 {
mc.handleChatWs2Mongo(ctx, msg, string(msg.Key), sess)
} else {
log.ZError(ctx, "mongo msg get from kafka but is nil", nil, "conversationID", msg.Key)
}
sess.MarkMessage(msg, "")
}
return nil
}
@@ -17,6 +17,7 @@ package push
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
|
||||
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
|
||||
|
||||
@@ -3,7 +3,6 @@ package push
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/IBM/sarama"
|
||||
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
|
||||
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
|
||||
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
|
||||
@@ -12,40 +11,21 @@ import (
|
||||
"github.com/openimsdk/protocol/sdkws"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/mq/kafka"
|
||||
"github.com/openimsdk/tools/utils/jsonutil"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type OfflinePushConsumerHandler struct {
|
||||
OfflinePushConsumerGroup *kafka.MConsumerGroup
|
||||
offlinePusher offlinepush.OfflinePusher
|
||||
offlinePusher offlinepush.OfflinePusher
|
||||
}
|
||||
|
||||
func NewOfflinePushConsumerHandler(config *Config, offlinePusher offlinepush.OfflinePusher) (*OfflinePushConsumerHandler, error) {
|
||||
var offlinePushConsumerHandler OfflinePushConsumerHandler
|
||||
var err error
|
||||
offlinePushConsumerHandler.offlinePusher = offlinePusher
|
||||
offlinePushConsumerHandler.OfflinePushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToOfflineGroupID,
|
||||
[]string{config.KafkaConfig.ToOfflinePushTopic}, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func NewOfflinePushConsumerHandler(offlinePusher offlinepush.OfflinePusher) *OfflinePushConsumerHandler {
|
||||
return &OfflinePushConsumerHandler{
|
||||
offlinePusher: offlinePusher,
|
||||
}
|
||||
return &offlinePushConsumerHandler, nil
|
||||
}
|
||||
|
||||
func (*OfflinePushConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }
|
||||
func (*OfflinePushConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
|
||||
func (o *OfflinePushConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
|
||||
for msg := range claim.Messages() {
|
||||
ctx := o.OfflinePushConsumerGroup.GetContextFromMsg(msg)
|
||||
o.handleMsg2OfflinePush(ctx, msg.Value)
|
||||
sess.MarkMessage(msg, "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OfflinePushConsumerHandler) handleMsg2OfflinePush(ctx context.Context, msg []byte) {
|
||||
func (o *OfflinePushConsumerHandler) HandleMsg2OfflinePush(ctx context.Context, msg []byte) {
|
||||
offlinePushMsg := pbpush.PushMsgReq{}
|
||||
if err := proto.Unmarshal(msg, &offlinePushMsg); err != nil {
|
||||
log.ZError(ctx, "offline push Unmarshal msg err", err, "msg", string(msg))
|
||||
|
||||
@@ -2,7 +2,7 @@ package push

import (
"context"
"errors"
"fmt"
"sync"

"github.com/openimsdk/protocol/msggateway"
@@ -11,6 +11,7 @@ import (
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/runtimeenv"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"

@@ -30,37 +31,36 @@ func newEmptyOnlinePusher() *emptyOnlinePusher {
return &emptyOnlinePusher{}
}

func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData,
pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
func (emptyOnlinePusher) GetConnsAndOnlinePush(ctx context.Context, msg *sdkws.MsgData, pushToUserIDs []string) (wsResults []*msggateway.SingleMsgToUserResults, err error) {
log.ZInfo(ctx, "emptyOnlinePusher GetConnsAndOnlinePush", nil)
return nil, nil
}
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData,
wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
func (u emptyOnlinePusher) GetOnlinePushFailedUserIDs(ctx context.Context, msg *sdkws.MsgData, wsResults []*msggateway.SingleMsgToUserResults, pushToUserIDs *[]string) []string {
log.ZInfo(ctx, "emptyOnlinePusher GetOnlinePushFailedUserIDs", nil)
return nil
}

func NewOnlinePusher(disCov discovery.SvcDiscoveryRegistry, config *Config) OnlinePusher {

if config.runTimeEnv == conf.KUBERNETES {
return NewDefaultAllNode(disCov, config)
func NewOnlinePusher(disCov discovery.Conn, config *Config) (OnlinePusher, error) {
if conf.Standalone() {
return NewDefaultAllNode(disCov, config), nil
}
if runtimeenv.RuntimeEnvironment() == conf.KUBERNETES {
return NewDefaultAllNode(disCov, config), nil
}
switch config.Discovery.Enable {
case conf.ETCD:
return NewDefaultAllNode(disCov, config)
return NewDefaultAllNode(disCov, config), nil
default:
log.ZError(context.Background(), "NewOnlinePusher is error", errs.Wrap(errors.New("unsupported discovery type")), "type", config.Discovery.Enable)
return nil
return nil, errs.New(fmt.Sprintf("unsupported discovery type %s", config.Discovery.Enable))
}
}

type DefaultAllNode struct {
disCov discovery.SvcDiscoveryRegistry
disCov discovery.Conn
config *Config
}

func NewDefaultAllNode(disCov discovery.SvcDiscoveryRegistry, config *Config) *DefaultAllNode {
func NewDefaultAllNode(disCov discovery.Conn, config *Config) *DefaultAllNode {
return &DefaultAllNode{disCov: disCov, config: config}
}

@@ -166,7 +166,7 @@ func (k *K8sStaticConsistentHash) GetConnsAndOnlinePush(ctx context.Context, msg
}
}
log.ZDebug(ctx, "genUsers send hosts struct:", "usersHost", usersHost)
var usersConns = make(map[*grpc.ClientConn][]string)
var usersConns = make(map[grpc.ClientConnInterface][]string)
for host, userIds := range usersHost {
tconn, _ := k.disCov.GetConn(ctx, host)
usersConns[tconn] = userIds

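The reworked constructor surfaces an unsupported discovery type to the caller instead of logging and returning a nil pusher. A minimal call-site sketch, using only identifiers that appear in this diff:

// Sketch only: how a caller consumes the new (OnlinePusher, error) pair.
onlinePusher, err := NewOnlinePusher(client, config)
if err != nil {
	return err // unsupported discovery type is reported here instead of yielding a nil OnlinePusher
}
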
+77
-25
@@ -2,39 +2,43 @@ package push

import (
"context"
"math/rand"
"strconv"

"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
pbpush "github.com/openimsdk/protocol/push"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/utils/runtimeenv"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"google.golang.org/grpc"
)

type pushServer struct {
pbpush.UnimplementedPushMsgServiceServer
database controller.PushDatabase
disCov discovery.SvcDiscoveryRegistry
disCov discovery.Conn
offlinePusher offlinepush.OfflinePusher
pushCh *ConsumerHandler
offlinePushCh *OfflinePushConsumerHandler
}

type Config struct {
RpcConfig config.Push
RedisConfig config.Redis
MongoConfig config.Mongo
KafkaConfig config.Kafka
NotificationConfig config.Notification
Share config.Share
WebhooksConfig config.Webhooks
LocalCacheConfig config.LocalCache
Discovery config.Discovery
FcmConfigPath string

runTimeEnv string
FcmConfigPath config.Path
}

func (p pushServer) DelUserPushToken(ctx context.Context,
@@ -45,42 +49,90 @@ func (p pushServer) DelUserPushToken(ctx context.Context,
return &pbpush.DelUserPushTokenResp{}, nil
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
config.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()

rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
cacheModel := redis.NewThirdCache(rdb)
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, config.FcmConfigPath)
var cacheModel cache.ThirdCache
if rdb == nil {
mdb, err := dbb.Mongo(ctx)
if err != nil {
return err
}
mc, err := mgo.NewCacheMgo(mdb.GetDB())
if err != nil {
return err
}
cacheModel = mcache.NewThirdCache(mc)
} else {
cacheModel = redis.NewThirdCache(rdb)
}
offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, string(config.FcmConfigPath))
if err != nil {
return err
}
builder := mqbuild.NewBuilder(&config.KafkaConfig)

offlinePushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToOfflinePushTopic)
if err != nil {
return err
}
database := controller.NewPushDatabase(cacheModel, offlinePushProducer)

pushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToPushTopic)
if err != nil {
return err
}
offlinePushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToOfflinePushTopic)
if err != nil {
return err
}

database := controller.NewPushDatabase(cacheModel, &config.KafkaConfig)

consumer, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
pushHandler, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
if err != nil {
return err
}

offlinePushConsumer, err := NewOfflinePushConsumerHandler(config, offlinePusher)
if err != nil {
return err
}
offlineHandler := NewOfflinePushConsumerHandler(offlinePusher)

pbpush.RegisterPushMsgServiceServer(server, &pushServer{
database: database,
disCov: client,
offlinePusher: offlinePusher,
pushCh: consumer,
offlinePushCh: offlinePushConsumer,
})

go consumer.pushConsumerGroup.RegisterHandleAndConsumer(ctx, consumer)
go func() {
pushHandler.WaitCache()
fn := func(ctx context.Context, key string, value []byte) error {
pushHandler.HandleMs2PsChat(ctx, value)
return nil
}
consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
log.ZInfo(consumerCtx, "begin consume messages")
for {
if err := pushConsumer.Subscribe(consumerCtx, fn); err != nil {
log.ZError(consumerCtx, "subscribe err", err)
return
}
}
}()

go offlinePushConsumer.OfflinePushConsumerGroup.RegisterHandleAndConsumer(ctx, offlinePushConsumer)
go func() {
fn := func(ctx context.Context, key string, value []byte) error {
offlineHandler.HandleMsg2OfflinePush(ctx, value)
return nil
}
consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
log.ZInfo(consumerCtx, "begin consume messages")
for {
if err := offlinePushConsumer.Subscribe(consumerCtx, fn); err != nil {
log.ZError(consumerCtx, "subscribe err", err)
return
}
}
}()

return nil
}

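The branch above (use Redis when it is configured, otherwise fall back to a Mongo-backed cache so the new single-process mode can run without Redis) recurs in the auth, msg, and third services below. A condensed sketch of the same branch, error handling shortened, using only identifiers from this diff:

// Sketch of the recurring fallback: prefer Redis, else a Mongo-backed cache.
rdb, err := dbb.Redis(ctx) // nil client when Redis is not configured
if err != nil {
	return err
}
var cacheModel cache.ThirdCache
if rdb == nil { // e.g. standalone / single-process deployment
	mdb, _ := dbb.Mongo(ctx)
	mc, _ := mgo.NewCacheMgo(mdb.GetDB())
	cacheModel = mcache.NewThirdCache(mc)
} else {
	cacheModel = redis.NewThirdCache(rdb)
}
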
@@ -3,13 +3,8 @@ package push
import (
"context"
"encoding/json"
"math/rand"
"strconv"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/IBM/sarama"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush"
"github.com/openimsdk/open-im-server/v3/internal/push/offlinepush/options"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
@@ -17,6 +12,7 @@ import (
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/open-im-server/v3/pkg/util/conversationutil"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/msggateway"
@@ -25,7 +21,6 @@ import (
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mq/kafka"
"github.com/openimsdk/tools/utils/datautil"
"github.com/openimsdk/tools/utils/jsonutil"
"github.com/openimsdk/tools/utils/timeutil"
@@ -34,7 +29,7 @@ import (
)

type ConsumerHandler struct {
pushConsumerGroup *kafka.MConsumerGroup
//pushConsumerGroup mq.Consumer
offlinePusher offlinepush.OfflinePusher
onlinePusher OnlinePusher
pushDatabase controller.PushDatabase
@@ -49,15 +44,7 @@ type ConsumerHandler struct {
conversationClient *rpcli.ConversationClient
}

func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient,
client discovery.SvcDiscoveryRegistry) (*ConsumerHandler, error) {
var consumerHandler ConsumerHandler
var err error
consumerHandler.pushConsumerGroup, err = kafka.NewMConsumerGroup(config.KafkaConfig.Build(), config.KafkaConfig.ToPushGroupID,
[]string{config.KafkaConfig.ToPushTopic}, true)
if err != nil {
return nil, err
}
func NewConsumerHandler(ctx context.Context, config *Config, database controller.PushDatabase, offlinePusher offlinepush.OfflinePusher, rdb redis.UniversalClient, client discovery.Conn) (*ConsumerHandler, error) {
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return nil, err
@@ -74,13 +61,18 @@ func NewConsumerHandler(ctx context.Context, config *Config, database controller
if err != nil {
return nil, err
}
onlinePusher, err := NewOnlinePusher(client, config)
if err != nil {
return nil, err
}
var consumerHandler ConsumerHandler
consumerHandler.userClient = rpcli.NewUserClient(userConn)
consumerHandler.groupClient = rpcli.NewGroupClient(groupConn)
consumerHandler.msgClient = rpcli.NewMsgClient(msgConn)
consumerHandler.conversationClient = rpcli.NewConversationClient(conversationConn)

consumerHandler.offlinePusher = offlinePusher
consumerHandler.onlinePusher = NewOnlinePusher(client, config)
consumerHandler.onlinePusher = onlinePusher
consumerHandler.groupLocalCache = rpccache.NewGroupLocalCache(consumerHandler.groupClient, &config.LocalCacheConfig, rdb)
consumerHandler.conversationLocalCache = rpccache.NewConversationLocalCache(consumerHandler.conversationClient, &config.LocalCacheConfig, rdb)
consumerHandler.webhookClient = webhook.NewWebhookClient(config.WebhooksConfig.URL)
@@ -93,7 +85,7 @@ func NewConsumerHandler(ctx context.Context, config *Config, database controller
return &consumerHandler, nil
}

func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
func (c *ConsumerHandler) HandleMs2PsChat(ctx context.Context, msg []byte) {
msgFromMQ := pbpush.PushMsgReq{}
if err := proto.Unmarshal(msg, &msgFromMQ); err != nil {
log.ZError(ctx, "push Unmarshal msg err", err, "msg", string(msg))
@@ -127,25 +119,12 @@ func (c *ConsumerHandler) handleMs2PsChat(ctx context.Context, msg []byte) {
}
}

func (*ConsumerHandler) Setup(sarama.ConsumerGroupSession) error { return nil }

func (*ConsumerHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (c *ConsumerHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
func (c *ConsumerHandler) WaitCache() {
c.onlineCache.Lock.Lock()
for c.onlineCache.CurrentPhase.Load() < rpccache.DoSubscribeOver {
c.onlineCache.Cond.Wait()
}
c.onlineCache.Lock.Unlock()
ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
log.ZInfo(ctx, "begin consume messages")

for msg := range claim.Messages() {
ctx := c.pushConsumerGroup.GetContextFromMsg(msg)
c.handleMs2PsChat(ctx, msg.Value)
sess.MarkMessage(msg, "")
}
return nil
}

// Push2User Suitable for two types of conversations, one is SingleChatType and the other is NotificationChatType.

@@ -18,11 +18,14 @@ import (
"context"
"errors"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
redis2 "github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/utils/datautil"
"github.com/redis/go-redis/v9"

@@ -43,7 +46,7 @@ import (
type authServer struct {
pbauth.UnimplementedAuthServer
authDatabase controller.AuthDatabase
RegisterCenter discovery.SvcDiscoveryRegistry
RegisterCenter discovery.Conn
config *Config
userClient *rpcli.UserClient
}
@@ -51,15 +54,31 @@ type authServer struct {
type Config struct {
RpcConfig config.Auth
RedisConfig config.Redis
MongoConfig config.Mongo
Share config.Share
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
var token cache.TokenModel
if rdb == nil {
mdb, err := dbb.Mongo(ctx)
if err != nil {
return err
}
mc, err := mgo.NewCacheMgo(mdb.GetDB())
if err != nil {
return err
}
token = mcache.NewTokenCacheModel(mc, config.RpcConfig.TokenPolicy.Expire)
} else {
token = redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire)
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err
@@ -67,7 +86,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
pbauth.RegisterAuthServer(server, &authServer{
RegisterCenter: client,
authDatabase: controller.NewAuthDatabase(
redis2.NewTokenCacheModel(rdb, config.RpcConfig.TokenPolicy.Expire),
token,
config.Share.Secret,
config.RpcConfig.TokenPolicy.Expire,
config.Share.MultiLogin,
@@ -192,7 +211,7 @@ func (s *authServer) forceKickOff(ctx context.Context, userID string, platformID
return err
}
for _, v := range conns {
log.ZDebug(ctx, "forceKickOff", "conn", v.Target())
log.ZDebug(ctx, "forceKickOff", "userID", userID, "platformID", platformID)
client := msggateway.NewMsgGatewayClient(v)
kickReq := &msggateway.KickUserOfflineReq{KickUserIDList: []string{userID}, PlatformID: platformID}
_, err := client.KickUserOffline(ctx, kickReq)

@@ -19,24 +19,22 @@ import (
"sort"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
dbModel "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/msgprocessor"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -66,12 +64,13 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -96,7 +95,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
pbconversation.RegisterConversationServer(server, &conversationServer{
conversationNotificationSender: NewConversationNotificationSender(&config.NotificationConfig, msgClient),
conversationDatabase: controller.NewConversationDatabase(conversationDB,
redis.NewConversationRedis(rdb, &config.LocalCacheConfig, redis.GetRocksCacheOptions(), conversationDB), mgocli.GetTx()),
redis.NewConversationRedis(rdb, &config.LocalCacheConfig, conversationDB), mgocli.GetTx()),
userClient: rpcli.NewUserClient(userConn),
groupClient: rpcli.NewGroupClient(groupConn),
msgClient: msgClient,

@@ -17,13 +17,15 @@ package group
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"math/big"
"math/rand"
"strconv"
"strings"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/callbackstruct"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
@@ -42,8 +44,6 @@ import (
pbgroup "github.com/openimsdk/protocol/group"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/protocol/wrapperspb"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
@@ -76,12 +76,13 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -97,11 +98,6 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}

//userRpcClient := rpcclient.NewUserRpcClient(client, config.Share.RpcRegisterName.User, config.Share.IMAdminUserID)
//msgRpcClient := rpcclient.NewMessageRpcClient(client, config.Share.RpcRegisterName.Msg)
//conversationRpcClient := rpcclient.NewConversationRpcClient(client, config.Share.RpcRegisterName.Conversation)

userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err

+29
-15
@@ -16,22 +16,24 @@ package msg

import (
"context"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/mqbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/notification"
"github.com/openimsdk/open-im-server/v3/pkg/rpccache"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/discovery"
"google.golang.org/grpc"
)
@@ -56,8 +58,8 @@ type Config struct {
// MsgServer encapsulates dependencies required for message handling.
type msgServer struct {
msg.UnimplementedMsgServer
RegisterCenter discovery.SvcDiscoveryRegistry // Service discovery registry for service registration.
MsgDatabase controller.CommonMsgDatabase // Interface for message database operations.
RegisterCenter discovery.Conn // Service discovery registry for service registration.
MsgDatabase controller.CommonMsgDatabase // Interface for message database operations.
StreamMsgDatabase controller.StreamMsgDatabase
UserLocalCache *rpccache.UserLocalCache // Local cache for user data.
FriendLocalCache *rpccache.FriendLocalCache // Local cache for friend data.
@@ -76,12 +78,18 @@ func (m *msgServer) addInterceptorHandler(interceptorFunc ...MessageInterceptorF

}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
builder := mqbuild.NewBuilder(&config.KafkaConfig)
redisProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToRedisTopic)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -89,7 +97,16 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}
msgModel := redis.NewMsgCache(rdb, msgDocModel)
var msgModel cache.MsgCache
if rdb == nil {
cm, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
msgModel = mcache.NewMsgCache(cm, msgDocModel)
} else {
msgModel = redis.NewMsgCache(rdb, msgDocModel)
}
seqConversation, err := mgo.NewSeqConversationMongo(mgocli.GetDB())
if err != nil {
return err
@@ -104,10 +121,6 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
seqUserCache := redis.NewSeqUserCacheRedis(rdb, seqUser)
msgDatabase, err := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, &config.KafkaConfig)
if err != nil {
return err
}
userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
if err != nil {
return err
@@ -125,6 +138,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
return err
}
conversationClient := rpcli.NewConversationClient(conversationConn)
msgDatabase := controller.NewCommonMsgDatabase(msgDocModel, msgModel, seqUserCache, seqConversationCache, redisProducer)
s := &msgServer{
MsgDatabase: msgDatabase,
StreamMsgDatabase: controller.NewStreamMsgDatabase(streamMsg),

@@ -16,26 +16,25 @@ package relation

import (
"context"

"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"

"github.com/openimsdk/tools/mq/memamq"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/relation"
"github.com/openimsdk/protocol/sdkws"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
@@ -47,7 +46,7 @@ type friendServer struct {
db controller.FriendDatabase
blackDatabase controller.BlackDatabase
notificationSender *FriendNotificationSender
RegisterCenter discovery.SvcDiscoveryRegistry
RegisterCenter discovery.Conn
config *Config
webhookClient *webhook.Client
queue *memamq.MemoryQueue
@@ -66,12 +65,13 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}
@@ -114,12 +114,12 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
db: controller.NewFriendDatabase(
friendMongoDB,
friendRequestMongoDB,
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB, redis.GetRocksCacheOptions()),
redis.NewFriendCacheRedis(rdb, &config.LocalCacheConfig, friendMongoDB),
mgocli.GetTx(),
),
blackDatabase: controller.NewBlackDatabase(
blackMongoDB,
redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB, redis.GetRocksCacheOptions()),
redis.NewBlackCacheRedis(rdb, &config.LocalCacheConfig, blackMongoDB),
),
notificationSender: notificationSender,
RegisterCenter: client,

@@ -19,11 +19,12 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"path"
"strconv"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"

"github.com/google/uuid"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
@@ -37,7 +38,10 @@ import (
)

func (t *thirdServer) PartLimit(ctx context.Context, req *third.PartLimitReq) (*third.PartLimitResp, error) {
limit := t.s3dataBase.PartLimit()
limit, err := t.s3dataBase.PartLimit()
if err != nil {
return nil, err
}
return &third.PartLimitResp{
MinPartSize: limit.MinPartSize,
MaxPartSize: limit.MaxPartSize,

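Since PartLimit now returns an error alongside the limits, callers see the failure instead of a zero-value limit. A sketch of a hypothetical caller (thirdClient is an illustrative gRPC client variable, not a name from this repository):

resp, err := thirdClient.PartLimit(ctx, &third.PartLimitReq{})
if err != nil {
	return err // surfaced now that PartLimit can fail, e.g. when the object backend is unavailable
}
log.ZDebug(ctx, "part limit", "min", resp.MinPartSize, "max", resp.MaxPartSize)
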
+37
-14
@@ -17,9 +17,14 @@ package third
import (
"context"
"fmt"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"time"

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/mcache"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/tools/s3/disable"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
@@ -29,8 +34,6 @@ import (

"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/redisutil"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/s3"
"github.com/openimsdk/tools/s3/cos"
@@ -60,15 +63,17 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}

logdb, err := mgo.NewLogMongo(mgocli.GetDB())
if err != nil {
return err
@@ -77,15 +82,31 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
if err != nil {
return err
}

var thirdCache cache.ThirdCache
if rdb == nil {
tc, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
thirdCache = mcache.NewThirdCache(tc)
} else {
thirdCache = redis.NewThirdCache(rdb)
}
// Select the oss method according to the profile policy
enable := config.RpcConfig.Object.Enable
var (
o s3.Interface
)
switch enable {
var o s3.Interface
switch enable := config.RpcConfig.Object.Enable; enable {
case "minio":
o, err = minio.NewMinio(ctx, redis.NewMinioCache(rdb), *config.MinioConfig.Build())
var minioCache minio.Cache
if rdb == nil {
mc, err := mgo.NewCacheMgo(mgocli.GetDB())
if err != nil {
return err
}
minioCache = mcache.NewMinioCache(mc)
} else {
minioCache = redis.NewMinioCache(rdb)
}
o, err = minio.NewMinio(ctx, minioCache, *config.MinioConfig.Build())
case "cos":
o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
case "oss":
@@ -94,6 +115,8 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build())
case "aws":
o, err = aws.NewAws(*config.RpcConfig.Object.Aws.Build())
case "":
o = disable.NewDisable()
default:
err = fmt.Errorf("invalid object enable: %s", enable)
}
@@ -106,7 +129,7 @@ func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryReg
}
localcache.InitLocalCache(&config.LocalCacheConfig)
third.RegisterThirdServer(server, &thirdServer{
thirdDatabase: controller.NewThirdDatabase(redis.NewThirdCache(rdb), logdb),
thirdDatabase: controller.NewThirdDatabase(thirdCache, logdb),
s3dataBase: controller.NewS3Database(rdb, o, s3db),
defaultExpire: time.Hour * 24 * 7,
config: config,

+13
-13
@@ -23,29 +23,27 @@ import (
"time"

"github.com/openimsdk/open-im-server/v3/internal/rpc/relation"
"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/config"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/prommetrics"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/redis"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/database/mgo"
tablerelation "github.com/openimsdk/open-im-server/v3/pkg/common/storage/model"
"github.com/openimsdk/open-im-server/v3/pkg/common/webhook"
"github.com/openimsdk/open-im-server/v3/pkg/dbbuild"
"github.com/openimsdk/open-im-server/v3/pkg/localcache"
"github.com/openimsdk/open-im-server/v3/pkg/rpcli"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/group"
friendpb "github.com/openimsdk/protocol/relation"
"github.com/openimsdk/tools/db/redisutil"

"github.com/openimsdk/open-im-server/v3/pkg/authverify"
"github.com/openimsdk/open-im-server/v3/pkg/common/convert"
"github.com/openimsdk/open-im-server/v3/pkg/common/servererrs"
"github.com/openimsdk/open-im-server/v3/pkg/common/storage/controller"
"github.com/openimsdk/protocol/constant"
"github.com/openimsdk/protocol/sdkws"
pbuser "github.com/openimsdk/protocol/user"
"github.com/openimsdk/tools/db/mongoutil"
"github.com/openimsdk/tools/db/pagination"
registry "github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/utils/datautil"
"google.golang.org/grpc"
@@ -57,7 +55,7 @@ type userServer struct {
db controller.UserDatabase
friendNotificationSender *relation.FriendNotificationSender
userNotificationSender *UserNotificationSender
RegisterCenter registry.SvcDiscoveryRegistry
RegisterCenter discovery.Conn
config *Config
webhookClient *webhook.Client
groupClient *rpcli.GroupClient
@@ -76,15 +74,17 @@ type Config struct {
Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client registry.SvcDiscoveryRegistry, server *grpc.Server) error {
mgocli, err := mongoutil.NewMongoDB(ctx, config.MongodbConfig.Build())
func Start(ctx context.Context, config *Config, client discovery.Conn, server grpc.ServiceRegistrar) error {
dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
mgocli, err := dbb.Mongo(ctx)
if err != nil {
return err
}
rdb, err := redisutil.NewRedisClient(ctx, config.RedisConfig.Build())
rdb, err := dbb.Redis(ctx)
if err != nil {
return err
}

users := make([]*tablerelation.User, 0)

for _, v := range config.Share.IMAdminUserID {

@@ -1,47 +1,37 @@
package tools
package cron

import (
"context"

"github.com/openimsdk/open-im-server/v3/pkg/common/config"
kdisc "github.com/openimsdk/open-im-server/v3/pkg/common/discovery"
disetcd "github.com/openimsdk/open-im-server/v3/pkg/common/discovery/etcd"
pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/discovery"
"github.com/openimsdk/tools/discovery/etcd"

"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/mw"
"github.com/openimsdk/tools/utils/runtimeenv"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"

"github.com/openimsdk/tools/errs"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"github.com/openimsdk/tools/utils/runtimeenv"
"github.com/robfig/cron/v3"
"google.golang.org/grpc"
)

type CronTaskConfig struct {
type Config struct {
CronTask config.CronTask
Share config.Share
Discovery config.Discovery

runTimeEnv string
}

func Start(ctx context.Context, conf *CronTaskConfig) error {
conf.runTimeEnv = runtimeenv.PrintRuntimeEnvironment()

log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", conf.runTimeEnv, "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
func Start(ctx context.Context, conf *Config, client discovery.Conn, service grpc.ServiceRegistrar) error {
log.CInfo(ctx, "CRON-TASK server is initializing", "runTimeEnv", runtimeenv.RuntimeEnvironment(), "chatRecordsClearTime", conf.CronTask.CronExecuteTime, "msgDestructTime", conf.CronTask.RetainChatRecords)
if conf.CronTask.RetainChatRecords < 1 {
return errs.New("msg destruct time must be greater than 1").Wrap()
log.ZInfo(ctx, "disable cron")
<-ctx.Done()
return nil
}
client, err := kdisc.NewDiscoveryRegister(&conf.Discovery, conf.runTimeEnv, nil)
if err != nil {
return errs.WrapMsg(err, "failed to register discovery service")
}
client.AddOption(mw.GrpcClient(), grpc.WithTransportCredentials(insecure.NewCredentials()))
ctx = mcontext.SetOpUserID(ctx, conf.Share.IMAdminUserID[0])

msgConn, err := client.GetConn(ctx, conf.Discovery.RpcService.Msg)
@@ -88,13 +78,15 @@ func Start(ctx context.Context, conf *CronTaskConfig) error {
}
log.ZDebug(ctx, "start cron task", "CronExecuteTime", conf.CronTask.CronExecuteTime)
srv.cron.Start()
log.ZDebug(ctx, "cron task server is running")
<-ctx.Done()
log.ZDebug(ctx, "cron task server is shutting down")
return nil
}

type cronServer struct {
ctx context.Context
config *CronTaskConfig
config *Config
cron *cron.Cron
msgClient msg.MsgClient
conversationClient pbconversation.ConversationClient
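For orientation, a sketch of how a job such as deleteMsg can be registered with the robfig/cron scheduler held by cronServer; the wiring below is illustrative and may differ from the exact code in this file:

// Illustrative only: register the clearing job against the configured crontab spec, then block until shutdown.
crontab := cron.New()
if _, err := crontab.AddFunc(conf.CronTask.CronExecuteTime, srv.deleteMsg); err != nil {
	return errs.WrapMsg(err, "failed to register deleteMsg cron job")
}
crontab.Start()
<-ctx.Done()
return nil
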
@@ -1,4 +1,4 @@
package tools
package cron

import (
"context"
@@ -24,7 +24,7 @@ func TestName(t *testing.T) {
Address: []string{"localhost:12379"},
},
}
client, err := kdisc.NewDiscoveryRegister(conf, "source")
client, err := kdisc.NewDiscoveryRegister(conf, nil)
if err != nil {
panic(err)
}
@@ -1,12 +1,13 @@
package tools
package cron

import (
"fmt"
"os"
"time"

"github.com/openimsdk/protocol/msg"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"os"
"time"
)

func (c *cronServer) deleteMsg() {
@@ -1,12 +1,13 @@
package tools
package cron

import (
"fmt"
"os"
"time"

"github.com/openimsdk/protocol/third"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"os"
"time"
)

func (c *cronServer) clearS3() {
@@ -1,12 +1,13 @@
package tools
package cron

import (
"fmt"
"os"
"time"

pbconversation "github.com/openimsdk/protocol/conversation"
"github.com/openimsdk/tools/log"
"github.com/openimsdk/tools/mcontext"
"os"
"time"
)

func (c *cronServer) clearUserMsg() {