s3 minio, cos, oss support

This commit is contained in:
withchao
2023-07-10 15:50:57 +08:00
parent 4b8af3be2e
commit fea3c5358a
35 changed files with 2652 additions and 2113 deletions
+8
View File
@@ -0,0 +1,8 @@
package cont
const (
// hashPath holds deduplicated objects addressed by their content md5.
hashPath = "openim/data/hash/"
// tempPath holds in-flight upload objects before promotion to hashPath.
tempPath = "openim/temp/"
UploadTypeMultipart = 1 // multipart (chunked) upload
UploadTypePresigned = 2 // presigned single-PUT upload
)
+233
View File
@@ -0,0 +1,233 @@
package cont
import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/s3"
"github.com/google/uuid"
"path"
"strings"
"time"
)
// New returns a Controller that delegates storage operations to impl.
func New(impl s3.Interface) *Controller {
return &Controller{impl: impl}
}
// Controller implements the upload workflow (initiate / sign / complete)
// on top of any s3.Interface backend (minio, cos, oss).
type Controller struct {
impl s3.Interface
}
// HashPath returns the canonical storage key for content with the given
// md5 (hex). The parameter is renamed from md5, which shadowed the
// crypto/md5 package imported by this file.
func (c *Controller) HashPath(md5Hex string) string {
	return path.Join(hashPath, md5Hex)
}
// NowPath returns a time-bucketed path "YYYY/MM/DD/hh/mm/ss" for the
// current moment, used to spread temporary objects across prefixes.
func (c *Controller) NowPath() string {
	// Reference-time layout; identical to joining the zero-padded
	// year/month/day/hour/minute/second components with "/".
	return time.Now().Format("2006/01/02/15/04/05")
}
// UUID returns a random 32-character lowercase hex string (a dash-free UUID).
func (c *Controller) UUID() string {
id := uuid.New()
return hex.EncodeToString(id[:])
}
// PartSize asks the backend for the per-part size to use for size bytes.
func (c *Controller) PartSize(ctx context.Context, size int64) (int64, error) {
return c.impl.PartSize(ctx, size)
}
// GetHashObject stats the deduplicated object stored under the given md5 hash.
func (c *Controller) GetHashObject(ctx context.Context, hash string) (*s3.ObjectInfo, error) {
return c.impl.StatObject(ctx, c.HashPath(hash))
}
// InitiateUpload starts an upload for content identified by its md5 hash.
// size is the total object size; expire bounds the validity of returned
// presigned URLs; maxParts caps how many part URLs are signed up front
// (< 0 = sign all parts, 0 = sign none). If an object with the same hash
// already exists, *HashAlreadyExistsError is returned so the caller can
// skip the upload entirely.
func (c *Controller) InitiateUpload(ctx context.Context, hash string, size int64, expire time.Duration, maxParts int) (*InitiateUploadResult, error) {
	if size < 0 {
		return nil, errors.New("invalid size")
	}
	if hashBytes, err := hex.DecodeString(hash); err != nil {
		return nil, err
	} else if len(hashBytes) != md5.Size {
		return nil, errors.New("invalid md5")
	}
	partSize, err := c.impl.PartSize(ctx, size)
	if err != nil {
		return nil, err
	}
	partNumber := int(size / partSize)
	if size%partSize > 0 {
		partNumber++
	}
	// BUG FIX: the original rejected when partNumber < maxParts (condition
	// inverted); "too many parts" means partNumber exceeds the allowed cap.
	if maxParts > 0 && partNumber > maxParts {
		return nil, fmt.Errorf("too many parts: %d", partNumber)
	}
	if info, err := c.impl.StatObject(ctx, c.HashPath(hash)); err == nil {
		return nil, &HashAlreadyExistsError{Object: info}
	} else if !c.impl.IsNotFound(err) {
		return nil, err
	}
	if size <= partSize {
		// Fits in one part: hand out a single presigned PUT for a temp key.
		key := path.Join(tempPath, c.NowPath(), fmt.Sprintf("%s_%d_%s.presigned", hash, size, c.UUID()))
		rawURL, err := c.impl.PresignedPutObject(ctx, key, expire)
		if err != nil {
			return nil, err
		}
		return &InitiateUploadResult{
			UploadID: newMultipartUploadID(multipartUploadID{
				Type: UploadTypePresigned,
				ID:   "",
				Key:  key,
				Size: size,
				Hash: hash,
			}),
			PartSize: partSize,
			Sign: &s3.AuthSignResult{
				Parts: []s3.SignPart{
					{
						PartNumber: 1,
						URL:        rawURL,
					},
				},
			},
		}, nil
	}
	// Multipart upload straight to the hash-addressed key.
	upload, err := c.impl.InitiateMultipartUpload(ctx, c.HashPath(hash))
	if err != nil {
		return nil, err
	}
	// Sign min(maxParts, partNumber) part URLs; maxParts < 0 means all.
	// BUG FIX: the original looped up to maxParts over a slice of length
	// partNumber, panicking when maxParts > partNumber and leaving zero
	// entries when maxParts < partNumber.
	signCount := partNumber
	if maxParts >= 0 && maxParts < signCount {
		signCount = maxParts
	}
	var authSign *s3.AuthSignResult
	if signCount > 0 {
		partNumbers := make([]int, signCount)
		for i := range partNumbers {
			partNumbers[i] = i + 1
		}
		authSign, err = c.impl.AuthSign(ctx, upload.UploadID, upload.Key, time.Hour*24, partNumbers)
		if err != nil {
			return nil, err
		}
	}
	return &InitiateUploadResult{
		UploadID: newMultipartUploadID(multipartUploadID{
			Type: UploadTypeMultipart,
			ID:   upload.UploadID,
			Key:  upload.Key,
			Size: size,
			Hash: hash,
		}),
		PartSize: partSize,
		Sign:     authSign,
	}, nil
}
// CompleteUpload finishes an upload begun by InitiateUpload. partHashs are
// the md5 hex digests of the uploaded parts, in order; their joined digest
// must match the hash recorded in the upload id. On success the object is
// available under its hash-addressed key and temporary objects are removed.
func (c *Controller) CompleteUpload(ctx context.Context, uploadID string, partHashs []string) (*UploadResult, error) {
	upload, err := parseMultipartUploadID(uploadID)
	if err != nil {
		return nil, err
	}
	// The upload id pins md5(join(partHashs, ",")); reject mismatched parts.
	// (A stray debug fmt.Println of both digests was removed here.)
	if md5Sum := md5.Sum([]byte(strings.Join(partHashs, ","))); hex.EncodeToString(md5Sum[:]) != upload.Hash {
		return nil, errors.New("md5 mismatching")
	}
	// Already stored (deduplicated): report the existing object.
	if info, err := c.impl.StatObject(ctx, c.HashPath(upload.Hash)); err == nil {
		return &UploadResult{
			Key:  info.Key,
			Size: info.Size,
			Hash: info.ETag,
		}, nil
	} else if !c.impl.IsNotFound(err) {
		return nil, err
	}
	// Temporary objects to delete on the way out, success or failure.
	cleanObject := make(map[string]struct{})
	defer func() {
		for key := range cleanObject {
			_ = c.impl.DeleteObject(ctx, key)
		}
	}()
	var targetKey string
	switch upload.Type {
	case UploadTypeMultipart:
		parts := make([]s3.Part, len(partHashs))
		for i, part := range partHashs {
			parts[i] = s3.Part{
				PartNumber: i + 1,
				ETag:       part,
			}
		}
		// TODO: validate the combined part sizes against upload.Size.
		result, err := c.impl.CompleteMultipartUpload(ctx, upload.ID, upload.Key, parts)
		if err != nil {
			return nil, err
		}
		targetKey = result.Key
	case UploadTypePresigned:
		uploadInfo, err := c.impl.StatObject(ctx, upload.Key)
		if err != nil {
			return nil, err
		}
		cleanObject[uploadInfo.Key] = struct{}{}
		if uploadInfo.Size != upload.Size {
			return nil, errors.New("upload size mismatching")
		}
		if uploadInfo.ETag != upload.Hash {
			return nil, errors.New("upload md5 mismatching")
		}
		// Copy to a unique intermediate key first so a concurrent writer
		// cannot clobber the object between the checks above and promotion.
		// BUG FIX: the copy source was targetKey, which is still "" at this
		// point; the source is the client-uploaded temp object upload.Key.
		copyInfo, err := c.impl.CopyObject(ctx, upload.Key, upload.Key+"."+c.UUID())
		if err != nil {
			return nil, err
		}
		cleanObject[copyInfo.Key] = struct{}{}
		if copyInfo.ETag != upload.Hash {
			return nil, errors.New("copy md5 mismatching")
		}
		if _, err := c.impl.CopyObject(ctx, copyInfo.Key, c.HashPath(upload.Hash)); err != nil {
			return nil, err
		}
		// BUG FIX: targetKey was set to copyInfo.Key, a temporary object the
		// deferred cleanup deletes; the final object lives at the hash path.
		targetKey = c.HashPath(upload.Hash)
	default:
		return nil, errors.New("invalid upload id type")
	}
	return &UploadResult{
		Key:  targetKey,
		Size: upload.Size,
		Hash: upload.Hash,
	}, nil
}
// AuthSign produces signed upload URLs for the given part numbers of an
// in-progress multipart upload. Presigned (single-PUT) uploads cannot be
// re-signed through this path.
func (c *Controller) AuthSign(ctx context.Context, uploadID string, partNumbers []int) (*s3.AuthSignResult, error) {
upload, err := parseMultipartUploadID(uploadID)
if err != nil {
return nil, err
}
switch upload.Type {
case UploadTypeMultipart:
return c.impl.AuthSign(ctx, upload.ID, upload.Key, time.Hour*24, partNumbers)
case UploadTypePresigned:
return nil, errors.New("presigned id not support auth sign")
default:
return nil, errors.New("invalid upload id type")
}
}
// IsNotFound reports whether err is the backend's "object not found" error.
func (c *Controller) IsNotFound(err error) bool {
return c.impl.IsNotFound(err)
}
// AccessURL returns a (typically presigned) download URL for name.
func (c *Controller) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
return c.impl.AccessURL(ctx, name, expire, opt)
}
+14
View File
@@ -0,0 +1,14 @@
package cont
import (
"fmt"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/s3"
)
// HashAlreadyExistsError is returned by InitiateUpload when an object with
// the same content hash is already stored; Object describes that object.
type HashAlreadyExistsError struct {
	Object *s3.ObjectInfo
}

// Error implements the error interface.
func (e *HashAlreadyExistsError) Error() string {
	return "hash already exists: " + e.Object.Key
}
+35
View File
@@ -0,0 +1,35 @@
package cont
import (
"encoding/base64"
"encoding/json"
"fmt"
)
// multipartUploadID is the state packed into the opaque upload id handed to
// clients. Single-letter json keys keep the encoded token short.
type multipartUploadID struct {
	Type int    `json:"a,omitempty"` // UploadTypeMultipart or UploadTypePresigned
	ID   string `json:"b,omitempty"` // backend multipart upload id (multipart only)
	Key  string `json:"c,omitempty"` // object key the upload targets
	Size int64  `json:"d,omitempty"` // expected object size in bytes
	Hash string `json:"e,omitempty"` // expected content md5 (hex)
}

// newMultipartUploadID serializes id into an opaque base64 token.
func newMultipartUploadID(id multipartUploadID) string {
	raw, err := json.Marshal(id)
	if err != nil {
		// Marshalling a plain struct of scalars cannot fail.
		panic(err)
	}
	return base64.StdEncoding.EncodeToString(raw)
}

// parseMultipartUploadID decodes a token produced by newMultipartUploadID.
func parseMultipartUploadID(token string) (*multipartUploadID, error) {
	raw, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		return nil, fmt.Errorf("invalid multipart upload id: %w", err)
	}
	upload := new(multipartUploadID)
	if err := json.Unmarshal(raw, upload); err != nil {
		return nil, fmt.Errorf("invalid multipart upload id: %w", err)
	}
	return upload, nil
}
+15
View File
@@ -0,0 +1,15 @@
package cont
import "github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/s3"
// InitiateUploadResult is returned by Controller.InitiateUpload.
type InitiateUploadResult struct {
UploadID string `json:"uploadID"` // opaque upload id (encoded multipartUploadID)
PartSize int64 `json:"partSize"` // size of each part in bytes
Sign *s3.AuthSignResult `json:"sign"` // pre-signed part upload info (may be nil)
}
// UploadResult describes the final stored object after CompleteUpload.
type UploadResult struct {
Hash string `json:"hash"`
Size int64 `json:"size"`
Key string `json:"key"`
}
+263
View File
@@ -0,0 +1,263 @@
package cos
import (
"context"
"errors"
"fmt"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/config"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/s3"
"github.com/tencentyun/cos-go-sdk-v5"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const (
minPartSize = 1024 * 1024 * 1 // 1MB — smallest part size handed out
maxPartSize = 1024 * 1024 * 1024 * 5 // 5GB — COS per-part upper limit
// NOTE(review): COS itself allows up to 10000 parts; 1000 looks like a
// deliberately lower local cap — confirm.
maxNumSize = 1000
)
// NewCos builds a Tencent COS-backed s3.Interface from the global config.
func NewCos() (s3.Interface, error) {
	conf := config.Config.Object.Cos
	u, err := url.Parse(conf.BucketURL)
	if err != nil {
		// BUG FIX: was panic(err); a constructor that returns an error
		// should report a bad BucketURL instead of crashing the process.
		return nil, err
	}
	client := cos.NewClient(&cos.BaseURL{BucketURL: u}, &http.Client{
		Transport: &cos.AuthorizationTransport{
			SecretID:     conf.SecretID,
			SecretKey:    conf.SecretKey,
			SessionToken: conf.SessionToken,
		},
	})
	return &Cos{
		copyURL:    u.Host + "/",
		client:     client,
		credential: client.GetCredential(),
	}, nil
}
// Cos implements s3.Interface backed by Tencent Cloud COS.
type Cos struct {
copyURL string // "host/" prefix used to build copy-source values in CopyObject
client *cos.Client
credential *cos.Credential
}
// Engine returns the storage engine name.
func (c *Cos) Engine() string {
return "tencent-cos"
}
// InitiateMultipartUpload starts a COS multipart upload for name and returns
// the backend upload id with bucket and key.
func (c *Cos) InitiateMultipartUpload(ctx context.Context, name string) (*s3.InitiateMultipartUploadResult, error) {
result, _, err := c.client.Object.InitiateMultipartUpload(ctx, name, nil)
if err != nil {
return nil, err
}
return &s3.InitiateMultipartUploadResult{
UploadID: result.UploadID,
Bucket: result.Bucket,
Key: result.Key,
}, nil
}
// CompleteMultipartUpload finishes a COS multipart upload from the given
// part ETags. Input ETags are lowercased before being sent to COS.
func (c *Cos) CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []s3.Part) (*s3.CompleteMultipartUploadResult, error) {
opts := &cos.CompleteMultipartUploadOptions{
Parts: make([]cos.Object, len(parts)),
}
for i, part := range parts {
opts.Parts[i] = cos.Object{
PartNumber: part.PartNumber,
ETag: strings.ToLower(part.ETag),
}
}
result, _, err := c.client.Object.CompleteMultipartUpload(ctx, name, uploadID, opts)
if err != nil {
return nil, err
}
return &s3.CompleteMultipartUploadResult{
Location: result.Location,
Bucket: result.Bucket,
Key: result.Key,
// NOTE(review): the returned ETag is not normalized here (other methods
// lowercase and strip quotes) — confirm callers tolerate the raw form.
ETag: result.ETag,
}, nil
}
// PartSize picks the per-part size for uploading size bytes so that the
// object fits within maxNumSize parts, never going below minPartSize.
func (c *Cos) PartSize(ctx context.Context, size int64) (int64, error) {
	if size <= 0 {
		return 0, errors.New("size must be greater than 0")
	}
	if size > maxPartSize*maxNumSize {
		return 0, fmt.Errorf("size must be less than %db", maxPartSize*maxNumSize)
	}
	if size <= minPartSize*maxNumSize {
		return minPartSize, nil
	}
	// Ceiling division: the smallest part size that fits in maxNumSize parts.
	return (size + maxNumSize - 1) / maxNumSize, nil
}
// ClientUploadPart describes how a client should upload parts of an existing
// multipart upload: a signed PUT URL template with "partNumber" as the part
// query key, parts numbered from 1.
// NOTE(review): the Authorization header is computed for a request without
// the partNumber/uploadId query — confirm COS accepts that signature for
// per-part PUTs.
func (c *Cos) ClientUploadPart(ctx context.Context, partSize int64, expire time.Duration, upload *s3.InitiateMultipartUploadResult) (*s3.MultipartUploadRequest, error) {
uri := c.client.BaseURL.BucketURL.String() + "/" + cos.EncodeURIComponent(upload.Key)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, uri, nil)
if err != nil {
return nil, err
}
cos.AddAuthorizationHeader(c.credential.SecretID, c.credential.SecretKey, c.credential.SessionToken, req, cos.NewAuthTime(expire))
return &s3.MultipartUploadRequest{
UploadID: upload.UploadID,
Bucket: upload.Bucket,
Key: upload.Key,
Method: req.Method,
URL: uri,
Query: url.Values{"uploadId": {upload.UploadID}},
Header: req.Header,
PartKey: "partNumber",
PartSize: partSize,
FirstPart: 1,
}, nil
}
// AuthSign returns one shared signed header set plus per-part query values
// for uploading the given partNumbers of a COS multipart upload.
// NOTE(review): one signature is produced for a request without part-specific
// query parameters — confirm COS validates part PUTs against it.
func (c *Cos) AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*s3.AuthSignResult, error) {
result := s3.AuthSignResult{
URL: c.client.BaseURL.BucketURL.String() + "/" + cos.EncodeURIComponent(name),
Query: url.Values{"uploadId": {uploadID}},
Header: make(http.Header),
Parts: make([]s3.SignPart, len(partNumbers)),
}
req, err := http.NewRequestWithContext(ctx, http.MethodPut, result.URL, nil)
if err != nil {
return nil, err
}
cos.AddAuthorizationHeader(c.credential.SecretID, c.credential.SecretKey, c.credential.SessionToken, req, cos.NewAuthTime(expire))
result.Header = req.Header
for i, partNumber := range partNumbers {
result.Parts[i] = s3.SignPart{
PartNumber: partNumber,
Query: url.Values{"partNumber": {strconv.Itoa(partNumber)}},
}
}
return &result, nil
}
// PresignedPutObject returns a presigned PUT URL for name, valid for expire.
func (c *Cos) PresignedPutObject(ctx context.Context, name string, expire time.Duration) (string, error) {
rawURL, err := c.client.Object.GetPresignedURL(ctx, http.MethodPut, name, c.credential.SecretID, c.credential.SecretKey, expire, nil)
if err != nil {
return "", err
}
return rawURL.String(), nil
}
// DeleteObject removes name from the bucket.
func (c *Cos) DeleteObject(ctx context.Context, name string) error {
_, err := c.client.Object.Delete(ctx, name)
return err
}
// StatObject issues a HEAD for name and returns its key, normalized ETag
// (lowercase, quotes stripped), size, and last-modified time. Missing or
// unparsable headers are reported as errors.
func (c *Cos) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
	info, err := c.client.Object.Head(ctx, name, nil)
	if err != nil {
		return nil, err
	}
	res := &s3.ObjectInfo{Key: name}
	if res.ETag = strings.ToLower(strings.ReplaceAll(info.Header.Get("ETag"), `"`, "")); res.ETag == "" {
		return nil, errors.New("StatObject etag not found")
	}
	contentLengthStr := info.Header.Get("Content-Length")
	if contentLengthStr == "" {
		return nil, errors.New("StatObject content-length not found")
	}
	res.Size, err = strconv.ParseInt(contentLengthStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("StatObject content-length parse error: %w", err)
	}
	if res.Size < 0 {
		// BUG FIX: the message claimed "must be greater than 0" but the
		// check only rejects negative sizes; zero-byte objects are valid.
		return nil, errors.New("StatObject content-length must not be negative")
	}
	lastModified := info.Header.Get("Last-Modified")
	if lastModified == "" {
		return nil, errors.New("StatObject last-modified not found")
	}
	res.LastModified, err = time.Parse(http.TimeFormat, lastModified)
	if err != nil {
		return nil, fmt.Errorf("StatObject last-modified parse error: %w", err)
	}
	return res, nil
}
// CopyObject server-side copies src to dst inside the bucket and returns the
// destination key and its normalized ETag.
func (c *Cos) CopyObject(ctx context.Context, src string, dst string) (*s3.CopyObjectInfo, error) {
	result, _, err := c.client.Object.Copy(ctx, src, c.copyURL+dst, &cos.ObjectCopyOptions{})
	if err != nil {
		return nil, err
	}
	return &s3.CopyObjectInfo{
		Key: dst,
		// BUG FIX: normalize the ETag like StatObject does (lowercase,
		// quotes stripped); callers compare it against a bare lowercase
		// md5 hex string, which a quoted ETag would never match.
		ETag: strings.ToLower(strings.ReplaceAll(result.ETag, `"`, "")),
	}, nil
}
// IsNotFound reports whether err is a COS "object does not exist" error
// (HTTP 404 or error code NoSuchKey).
func (c *Cos) IsNotFound(err error) bool {
	if e, ok := err.(*cos.ErrorResponse); ok {
		return e.Response.StatusCode == http.StatusNotFound || e.Code == "NoSuchKey"
	}
	return false
}
// AbortMultipartUpload cancels an in-progress multipart upload and frees its parts.
func (c *Cos) AbortMultipartUpload(ctx context.Context, uploadID string, name string) error {
_, err := c.client.Object.AbortMultipartUpload(ctx, name, uploadID)
return err
}
// ListUploadedParts lists parts already uploaded for a multipart upload,
// starting after partNumberMarker, returning at most maxParts entries.
func (c *Cos) ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*s3.ListUploadedPartsResult, error) {
result, _, err := c.client.Object.ListParts(ctx, name, uploadID, &cos.ObjectListPartsOptions{
MaxParts: strconv.Itoa(maxParts),
PartNumberMarker: strconv.Itoa(partNumberMarker),
})
if err != nil {
return nil, err
}
res := &s3.ListUploadedPartsResult{
Key: result.Key,
UploadID: result.UploadID,
UploadedParts: make([]s3.UploadedPart, len(result.Parts)),
}
// Atoi/time.Parse errors are deliberately ignored: COS returns numeric
// strings and HTTP dates; the zero value is used on a parse failure.
res.MaxParts, _ = strconv.Atoi(result.MaxParts)
res.NextPartNumberMarker, _ = strconv.Atoi(result.NextPartNumberMarker)
for i, part := range result.Parts {
lastModified, _ := time.Parse(http.TimeFormat, part.LastModified)
res.UploadedParts[i] = s3.UploadedPart{
PartNumber: part.PartNumber,
LastModified: lastModified,
ETag: part.ETag,
Size: part.Size,
}
}
return res, nil
}
// AccessURL returns a presigned GET URL for name. expire <= 0 is treated as
// effectively permanent (99 years); values under one second are raised to
// one second. opt can override response Content-Type/Content-Disposition.
func (c *Cos) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
reqParams := make(url.Values)
if opt != nil {
if opt.ContentType != "" {
reqParams.Set("Content-Type", opt.ContentType)
}
if opt.ContentDisposition != "" {
reqParams.Set("Content-Disposition", opt.ContentDisposition)
}
}
if expire <= 0 {
expire = time.Hour * 24 * 365 * 99 // 99 years
} else if expire < time.Second {
expire = time.Second
}
rawURL, err := c.client.Object.GetPresignedURL(ctx, http.MethodGet, name, c.credential.SecretID, c.credential.SecretKey, expire, reqParams)
if err != nil {
return "", err
}
return rawURL.String(), nil
}
+242
View File
@@ -0,0 +1,242 @@
package minio
import (
"context"
"errors"
"fmt"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/config"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/s3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/signer"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const (
// unsignedPayload is the x-amz-content-sha256 value for unsigned request bodies.
unsignedPayload = "UNSIGNED-PAYLOAD"
)
const (
minPartSize = 1024 * 1024 * 5 // 5MB (the comment previously said 1MB; the value is 5MB, S3's minimum part size)
maxPartSize = 1024 * 1024 * 1024 * 5 // 5GB
maxNumSize = 10000 // max parts per multipart upload
)
// NewMinio builds a MinIO-backed s3.Interface from the global config.
// The endpoint scheme selects TLS. The bucket's existence is not verified here.
func NewMinio() (s3.Interface, error) {
conf := config.Config.Object.Minio
u, err := url.Parse(conf.Endpoint)
if err != nil {
return nil, err
}
opts := &minio.Options{
Creds: credentials.NewStaticV4(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
Secure: u.Scheme == "https",
}
client, err := minio.New(u.Host, opts)
if err != nil {
return nil, err
}
return &Minio{
bucket: conf.Bucket,
bucketURL: conf.Endpoint + "/" + conf.Bucket + "/",
opts: opts,
core: &minio.Core{Client: client},
}, nil
}
// Minio implements s3.Interface backed by a MinIO (or S3-compatible) server.
type Minio struct {
bucket string // bucket all objects are stored in
bucketURL string // "<endpoint>/<bucket>/" prefix for building object URLs
opts *minio.Options
core *minio.Core // low-level API exposing multipart primitives
}
// Engine returns the storage engine name.
func (m *Minio) Engine() string {
return "minio"
}
// InitiateMultipartUpload starts a multipart upload for name in the bucket.
func (m *Minio) InitiateMultipartUpload(ctx context.Context, name string) (*s3.InitiateMultipartUploadResult, error) {
uploadID, err := m.core.NewMultipartUpload(ctx, m.bucket, name, minio.PutObjectOptions{})
if err != nil {
return nil, err
}
return &s3.InitiateMultipartUploadResult{
Bucket: m.bucket,
Key: name,
UploadID: uploadID,
}, nil
}
// CompleteMultipartUpload finishes a multipart upload from the given part
// ETags; ETags are lowercased on input and in the returned result.
func (m *Minio) CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []s3.Part) (*s3.CompleteMultipartUploadResult, error) {
minioParts := make([]minio.CompletePart, len(parts))
for i, part := range parts {
minioParts[i] = minio.CompletePart{
PartNumber: part.PartNumber,
ETag: strings.ToLower(part.ETag),
}
}
upload, err := m.core.CompleteMultipartUpload(ctx, m.bucket, name, uploadID, minioParts, minio.PutObjectOptions{})
if err != nil {
return nil, err
}
return &s3.CompleteMultipartUploadResult{
Location: upload.Location,
Bucket: upload.Bucket,
Key: upload.Key,
ETag: strings.ToLower(upload.ETag),
}, nil
}
// PartSize picks the per-part size for uploading size bytes so that the
// object fits within maxNumSize parts, never going below minPartSize.
func (m *Minio) PartSize(ctx context.Context, size int64) (int64, error) {
	if size <= 0 {
		return 0, errors.New("size must be greater than 0")
	}
	if size > maxPartSize*maxNumSize {
		return 0, fmt.Errorf("size must be less than %db", maxPartSize*maxNumSize)
	}
	if size <= minPartSize*maxNumSize {
		return minPartSize, nil
	}
	// Ceiling division: the smallest part size that fits in maxNumSize parts.
	return (size + maxNumSize - 1) / maxNumSize, nil
}
// AuthSign builds self-signed PUT URLs for the given part numbers of an
// in-progress multipart upload. Each part request carries partNumber and
// uploadId and a SigV4 signature over an unsigned payload.
// NOTE(review): expire is not applied — SignV4Trailer signs for the current
// time; confirm the intended URL lifetime. Region is hardcoded "us-east-1".
func (m *Minio) AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*s3.AuthSignResult, error) {
creds, err := m.opts.Creds.Get()
if err != nil {
return nil, err
}
result := s3.AuthSignResult{
URL: m.bucketURL + name,
Query: url.Values{"uploadId": {uploadID}},
Parts: make([]s3.SignPart, len(partNumbers)),
}
for i, partNumber := range partNumbers {
rawURL := result.URL + "?partNumber=" + strconv.Itoa(partNumber) + "&uploadId=" + uploadID
request, err := http.NewRequestWithContext(ctx, http.MethodPut, rawURL, nil)
if err != nil {
return nil, err
}
request.Header.Set("X-Amz-Content-Sha256", unsignedPayload)
request = signer.SignV4Trailer(*request, creds.AccessKeyID, creds.SecretAccessKey, creds.SessionToken, "us-east-1", nil)
result.Parts[i] = s3.SignPart{
PartNumber: partNumber,
URL: request.URL.String(),
Query: url.Values{"partNumber": {strconv.Itoa(partNumber)}},
Header: request.Header,
}
}
return &result, nil
}
// PresignedPutObject returns a presigned PUT URL for name, valid for expire.
func (m *Minio) PresignedPutObject(ctx context.Context, name string, expire time.Duration) (string, error) {
rawURL, err := m.core.Client.PresignedPutObject(ctx, m.bucket, name, expire)
if err != nil {
return "", err
}
return rawURL.String(), nil
}
// DeleteObject removes name from the bucket.
func (m *Minio) DeleteObject(ctx context.Context, name string) error {
return m.core.Client.RemoveObject(ctx, m.bucket, name, minio.RemoveObjectOptions{})
}
// StatObject returns name's key, lowercase ETag, size, and modification time.
func (m *Minio) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
info, err := m.core.Client.StatObject(ctx, m.bucket, name, minio.StatObjectOptions{})
if err != nil {
return nil, err
}
return &s3.ObjectInfo{
ETag: strings.ToLower(info.ETag),
Key: info.Key,
Size: info.Size,
LastModified: info.LastModified,
}, nil
}
// CopyObject server-side copies src to dst within the bucket and returns the
// destination key with its lowercase ETag.
func (m *Minio) CopyObject(ctx context.Context, src string, dst string) (*s3.CopyObjectInfo, error) {
result, err := m.core.Client.CopyObject(ctx, minio.CopyDestOptions{
Bucket: m.bucket,
Object: dst,
}, minio.CopySrcOptions{
Bucket: m.bucket,
Object: src,
})
if err != nil {
return nil, err
}
return &s3.CopyObjectInfo{
Key: dst,
ETag: strings.ToLower(result.ETag),
}, nil
}
// IsNotFound reports whether err is a minio "object does not exist" error
// (HTTP 404 or code NoSuchKey), handling both value and pointer forms.
func (m *Minio) IsNotFound(err error) bool {
if err == nil {
return false
}
switch e := err.(type) {
case minio.ErrorResponse:
return e.StatusCode == http.StatusNotFound || e.Code == "NoSuchKey"
case *minio.ErrorResponse:
return e.StatusCode == http.StatusNotFound || e.Code == "NoSuchKey"
default:
return false
}
}
// AbortMultipartUpload cancels an in-progress multipart upload and frees its parts.
func (m *Minio) AbortMultipartUpload(ctx context.Context, uploadID string, name string) error {
return m.core.AbortMultipartUpload(ctx, m.bucket, name, uploadID)
}
// ListUploadedParts lists parts already uploaded for a multipart upload,
// starting after partNumberMarker, returning at most maxParts entries.
func (m *Minio) ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*s3.ListUploadedPartsResult, error) {
result, err := m.core.ListObjectParts(ctx, m.bucket, name, uploadID, partNumberMarker, maxParts)
if err != nil {
return nil, err
}
res := &s3.ListUploadedPartsResult{
Key: result.Key,
UploadID: result.UploadID,
MaxParts: result.MaxParts,
NextPartNumberMarker: result.NextPartNumberMarker,
UploadedParts: make([]s3.UploadedPart, len(result.ObjectParts)),
}
for i, part := range result.ObjectParts {
res.UploadedParts[i] = s3.UploadedPart{
PartNumber: part.PartNumber,
LastModified: part.LastModified,
ETag: part.ETag,
Size: part.Size,
}
}
return res, nil
}
// AccessURL returns a presigned GET URL for name. expire <= 0 is treated as
// effectively permanent (99 years); values under one second are raised to
// one second. opt can override response Content-Type/Content-Disposition.
func (m *Minio) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
reqParams := make(url.Values)
if opt != nil {
if opt.ContentType != "" {
reqParams.Set("Content-Type", opt.ContentType)
}
if opt.ContentDisposition != "" {
reqParams.Set("Content-Disposition", opt.ContentDisposition)
}
}
if expire <= 0 {
expire = time.Hour * 24 * 365 * 99 // 99 years
} else if expire < time.Second {
expire = time.Second
}
u, err := m.core.Client.PresignedGetObject(ctx, m.bucket, name, expire, reqParams)
if err != nil {
return "", err
}
return u.String(), nil
}
+251
View File
@@ -0,0 +1,251 @@
package oss
import (
"context"
"errors"
"fmt"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/config"
"github.com/OpenIMSDK/Open-IM-Server/pkg/common/db/s3"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"net/http"
"net/url"
"strconv"
"strings"
"time"
)
const (
minPartSize = 1024 * 1024 * 1 // 1MB — smallest part size handed out
maxPartSize = 1024 * 1024 * 1024 * 5 // 5GB — OSS per-part upper limit
maxNumSize = 10000 // max parts per multipart upload
)
// NewOSS builds an Aliyun OSS-backed s3.Interface from the global config.
// BucketURL must be non-empty; a trailing slash is appended if missing.
func NewOSS() (s3.Interface, error) {
conf := config.Config.Object.Oss
if conf.BucketURL == "" {
return nil, errors.New("bucket url is empty")
}
client, err := oss.New(conf.Endpoint, conf.AccessKeyID, conf.AccessKeySecret)
if err != nil {
return nil, err
}
bucket, err := client.Bucket(conf.Bucket)
if err != nil {
return nil, err
}
if conf.BucketURL[len(conf.BucketURL)-1] != '/' {
conf.BucketURL += "/"
}
return &OSS{
bucketURL: conf.BucketURL,
bucket: bucket,
credentials: client.Config.GetCredentials(),
}, nil
}
// OSS implements s3.Interface backed by Aliyun OSS.
type OSS struct {
bucketURL string // bucket URL with a trailing slash, used to build object URLs
bucket *oss.Bucket
credentials oss.Credentials // used for manual request signing in AuthSign
}
// Engine returns the storage engine name.
func (o *OSS) Engine() string {
return "ali-oss"
}
// InitiateMultipartUpload starts an OSS multipart upload for name.
func (o *OSS) InitiateMultipartUpload(ctx context.Context, name string) (*s3.InitiateMultipartUploadResult, error) {
result, err := o.bucket.InitiateMultipartUpload(name)
if err != nil {
return nil, err
}
return &s3.InitiateMultipartUploadResult{
UploadID: result.UploadID,
Bucket: result.Bucket,
Key: result.Key,
}, nil
}
// CompleteMultipartUpload finishes an OSS multipart upload. Part ETags are
// uppercased for OSS on input; the returned ETag is normalized to lowercase
// with quotes stripped.
func (o *OSS) CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []s3.Part) (*s3.CompleteMultipartUploadResult, error) {
ossParts := make([]oss.UploadPart, len(parts))
for i, part := range parts {
ossParts[i] = oss.UploadPart{
PartNumber: part.PartNumber,
ETag: strings.ToUpper(part.ETag),
}
}
result, err := o.bucket.CompleteMultipartUpload(oss.InitiateMultipartUploadResult{
UploadID: uploadID,
Bucket: o.bucket.BucketName,
Key: name,
}, ossParts)
if err != nil {
return nil, err
}
return &s3.CompleteMultipartUploadResult{
Location: result.Location,
Bucket: result.Bucket,
Key: result.Key,
ETag: strings.ToLower(strings.ReplaceAll(result.ETag, `"`, ``)),
}, nil
}
// PartSize picks the per-part size for uploading size bytes so that the
// object fits within maxNumSize parts, never going below minPartSize.
func (o *OSS) PartSize(ctx context.Context, size int64) (int64, error) {
	if size <= 0 {
		return 0, errors.New("size must be greater than 0")
	}
	if size > maxPartSize*maxNumSize {
		return 0, fmt.Errorf("size must be less than %db", maxPartSize*maxNumSize)
	}
	if size <= minPartSize*maxNumSize {
		return minPartSize, nil
	}
	// Ceiling division: the smallest part size that fits in maxNumSize parts.
	return (size + maxNumSize - 1) / maxNumSize, nil
}
// AuthSign builds self-signed PUT URLs for the given part numbers of an OSS
// multipart upload, signing each request (Date header + canonical resource)
// with getSignedStr.
// NOTE(review): expire is unused; the signature is tied to the Date header
// set to now — confirm the intended URL lifetime.
func (o *OSS) AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*s3.AuthSignResult, error) {
result := s3.AuthSignResult{
URL: o.bucketURL + name,
Query: url.Values{"uploadId": {uploadID}},
Header: make(http.Header),
Parts: make([]s3.SignPart, len(partNumbers)),
}
for i, partNumber := range partNumbers {
rawURL := fmt.Sprintf(`%s%s?partNumber=%d&uploadId=%s`, o.bucketURL, name, partNumber, uploadID)
request, err := http.NewRequestWithContext(ctx, http.MethodPut, rawURL, nil)
if err != nil {
return nil, err
}
if o.credentials.GetSecurityToken() != "" {
request.Header.Set(oss.HTTPHeaderOssSecurityToken, o.credentials.GetSecurityToken())
}
request.Header.Set(oss.HTTPHeaderHost, request.Host)
request.Header.Set(oss.HTTPHeaderDate, time.Now().UTC().Format(http.TimeFormat))
authorization := fmt.Sprintf(`OSS %s:%s`, o.credentials.GetAccessKeyID(), o.getSignedStr(request, fmt.Sprintf(`/%s/%s?partNumber=%d&uploadId=%s`, o.bucket.BucketName, name, partNumber, uploadID), o.credentials.GetAccessKeySecret()))
request.Header.Set(oss.HTTPHeaderAuthorization, authorization)
result.Parts[i] = s3.SignPart{
PartNumber: partNumber,
Query: url.Values{"partNumber": {strconv.Itoa(partNumber)}},
URL: request.URL.String(),
Header: request.Header,
}
}
return &result, nil
}
// PresignedPutObject returns a presigned PUT URL for name valid for expire
// (truncated to whole seconds).
func (o *OSS) PresignedPutObject(ctx context.Context, name string, expire time.Duration) (string, error) {
return o.bucket.SignURL(name, http.MethodPut, int64(expire/time.Second))
}
// StatObject fetches name's metadata and returns its key, normalized ETag
// (lowercase, quotes stripped), size, and last-modified time. Missing or
// unparsable headers are reported as errors.
func (o *OSS) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
	header, err := o.bucket.GetObjectMeta(name)
	if err != nil {
		return nil, err
	}
	res := &s3.ObjectInfo{Key: name}
	if res.ETag = strings.ToLower(strings.ReplaceAll(header.Get("ETag"), `"`, ``)); res.ETag == "" {
		return nil, errors.New("StatObject etag not found")
	}
	contentLengthStr := header.Get("Content-Length")
	if contentLengthStr == "" {
		return nil, errors.New("StatObject content-length not found")
	}
	res.Size, err = strconv.ParseInt(contentLengthStr, 10, 64)
	if err != nil {
		return nil, fmt.Errorf("StatObject content-length parse error: %w", err)
	}
	if res.Size < 0 {
		// BUG FIX: the message claimed "must be greater than 0" but the
		// check only rejects negative sizes; zero-byte objects are valid.
		return nil, errors.New("StatObject content-length must not be negative")
	}
	lastModified := header.Get("Last-Modified")
	if lastModified == "" {
		return nil, errors.New("StatObject last-modified not found")
	}
	res.LastModified, err = time.Parse(http.TimeFormat, lastModified)
	if err != nil {
		return nil, fmt.Errorf("StatObject last-modified parse error: %w", err)
	}
	return res, nil
}
// DeleteObject removes name from the bucket.
func (o *OSS) DeleteObject(ctx context.Context, name string) error {
return o.bucket.DeleteObject(name)
}
// CopyObject server-side copies src to dst within the bucket and returns the
// destination key with its normalized (lowercase, unquoted) ETag.
func (o *OSS) CopyObject(ctx context.Context, src string, dst string) (*s3.CopyObjectInfo, error) {
result, err := o.bucket.CopyObject(src, dst)
if err != nil {
return nil, err
}
return &s3.CopyObjectInfo{
Key: dst,
ETag: strings.ToLower(strings.ReplaceAll(result.ETag, `"`, ``)),
}, nil
}
// IsNotFound reports whether err is an OSS "object does not exist" error
// (HTTP 404 or code NoSuchKey), handling both value and pointer forms.
func (o *OSS) IsNotFound(err error) bool {
switch e := err.(type) {
case oss.ServiceError:
return e.StatusCode == http.StatusNotFound || e.Code == "NoSuchKey"
case *oss.ServiceError:
return e.StatusCode == http.StatusNotFound || e.Code == "NoSuchKey"
default:
return false
}
}
// AbortMultipartUpload cancels an in-progress multipart upload and frees its parts.
func (o *OSS) AbortMultipartUpload(ctx context.Context, uploadID string, name string) error {
return o.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{
UploadID: uploadID,
Key: name,
Bucket: o.bucket.BucketName,
})
}
// ListUploadedParts lists parts already uploaded for a multipart upload,
// starting after partNumberMarker, returning at most maxParts entries.
// NOTE(review): oss.MaxUploads(100) is a list-multipart-uploads option and
// looks out of place on ListUploadedParts — confirm it has any effect here.
func (o *OSS) ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*s3.ListUploadedPartsResult, error) {
result, err := o.bucket.ListUploadedParts(oss.InitiateMultipartUploadResult{
UploadID: uploadID,
Key: name,
Bucket: o.bucket.BucketName,
}, oss.MaxUploads(100), oss.MaxParts(maxParts), oss.PartNumberMarker(partNumberMarker))
if err != nil {
return nil, err
}
res := &s3.ListUploadedPartsResult{
Key: result.Key,
UploadID: result.UploadID,
MaxParts: result.MaxParts,
UploadedParts: make([]s3.UploadedPart, len(result.UploadedParts)),
}
// Atoi error deliberately ignored: zero is used when the marker is absent.
res.NextPartNumberMarker, _ = strconv.Atoi(result.NextPartNumberMarker)
for i, part := range result.UploadedParts {
res.UploadedParts[i] = s3.UploadedPart{
PartNumber: part.PartNumber,
LastModified: part.LastModified,
ETag: part.ETag,
Size: int64(part.Size),
}
}
return res, nil
}
// AccessURL returns a signed GET URL for name. expire <= 0 is treated as
// effectively permanent (99 years); values under one second are raised to
// one second. opt can override response Content-Type/Content-Disposition.
func (o *OSS) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
var opts []oss.Option
if opt != nil {
if opt.ContentType != "" {
opts = append(opts, oss.ContentType(opt.ContentType))
}
if opt.ContentDisposition != "" {
opts = append(opts, oss.ContentDisposition(opt.ContentDisposition))
}
}
if expire <= 0 {
expire = time.Hour * 24 * 365 * 99 // 99 years
} else if expire < time.Second {
expire = time.Second
}
return o.bucket.SignURL(name, http.MethodGet, int64(expire/time.Second), opts...)
}
+81
View File
@@ -0,0 +1,81 @@
package oss
import (
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"hash"
"io"
"net/http"
"sort"
"strings"
)
// getAdditionalHeaderKeys returns, for AuthV2 signing, the configured
// additional headers actually present on req: a sorted slice of lowercase
// keys plus a set (map with empty values) for membership tests.
func (o *OSS) getAdditionalHeaderKeys(req *http.Request) ([]string, map[string]string) {
	present := make(map[string]string, len(req.Header))
	for name := range req.Header {
		present[strings.ToLower(name)] = ""
	}
	keysMap := make(map[string]string)
	for _, name := range o.bucket.Client.Config.AdditionalHeaders {
		lower := strings.ToLower(name)
		if _, ok := present[lower]; ok {
			keysMap[lower] = ""
		}
	}
	keysList := make([]string, 0, len(keysMap))
	for key := range keysMap {
		keysList = append(keysList, key)
	}
	sort.Strings(keysList)
	return keysList, keysMap
}
// getSignedStr computes the OSS request signature for req over
// canonicalizedResource: HMAC-SHA1 for the default AuthV1, HMAC-SHA256 when
// the client is configured for AuthV2. Returns the base64-encoded signature
// (the part after "OSS <accessKeyID>:" in the Authorization header).
func (o *OSS) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string {
// Collect the "x-oss-" headers of the request (plus, for AuthV2, any
// configured additional headers that are present).
ossHeadersMap := make(map[string]string)
additionalList, additionalMap := o.getAdditionalHeaderKeys(req)
for k, v := range req.Header {
if strings.HasPrefix(strings.ToLower(k), "x-oss-") {
ossHeadersMap[strings.ToLower(k)] = v[0]
} else if o.bucket.Client.Config.AuthVersion == oss.AuthV2 {
if _, ok := additionalMap[strings.ToLower(k)]; ok {
ossHeadersMap[strings.ToLower(k)] = v[0]
}
}
}
hs := newHeaderSorter(ossHeadersMap)
// Sort the ossHeadersMap by the ascending order
hs.Sort()
// Build canonicalizedOSSHeaders as "key:value\n" pairs in sorted order.
canonicalizedOSSHeaders := ""
for i := range hs.Keys {
canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n"
}
// Give other parameters values
// when sign URL, date is expires
date := req.Header.Get(oss.HTTPHeaderDate)
contentType := req.Header.Get(oss.HTTPHeaderContentType)
contentMd5 := req.Header.Get(oss.HTTPHeaderContentMD5)
// default is v1 signature (HMAC-SHA1)
signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource
h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret))
// v2 signature (HMAC-SHA256) additionally signs the additional-header list
if o.bucket.Client.Config.AuthVersion == oss.AuthV2 {
signStr = req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + strings.Join(additionalList, ";") + "\n" + canonicalizedResource
h = hmac.New(func() hash.Hash { return sha256.New() }, []byte(keySecret))
}
_, _ = io.WriteString(h, signStr)
signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil))
return signedStr
}
+47
View File
@@ -0,0 +1,47 @@
package oss
import (
"bytes"
"sort"
)
// headerSorter pairs header keys with their values so both slices can be
// sorted together by key. It implements sort.Interface.
type headerSorter struct {
	Keys []string
	Vals []string
}

// newHeaderSorter builds a headerSorter from m; order is unspecified until
// Sort is called.
func newHeaderSorter(m map[string]string) *headerSorter {
	hs := &headerSorter{
		Keys: make([]string, 0, len(m)),
		Vals: make([]string, 0, len(m)),
	}
	for key, val := range m {
		hs.Keys = append(hs.Keys, key)
		hs.Vals = append(hs.Vals, val)
	}
	return hs
}

// Sort orders the key/value pairs by key, ascending.
func (hs *headerSorter) Sort() {
	sort.Sort(hs)
}

// Len implements sort.Interface.
func (hs *headerSorter) Len() int {
	return len(hs.Vals)
}

// Less implements sort.Interface using byte-wise key comparison.
func (hs *headerSorter) Less(i, j int) bool {
	return hs.Keys[i] < hs.Keys[j]
}

// Swap implements sort.Interface, swapping keys and values in lockstep.
func (hs *headerSorter) Swap(i, j int) {
	hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i]
	hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i]
}
+127
View File
@@ -0,0 +1,127 @@
package s3
import (
"context"
"net/http"
"net/url"
"time"
)
// InitiateMultipartUploadResult identifies a newly started multipart upload.
type InitiateMultipartUploadResult struct {
Bucket string `json:"bucket"`
Key string `json:"key"`
UploadID string `json:"uploadID"`
}
// MultipartUploadRequest describes how a client should PUT parts of an
// upload: a URL/header template with PartKey as the part-number query key.
type MultipartUploadRequest struct {
UploadID string `json:"uploadId"`
Bucket string `json:"bucket"`
Key string `json:"key"`
Method string `json:"method"`
URL string `json:"url"`
Query url.Values `json:"query"`
Header http.Header `json:"header"`
PartKey string `json:"partKey"`
PartSize int64 `json:"partSize"`
FirstPart int `json:"firstPart"`
}
// Part is one uploaded part referenced when completing a multipart upload.
type Part struct {
PartNumber int `json:"partNumber"`
ETag string `json:"etag"`
}
// CompleteMultipartUploadResult describes the object assembled from parts.
type CompleteMultipartUploadResult struct {
Location string `json:"location"`
Bucket string `json:"bucket"`
Key string `json:"key"`
ETag string `json:"etag"`
}
// SignResult wraps signed part information.
// NOTE(review): not referenced elsewhere in this file — confirm it is used.
type SignResult struct {
Parts []SignPart `json:"parts"`
}
// ObjectInfo is the metadata returned by StatObject.
type ObjectInfo struct {
ETag string `json:"etag"`
Key string `json:"name"`
Size int64 `json:"size"`
LastModified time.Time `json:"lastModified"`
}
// CopyObjectInfo is the result of a server-side copy.
type CopyObjectInfo struct {
Key string `json:"name"`
ETag string `json:"etag"`
}
// SignPart carries the signed URL/query/header for uploading one part.
type SignPart struct {
PartNumber int `json:"partNumber"`
URL string `json:"url"`
Query url.Values `json:"query"`
Header http.Header `json:"header"`
}
// AuthSignResult carries shared URL/query/headers plus per-part sign data.
type AuthSignResult struct {
URL string `json:"url"`
Query url.Values `json:"query"`
Header http.Header `json:"header"`
Parts []SignPart `json:"parts"`
}
// InitiateUpload mirrors MultipartUploadRequest.
// NOTE(review): appears to duplicate MultipartUploadRequest and is not
// referenced elsewhere in this file — confirm before removing.
type InitiateUpload struct {
UploadID string `json:"uploadId"`
Bucket string `json:"bucket"`
Key string `json:"key"`
Method string `json:"method"`
URL string `json:"url"`
Query url.Values `json:"query"`
Header http.Header `json:"header"`
PartKey string `json:"partKey"`
PartSize int64 `json:"partSize"`
FirstPart int `json:"firstPart"`
}
// UploadedPart is one entry returned by ListUploadedParts.
type UploadedPart struct {
PartNumber int `json:"partNumber"`
LastModified time.Time `json:"lastModified"`
ETag string `json:"etag"`
Size int64 `json:"size"`
}
// ListUploadedPartsResult is a page of already-uploaded parts.
// NOTE(review): tagged with xml unlike the json-tagged types above — confirm
// this is intentional.
type ListUploadedPartsResult struct {
Key string `xml:"Key"`
UploadID string `xml:"UploadId"`
NextPartNumberMarker int `xml:"NextPartNumberMarker"`
MaxParts int `xml:"MaxParts"`
UploadedParts []UploadedPart `xml:"Part"`
}
// AccessURLOption overrides response headers on download URLs.
type AccessURLOption struct {
ContentType string `json:"contentType"`
ContentDisposition string `json:"contentDisposition"`
}
// Interface is the object-storage backend abstraction implemented by the
// minio, cos, and oss drivers.
type Interface interface {
// Engine returns the backend name, e.g. "minio", "tencent-cos", "ali-oss".
Engine() string
// InitiateMultipartUpload starts a multipart upload for name.
InitiateMultipartUpload(ctx context.Context, name string) (*InitiateMultipartUploadResult, error)
// CompleteMultipartUpload assembles the uploaded parts into the final object.
CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []Part) (*CompleteMultipartUploadResult, error)
// PartSize picks the per-part size for uploading size bytes.
PartSize(ctx context.Context, size int64) (int64, error)
// AuthSign produces signed upload URLs for the given part numbers.
AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*AuthSignResult, error)
// PresignedPutObject returns a presigned single-PUT URL for name.
PresignedPutObject(ctx context.Context, name string, expire time.Duration) (string, error)
// DeleteObject removes name.
DeleteObject(ctx context.Context, name string) error
// CopyObject server-side copies src to dst.
CopyObject(ctx context.Context, src string, dst string) (*CopyObjectInfo, error)
// StatObject returns metadata (ETag, size, mtime) for name.
StatObject(ctx context.Context, name string) (*ObjectInfo, error)
// IsNotFound reports whether err means "object does not exist".
IsNotFound(err error) bool
// AbortMultipartUpload cancels an in-progress multipart upload.
AbortMultipartUpload(ctx context.Context, uploadID string, name string) error
// ListUploadedParts pages through parts already uploaded for an upload.
ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*ListUploadedPartsResult, error)
// AccessURL returns a download URL for name, valid for expire.
AccessURL(ctx context.Context, name string, expire time.Duration, opt *AccessURLOption) (string, error)
}