fix(driver): implement canceling and updating progress for putting for some drivers (#7847)
* fix(driver): additionally implement canceling and updating progress for putting for some drivers
* refactor: add driver archive api into template
* fix(123): use built-in MD5 to avoid caching full
* .
* fix build failed
@@ -255,10 +255,10 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	}

 	if stream.GetSize() <= 10*utils.MB { // files of 10MB or less are uploaded in plain (non-multipart) mode
-		return d.UploadByOSS(&params, stream, up)
+		return d.UploadByOSS(ctx, &params, stream, up)
 	}
 	// multipart upload
-	return d.UploadByMultipart(&params, stream.GetSize(), stream, up)
+	return d.UploadByMultipart(ctx, &params, stream.GetSize(), stream, up)
 }

 // offline download
@@ -2,6 +2,7 @@ package pikpak

 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"crypto/sha1"
 	"encoding/hex"
@@ -9,6 +10,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 	jsoniter "github.com/json-iterator/go"
@@ -19,6 +21,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -417,7 +420,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err
 	return nil
 }

-func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.FileStreamer, up driver.UpdateProgress) error {
 	ossClient, err := oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret)
 	if err != nil {
 		return err
@@ -427,14 +430,20 @@ func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up dri
 		return err
 	}

-	err = bucket.PutObject(params.Key, stream, OssOption(params)...)
+	err = bucket.PutObject(params.Key, &stream.ReaderWithCtx{
+		Reader: &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		},
+		Ctx: ctx,
+	}, OssOption(params)...)
 	if err != nil {
 		return err
 	}
 	return nil
 }

-func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
 	var (
 		chunks    []oss.FileChunk
 		parts     []oss.UploadPart
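For context, the two wrappers from internal/stream used in this hunk behave roughly as below. This is a minimal sketch, assuming only the field shapes visible in the composite literals above; the real types live in github.com/alist-org/alist/v3/internal/stream and differ in detail (Total here stands in for s.GetSize()).

package main

import (
	"context"
	"io"
	"strings"
)

// Sketch of stream.ReaderWithCtx: Read fails as soon as the context is
// canceled, which is what lets bucket.PutObject abort mid-transfer.
type ReaderWithCtx struct {
	Reader io.Reader
	Ctx    context.Context
}

func (r *ReaderWithCtx) Read(p []byte) (int, error) {
	if err := r.Ctx.Err(); err != nil {
		return 0, err
	}
	return r.Reader.Read(p)
}

// Sketch of stream.ReaderUpdatingProgress: reports cumulative progress
// as a percentage after every Read.
type ReaderUpdatingProgress struct {
	Reader         io.Reader
	Total          int64
	read           int64
	UpdateProgress func(percentage float64)
}

func (r *ReaderUpdatingProgress) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	r.read += int64(n)
	r.UpdateProgress(float64(r.read) * 100.0 / float64(r.Total))
	return n, err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	src := strings.NewReader("example payload")
	r := &ReaderWithCtx{
		Reader: &ReaderUpdatingProgress{
			Reader:         src,
			Total:          src.Size(),
			UpdateProgress: func(float64) { /* e.g. forward to up(...) */ },
		},
		Ctx: ctx,
	}
	_, _ = io.Copy(io.Discard, r)
}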
@@ -444,7 +453,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 		err    error
 	)

-	tmpF, err := stream.CacheFullInTempFile()
+	tmpF, err := s.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}
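Why cache the whole stream before a multipart upload? Each worker reads its own chunk at an arbitrary offset, which needs random access (io.ReaderAt), while an incoming network stream supports only one sequential pass. A minimal sketch of the idea; cacheInTempFile is a hypothetical stand-in, not the project's CacheFullInTempFile:

package main

import (
	"io"
	"os"
	"strings"
)

// cacheInTempFile materializes a one-pass stream on disk so parts can
// later be read at independent offsets (hypothetical helper).
func cacheInTempFile(src io.Reader) (*os.File, error) {
	f, err := os.CreateTemp("", "upload-*")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, src); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, err
	}
	return f, nil
}

func main() {
	f, err := cacheInTempFile(strings.NewReader("0123456789"))
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// each part gets its own independent reader over [offset, offset+size)
	part := io.NewSectionReader(f, 5, 5)
	buf := make([]byte, 5)
	_, _ = io.ReadFull(part, buf) // buf now holds "56789"
}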
@@ -488,6 +497,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 		quit <- struct{}{}
 	}()

+	completedNum := atomic.Int32{}
 	// consumers
 	for i := 0; i < ThreadsNum; i++ {
 		go func(threadId int) {
@@ -500,6 +510,8 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 				var part oss.UploadPart // keep retrying on error, 3 attempts in total
 				for retry := 0; retry < 3; retry++ {
 					select {
+					case <-ctx.Done():
+						break
 					case <-ticker.C:
 						errCh <- errors.Wrap(err, "ossToken 过期")
 					default:
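One Go subtlety in this hunk: a bare break inside a select exits only the select, not the enclosing for, so case <-ctx.Done(): break as written moves on to the next retry iteration rather than leaving the loop. The usual idiom when the loop itself should stop is a labeled break; a minimal sketch under that assumption (tryUpload is illustrative, not code from the commit):

package main

import (
	"context"
	"fmt"
)

// tryUpload sketches a cancellation-aware retry loop: the labeled break
// leaves the for loop immediately once ctx is canceled.
func tryUpload(ctx context.Context, attempt func() error) error {
	var err error
loop:
	for retry := 0; retry < 3; retry++ {
		select {
		case <-ctx.Done():
			err = ctx.Err()
			break loop // a bare break here would only exit the select
		default:
		}
		if err = attempt(); err == nil {
			break
		}
	}
	return err
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel up front to demonstrate the early exit
	fmt.Println(tryUpload(ctx, func() error { return nil }))
}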
@@ -511,12 +523,18 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 					}

 					b := bytes.NewBuffer(buf)
-					if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
+					if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
+						Reader: b,
+						Ctx:    ctx,
+					}, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
 						break
 					}
 				}
 				if err != nil {
-					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
+				} else {
+					num := completedNum.Add(1)
+					up(float64(num) * 100.0 / float64(len(chunks)))
 				}
 				UploadedPartsCh <- part
 			}
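The progress side of the change: every worker that finishes a part bumps a shared counter and reports completed/total as a percentage. atomic.Int32 (Go 1.19+) makes the increment safe across the upload goroutines without a mutex; a compact sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const totalParts = 8
	var completed atomic.Int32
	up := func(percentage float64) { fmt.Printf("%.1f%%\n", percentage) }

	var wg sync.WaitGroup
	for i := 0; i < totalParts; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// ... upload one part here ...
			n := completed.Add(1) // safe concurrent increment
			up(float64(n) * 100.0 / float64(totalParts))
		}()
	}
	wg.Wait()
}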
@@ -547,7 +565,7 @@ LOOP:
 	// the EOF error comes from the SDK's xml Unmarshal; the response is actually JSON, so the upload did in fact succeed
 	if _, err = bucket.CompleteMultipartUpload(imur, parts, OssOption(params)...); err != nil && !errors.Is(err, io.EOF) {
 		// when the file name contains either of the characters & or <, parsing the xml response fails even though the upload succeeded
-		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+		if filename := filepath.Base(s.GetName()); !strings.ContainsAny(filename, "&<") {
 			return err
 		}
 	}