fix(driver): implement cancellation and progress updates during Put for some drivers (#7847)

* fix(driver): additionally implement cancellation and progress updates during Put for some drivers

* refactor: add driver archive api into template

* fix(123): use built-in MD5 to avoid caching the full file

* .

* fix build failure
KirCute_ECT
2025-02-01 17:29:55 +08:00
committed by GitHub
parent b9f397d29f
commit 779c293f04
35 changed files with 457 additions and 256 deletions
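
A note on the mechanism before the diff. In this driver the S3 upload already honors cancellation, since it goes through UploadWithContext(ctx, ...); what was missing was progress, because the raw stream was passed directly as the upload Body. Wrapping the body in a progress-reporting reader fixes that, and for drivers whose clients are not context-aware, the same wrapper idea can carry cancellation too. A minimal sketch of such a reader, assuming driver.UpdateProgress is a percentage callback (the progressReader type here is hypothetical; alist's real wrapper is stream.ReaderUpdatingProgress, shown in the last hunk):

package sketch

import (
	"context"
	"io"
)

// progressReader reports cumulative read progress through update and
// stops early once ctx is canceled, so aborting the task aborts the
// transfer instead of letting it run to completion.
type progressReader struct {
	ctx    context.Context
	r      io.Reader
	total  int64 // expected size, e.g. file.GetSize()
	read   int64
	update func(percentage float64)
}

func (p *progressReader) Read(b []byte) (int, error) {
	if err := p.ctx.Err(); err != nil {
		return 0, err // context canceled or deadline exceeded: fail fast
	}
	n, err := p.r.Read(b)
	p.read += int64(n)
	if p.update != nil && p.total > 0 {
		p.update(float64(p.read) / float64(p.total) * 100)
	}
	return n, err
}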


@@ -8,6 +8,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 	"github.com/aws/aws-sdk-go/aws"
@@ -363,16 +364,16 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }
 
-func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	hi := stream.GetHash()
+func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+	hi := file.GetHash()
 	gcid := hi.GetHash(hash_extend.GCID)
 	if len(gcid) < hash_extend.GCID.Width {
-		tFile, err := stream.CacheFullInTempFile()
+		tFile, err := file.CacheFullInTempFile()
 		if err != nil {
 			return err
 		}
 
-		gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+		gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
 		if err != nil {
 			return err
 		}
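
This second hunk is mostly the rename, but the fallback it touches deserves a word: Xunlei identifies uploads by GCID, a hash most sources do not provide, so when the incoming stream lacks a full-width GCID the driver has to cache the entire file to disk just to hash it. (That cost is exactly what the "fix(123): use built-in MD5" bullet above avoids for the 123 driver.) The shape of that fallback, sketched with a standard-library SHA-1 standing in for alist's GCID helper; ensureHash and cache are hypothetical names, while the real code uses file.CacheFullInTempFile and utils.HashFile:

package sketch

import (
	"crypto/sha1"
	"encoding/hex"
	"io"
)

// ensureHash returns the supplied hash when present; otherwise it
// buffers the stream (e.g. to a temp file), hashes it, and rewinds so
// the same reader can still be uploaded afterwards.
func ensureHash(supplied string, cache func() (io.ReadSeeker, error)) (string, error) {
	if supplied != "" {
		return supplied, nil // hash already known: no extra disk I/O
	}
	f, err := cache()
	if err != nil {
		return "", err
	}
	h := sha1.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}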
@@ -384,8 +385,8 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
 	r.SetBody(&base.Json{
 		"kind":        FILE,
 		"parent_id":   dstDir.GetID(),
-		"name":        stream.GetName(),
-		"size":        stream.GetSize(),
+		"name":        file.GetName(),
+		"size":        file.GetSize(),
 		"hash":        gcid,
 		"upload_type": UPLOAD_TYPE_RESUMABLE,
 	})
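
Only the rename lands in this hunk; the registration request itself is unchanged. It submits the file's name, size, and GCID hash with UPLOAD_TYPE_RESUMABLE, which is presumably what lets the server resume a partial transfer, or skip it entirely when it already knows the hash.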
@@ -406,14 +407,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
 		return err
 	}
 	uploader := s3manager.NewUploader(s)
-	if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-		uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+	if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+		uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
 	}
 	_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body:    stream,
+		Body: &stream.ReaderUpdatingProgress{
+			Reader:         file,
+			UpdateProgress: up,
+		},
 	})
 	return err
 }
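
Two details in this last hunk. First, the part-size guard: aws-sdk-go's s3manager refuses multipart uploads with more than MaxUploadParts (10,000) parts, and DefaultUploadPartSize is 5 MiB, so anything over roughly 48.8 GiB would otherwise run out of parts mid-upload; dividing by MaxUploadParts - 1 rather than MaxUploadParts leaves headroom for the final remainder part. A self-contained illustration with a hypothetical 60 GiB size:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	const size = int64(60) << 30 // a hypothetical 60 GiB upload

	partSize := int64(s3manager.DefaultUploadPartSize) // 5 MiB
	if size > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
		// 10,000 parts x 5 MiB ≈ 48.8 GiB is all the default can cover;
		// grow the parts instead of failing partway through the upload.
		partSize = size / (s3manager.MaxUploadParts - 1)
	}
	fmt.Println(partSize) // 6443095 bytes, ≈ 6.1 MiB per part
}

Second, the actual fix: Body is now &stream.ReaderUpdatingProgress{Reader: file, UpdateProgress: up}, a plain io.Reader wrapper, so every chunk the uploader reads advances the task's progress, while cancellation continues to come from UploadWithContext.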