feat: misc improvements to upload/copy/hash (#5045)

general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: add cross-storage rapid-upload support
general: enhance upload to avoid creating a local temp file when possible
general: replace ReadSeekCloser with a File interface to speed up upstream operations
feat(aliyun_open): apply the same improvements as above
feat(crypt): add a hack for 139cloud

Close #4934 
Close #4819 

baidu_netdisk still needs its upload code improved before it can support rapid-upload
Sean committed via GitHub on 2023-08-27 21:14:23 +08:00
commit a3748af772 (parent 9b765ef696)
77 changed files with 1731 additions and 615 deletions
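
The last three "general" bullets above are easiest to see in the GoogleDrive hunks below: instead of wrapping a ReadCloser in io.LimitReader, the driver now asks the stream for an exact byte range per chunk. The following is a rough, self-contained Go sketch of that idea, assuming only the GetSize/RangeRead shape visible in the diff; Range, rangeStreamer, and memStream are made-up names for illustration, not AList's real types.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// Range mirrors the http_range.Range{Start, Length} literal used in the diff;
// the field names come from that usage, everything else here is a sketch.
type Range struct {
	Start  int64
	Length int64
}

// rangeStreamer is a hypothetical, trimmed-down stand-in for model.FileStreamer:
// a size plus the ability to hand out a reader for an arbitrary byte range,
// which is what lets a driver re-read chunks without spooling to a local temp file.
type rangeStreamer interface {
	GetSize() int64
	RangeRead(r Range) (io.Reader, error)
}

// memStream is a toy in-memory implementation used only for this example.
type memStream struct{ data []byte }

func (m *memStream) GetSize() int64 { return int64(len(m.data)) }

func (m *memStream) RangeRead(r Range) (io.Reader, error) {
	if r.Start < 0 || r.Start+r.Length > m.GetSize() {
		return nil, fmt.Errorf("range out of bounds")
	}
	return bytes.NewReader(m.data[r.Start : r.Start+r.Length]), nil
}

// uploadInChunks has the same shape as chunkUpload in the diff: each pass asks
// the streamer for exactly one chunk and "uploads" it (printed here instead).
func uploadInChunks(s rangeStreamer, chunkSize int64) error {
	for offset := int64(0); offset < s.GetSize(); offset += chunkSize {
		length := chunkSize
		if remaining := s.GetSize() - offset; remaining < length {
			length = remaining
		}
		reader, err := s.RangeRead(Range{Start: offset, Length: length})
		if err != nil {
			return err
		}
		chunk, err := io.ReadAll(reader)
		if err != nil {
			return err
		}
		fmt.Printf("PUT bytes %d-%d/%d: %q\n", offset, offset+length-1, s.GetSize(), chunk)
	}
	return nil
}

func main() {
	_ = uploadInChunks(&memStream{data: []byte("hello rapid upload")}, 5)
}
```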


@@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    obj := stream.GetOld()
+    obj := stream.GetExist()
     var (
         e   Error
         url string
@@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
     putUrl := res.Header().Get("location")
     if stream.GetSize() < d.ChunkSize*1024*1024 {
         _, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-            req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
+            req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
         }, nil)
     } else {
         err = d.chunkUpload(ctx, stream, putUrl)
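
In the small-file branch above, SetBody(stream) replaces SetBody(stream.GetReadCloser()), presumably because the new streamer already behaves like an io.Reader. Below is a minimal, self-contained resty sketch of that single-PUT shape; the URL and payload are placeholders, not the driver's real endpoint.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/go-resty/resty/v2"
)

func main() {
	// Placeholder payload standing in for the file stream; in the driver the
	// model.FileStreamer itself is passed as the body.
	body := strings.NewReader("example file contents")
	size := int64(body.Len())

	// Single-shot PUT, mirroring the branch taken when the stream is smaller
	// than ChunkSize MiB: explicit Content-Length, the reader used directly
	// as the request body. The URL is a made-up placeholder.
	resp, err := resty.New().R().
		SetHeader("Content-Length", strconv.FormatInt(size, 10)).
		SetBody(body).
		Put("https://upload.example.com/session/123")
	if err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	fmt.Println("status:", resp.StatusCode())
}
```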


@@ -5,7 +5,7 @@ import (
     "crypto/x509"
     "encoding/pem"
     "fmt"
-    "io"
+    "github.com/alist-org/alist/v3/pkg/http_range"
     "io/ioutil"
     "net/http"
     "os"
@@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 
 func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
     var defaultChunkSize = d.ChunkSize * 1024 * 1024
-    var finish int64 = 0
-    for finish < stream.GetSize() {
+    var offset int64 = 0
+    for offset < stream.GetSize() {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
         }
-        chunkSize := stream.GetSize() - finish
+        chunkSize := stream.GetSize() - offset
         if chunkSize > defaultChunkSize {
             chunkSize = defaultChunkSize
         }
-        _, err := d.request(url, http.MethodPut, func(req *resty.Request) {
+        reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+        if err != nil {
+            return err
+        }
+        _, err = d.request(url, http.MethodPut, func(req *resty.Request) {
             req.SetHeaders(map[string]string{
                 "Content-Length": strconv.FormatInt(chunkSize, 10),
-                "Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
-            }).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
+                "Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
+            }).SetBody(reader).SetContext(ctx)
         }, nil)
         if err != nil {
             return err
         }
-        finish += chunkSize
+        offset += chunkSize
     }
     return nil
 }
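
For reference, here is a small worked example of the Content-Length / Content-Range values the loop above produces. The 25 MiB total and 10 MiB chunk size are made-up numbers for illustration; the driver's real chunk size comes from the configurable d.ChunkSize.

```go
package main

import "fmt"

func main() {
	const mib = int64(1024 * 1024)
	total := 25 * mib     // pretend file size
	chunkSize := 10 * mib // pretend chunk size

	for offset := int64(0); offset < total; offset += chunkSize {
		length := chunkSize
		if remaining := total - offset; remaining < length {
			length = remaining // the diff clamps chunkSize the same way for the final chunk
		}
		// Matches fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()) above.
		fmt.Printf("Content-Length: %d\n", length)
		fmt.Printf("Content-Range:  bytes %d-%d/%d\n\n", offset, offset+length-1, total)
	}
}
```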