feat(traffic): support limit task worker count & file stream rate (#7948)
* feat: set task workers num & client stream rate limit
* feat: server stream rate limit
* upgrade xhofe/tache
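
The hunks below all funnel upload bodies through driver.NewLimitedUploadStream. A minimal sketch of what such a context-aware, rate-limited reader can look like, assuming an implementation built on golang.org/x/time/rate (names and details are illustrative, not the code from this commit):

package driver

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// UploadLimit is assumed to be a process-wide token bucket configured from
// the server's stream-rate setting; nil means unlimited.
var UploadLimit *rate.Limiter

// RateLimitReader couples a reader with a limiter and a context, so every
// Read respects cancellation and waits for bandwidth tokens.
type RateLimitReader struct {
	io.Reader
	Limiter *rate.Limiter
	Ctx     context.Context
}

func (r *RateLimitReader) Read(p []byte) (int, error) {
	if r.Ctx != nil {
		if err := r.Ctx.Err(); err != nil {
			return 0, err // stop promptly once the task is canceled
		}
	}
	n, err := r.Reader.Read(p)
	if err != nil {
		return n, err
	}
	if r.Limiter != nil && r.Ctx != nil {
		// Charge n bytes against the bucket. Assumes burst >= len(p);
		// a real implementation may split large reads into chunks.
		if werr := r.Limiter.WaitN(r.Ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, nil
}

// NewLimitedUploadStream wraps r with the shared upload limiter.
func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader {
	return &RateLimitReader{Reader: r, Limiter: UploadLimit, Ctx: ctx}
}

Because every driver wraps its outgoing body with the same shared limiter, one server-side setting throttles all upload streams at once; the download path can mirror this with a separate limiter.
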
@@ -8,8 +8,6 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/driver"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -20,6 +18,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -144,7 +143,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 		return nil, err
 	}
 
-	bytes, err := crypto.Decode(string(result.EncodedData), key)
+	b, err := crypto.Decode(string(result.EncodedData), key)
 	if err != nil {
 		return nil, err
 	}
@@ -152,7 +151,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
 	downloadInfo := struct {
 		Url string `json:"url"`
 	}{}
-	if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil {
+	if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
 		return nil, err
 	}
 
@@ -290,13 +289,10 @@ func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSPar
 	}
 
 	var bodyBytes []byte
-	r := &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}
+	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	})
 	if err = bucket.PutObject(params.Object, r, append(
 		driver115.OssOption(params, ossToken),
 		oss.CallbackResult(&bodyBytes),
@@ -405,16 +401,12 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
 			}
 		default:
 		}
 
 		buf := make([]byte, chunk.Size)
 		if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
 			continue
 		}
 
-		if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
-			Reader: bytes.NewBuffer(buf),
-			Ctx:    ctx,
-		}, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+		if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)),
+			chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
 			break
 		}
 	}

@@ -6,7 +6,6 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -249,10 +248,10 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
 	input := &s3manager.UploadInput{
 		Bucket: &resp.Data.Bucket,
 		Key:    &resp.Data.Key,
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {

@@ -81,6 +81,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
+	limited := driver.NewLimitedUploadStream(ctx, file)
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -103,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if j == chunkCount {
 				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 			}
-			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(file, chunkSize), curSize, false, getS3UploadUrl)
+			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
 			if err != nil {
 				return err
 			}

@@ -631,12 +631,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	// Progress
 	p := driver.NewProgress(stream.GetSize(), up)
 
+	rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 	// 上传所有分片
 	for _, uploadPartInfo := range uploadPartInfos {
 		index := uploadPartInfo.PartNumber - 1
 		partSize := partInfos[index].PartSize
 		log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
-		limitReader := io.LimitReader(stream, partSize)
+		limitReader := io.LimitReader(rateLimited, partSize)
 
 		// Update Progress
 		r := io.TeeReader(limitReader, p)
@@ -787,6 +788,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	if part == 0 {
 		part = 1
 	}
+	rateLimited := driver.NewLimitedUploadStream(ctx, stream)
 	for i := int64(0); i < part; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -798,7 +800,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			byteSize = partSize
 		}
 
-		limitReader := io.LimitReader(stream, byteSize)
+		limitReader := io.LimitReader(rateLimited, byteSize)
 		// Update Progress
 		r := io.TeeReader(limitReader, p)
 		req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)

@@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 		log.Debugf("uploadData: %+v", uploadData)
 		requestURL := uploadData.RequestURL
 		uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
-		req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
+		req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
 		if err != nil {
 			return err
 		}
@@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 			req.Header.Set(v[0:i], v[i+1:])
 		}
 		r, err := base.HttpClient.Do(req)
-		log.Debugf("%+v %+v", r, r.Request.Header)
-		r.Body.Close()
 		if err != nil {
 			return err
 		}
+		log.Debugf("%+v %+v", r, r.Request.Header)
+		_ = r.Body.Close()
 		up(float64(i) * 100 / float64(count))
 	}
 	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))

@@ -19,6 +19,8 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
 	}
 
 	var erron RespErr
-	jsoniter.Unmarshal(body, &erron)
-	xml.Unmarshal(body, &erron)
+	_ = jsoniter.Unmarshal(body, &erron)
+	_ = xml.Unmarshal(body, &erron)
 	if erron.HasError() {
 		return nil, &erron
 	}
@@ -508,6 +510,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 
 	fileMd5 := md5.New()
 	silceMd5 := md5.New()
@@ -517,7 +520,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		if utils.IsCanceled(upCtx) {
 			break
 		}
-
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}
 		byteData := make([]byte, sliceSize)
 		if i == count {
 			byteData = byteData[:lastPartSize]
@@ -526,6 +531,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		// 读取块
 		silceMd5.Reset()
 		if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
+			sem.Release(1)
 			return nil, err
 		}
 
@@ -535,6 +541,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 		partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
 
 		threadG.Go(func(ctx context.Context) error {
+			defer sem.Release(1)
 			uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
 			if err != nil {
 				return err
@@ -542,7 +549,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 
 			// step.4 上传切片
 			uploadUrl := uploadUrls[0]
-			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
+			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
+				driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
 			if err != nil {
 				return err
 			}
@@ -794,6 +802,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 	if err != nil {
 		return nil, err
 	}
+	rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
 
 	// 创建上传会话
 	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
@@ -820,7 +829,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 			header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
 		}
 
-		_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
+		_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily)
 		if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
 			return nil, err
 		}
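
The Cloud189PC hunks above also introduce the second recurring change of this commit: a weighted semaphore acquired before each part upload is dispatched, capping in-flight chunks at three (the same sem pattern reappears in the baidu_netdisk, baidu_photo, and mopan hunks below). A standalone sketch of the idea, using errgroup as a stand-in for the tache thread group (uploadParts and its body are hypothetical):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func uploadParts(ctx context.Context, parts []int) error {
	g, ctx := errgroup.WithContext(ctx)
	sem := semaphore.NewWeighted(3) // at most 3 concurrent part uploads

	for _, p := range parts {
		// Block until a slot frees up; fails fast if ctx is canceled.
		if err := sem.Acquire(ctx, 1); err != nil {
			break
		}
		p := p
		g.Go(func() error {
			defer sem.Release(1) // always free the slot, even on error
			fmt.Println("uploading part", p)
			return nil
		})
	}
	return g.Wait()
}

func main() {
	_ = uploadParts(context.Background(), []int{1, 2, 3, 4, 5})
}

Acquiring in the loop rather than inside the goroutine is deliberate: it also throttles how fast part buffers are read and allocated, not just how many uploads run at once.
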

@@ -3,7 +3,6 @@ package alist_v3
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"path"
@@ -183,10 +182,11 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
-	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", &stream.ReaderUpdatingProgress{
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
 	})
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
 	if err != nil {
 		return err
 	}

@@ -14,13 +14,12 @@ import (
 	"os"
 	"time"
 
-	"github.com/alist-org/alist/v3/internal/stream"
-
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/cron"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
@@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 	}
 	if d.RapidUpload {
 		buf := bytes.NewBuffer(make([]byte, 0, 1024))
-		utils.CopyWithBufferN(buf, file, 1024)
+		_, err := utils.CopyWithBufferN(buf, file, 1024)
+		if err != nil {
+			return err
+		}
 		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
 		if localFile != nil {
 			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		file.Reader = localFile
 	}
 
+	rateLimited := driver.NewLimitedUploadStream(ctx, file)
 	for i, partInfo := range resp.PartInfoList {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if d.InternalUpload {
 			url = partInfo.InternalUploadUrl
 		}
-		req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
+		req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
 		if err != nil {
 			return err
 		}
@@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 		if err != nil {
 			return err
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		if count > 0 {
 			up(float64(i) * 100 / float64(count))
 		}

@@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
 	if err != nil {
 		return err
 	}
-	res.Body.Close()
+	_ = res.Body.Close()
 	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
 		return fmt.Errorf("upload status: %d", res.StatusCode)
 	}
@@ -251,8 +251,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 			rd = utils.NewMultiReadable(srd)
 		}
 		err = retry.Do(func() error {
-			rd.Reset()
-			return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
+			_ = rd.Reset()
+			rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
+			return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
 		},
 			retry.Attempts(3),
 			retry.DelayType(retry.BackOffDelay),

@@ -12,6 +12,8 @@ import (
 	"strconv"
 	"time"
 
+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -263,16 +265,21 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}
 
 		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
 		if partseq+1 == count {
 			byteSize = lastBlockSize
 		}
 		threadG.Go(func(ctx context.Context) error {
+			defer sem.Release(1)
 			params := map[string]string{
 				"method":       "upload",
 				"access_token": d.AccessToken,
@@ -281,7 +288,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 				"uploadid":     precreateResp.Uploadid,
 				"partseq":      strconv.Itoa(partseq),
 			}
-			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+			err := d.uploadSlice(ctx, params, stream.GetName(),
+				driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
 			if err != nil {
 				return err
 			}

@@ -13,6 +13,8 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -314,10 +316,14 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}
 
 		i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
 		if partseq+1 == count {
@@ -325,6 +331,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		}
 
 		threadG.Go(func(ctx context.Context) error {
+			defer sem.Release(1)
 			uploadParams := map[string]string{
 				"method": "upload",
 				"path":   params["path"],
@@ -335,7 +342,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 			_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
 				r.SetContext(ctx)
 				r.SetQueryParams(uploadParams)
-				r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+				r.SetFileReader("file", stream.GetName(),
+					driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
 			}, nil)
 			if err != nil {
 				return err

@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/go-resty/resty/v2"
 )
 
@@ -26,7 +27,7 @@ func InitClient() {
 	NoRedirectClient.SetHeader("user-agent", UserAgent)
 
 	RestyClient = NewRestyClient()
-	HttpClient = NewHttpClient()
+	HttpClient = net.NewHttpClient()
 }
 
 func NewRestyClient() *resty.Client {
@@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client {
 		SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
 	return client
 }
-
-func NewHttpClient() *http.Client {
-	return &http.Client{
-		Timeout: time.Hour * 48,
-		Transport: &http.Transport{
-			Proxy:           http.ProxyFromEnvironment,
-			TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
-		},
-	}
-}

@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"mime/multipart"
 	"net/http"
@@ -249,13 +248,13 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
 	if err != nil {
 		return err
 	}
-	r := &stream.ReaderUpdatingProgress{
-		Reader: &stream.SimpleReaderWithSize{
+	r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
 			Reader: body,
 			Size:   int64(body.Len()),
 		},
 		UpdateProgress: up,
-	}
+	})
 	req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
 	if err != nil {
 		return err

@@ -1,7 +1,9 @@
 package cloudreve
 
 import (
+	"bytes"
 	"context"
+	"errors"
 	"io"
 	"net/http"
 	"path"
@@ -173,7 +175,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 			var n int
 			buf = make([]byte, chunkSize)
 			n, err = io.ReadAtLeast(stream, buf, chunkSize)
-			if err != nil && err != io.ErrUnexpectedEOF {
+			if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
 				if err == io.EOF {
 					return nil
 				}
@@ -186,7 +188,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 		err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
 			req.SetHeader("Content-Type", "application/octet-stream")
 			req.SetHeader("Content-Length", strconv.Itoa(n))
-			req.SetBody(buf)
+			req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)))
 		}, nil)
 		if err != nil {
 			break

@@ -100,7 +100,7 @@ func (d *Cloudreve) login() error {
 		if err == nil {
 			break
 		}
-		if err != nil && err.Error() != "CAPTCHA not match." {
+		if err.Error() != "CAPTCHA not match." {
 			break
 		}
 	}
@@ -202,7 +202,8 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
+			driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 		if err != nil {
 			return err
 		}
@@ -214,7 +215,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 		if err != nil {
 			return err
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		up(float64(finish) * 100 / float64(stream.GetSize()))
 		chunk++
 	}
@@ -241,7 +242,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 		if err != nil {
 			return err
 		}
@@ -256,10 +257,10 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
 		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
 			data, _ := io.ReadAll(res.Body)
-			res.Body.Close()
+			_ = res.Body.Close()
 			return errors.New(string(data))
 		}
-		res.Body.Close()
+		_ = res.Body.Close()
 		up(float64(finish) * 100 / float64(stream.GetSize()))
 	}
 	// 上传成功发送回调请求

@@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		}
 
 		url := d.contentBase + "/2/files/upload_session/append_v2"
-		reader := io.LimitReader(stream, PartSize)
+		reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
 		req, err := http.NewRequest(http.MethodPost, url, reader)
 		if err != nil {
 			log.Errorf("failed to update file when append to upload session, err: %+v", err)
@@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 			return err
 		}
 		_ = res.Body.Close()
-
-		if count > 0 {
-			up(float64(i+1) * 100 / float64(count))
-		}
-
+		up(float64(i+1) * 100 / float64(count))
 		offset += byteSize
-
 	}
 	// 3.finish
 	toPath := dstDir.GetPath() + "/" + stream.GetName()

@@ -2,7 +2,6 @@ package ftp
 
 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	stdpath "path"
 
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -120,13 +119,10 @@ func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, u
 		return err
 	}
 	path := stdpath.Join(dstDir.GetPath(), s.GetName())
-	return d.conn.Stor(encode(path, d.Encoding), &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	})
+	return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}))
 }
 
 var _ driver.Driver = (*FTP)(nil)

@@ -16,7 +16,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"
@@ -676,13 +675,13 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up
 	afterContentReader := strings.NewReader(afterContent)
 	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
 		fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo),
-		&stream.ReaderUpdatingProgress{
-			Reader: &stream.SimpleReaderWithSize{
+		driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader: &driver.SimpleReaderWithSize{
 				Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
 				Size:   length,
 			},
 			UpdateProgress: up,
-		})
+		}))
 	if err != nil {
 		return "", err
 	}
@@ -698,6 +697,7 @@ func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.Up
 	if err != nil {
 		return "", err
 	}
+	defer res.Body.Close()
 	resBody, err := io.ReadAll(res.Body)
 	if err != nil {
 		return "", err

@@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
+				SetBody(driver.NewLimitedUploadStream(ctx, stream))
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)

@@ -11,10 +11,10 @@ import (
 	"strconv"
 	"time"
 
-	"github.com/alist-org/alist/v3/pkg/http_range"
-
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"github.com/golang-jwt/jwt/v4"
@@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error {
 		}
 		d.AccessToken = resp.AccessToken
 		return nil
-	}
-	if gdsaFileErr != nil && os.IsExist(gdsaFileErr) {
+	} else if os.IsExist(gdsaFileErr) {
 		return gdsaFileErr
 	}
 	url := "https://www.googleapis.com/oauth2/v4/token"
@@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer
 	if err != nil {
 		return err
 	}
+	reader = driver.NewLimitedUploadStream(ctx, reader)
 	_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
 		req.SetHeaders(map[string]string{
 			"Content-Length": strconv.FormatInt(chunkSize, 10),

@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	}
 
 	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(stream).SetContext(ctx)
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil, postHeaders)
 
 	if err != nil {

@@ -392,10 +392,11 @@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model
 	if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
 		uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1)
 	}
+	reader := driver.NewLimitedUploadStream(ctx, fileStream)
 	_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
 		Bucket: aws.String(result.Bucket),
 		Key:    aws.String(result.Key),
-		Body:   io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)),
+		Body:   io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)),
 	})
 	return nil, err

@@ -309,13 +309,13 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 	upToken := utils.Json.Get(res, "upToken").ToString()
 	now := time.Now()
 	key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
-	reader := &stream.ReaderUpdatingProgress{
-		Reader: &stream.SimpleReaderWithSize{
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
 			Reader: tempFile,
 			Size:   s.GetSize(),
 		},
 		UpdateProgress: up,
-	}
+	})
 	var token string
 	if s.GetSize() <= DefaultPartSize {
 		res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{

@@ -3,7 +3,6 @@ package ipfs
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/url"
 	stdpath "path"
 	"path/filepath"
@@ -111,13 +110,10 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
 
 func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	// TODO upload file, optional
-	_, err := d.sh.Add(&stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}, ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
+	_, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
 	return err
 }

@@ -3,9 +3,6 @@ package kodbox
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
-	"github.com/alist-org/alist/v3/pkg/utils"
-	"github.com/go-resty/resty/v2"
 	"net/http"
 	"path/filepath"
 	"strings"
@@ -13,6 +10,8 @@ import (
 
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
 )
 
 type KodBox struct {
@@ -229,10 +228,10 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error {
 func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	var resp *CommonResp
 	_, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) {
-		r := &stream.ReaderUpdatingProgress{
+		r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         s,
 			UpdateProgress: up,
-		}
+		})
 		req.SetFileReader("file", s.GetName(), r).
 			SetResult(&resp).
 			SetFormData(map[string]string{

@@ -2,7 +2,6 @@ package lanzou
 
 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 
 	"github.com/alist-org/alist/v3/drivers/base"
@@ -213,6 +212,10 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer
 	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
 		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
+			reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+				Reader:         s,
+				UpdateProgress: up,
+			})
 			req.SetFormData(map[string]string{
 				"task":           "1",
 				"vie":            "2",
@@ -220,10 +223,7 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer
 				"id":             "WU_FILE_0",
 				"name":           s.GetName(),
 				"folder_id_bb_n": dstDir.GetID(),
-			}).SetFileReader("upload_file", s.GetName(), &stream.ReaderUpdatingProgress{
-				Reader:         s,
-				UpdateProgress: up,
-			}).SetContext(ctx)
+			}).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx)
 		}, &resp, true)
 		if err != nil {
 			return nil, err

@@ -320,7 +320,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 		Build()
 
 	// 发起请求
-	uploadLimit.Wait(ctx)
+	err := uploadLimit.Wait(ctx)
+	if err != nil {
+		return nil, err
+	}
 	resp, err := c.client.Drive.File.UploadPrepare(ctx, req)
 	if err != nil {
 		return nil, err
@@ -341,7 +344,7 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			length = stream.GetSize() - int64(i*blockSize)
 		}
 
-		reader := io.LimitReader(stream, length)
+		reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length))
 
 		req := larkdrive.NewUploadPartFileReqBuilder().
 			Body(larkdrive.NewUploadPartFileReqBodyBuilder().
@@ -353,7 +356,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			Build()
 
 		// 发起请求
-		uploadLimit.Wait(ctx)
+		err = uploadLimit.Wait(ctx)
+		if err != nil {
+			return nil, err
+		}
 		resp, err := c.client.Drive.File.UploadPart(ctx, req)
 
 		if err != nil {

@@ -5,7 +5,6 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"strconv"
@@ -195,13 +194,13 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileS
 	input := &s3manager.UploadInput{
 		Bucket: &resp.Data.Bucket,
 		Key:    &resp.Data.Object,
-		Body: &stream.ReaderUpdatingProgress{
-			Reader: &stream.SimpleReaderWithSize{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader: &driver.SimpleReaderWithSize{
 				Reader: tempFile,
 				Size:   file.GetSize(),
 			},
 			UpdateProgress: up,
-		},
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {

@@ -156,6 +156,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 		return err
 	}
 
+	reader := driver.NewLimitedUploadStream(ctx, stream)
 	for id := 0; id < u.Chunks(); id++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -165,7 +166,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			return err
 		}
 		chunk := make([]byte, chkSize)
-		n, err := io.ReadFull(stream, chunk)
+		n, err := io.ReadFull(reader, chunk)
 		if err != nil && err != io.EOF {
 			return err
 		}

@@ -64,7 +64,7 @@ func (d *Misskey) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	return d.put(dstDir, stream, up)
+	return d.put(ctx, dstDir, stream, up)
 }
 
 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {

@@ -1,7 +1,6 @@
 package misskey
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"io"
@@ -190,16 +189,16 @@ func (d *Misskey) remove(obj model.Obj) error {
 	}
 }
 
-func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	var file MFile
 
-	fileContent, err := io.ReadAll(stream)
-	if err != nil {
-		return nil, err
-	}
-
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         stream,
+		UpdateProgress: up,
+	})
 	req := base.RestyClient.R().
-		SetFileReader("file", stream.GetName(), io.NopCloser(bytes.NewReader(fileContent))).
+		SetContext(ctx).
+		SetFileReader("file", stream.GetName(), reader).
 		SetFormData(map[string]string{
 			"folderId": handleFolderId(dstDir).(string),
 			"name":     stream.GetName(),
@@ -207,7 +206,8 @@ func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.Upd
 			"isSensitive": "false",
 			"force":       "false",
 		}).
-		SetResult(&file).SetAuthToken(d.AccessToken)
+		SetResult(&file).
+		SetAuthToken(d.AccessToken)
 
 	resp, err := req.Post(d.Endpoint + "/api/drive/files/create")
 	if err != nil {

@@ -10,6 +10,8 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/sync/semaphore"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -301,6 +303,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
+	sem := semaphore.NewWeighted(3)
 
 	// step.3
 	parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
@@ -312,6 +315,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 		if utils.IsCanceled(upCtx) {
 			break
 		}
+		if err = sem.Acquire(ctx, 1); err != nil {
+			break
+		}
 		i, part, byteSize := i, part, initUpdload.PartSize
 		if part.PartNumber == uploadPartData.PartTotal {
 			byteSize = initUpdload.LastPartSize
@@ -319,7 +325,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 
 		// step.4
 		threadG.Go(func(ctx context.Context) error {
-			req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
+			defer sem.Release(1)
+			reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)
+			req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader))
 			if err != nil {
 				return err
 			}
@@ -328,7 +336,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 			if err != nil {
 				return err
 			}
-			resp.Body.Close()
+			_ = resp.Body.Close()
 			if resp.StatusCode != http.StatusOK {
 				return fmt.Errorf("upload err,code=%d", resp.StatusCode)
 			}

@@ -116,16 +116,3 @@ func (ch *Characteristic) merge(data map[string]string) map[string]interface{} {
 	}
 	return body
 }
-
-type InlineReadCloser struct {
-	io.Reader
-	io.Closer
-}
-
-func (rc *InlineReadCloser) Read(p []byte) (int, error) {
-	return rc.Reader.Read(p)
-}
-
-func (rc *InlineReadCloser) Close() error {
-	return rc.Closer.Close()
-}

@@ -2,8 +2,6 @@ package netease_music
 
 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/driver"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"path"
 	"regexp"
@@ -12,6 +10,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -69,13 +68,10 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error
 			opt.up = func(_ float64) {}
 		}
 		req.SetContentLength(true)
-		req.SetBody(&InlineReadCloser{
-			Reader: &stream.ReaderUpdatingProgress{
-				Reader:         opt.stream,
-				UpdateProgress: opt.up,
-			},
-			Closer: opt.stream,
-		})
+		req.SetBody(driver.NewLimitedUploadStream(opt.ctx, &driver.ReaderUpdatingProgress{
+			Reader:         opt.stream,
+			UpdateProgress: opt.up,
+		}))
 	} else {
 		req.SetFormData(data)
 	}

@@ -152,12 +152,8 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F
 	// 1. upload new file
 	// ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online
 	url := d.GetMetaUrl(false, filepath) + "/content"
-	data, err := io.ReadAll(stream)
-	if err != nil {
-		return err
-	}
-	_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
-		req.SetBody(data).SetContext(ctx)
+	_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil)
 	if err != nil {
 		return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err)
@@ -225,7 +221,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 		if err != nil {
 			return err
 		}

@@ -140,12 +140,8 @@ func (d *OnedriveAPP) GetFile(path string) (*File, error) {
 
 func (d *OnedriveAPP) upSmall(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) error {
 	url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/content"
-	data, err := io.ReadAll(stream)
-	if err != nil {
-		return err
-	}
-	_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
-		req.SetBody(data).SetContext(ctx)
+	_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
+		req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
 	}, nil)
 	return err
 }
@@ -175,7 +171,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
 		if err != nil {
 			return err
 		}
-		req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
+		req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
 		if err != nil {
 			return err
 		}

@@ -10,7 +10,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 	jsoniter "github.com/json-iterator/go"
@@ -430,13 +429,10 @@ func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.File
 		return err
 	}
 
-	err = bucket.PutObject(params.Key, &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}, OssOption(params)...)
+	err = bucket.PutObject(params.Key, driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	}), OssOption(params)...)
 	if err != nil {
 		return err
 	}
@@ -522,11 +518,8 @@ func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSi
 			continue
 		}
 
-		b := bytes.NewBuffer(buf)
-		if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
-			Reader: b,
-			Ctx:    ctx,
-		}, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
+		b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf))
+		if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
 			break
 		}
 	}

@@ -1,6 +1,7 @@
 package quark
 
 import (
+	"bytes"
 	"context"
 	"crypto/md5"
 	"crypto/sha1"
@@ -178,7 +179,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 	}
 	// part up
 	partSize := pre.Metadata.PartSize
-	var bytes []byte
+	var part []byte
 	md5s := make([]string, 0)
 	defaultBytes := make([]byte, partSize)
 	total := stream.GetSize()
@@ -189,17 +190,18 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 			return ctx.Err()
 		}
 		if left > int64(partSize) {
-			bytes = defaultBytes
+			part = defaultBytes
 		} else {
-			bytes = make([]byte, left)
+			part = make([]byte, left)
 		}
-		_, err := io.ReadFull(tempFile, bytes)
+		_, err := io.ReadFull(tempFile, part)
 		if err != nil {
 			return err
 		}
-		left -= int64(len(bytes))
+		left -= int64(len(part))
 		log.Debugf("left: %d", left)
-		m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, bytes)
+		reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
+		m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)
 		//m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
 		if err != nil {
 			return err

@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"errors"
 	"fmt"
+	"io"
 	"net/http"
 	"strconv"
 	"strings"
@@ -119,7 +120,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) {
 	return resp.Data.Finish, err
 }
 
-func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes []byte) (string, error) {
+func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) {
 	//func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
 	timeStr := time.Now().UTC().Format(http.TimeFormat)
 	data := base.Json{
@@ -163,6 +164,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
 		"partNumber": strconv.Itoa(partNumber),
 		"uploadId":   pre.Data.UploadId,
 	}).SetBody(bytes).Put(u)
+	if err != nil {
+		return "", err
+	}
 	if res.StatusCode() != 200 {
 		return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
 	}
@@ -230,6 +234,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
 		SetQueryParams(map[string]string{
 			"uploadId": pre.Data.UploadId,
 		}).SetBody(body).Post(u)
+	if err != nil {
+		return err
+	}
 	if res.StatusCode() != 200 {
 		return fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
 	}

@@ -12,7 +12,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
-	istream "github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/aws/aws-sdk-go/aws"
@@ -387,8 +386,8 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 	}
 	uploader := s3manager.NewUploader(s)
 	buf := make([]byte, 1024*1024*2)
-	fup := &istream.ReaderUpdatingProgress{
-		Reader: &istream.SimpleReaderWithSize{
+	fup := &driver.ReaderUpdatingProgress{
+		Reader: &driver.SimpleReaderWithSize{
 			Reader: f,
 			Size:   int64(len(buf)),
 		},
@@ -402,12 +401,19 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 		}
 		return nil, err
 	}
+	reader := bytes.NewReader(buf[:n])
 	_, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{
 		UploadId:   &uploadInitResp.Data.UploadID,
 		Key:        &uploadInitResp.Data.Key,
 		Bucket:     &uploadInitResp.Data.Bucket,
 		PartNumber: aws.Int64(partNumber),
-		Body:       bytes.NewReader(buf[:n]),
+		Body: struct {
+			*driver.RateLimitReader
+			io.Seeker
+		}{
+			RateLimitReader: driver.NewLimitedUploadStream(ctx, reader),
+			Seeker:          reader,
+		},
 	})
 	if err != nil {
 		return nil, err
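
A design note on the Quqi hunk above: the AWS SDK's UploadPartInput.Body is an io.ReadSeeker, because the SDK rewinds the body to sign the request and to retry a failed part. The rate-limit wrapper only exposes Read, so the change grafts the original reader's Seek back on via an anonymous struct. A toy illustration of the trick (rateLimited is a hypothetical stand-in):

package main

import (
	"bytes"
	"io"
)

// rateLimited stands in for driver.NewLimitedUploadStream: it returns a
// plain io.Reader, which is why Seek must be grafted back on below.
func rateLimited(r io.Reader) io.Reader { return r }

func main() {
	reader := bytes.NewReader([]byte("part payload"))
	// The anonymous struct embeds both interfaces, so the value
	// satisfies io.ReadSeeker even though the wrapper alone does not.
	var body io.ReadSeeker = struct {
		io.Reader
		io.Seeker
	}{Reader: rateLimited(reader), Seeker: reader}
	_, _ = io.Copy(io.Discard, body)  // first (rate-limited) read
	_, _ = body.Seek(0, io.SeekStart) // the SDK can rewind for a retry
}

After a Seek, subsequent reads still flow through the rate-limited Read, so retried bytes count against the bandwidth budget as well.
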
|
@ -4,18 +4,17 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"io"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/pkg/cron"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/pkg/cron"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
@ -174,10 +173,10 @@ func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up
|
||||
input := &s3manager.UploadInput{
|
||||
Bucket: &d.Bucket,
|
||||
Key: &key,
|
||||
Body: &stream.ReaderUpdatingProgress{
|
||||
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
|
||||
Reader: s,
|
||||
UpdateProgress: up,
|
||||
},
|
||||
}),
|
||||
ContentType: &contentType,
|
||||
}
|
||||
_, err := uploader.UploadWithContext(ctx, input)
|
||||
|

@@ -3,7 +3,6 @@ package seafile
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"strings"
 	"time"
@@ -215,10 +214,10 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
 	u := string(res)
 	u = u[1 : len(u)-1] // remove quotes
 	_, err = d.request(http.MethodPost, u, func(req *resty.Request) {
-		r := &stream.ReaderUpdatingProgress{
+		r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         s,
 			UpdateProgress: up,
-		}
+		})
 		req.SetFileReader("file", s.GetName(), r).
 			SetFormData(map[string]string{
 				"parent_dir": path,

@@ -111,7 +111,7 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 	defer func() {
 		_ = dstFile.Close()
 	}()
-	err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up)
+	err = utils.CopyWithCtx(ctx, dstFile, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
 	return err
 }

@@ -186,7 +186,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
 			_ = d.fs.Remove(fullPath)
 		}
 	}()
-	err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up)
+	err = utils.CopyWithCtx(ctx, out, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
 	if err != nil {
 		return err
 	}

@@ -148,7 +148,7 @@ func (d *Teambition) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	var newFile *FileUpload
 	if stream.GetSize() <= 20971520 {
 		// post upload
-		newFile, err = d.upload(ctx, stream, token)
+		newFile, err = d.upload(ctx, stream, token, up)
 	} else {
 		// chunk upload
 		//err = base.ErrNotImplement

@@ -1,6 +1,7 @@
 package teambition
 
 import (
+	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -120,11 +121,15 @@ func (d *Teambition) getFiles(parentId string) ([]model.Obj, error) {
 	return files, nil
 }
 
-func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string) (*FileUpload, error) {
+func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string, up driver.UpdateProgress) (*FileUpload, error) {
 	prefix := "tcs"
 	if d.isInternational() {
 		prefix = "us-tcs"
 	}
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         file,
+		UpdateProgress: up,
+	})
 	var newFile FileUpload
 	res, err := base.RestyClient.R().
 		SetContext(ctx).
@@ -134,7 +139,8 @@ func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token
 			"type":             file.GetMimetype(),
 			"size":             strconv.FormatInt(file.GetSize(), 10),
 			"lastModifiedDate": time.Now().Format("Mon Jan 02 2006 15:04:05 GMT+0800 (中国标准时间)"),
-		}).SetMultipartField("file", file.GetName(), file.GetMimetype(), file).
+		}).
+		SetMultipartField("file", file.GetName(), file.GetMimetype(), reader).
 		Post(fmt.Sprintf("https://%s.teambition.net/upload", prefix))
 	if err != nil {
 		return nil, err
@@ -183,10 +189,9 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
 				"Authorization": token,
 				"Content-Type":  "application/octet-stream",
 				"Referer":       referer,
-			}).SetBody(chunkData).Post(u)
-		if err != nil {
-			return nil, err
-		}
+			}).
+			SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(chunkData))).
+			Post(u)
 		if err != nil {
 			return nil, err
 		}
@@ -252,7 +257,10 @@ func (d *Teambition) newUpload(ctx context.Context, dstDir model.Obj, stream mod
 		Key:                &uploadToken.Upload.Key,
 		ContentDisposition: &uploadToken.Upload.ContentDisposition,
 		ContentType:        &uploadToken.Upload.ContentType,
-		Body:               stream,
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+			Reader:         stream,
+			UpdateProgress: up,
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {

@@ -228,7 +228,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	res, err := base.RestyClient.R().
 		SetContext(ctx).
 		SetQueryParams(params).
-		SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
+		SetFileReader("file", stream.GetName(), driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))).
 		SetHeader("Cookie", d.Cookie).
 		Post(u)
 	if err != nil {

@@ -3,7 +3,6 @@ package thunder
 import (
 	"context"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"strconv"
 	"strings"
@@ -383,10 +382,10 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.Fi
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	})
 	return err
 }

@@ -508,7 +508,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body:    io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)),
+		Body:    driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))),
 	})
 	return err
 }

@@ -8,7 +8,6 @@ import (
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 	"github.com/aws/aws-sdk-go/aws"
@@ -414,10 +413,10 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.F
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	})
 	return err
 }

@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -59,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs)
 		return nil, err
 	}
 	var jsonData any
-	json.Unmarshal(data, &jsonData)
+	err = json.Unmarshal(data, &jsonData)
 	if err != nil {
 		return nil, err
 	}
@@ -122,10 +121,10 @@ func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStream
 	query.Add("guid", guid)
 	query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
 	endpoint.RawQuery = query.Encode()
-	progressReader := &stream.ReaderUpdatingProgress{
+	progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
-	}
+	})
 	req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
 	if err != nil {
 		return err

@@ -3,7 +3,6 @@ package url_tree
 import (
 	"context"
 	"errors"
-	"github.com/alist-org/alist/v3/internal/op"
 	stdpath "path"
 	"strings"
 	"sync"
@@ -11,6 +10,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )

@@ -126,13 +126,10 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error {
 func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	return d.client.Put(&upyun.PutObjectConfig{
 		Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false),
-		Reader: &stream.ReaderWithCtx{
-			Reader: &stream.ReaderUpdatingProgress{
-				Reader:         s,
-				UpdateProgress: up,
-			},
-			Ctx: ctx,
-		},
+		Reader: driver.NewLimitedUploadStream(ctx, &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		}),
 	})
 }

@@ -278,7 +278,8 @@ func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream mode
 	input := &s3manager.UploadInput{
 		Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)),
 		Key:    &params.Video.StoragePath,
-		Body:   io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up))),
+		Body: driver.NewLimitedUploadStream(ctx,
+			io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {

@@ -2,7 +2,6 @@ package webdav
 
 import (
 	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"os"
 	"path"
@@ -99,13 +98,11 @@ func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer
 		r.Header.Set("Content-Type", s.GetMimetype())
 		r.ContentLength = s.GetSize()
 	}
-	err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), &stream.ReaderWithCtx{
-		Reader: &stream.ReaderUpdatingProgress{
-			Reader:         s,
-			UpdateProgress: up,
-		},
-		Ctx: ctx,
-	}, 0644, callback)
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	})
+	err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), reader, 0644, callback)
 	return err
 }

@@ -70,7 +70,7 @@ func (d *WeiYun) Init(ctx context.Context) error {
 	if d.client.LoginType() == 1 {
 		d.cron = cron.NewCron(time.Minute * 5)
 		d.cron.Do(func() {
-			d.client.KeepAlive()
+			_ = d.client.KeepAlive()
 		})
 	}
 
@@ -364,12 +364,13 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		threadG.Go(func(ctx context.Context) error {
 			for {
 				channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
+				len64 := int64(channel.Len)
 				upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
-					io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
+					driver.NewLimitedUploadStream(ctx, io.NewSectionReader(file, channel.Offset, len64)))
 				if err != nil {
 					return err
 				}
-				cur := total.Add(int64(channel.Len))
+				cur := total.Add(len64)
 				up(float64(cur) * 100.0 / float64(stream.GetSize()))
 				// 上传完成
 				if upData.UploadState != 1 {

@@ -155,7 +155,7 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 	_, err := d.client.Upload2C(d.getSpaceType(), wopan.Upload2CFile{
 		Name:        stream.GetName(),
 		Size:        stream.GetSize(),
-		Content:     stream,
+		Content:     driver.NewLimitedUploadStream(ctx, stream),
 		ContentType: stream.GetMimetype(),
 	}, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{
 		OnProgress: func(current, total int64) {

@@ -2,7 +2,6 @@ package yandex_disk
 
 import (
	"context"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"path"
 	"strconv"
@@ -118,10 +117,11 @@ func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStre
 	if err != nil {
 		return err
 	}
-	req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, &stream.ReaderUpdatingProgress{
+	reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
 	})
+	req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, reader)
 	if err != nil {
 		return err
 	}