feat(traffic): support limiting task worker count & file stream rate (#7948)

* feat: set task worker count & client stream rate limit

* feat: server stream rate limit

* upgrade xhofe/tache

* .
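
The worker-count half of the change goes through the xhofe/tache task framework (hence the upgrade bullet above); tache's actual API is not visible in this diff, so the sketch below only illustrates the underlying bounded-concurrency idea with a buffered-channel semaphore. All names here are illustrative, not tache's:

package tasks

import "sync"

// RunWithWorkerLimit runs every task, but with at most maxWorkers of them
// in flight at once, using a buffered channel as a counting semaphore.
func RunWithWorkerLimit(maxWorkers int, tasks []func()) {
	sem := make(chan struct{}, maxWorkers)
	var wg sync.WaitGroup
	for _, task := range tasks {
		wg.Add(1)
		sem <- struct{}{} // blocks once maxWorkers tasks are running
		go func(t func()) {
			defer wg.Done()
			defer func() { <-sem }()
			t()
		}(task)
	}
	wg.Wait()
}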
Author: KirCute_ECT
Date: 2025-02-16 12:22:11 +08:00
Committed by: GitHub
Parent: 399336b33c
Commit: 3b71500f23

79 changed files with 803 additions and 327 deletions
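
At the core of the client and server stream rate limits is a reader wrapper that meters bytes through a token bucket. A minimal sketch, assuming golang.org/x/time/rate; the RateLimitReader name mirrors the idea but is not guaranteed to match alist's internal type:

package stream

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// RateLimitReader meters every Read through a shared token-bucket
// limiter, blocking until the bytes just read are allowed through.
type RateLimitReader struct {
	Reader  io.Reader
	Limiter *rate.Limiter // tokens are bytes per second
	Ctx     context.Context
}

func (r *RateLimitReader) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	if n > 0 && r.Limiter != nil {
		// WaitN fails if n exceeds the burst size, so the burst should
		// be at least as large as the biggest buffer callers pass in.
		if lerr := r.Limiter.WaitN(r.Ctx, n); lerr != nil {
			return n, lerr
		}
	}
	return n, err
}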


@@ -6,7 +6,6 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
-	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -249,10 +248,10 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
 	input := &s3manager.UploadInput{
 		Bucket: &resp.Data.Bucket,
 		Key:    &resp.Data.Key,
-		Body: &stream.ReaderUpdatingProgress{
+		Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 			Reader:         file,
 			UpdateProgress: up,
-		},
+		}),
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {
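
In the hunk above, the progress-reporting reader (now driver.ReaderUpdatingProgress, which is why the internal/stream import goes away) is wrapped by driver.NewLimitedUploadStream before being handed to the S3 uploader, so the whole upload body is metered. A hypothetical composition of such a helper, reusing the RateLimitReader sketched earlier with an illustrative package-level ServerUploadLimit; alist's actual helper may differ:

// ServerUploadLimit is an illustrative global limiter shared by all uploads.
var ServerUploadLimit *rate.Limiter

// NewLimitedUploadStream layers the server-side upload limiter over any
// reader; with no limiter configured, the reader passes through untouched.
func NewLimitedUploadStream(ctx context.Context, r io.Reader) io.Reader {
	if ServerUploadLimit == nil {
		return r
	}
	return &RateLimitReader{Reader: r, Limiter: ServerUploadLimit, Ctx: ctx}
}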


@@ -81,6 +81,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		batchSize = 10
 		getS3UploadUrl = d.getS3PreSignedUrls
 	}
+	limited := driver.NewLimitedUploadStream(ctx, file)
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -103,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		if j == chunkCount {
 			curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 		}
-		err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(file, chunkSize), curSize, false, getS3UploadUrl)
+		err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
 		if err != nil {
 			return err
 		}
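
The detail worth noticing in this hunk: limited wraps file once, before the part loop, so every chunk draws from the same rate budget; io.LimitReader(limited, chunkSize) then only caps how many bytes each part consumes. Wrapping inside the loop would also compile, but sharing one wrapper keeps a single limiter governing the whole transfer. The same pattern in isolation, where uploadPart is a hypothetical stand-in for d.uploadS3Chunk:

limited := NewLimitedUploadStream(ctx, file) // one limiter for the whole file
for part := 1; part <= chunkCount; part++ {
	// Each part reads at most chunkSize bytes; the shared limiter still
	// throttles the aggregate rate across all parts.
	if err := uploadPart(ctx, part, io.LimitReader(limited, chunkSize)); err != nil {
		return err
	}
}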