feat(traffic): support limit task worker count & file stream rate (#7948)
* feat: set task workers num & client stream rate limit
* feat: server stream rate limit
* upgrade xhofe/tache
* .
@@ -77,6 +77,29 @@ type Remove interface {
}

type Put interface {
	// Put a file (provided as a FileStreamer) into the driver
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
	//    (1) Use request methods that carry context, such as the following:
	//        a. http.NewRequestWithContext
	//        b. resty.Request.SetContext
	//        c. s3manager.Uploader.UploadWithContext
	//        d. utils.CopyWithCtx
	//    (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	//    (3) Use `utils.IsCanceled` to check whether the upload has been canceled during the upload process;
	//        this is typically applicable to chunked uploads.
	// 2. Report upload progress (via `up`) in real time. There are three recommended ways:
	//    (1) Use `utils.CopyWithCtx`
	//    (2) Use `driver.ReaderUpdatingProgress`
	//    (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down the upload speed (via `stream.ServerUploadLimit`). This requires wrapping the read stream
	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	//    before uploading the file or file chunks. Alternatively, you can call `driver.ServerUploadLimitWaitN`
	//    directly if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use an `errgroup.Group` to upload each chunk in parallel, you should consider using a weighted
	// semaphore such as `semaphore.Weighted` to limit the maximum number of concurrent uploads, preventing
	// excessive memory usage caused by buffering too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
}

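To make the three requirements above concrete, here is a minimal sketch of a `Put` implementation (not taken from this commit). The `Put` signature, `driver.NewProgress`, `driver.NewLimitedUploadStream`, and `http.NewRequestWithContext` appear in this diff or the standard library; `ExampleDriver`, the upload URL, and the assumption that a `model.FileStreamer` can be read directly and exposes `GetSize()`/`GetName()` are illustrative only.

package example

import (
	"context"
	"fmt"
	"io"
	"net/http"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
)

// ExampleDriver is a hypothetical driver used only for illustration.
type ExampleDriver struct{}

func (d *ExampleDriver) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
	// 2. Report progress: tee every read through a driver.Progress writer (way (3) above).
	reader := io.TeeReader(file, driver.NewProgress(file.GetSize(), up))

	// 3. Respect the server-side upload rate limit before the bytes hit the network.
	limited := driver.NewLimitedUploadStream(ctx, reader)

	// 1. Cancellation: the request carries ctx, so the upload aborts once <-ctx.Done() returns.
	req, err := http.NewRequestWithContext(ctx, http.MethodPut,
		"https://example.invalid/upload/"+file.GetName(), // assumed endpoint, for illustration
		limited)
	if err != nil {
		return err
	}
	req.ContentLength = file.GetSize()

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed: %s", resp.Status)
	}
	return nil
}

Drivers built on resty or the AWS SDK would instead rely on `resty.Request.SetContext` or `s3manager.Uploader.UploadWithContext` for cancellation, as listed above.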
@@ -113,6 +136,29 @@ type CopyResult interface {
}

type PutResult interface {
	// Put a file (provided as a FileStreamer) into the driver and return the put obj
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
	//    (1) Use request methods that carry context, such as the following:
	//        a. http.NewRequestWithContext
	//        b. resty.Request.SetContext
	//        c. s3manager.Uploader.UploadWithContext
	//        d. utils.CopyWithCtx
	//    (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	//    (3) Use `utils.IsCanceled` to check whether the upload has been canceled during the upload process;
	//        this is typically applicable to chunked uploads.
	// 2. Report upload progress (via `up`) in real time. There are three recommended ways:
	//    (1) Use `utils.CopyWithCtx`
	//    (2) Use `driver.ReaderUpdatingProgress`
	//    (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down the upload speed (via `stream.ServerUploadLimit`). This requires wrapping the read stream
	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	//    before uploading the file or file chunks. Alternatively, you can call `driver.ServerUploadLimitWaitN`
	//    directly if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use an `errgroup.Group` to upload each chunk in parallel, you should consider using a weighted
	// semaphore such as `semaphore.Weighted` to limit the maximum number of concurrent uploads, preventing
	// excessive memory usage caused by buffering too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
}

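The NOTE above is easier to see in code. The sketch below shows the parallel-chunk pattern it describes, assuming chunks small enough that `driver.ServerUploadLimitWaitN` applies. The `uploadChunk` callback, the concurrency limit of 3, and the `pkg/utils` import path for `utils.IsCanceled` are assumptions; `errgroup.Group`, `semaphore.Weighted`, `utils.IsCanceled`, and `driver.ServerUploadLimitWaitN` are the helpers the comment names.

package example

import (
	"context"
	"io"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils" // assumed location of utils.IsCanceled
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

// uploadInChunks sketches the pattern from the NOTE above: read chunks sequentially,
// upload them in parallel, and bound how many chunks are buffered at any time.
func uploadInChunks(ctx context.Context, file model.FileStreamer, chunkSize int64,
	uploadChunk func(context.Context, []byte) error) error {
	g, ctx := errgroup.WithContext(ctx)
	sem := semaphore.NewWeighted(3) // at most 3 chunks buffered or in flight at once

	for offset := int64(0); offset < file.GetSize(); offset += chunkSize {
		if utils.IsCanceled(ctx) { // stop scheduling new chunks once the task is canceled
			break
		}
		// Acquire before reading, so no more than 3 chunks are ever held in memory.
		if err := sem.Acquire(ctx, 1); err != nil {
			_ = g.Wait() // let in-flight chunks finish before reporting the error
			return err
		}
		buf := make([]byte, min(chunkSize, file.GetSize()-offset))
		if _, err := io.ReadFull(file, buf); err != nil {
			sem.Release(1)
			_ = g.Wait()
			return err
		}
		g.Go(func() error {
			defer sem.Release(1)
			// For small chunks, wait on the shared server limit directly instead of
			// wrapping the reader in a RateLimitReader.
			if err := driver.ServerUploadLimitWaitN(ctx, len(buf)); err != nil {
				return err
			}
			return uploadChunk(ctx, buf) // hypothetical per-chunk upload call
		})
	}
	return g.Wait()
}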
@@ -159,28 +205,6 @@ type ArchiveDecompressResult interface {
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
}

type UpdateProgress = model.UpdateProgress

type Progress struct {
	Total int64
	Done  int64
	up    UpdateProgress
}

func (p *Progress) Write(b []byte) (n int, err error) {
	n = len(b)
	p.Done += int64(n)
	p.up(float64(p.Done) / float64(p.Total) * 100)
	return
}

func NewProgress(total int64, up UpdateProgress) *Progress {
	return &Progress{
		Total: total,
		up:    up,
	}
}

type Reference interface {
	InitReference(storage Driver) error
}

internal/driver/utils.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package driver

import (
	"context"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
	"io"
)

type UpdateProgress = model.UpdateProgress

type Progress struct {
	Total int64
	Done  int64
	up    UpdateProgress
}

func (p *Progress) Write(b []byte) (n int, err error) {
	n = len(b)
	p.Done += int64(n)
	p.up(float64(p.Done) / float64(p.Total) * 100)
	return
}

func NewProgress(total int64, up UpdateProgress) *Progress {
	return &Progress{
		Total: total,
		up:    up,
	}
}

type RateLimitReader = stream.RateLimitReader

type RateLimitWriter = stream.RateLimitWriter

type RateLimitFile = stream.RateLimitFile

func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader {
	return &RateLimitReader{
		Reader:  r,
		Limiter: stream.ServerUploadLimit,
		Ctx:     ctx,
	}
}

func NewLimitedUploadFile(ctx context.Context, f model.File) *RateLimitFile {
	return &RateLimitFile{
		File:    f,
		Limiter: stream.ServerUploadLimit,
		Ctx:     ctx,
	}
}

func ServerUploadLimitWaitN(ctx context.Context, n int) error {
	return stream.ServerUploadLimit.WaitN(ctx, n)
}

type ReaderWithCtx = stream.ReaderWithCtx

type ReaderUpdatingProgress = stream.ReaderUpdatingProgress

type SimpleReaderWithSize = stream.SimpleReaderWithSize
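`stream.ServerUploadLimit` is only used through `WaitN(ctx, n)` above, which matches the shape of a `*rate.Limiter` from `golang.org/x/time/rate`. The real `stream.RateLimitReader` lives in `internal/stream` and is not part of this diff; as a rough sketch under that assumption, a limiter-gated `Read` could look like this:

package example

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// limitedReader is an illustrative stand-in for stream.RateLimitReader,
// assuming the limiter behaves like golang.org/x/time/rate.Limiter.
type limitedReader struct {
	r       io.Reader
	limiter *rate.Limiter
	ctx     context.Context
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		// Charge n tokens (bytes) against the limiter, blocking until they are
		// available or the context is canceled. WaitN errors if n exceeds the
		// limiter's burst, so real code would cap read sizes accordingly.
		if werr := l.limiter.WaitN(l.ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}

A `RateLimitFile` would presumably apply the same per-byte accounting around `Read`/`ReadAt`, so seekable uploads (for example through `s3manager`) stay throttled as well.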