feat(alias): add DownloadConcurrency and DownloadPartSize options (#7829)

* fix(net): goroutine logic bug (AlistGo/alist#7215)

* Fix goroutine logic bug

* Fix bug

---------

Co-authored-by: hpy hs <hshpy.pengyu@gmail.com>

* perf(net): sequential and dynamic concurrency

* fix(net): incorrect error return

* feat(alias): add `DownloadConcurrency` and `DownloadPartSize` options

* feat(net): add `ConcurrencyLimit`

* perf(net): create `chunk` on demand

* refactor

* refactor

* fix(net): `r.Closers.Add` has no effect

* refactor

---------

Co-authored-by: hpy hs <hshpy.pengyu@gmail.com>
Author: j2rong4cn
Date: 2025-01-27 20:08:39 +08:00
Committer: GitHub
Parent: bdcf450203
Commit: 2be0c3d1a0
24 changed files with 396 additions and 238 deletions
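The `ConcurrencyLimit` and chunk-on-demand bullets above describe changes to the `net` package that are not among the hunks kept below. As a rough illustration of the idea they describe, here is a minimal, hypothetical limiter: chunk workers take a slot before issuing a range request, so parts are only created when a slot frees up. The type and method names are assumptions, not AList's actual API.

```go
package netutil

import "context"

// ConcurrencyLimit caps how many chunk downloads run at once.
// Hypothetical sketch; the real names in the net package may differ.
type ConcurrencyLimit struct {
	slots chan struct{} // buffer size = allowed concurrency
}

func NewConcurrencyLimit(n int) *ConcurrencyLimit {
	return &ConcurrencyLimit{slots: make(chan struct{}, n)}
}

// Acquire blocks until a slot is free or ctx is cancelled, so the caller
// only materializes a chunk on demand instead of queuing all parts upfront.
func (l *ConcurrencyLimit) Acquire(ctx context.Context) error {
	select {
	case l.slots <- struct{}{}:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Release frees the slot taken by Acquire.
func (l *ConcurrencyLimit) Release() { <-l.slots }
```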

server/common/proxy.go

@@ -27,16 +27,11 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 		return nil
 	} else if link.RangeReadCloser != nil {
 		attachFileName(w, file)
-		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead)
-		defer func() {
-			_ = link.RangeReadCloser.Close()
-		}()
+		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser)
 		return nil
 	} else if link.Concurrency != 0 || link.PartSize != 0 {
 		attachFileName(w, file)
 		size := file.GetSize()
-		//var finalClosers model.Closers
-		finalClosers := utils.EmptyClosers()
 		header := net.ProcessHeader(r.Header, link.Header)
 		rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 			down := net.NewDownloader(func(d *net.Downloader) {
@@ -50,16 +45,14 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 				HeaderRef: header,
 			}
 			rc, err := down.Download(ctx, req)
-			finalClosers.Add(rc)
 			return rc, err
 		}
-		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), rangeReader)
-		defer finalClosers.Close()
+		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), &model.RangeReadCloser{RangeReader: rangeReader})
 		return nil
 	} else {
 		//transparent proxy
 		header := net.ProcessHeader(r.Header, link.Header)
-		res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL)
+		res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL)
 		if err != nil {
 			return err
 		}
@@ -72,7 +65,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
 		if r.Method == http.MethodHead {
 			return nil
 		}
-		_, err = io.Copy(w, res.Body)
+		_, err = utils.CopyWithBuffer(w, res.Body)
 		if err != nil {
 			return err
 		}
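The change above inverts reader ownership: previously `Proxy` collected every reader in `finalClosers` and deferred the close, while now `net.ServeHTTP` receives a `*model.RangeReadCloser` and cleanup travels with it. Below is a standalone sketch of that contract; only the `RangeReader` field is visible in the hunks, so everything else here is invented for illustration and is not AList's `model.RangeReadCloser`.

```go
package proxy

import (
	"context"
	"io"
)

// rangeReadCloser owns every reader produced by its RangeReader func, so the
// HTTP layer closes once instead of each call site deferring its own cleanup.
type rangeReadCloser struct {
	RangeReader func(ctx context.Context, start, length int64) (io.ReadCloser, error)
	opened      []io.Closer
}

func (r *rangeReadCloser) RangeRead(ctx context.Context, start, length int64) (io.ReadCloser, error) {
	rc, err := r.RangeReader(ctx, start, length)
	if err != nil {
		return nil, err
	}
	r.opened = append(r.opened, rc) // remembered so Close can release it
	return rc, nil
}

func (r *rangeReadCloser) Close() error {
	var firstErr error
	for _, c := range r.opened {
		if err := c.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
```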

server/handles/archive.go

@@ -281,10 +281,11 @@ func ArchiveDown(c *gin.Context) {
 		link, _, err := fs.ArchiveDriverExtract(c, archiveRawPath, model.ArchiveInnerArgs{
 			ArchiveArgs: model.ArchiveArgs{
 				LinkArgs: model.LinkArgs{
-					IP:      c.ClientIP(),
-					Header:  c.Request.Header,
-					Type:    c.Query("type"),
-					HttpReq: c.Request,
+					IP:       c.ClientIP(),
+					Header:   c.Request.Header,
+					Type:     c.Query("type"),
+					HttpReq:  c.Request,
+					Redirect: true,
 				},
 				Password: password,
 			},

server/handles/down.go

@@ -31,10 +31,11 @@ func Down(c *gin.Context) {
 		return
 	} else {
 		link, _, err := fs.Link(c, rawPath, model.LinkArgs{
-			IP:      c.ClientIP(),
-			Header:  c.Request.Header,
-			Type:    c.Query("type"),
-			HttpReq: c.Request,
+			IP:       c.ClientIP(),
+			Header:   c.Request.Header,
+			Type:     c.Query("type"),
+			HttpReq:  c.Request,
+			Redirect: true,
 		})
 		if err != nil {
 			common.ErrorResp(c, err, 500)

server/s3/backend.go

@@ -6,13 +6,14 @@ import (
 	"context"
 	"encoding/hex"
 	"fmt"
-	"github.com/pkg/errors"
 	"io"
 	"path"
 	"strings"
 	"sync"
 	"time"
 
+	"github.com/pkg/errors"
+
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -173,15 +174,27 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
 	if link.RangeReadCloser == nil && link.MFile == nil && len(link.URL) == 0 {
 		return nil, fmt.Errorf("the remote storage driver need to be enhanced to support s3")
 	}
-	remoteFileSize := file.GetSize()
-	remoteClosers := utils.EmptyClosers()
-	rangeReaderFunc := func(ctx context.Context, start, length int64) (io.ReadCloser, error) {
+	var rdr io.ReadCloser
+	length := int64(-1)
+	start := int64(0)
+	if rnge != nil {
+		start, length = rnge.Start, rnge.Length
+	}
+	// see server/common/proxy.go
+	if link.MFile != nil {
+		_, err := link.MFile.Seek(start, io.SeekStart)
+		if err != nil {
+			return nil, err
+		}
+		rdr = link.MFile
+	} else {
+		remoteFileSize := file.GetSize()
 		if length >= 0 && start+length >= remoteFileSize {
 			length = -1
 		}
 		rrc := link.RangeReadCloser
 		if len(link.URL) > 0 {
 			rangedRemoteLink := &model.Link{
 				URL:    link.URL,
 				Header: link.Header,
 			}
@@ -194,35 +207,12 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
 		}
 		if rrc != nil {
 			remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: start, Length: length})
-			remoteClosers.AddClosers(rrc.GetClosers())
 			if err != nil {
 				return nil, err
 			}
-			return remoteReader, nil
-		}
-		if link.MFile != nil {
-			_, err := link.MFile.Seek(start, io.SeekStart)
-			if err != nil {
-				return nil, err
-			}
-			//remoteClosers.Add(remoteLink.MFile)
-			//keep reuse same MFile and close at last.
-			remoteClosers.Add(link.MFile)
-			return io.NopCloser(link.MFile), nil
-		}
-		return nil, errs.NotSupport
-	}
-	var rdr io.ReadCloser
-	if rnge != nil {
-		rdr, err = rangeReaderFunc(ctx, rnge.Start, rnge.Length)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		rdr, err = rangeReaderFunc(ctx, 0, -1)
-		if err != nil {
-			return nil, err
-		}
+			rdr = utils.ReadCloser{Reader: remoteReader, Closer: rrc}
+		} else {
+			return nil, errs.NotSupport
+		}
 	}
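The rewrite above also drops `remoteClosers` in favor of pairing each reader directly with the closer that owns it, via `utils.ReadCloser{Reader: remoteReader, Closer: rrc}`. Assuming `utils.ReadCloser` is the usual embedded-interface composite, the standard-library equivalent is:

```go
package s3util

import "io"

// readCloser serves Read from one value and Close from another: reads come
// from the ranged body, Close tears down the owning range session. This is
// an assumed mirror of the utils.ReadCloser composite used in the hunk.
type readCloser struct {
	io.Reader
	io.Closer
}

// pair is a hypothetical helper matching the hunk's usage: rdr = pair(remoteReader, rrc)
func pair(r io.Reader, c io.Closer) io.ReadCloser {
	return readCloser{Reader: r, Closer: c}
}
```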

server/webdav/webdav.go

@@ -263,7 +263,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) {
 		w.Header().Set("Cache-Control", "max-age=0, no-cache, no-store, must-revalidate")
 		http.Redirect(w, r, u, http.StatusFound)
 	} else {
-		link, _, err := fs.Link(ctx, reqPath, model.LinkArgs{IP: utils.ClientIP(r), Header: r.Header, HttpReq: r})
+		link, _, err := fs.Link(ctx, reqPath, model.LinkArgs{IP: utils.ClientIP(r), Header: r.Header, HttpReq: r, Redirect: true})
 		if err != nil {
 			return http.StatusInternalServerError, err
 		}
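Finally, the three handler-side hunks (archive download, raw download, and the webdav GET path above) all add `Redirect: true` to `model.LinkArgs`. The flag's consumer is outside these hunks, so this reading is hedged: each of these handlers can answer with a 302 to `link.URL`, and the flag presumably lets a driver return a plain URL instead of preparing range-read plumbing. A sketch of that split, using only fields visible in this commit:

```go
package handles

import (
	"net/http"

	"github.com/alist-org/alist/v3/internal/model"
)

// linkArgs builds the argument set used by the call sites in this commit.
// canRedirect mirrors the new Redirect field; its exact semantics inside
// fs.Link and the drivers are assumed, not shown in these hunks.
func linkArgs(r *http.Request, canRedirect bool) model.LinkArgs {
	return model.LinkArgs{
		IP:       r.RemoteAddr, // the real call sites use gin's ClientIP / utils.ClientIP
		Header:   r.Header,
		HttpReq:  r,
		Redirect: canRedirect, // true: handler may 302 to link.URL; false: handler proxies
	}
}
```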