feat: misc improvements to upload/copy/hash (#5045)

general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: cross-storage rapid-upload support
general: enhance upload to avoid a local temp file when possible
general: replace ReadSeekCloser with the File interface to speed up upstream operations
feat(aliyun_open): same as above
feat(crypt): add hack for 139cloud

Close #4934 
Close #4819 

baidu_netdisk's upload code still needs improvement to support rapid-upload.
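
Below is a minimal sketch of the model.Link surface these changes imply, reconstructed from the diff that follows: MFile replaces the old ReadSeekCloser field, and RangeReadCloser becomes an interface whose RangeRead takes a context. Names not visible in the diff (File's method set, RangeReadCloserIF) are assumptions, not alist's exact definitions.

package model

import (
	"context"
	"io"
	"net/http"

	"github.com/alist-org/alist/v3/pkg/http_range"
)

// File is assumed to be at least what http.ServeContent needs
// (read + seek) plus Close; ReaderAt is a guess that would allow
// concurrent reads without seeking.
type File interface {
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Closer
}

// RangeReadCloserIF is an assumed name for the interface behind
// link.RangeReadCloser in the diff below.
type RangeReadCloserIF interface {
	RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
	io.Closer
}

type Link struct {
	URL             string
	Header          http.Header
	MFile           File              // seekable handle, served directly
	RangeReadCloser RangeReadCloserIF // ranged access to remote content
	Concurrency     int               // drives the concurrent-download branch
	PartSize        int
}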
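
And a hedged sketch of the cross-storage rapid-upload idea: when the source object already exposes a hash the destination driver understands, the copy can skip moving bytes entirely. All names here (hashedObj, rapidUploader, copyBetweenStorages) are illustrative, not alist's actual API.

package copyutil

import (
	"context"
	"io"
)

// hashedObj is a hypothetical source object that may carry a hash.
type hashedObj interface {
	Size() int64
	MD5() string // empty when the source driver exposes no hash
	Open(ctx context.Context) (io.ReadCloser, error)
}

// rapidUploader is a hypothetical destination that can link existing
// server-side content by hash instead of receiving the bytes again.
type rapidUploader interface {
	RapidUpload(ctx context.Context, md5 string, size int64) (ok bool, err error)
	Upload(ctx context.Context, r io.Reader, size int64) error
}

func copyBetweenStorages(ctx context.Context, src hashedObj, dst rapidUploader) error {
	if md5 := src.MD5(); md5 != "" {
		if ok, err := dst.RapidUpload(ctx, md5, src.Size()); err == nil && ok {
			return nil // rapid-upload hit: no data transferred
		}
	}
	r, err := src.Open(ctx)
	if err != nil {
		return err
	}
	defer r.Close()
	return dst.Upload(ctx, r, src.Size()) // normal streaming copy fallback
}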
Author: Sean
Committed by: GitHub
Date: 2023-08-27 21:14:23 +08:00
Parent: 9b765ef696
Commit: a3748af772
77 changed files with 1731 additions and 615 deletions

server/common/proxy.go

@@ -3,58 +3,35 @@ package common
 import (
     "context"
     "fmt"
-    "io"
-    "net/http"
-    "net/url"
-    "sync"
-
-    "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/internal/net"
     "github.com/alist-org/alist/v3/pkg/http_range"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/pkg/errors"
+    "io"
+    "net/http"
+    "net/url"
 )
-func HttpClient() *http.Client {
-    once.Do(func() {
-        httpClient = base.NewHttpClient()
-        httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
-            if len(via) >= 10 {
-                return errors.New("stopped after 10 redirects")
-            }
-            req.Header.Del("Referer")
-            return nil
-        }
-    })
-    return httpClient
-}
-
-var once sync.Once
-var httpClient *http.Client
 func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
-    if link.ReadSeekCloser != nil {
+    if link.MFile != nil {
         attachFileName(w, file)
-        http.ServeContent(w, r, file.GetName(), file.ModTime(), link.ReadSeekCloser)
-        defer link.ReadSeekCloser.Close()
+        http.ServeContent(w, r, file.GetName(), file.ModTime(), link.MFile)
+        defer link.MFile.Close()
         return nil
-    } else if link.RangeReadCloser.RangeReader != nil {
+    } else if link.RangeReadCloser != nil {
         attachFileName(w, file)
-        net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeReader)
+        net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeRead)
         defer func() {
-            if link.RangeReadCloser.Closers != nil {
-                link.RangeReadCloser.Closers.Close()
-            }
+            _ = link.RangeReadCloser.Close()
         }()
         return nil
     } else if link.Concurrency != 0 || link.PartSize != 0 {
         attachFileName(w, file)
         size := file.GetSize()
         //var finalClosers model.Closers
-        finalClosers := utils.NewClosers()
+        finalClosers := utils.EmptyClosers()
         header := net.ProcessHeader(r.Header, link.Header)
-        rangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+        rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
             down := net.NewDownloader(func(d *net.Downloader) {
                 d.Concurrency = link.Concurrency
                 d.PartSize = link.PartSize
@@ -65,7 +42,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
                 Size:      size,
                 HeaderRef: header,
             }
-            rc, err := down.Download(context.Background(), req)
+            rc, err := down.Download(ctx, req)
             finalClosers.Add(rc)
             return rc, err
         }
@@ -75,7 +52,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
     } else {
         //transparent proxy
         header := net.ProcessHeader(r.Header, link.Header)
-        res, err := net.RequestHttp(r.Method, header, link.URL)
+        res, err := net.RequestHttp(context.Background(), r.Method, header, link.URL)
         if err != nil {
             return err
         }
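
The common thread in these hunks is context propagation: down.Download and net.RequestHttp now take a ctx, and rangeReader gains a ctx parameter, so an aborted client request can cancel in-flight upstream fetches instead of leaking them. A minimal sketch of that pattern using only the standard library (fetchRange is a hypothetical helper, not an alist API):

package proxyutil

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

// fetchRange requests one byte range with the caller's context; when
// the context is cancelled (e.g. the downstream client disconnects),
// the in-flight upstream request is aborted automatically.
func fetchRange(ctx context.Context, url string, start, length int64) (io.ReadCloser, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, start+length-1))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err // context cancellation surfaces here
	}
	if resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return nil, fmt.Errorf("unexpected status %s", resp.Status)
	}
	return resp.Body, nil
}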