* wip: refactor offline download (#5331) * base tool * working: aria2 * refactor: change type of percentage to float64 * wip: adapt aria2 * wip: use items in offline_download * wip: use tool manager * wip: adapt qBittorrent * chore: fix typo * Squashed commit of the following: commit 4fc0a77565702f9bf498485d42336502f2ee9776 Author: Andy Hsu <i@nn.ci> Date: Fri Oct 20 21:06:25 2023 +0800 fix(baidu_netdisk): upload file > 4GB (close #5392) commit aaffaee2b54fc067d240ea0c20ea3c2f39615d6e Author: gmugu <94156510@qq.com> Date: Thu Oct 19 19:17:53 2023 +0800 perf(webdav): support request with cookies (#5391) commit 8ef8023c20bfeee97ec82155b52eae0d80b1410e Author: NewbieOrange <NewbieOrange@users.noreply.github.com> Date: Thu Oct 19 19:17:09 2023 +0800 fix(aliyundrive_open): upload progress for normal upload (#5398) commit cdfbe6dcf2b361e4c93c2703c2f8c9bddeac0ee6 Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Wed Oct 18 16:27:07 2023 +0800 fix: hash gcid empty file (#5394) commit 94d028743abf8e0d736f80c0ec4fb294a1cc064c Author: Andy Hsu <i@nn.ci> Date: Sat Oct 14 13:17:51 2023 +0800 ci: remove `pr-welcome` label when close issue [skip ci] commit 7f7335435c2f32a3eef76fac4c4f783d9d8624fd Author: itsHenry <2671230065@qq.com> Date: Sat Oct 14 13:12:46 2023 +0800 feat(cloudreve): support thumbnail (#5373 close #5348) * feat(cloudreve): support thumbnail * chore: remove unnecessary code commit b9e192b29cffddf14a0dfb2d3885def57a56ce16 Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Thu Oct 12 20:57:12 2023 +0800 fix(115): limit request rate (#5367 close #5275) * fix(115):limit request rate * chore(115): fix unit of `limit_rate` --------- Co-authored-by: Andy Hsu <i@nn.ci> commit 69a98eaef612b58596e5c26c341b6d7cedecdf19 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Wed Oct 11 22:01:55 2023 +0800 fix(deps): update module github.com/aliyun/aliyun-oss-go-sdk to v2.2.9+incompatible (#5141) Co-authored-by: 
renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit 1ebc96a4e5220c979fd581bb3b5640e9436f6665 Author: Andy Hsu <i@nn.ci> Date: Tue Oct 10 18:32:00 2023 +0800 fix(wopan): fatal error concurrent map writes (close #5352) commit 66e2324cac75cb3ef05af45dbdd10b124d534aff Author: Andy Hsu <i@nn.ci> Date: Tue Oct 10 18:23:11 2023 +0800 chore(deps): upgrade dependencies commit 7600dc28df137c439e538b4257731c33a63db9b5 Author: Andy Hsu <i@nn.ci> Date: Tue Oct 10 18:13:58 2023 +0800 fix(aliyundrive_open): change default api to raw server (close #5358) commit 8ef89ad0a496d5acc398794c0afa4f77c67ad371 Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Tue Oct 10 18:08:27 2023 +0800 fix(baidu_netdisk): hash and `error 2` (#5356) * fix(baidu):hash and error:2 * fix:invalid memory address commit 35d672217dde69e65b41b1fcd9786c1cfebcdc45 Author: jeffmingup <1960588251@qq.com> Date: Sun Oct 8 19:29:45 2023 +0800 fix(onedrive_app): incorrect api on `_accessToken` (#5346) commit 1a283bb2720eff6d1b0c1dd6f1667a6449905a9b Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Fri Oct 6 16:04:39 2023 +0800 feat(google_drive): add `hash_info`, `ctime`, `thumbnail` (#5334) commit a008f54f4d5eda5738abfd54bf1abf1e18c08430 Author: nkh0472 <67589323+nkh0472@users.noreply.github.com> Date: Thu Oct 5 13:10:51 2023 +0800 docs: minor language improvements (#5329) [skip ci] * fix: adapt update progress type * Squashed commit of the following: commit 65c5ec0c34d5f027a65933fe89af53791747bdd4 Author: itsHenry <2671230065@qq.com> Date: Sat Nov 4 13:35:09 2023 +0800 feat(cloudreve): folder size count and switch (#5457 close #5395) commit a6325967d0de18e6b6c744f06cb1ebaa08ec687e Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Mon Oct 30 15:11:20 2023 +0800 fix(deps): update module github.com/charmbracelet/lipgloss to v0.9.1 (#5234) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit 
4dff49470adce36416d8c56594e84868c04d023b Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Mon Oct 30 15:10:36 2023 +0800 fix(deps): update golang.org/x/exp digest to 7918f67 (#5366) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit cc86d6f3d1ff2120669c9dda719b7faabb922f52 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Sun Oct 29 14:45:55 2023 +0800 fix(deps): update module golang.org/x/net to v0.17.0 [security] (#5370) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit c0f9c8ebafdf8dd2afe5c0b9fba24456819c3155 Author: Andy Hsu <i@nn.ci> Date: Thu Oct 26 19:21:09 2023 +0800 feat: add ignore direct link params (close #5434)
271 lines
7.7 KiB
Go
271 lines
7.7 KiB
Go
package aliyundrive_open
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"encoding/base64"
|
|
"fmt"
|
|
"io"
|
|
"math"
|
|
"net/http"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/alist-org/alist/v3/drivers/base"
|
|
"github.com/alist-org/alist/v3/internal/driver"
|
|
"github.com/alist-org/alist/v3/internal/model"
|
|
"github.com/alist-org/alist/v3/pkg/http_range"
|
|
"github.com/alist-org/alist/v3/pkg/utils"
|
|
"github.com/avast/retry-go"
|
|
"github.com/go-resty/resty/v2"
|
|
log "github.com/sirupsen/logrus"
|
|
)
|
|
|
|
func makePartInfos(size int) []base.Json {
|
|
partInfoList := make([]base.Json, size)
|
|
for i := 0; i < size; i++ {
|
|
partInfoList[i] = base.Json{"part_number": 1 + i}
|
|
}
|
|
return partInfoList
|
|
}
|
|
|
|
func calPartSize(fileSize int64) int64 {
|
|
var partSize int64 = 20 * utils.MB
|
|
if fileSize > partSize {
|
|
if fileSize > 1*utils.TB { // file Size over 1TB
|
|
partSize = 5 * utils.GB // file part size 5GB
|
|
} else if fileSize > 768*utils.GB { // over 768GB
|
|
partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
|
|
} else if fileSize > 512*utils.GB { // over 512GB
|
|
partSize = 82463373 // ≈ 78.6432MB
|
|
} else if fileSize > 384*utils.GB { // over 384GB
|
|
partSize = 54975582 // ≈ 52.4288MB
|
|
} else if fileSize > 256*utils.GB { // over 256GB
|
|
partSize = 41231687 // ≈ 39.3216MB
|
|
} else if fileSize > 128*utils.GB { // over 128GB
|
|
partSize = 27487791 // ≈ 26.2144MB
|
|
}
|
|
}
|
|
return partSize
|
|
}
|
|
|
|
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
|
|
partInfoList := makePartInfos(count)
|
|
var resp CreateResp
|
|
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
|
|
req.SetBody(base.Json{
|
|
"drive_id": d.DriveId,
|
|
"file_id": fileId,
|
|
"part_info_list": partInfoList,
|
|
"upload_id": uploadId,
|
|
}).SetResult(&resp)
|
|
})
|
|
return resp.PartInfoList, err
|
|
}
|
|
|
|
func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
|
|
uploadUrl := partInfo.UploadUrl
|
|
if d.InternalUpload {
|
|
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
|
|
}
|
|
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
res, err := base.HttpClient.Do(req)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
res.Body.Close()
|
|
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
|
|
return fmt.Errorf("upload status: %d", res.StatusCode)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
|
|
// 3. complete
|
|
var newFile File
|
|
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
|
|
req.SetBody(base.Json{
|
|
"drive_id": d.DriveId,
|
|
"file_id": fileId,
|
|
"upload_id": uploadId,
|
|
}).SetResult(&newFile)
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return fileToObj(newFile), nil
|
|
}
|
|
|
|
// ProofRange is the byte range [Start, End) of the local file that must be
// hashed to produce the rapid-upload proof code (see calProofCode, which
// reads End-Start bytes beginning at Start).
type ProofRange struct {
	Start int64
	End   int64
}
|
|
|
|
func getProofRange(input string, size int64) (*ProofRange, error) {
|
|
if size == 0 {
|
|
return &ProofRange{}, nil
|
|
}
|
|
tmpStr := utils.GetMD5EncodeStr(input)[0:16]
|
|
tmpInt, err := strconv.ParseUint(tmpStr, 16, 64)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
index := tmpInt % uint64(size)
|
|
pr := &ProofRange{
|
|
Start: int64(index),
|
|
End: int64(index) + 8,
|
|
}
|
|
if pr.End >= size {
|
|
pr.End = size
|
|
}
|
|
return pr, nil
|
|
}
|
|
|
|
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
|
|
proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
length := proofRange.End - proofRange.Start
|
|
buf := bytes.NewBuffer(make([]byte, 0, length))
|
|
reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
_, err = io.CopyN(buf, reader, length)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
|
|
}
|
|
|
|
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
|
// 1. create
|
|
// Part Size Unit: Bytes, Default: 20MB,
|
|
// Maximum number of slices 10,000, ≈195.3125GB
|
|
var partSize = calPartSize(stream.GetSize())
|
|
const dateFormat = "2006-01-02T15:04:05.000Z"
|
|
mtimeStr := stream.ModTime().UTC().Format(dateFormat)
|
|
ctimeStr := stream.CreateTime().UTC().Format(dateFormat)
|
|
|
|
createData := base.Json{
|
|
"drive_id": d.DriveId,
|
|
"parent_file_id": dstDir.GetID(),
|
|
"name": stream.GetName(),
|
|
"type": "file",
|
|
"check_name_mode": "ignore",
|
|
"local_modified_at": mtimeStr,
|
|
"local_created_at": ctimeStr,
|
|
}
|
|
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
|
|
createData["part_info_list"] = makePartInfos(count)
|
|
// rapid upload
|
|
rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
|
|
if rapidUpload {
|
|
log.Debugf("[aliyundrive_open] start cal pre_hash")
|
|
// read 1024 bytes to calculate pre hash
|
|
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
hash, err := utils.HashReader(utils.SHA1, reader)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
createData["size"] = stream.GetSize()
|
|
createData["pre_hash"] = hash
|
|
}
|
|
var createResp CreateResp
|
|
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
|
|
req.SetBody(createData).SetResult(&createResp)
|
|
})
|
|
var tmpF model.File
|
|
if err != nil {
|
|
if e.Code != "PreHashMatched" || !rapidUpload {
|
|
return nil, err
|
|
}
|
|
log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
|
|
|
|
hi := stream.GetHash()
|
|
hash := hi.GetHash(utils.SHA1)
|
|
if len(hash) <= 0 {
|
|
tmpF, err = stream.CacheFullInTempFile()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
hash, err = utils.HashFile(utils.SHA1, tmpF)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
}
|
|
|
|
delete(createData, "pre_hash")
|
|
createData["proof_version"] = "v1"
|
|
createData["content_hash_name"] = "sha1"
|
|
createData["content_hash"] = hash
|
|
createData["proof_code"], err = d.calProofCode(stream)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
|
|
}
|
|
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
|
|
req.SetBody(createData).SetResult(&createResp)
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
}
|
|
|
|
if !createResp.RapidUpload {
|
|
// 2. normal upload
|
|
log.Debugf("[aliyundive_open] normal upload")
|
|
|
|
preTime := time.Now()
|
|
var offset, length int64 = 0, partSize
|
|
//var length
|
|
for i := 0; i < len(createResp.PartInfoList); i++ {
|
|
if utils.IsCanceled(ctx) {
|
|
return nil, ctx.Err()
|
|
}
|
|
// refresh upload url if 50 minutes passed
|
|
if time.Since(preTime) > 50*time.Minute {
|
|
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
preTime = time.Now()
|
|
}
|
|
if remain := stream.GetSize() - offset; length > remain {
|
|
length = remain
|
|
}
|
|
//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
|
|
rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
err = retry.Do(func() error {
|
|
//rd.Reset()
|
|
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
|
|
},
|
|
retry.Attempts(3),
|
|
retry.DelayType(retry.BackOffDelay),
|
|
retry.Delay(time.Second))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
offset += partSize
|
|
up(float64(i*100) / float64(count))
|
|
}
|
|
} else {
|
|
log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
|
|
}
|
|
|
|
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
|
|
// 3. complete
|
|
return d.completeUpload(createResp.FileId, createResp.UploadId)
|
|
}
|