Compare commits

...

36 Commits

Author SHA1 Message Date
503083e24c chore: rename driver 2025-03-17 00:51:33 +08:00
bcd47e2e10 chore(go.mod): update 115-sdk-go dependency to v0.1.1 and adjust callback handling in driver 2025-03-16 17:22:34 +08:00
79bdfd5427 feat(115_open): implement directory management and file operations 2025-03-16 16:29:10 +08:00
3ddca2b40f Merge branch 'main' into feat/115open 2025-03-15 23:31:24 +08:00
c82e632ee1 fix: potential XSS vulnerabilities (#7923)
* fix: potential XSS vulnerabilities

* feat: support filter and render for readme.md

* chore: set ReadMeAutoRender to true

* fix attachFileName undefined

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2025-03-15 23:28:40 +08:00
d04a75caa2 chore(go.mod): update 115-sdk-go dependency version 2025-03-15 23:27:48 +08:00
ecaacf7c6f wip: 115 open 2025-03-15 23:03:32 +08:00
04f5525f20 fix(s3): incorrectly added slash before the Bucket name (#8083 close #8001) 2025-03-15 00:21:24 +08:00
28b61a93fd feat(webdav): support oc:checksums (#8064 close #7472)
Ref: #7472
2025-03-15 00:21:07 +08:00
0126af4de0 fix(crypt): premature close of MFile (#8132 close #8119)
* fix(crypt): premature close of MFile

* refactor
2025-03-15 00:13:30 +08:00
7579d44517 fix(onedrive): set req.ContentLength (#8081)
* fix(onedrive): set req.ContentLength

* fix(onedrive_app): set req.ContentLength

* fix(cloudreve): set req.ContentLength
2025-03-15 00:12:37 +08:00
5dfea714d8 fix(cloudreve): use milliseconds timestamp in last_modified (#8133) 2025-03-15 00:12:15 +08:00
370a6c15a9 fix(baidu_netdisk): remove duplicate retry (#7972) 2025-03-01 19:00:36 +08:00
2570707a06 feat(baidu_netdisk): support dynamic slice size for low-bandwidth uploads (#7965)
* Dynamic slice size

* Add rigorous test results
2025-03-01 18:46:05 +08:00
4145734c18 refactor(net): pass request header (#8031 close #8008)
* refactor(net): pass request header

* feat(proxy): add `Etag` to response header

* refactor
2025-03-01 18:35:34 +08:00
646c7bcd21 fix(archive): use another sign for extraction (#7982) 2025-03-01 18:34:33 +08:00
cdc41595bc feat(github): support GPG verification (#7996 close #7986)
* feat(github): support GPG verification

* chore
2025-02-24 23:12:23 +08:00
79bef0be9e chore: fix build failed (#8005) 2025-02-16 15:11:48 +08:00
c230f24ebe fix(archive): decode filename when decompressing zips (#7998 close #7988) 2025-02-16 12:25:01 +08:00
30d8c20756 feat(archive): support deprioritize previewing (#7984) 2025-02-16 12:24:10 +08:00
3b71500f23 feat(traffic): support limit task worker count & file stream rate (#7948)
* feat: set task workers num & client stream rate limit

* feat: server stream rate limit

* upgrade xhofe/tache

* .
2025-02-16 12:22:11 +08:00
399336b33c fix(189pc): transfer rename (#7958)
* fix(189pc): transfer rename

* fix: OverwriteUpload

* fix: change search method

* fix

* fix
2025-02-16 12:21:34 +08:00
36b4204623 feat(github): support github proxy (#7979 close #7963) 2025-02-16 12:21:03 +08:00
f25be154c6 fix(ilanzou): add header X-Forwarded-For to solve IP ban (#7977)
* fix: warning

* feat: ip header

* fix: ip header for fs link
2025-02-16 12:20:28 +08:00
ec3fc945a3 fix(feiji): modify the request header (#7902 close #7890) 2025-02-09 18:35:39 +08:00
3f9bed3d5f feat(bootstrap): add .url to proxy types (#7928) 2025-02-09 18:33:38 +08:00
b9ad18bd0a feat(recursive-move): Advanced conflict policy for preventing unintentional overwriting (#7906) 2025-02-09 18:32:57 +08:00
0219c4e15a fix(index): fix the issue where ignored paths are not updated (#7907) 2025-02-09 18:31:43 +08:00
d983a4ebcb refactor(cmd): use std runtime package to get go version info (#7964)
* refactor(cmd): use std `runtime` package to get go version info

- Remove the `GoVersion` variable.
- Remove overriding `GoVersion` by ldflags in `build.sh`.
- Get go version, OS and arch from the constants in the std `runtime` package instead of compile time.

* chore(ci): remove `GoVersion` flag from workflows

Remove GoVersion flag from beta_release.yml and build.yml workflows.

> Reduce compile-time dependencies.
2025-02-09 18:30:56 +08:00
f795807753 feat(github_releases): support dir size for show all version (#7938)
* refactor

* Change the default RepoStructure

* feat: support using gh-proxy
2025-02-09 18:30:38 +08:00
6164e4577b fix: missing args when using alias driver (#7941 close #7932) 2025-02-05 19:22:10 +08:00
39bde328ee fix(lenovonas_share): the size of the directory (#7914) 2025-02-01 17:32:58 +08:00
779c293f04 fix(driver): implement canceling and updating progress for putting for some drivers (#7847)
* fix(driver): additionally implement canceling and updating progress for putting for some drivers

* refactor: add driver archive api into template

* fix(123): use built-in MD5 to avoid caching full

* .

* fix build failed
2025-02-01 17:29:55 +08:00
b9f397d29f fix(139): restore the Account handling, partially reverts #7850 (#7900 close #7784) 2025-01-30 11:25:41 +08:00
d53eecc229 fix(febbox): panic due to slice out of range (#7898 close #7889) 2025-01-30 11:24:07 +08:00
f88fd83d4a feat(ci): use go-cross/cgo-actions for dev build 2025-01-28 18:57:09 +08:00
134 changed files with 2852 additions and 1136 deletions

View File

@ -94,7 +94,6 @@ jobs:
out-dir: build
x-flags: |
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
github.com/alist-org/alist/v3/internal/conf.Version=$tag

View File

@ -15,14 +15,17 @@ jobs:
strategy:
matrix:
platform: [ubuntu-latest]
go-version: [ '1.21' ]
target:
- darwin-amd64
- darwin-arm64
- windows-amd64
- linux-arm64-musl
- linux-amd64-musl
- windows-arm64
- android-arm64
name: Build
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
@ -30,19 +33,29 @@ jobs:
- uses: benjlevesque/short-sha@v3.0
id: short-sha
- name: Install dependencies
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
go install github.com/crazy-max/xgo@latest
sudo apt install upx
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
- name: Setup web
run: bash build.sh dev web
- name: Build
run: |
bash build.sh dev
uses: go-cross/cgo-actions@v1
with:
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
out-dir: build
x-flags: |
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
github.com/alist-org/alist/v3/internal/conf.Version=$tag
github.com/alist-org/alist/v3/internal/conf.WebVersion=dev
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: alist_${{ env.SHA }}
path: dist
name: alist_${{ env.SHA }}_${{ matrix.target }}
path: build/*

View File

@ -1,6 +1,5 @@
appName="alist"
builtAt="$(date +'%F %T %z')"
goVersion=$(go version | sed 's/go version //')
gitAuthor="Xhofe <i@nn.ci>"
gitCommit=$(git log --pretty=format:"%h" -1)
@ -22,7 +21,6 @@ echo "frontend version: $webVersion"
ldflags="\
-w -s \
-X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
-X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
-X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \

View File

@ -17,6 +17,7 @@ func Init() {
bootstrap.Log()
bootstrap.InitDB()
data.InitData()
bootstrap.InitStreamLimit()
bootstrap.InitIndex()
bootstrap.InitUpgradePatch()
}

View File

@ -6,6 +6,7 @@ package cmd
import (
"fmt"
"os"
"runtime"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/spf13/cobra"
@ -16,14 +17,15 @@ var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show current version of AList",
Run: func(cmd *cobra.Command, args []string) {
goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)
fmt.Printf(`Built At: %s
Go Version: %s
Author: %s
Commit ID: %s
Version: %s
WebVersion: %s
`,
conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
os.Exit(0)
},
}
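The hunk above swaps the ldflags-injected conf.GoVersion for values the standard runtime package already provides. A minimal standalone sketch of the same idea (a hypothetical program, not the AList code itself):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// runtime.Version(), runtime.GOOS and runtime.GOARCH come from the
	// standard library, so no -X ldflags injection is needed at build time.
	fmt.Printf("Go Version: %s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)
}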

View File

@ -215,12 +215,12 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var uploadResult *UploadResult
// rapid upload failed, fall back to uploading
if stream.GetSize() <= 10*utils.MB { // files up to 10MB use the normal upload mode
if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil {
if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
return nil, err
}
} else {
// multipart upload
if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil {
if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
return nil, err
}
}

View File

@ -2,6 +2,7 @@ package _115
import (
"bytes"
"context"
"crypto/md5"
"crypto/tls"
"encoding/hex"
@ -13,9 +14,11 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
@ -140,7 +143,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
return nil, err
}
bytes, err := crypto.Decode(string(result.EncodedData), key)
b, err := crypto.Decode(string(result.EncodedData), key)
if err != nil {
return nil, err
}
@ -148,7 +151,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
downloadInfo := struct {
Url string `json:"url"`
}{}
if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil {
if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
return nil, err
}
@ -271,7 +274,7 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
}
// UploadByOSS uses the aliyun sdk to upload
func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) {
func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
ossToken, err := c.client.GetOSSToken()
if err != nil {
return nil, err
@ -286,6 +289,10 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
}
var bodyBytes []byte
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
if err = bucket.PutObject(params.Object, r, append(
driver115.OssOption(params, ossToken),
oss.CallbackResult(&bodyBytes),
@ -301,7 +308,8 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
}
// UploadByMultipart uploads by multipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
@ -313,7 +321,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
err error
)
tmpF, err := stream.CacheFullInTempFile()
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
}
@ -372,6 +380,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
quit <- struct{}{}
}()
completedNum := atomic.Int32{}
// consumers
for i := 0; i < options.ThreadsNum; i++ {
go func(threadId int) {
@ -384,24 +393,28 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
var part oss.UploadPart // on error, keep retrying, up to 3 attempts in total
for retry := 0; retry < 3; retry++ {
select {
case <-ctx.Done():
break
case <-ticker.C:
if ossToken, err = d.client.GetOSSToken(); err != nil { // refresh the ossToken when the ticker fires
errCh <- errors.Wrap(err, "刷新token时出现错误")
}
default:
}
buf := make([]byte, chunk.Size)
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
continue
}
if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)),
chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}
}
if err != nil {
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误%v", stream.GetName(), chunk.Number, err))
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误%v", s.GetName(), chunk.Number, err))
} else {
num := completedNum.Add(1)
up(float64(num) * 100.0 / float64(len(chunks)))
}
UploadedPartsCh <- part
}
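Several hunks in this comparison route upload bodies through driver.NewLimitedUploadStream before handing them to HTTP or OSS clients. Its implementation is not part of this diff; as a rough sketch, assuming it combines context cancellation with a shared token bucket, it could look like the following (names and structure are illustrative only):

package driver // illustrative sketch, not AList's actual implementation

import (
	"context"
	"io"

	"golang.org/x/time/rate"
)

// limitedReader throttles reads against a shared token bucket and
// stops early once the upload context is canceled.
type limitedReader struct {
	ctx     context.Context
	r       io.Reader
	limiter *rate.Limiter
}

func (l *limitedReader) Read(p []byte) (int, error) {
	if err := l.ctx.Err(); err != nil {
		return 0, err
	}
	n, err := l.r.Read(p)
	if n > 0 && l.limiter != nil {
		// charge n tokens; WaitN blocks until the bucket refills
		// (assumes n never exceeds the configured burst)
		if werr := l.limiter.WaitN(l.ctx, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}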

drivers/115_open/driver.go Normal file (308 lines)
View File

@ -0,0 +1,308 @@
package _115_open
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
sdk "github.com/xhofe/115-sdk-go"
)
type Open115 struct {
model.Storage
Addition
client *sdk.Client
}
func (d *Open115) Config() driver.Config {
return config
}
func (d *Open115) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Open115) Init(ctx context.Context) error {
d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken),
sdk.WithAccessToken(d.Addition.AccessToken),
sdk.WithOnRefreshToken(func(s1, s2 string) {
d.Addition.AccessToken = s1
d.Addition.RefreshToken = s2
op.MustSaveDriverStorage(d)
}))
if flags.Debug || flags.Dev {
d.client.SetDebug(true)
}
_, err := d.client.UserInfo(ctx)
if err != nil {
return err
}
return nil
}
func (d *Open115) Drop(ctx context.Context) error {
return nil
}
func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var res []model.Obj
pageSize := int64(200)
offset := int64(0)
for {
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
CID: dir.GetID(),
Limit: pageSize,
Offset: offset,
ASC: d.Addition.OrderDirection == "asc",
O: d.Addition.OrderBy,
// Cur: 1,
ShowDir: true,
})
if err != nil {
return nil, err
}
res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
obj := Obj(src)
return &obj
})...)
if len(res) >= int(resp.Count) {
break
}
offset += pageSize
}
return res, nil
}
func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var ua string
if args.Header != nil {
ua = args.Header.Get("User-Agent")
}
if ua == "" {
ua = base.UserAgent
}
obj, ok := file.(*Obj)
if !ok {
return nil, fmt.Errorf("can't convert obj")
}
pc := obj.Pc
resp, err := d.client.DownURL(ctx, pc, ua)
if err != nil {
return nil, err
}
u, ok := resp[obj.GetID()]
if !ok {
return nil, fmt.Errorf("can't get link")
}
return &model.Link{
URL: u.URL.URL,
Header: http.Header{
"User-Agent": []string{ua},
},
}, nil
}
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Pid: parentDir.GetID(),
Fn: dirName,
Fc: "0",
Upt: time.Now().Unix(),
Uet: time.Now().Unix(),
UpPt: time.Now().Unix(),
}, nil
}
func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
_, err := d.client.Move(ctx, &sdk.MoveReq{
FileIDs: srcObj.GetID(),
ToCid: dstDir.GetID(),
})
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
FileID: srcObj.GetID(),
FileNma: newName,
})
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
_, err := d.client.Copy(ctx, &sdk.CopyReq{
PID: dstDir.GetID(),
FileID: srcObj.GetID(),
NoDupli: "1",
})
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
_obj, ok := obj.(*Obj)
if !ok {
return fmt.Errorf("can't convert obj")
}
_, err := d.client.DelFile(ctx, &sdk.DelFileReq{
FileIDs: _obj.GetID(),
ParentID: _obj.Pid,
})
if err != nil {
return err
}
return nil
}
func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
tempF, err := file.CacheFullInTempFile()
if err != nil {
return err
}
// calculate the sha1 of the full file
sha1, err := utils.HashReader(utils.SHA1, tempF)
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
// sha1 of the first 128KB
sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
// 1. Init
resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
Target: dstDir.GetID(),
FileID: strings.ToUpper(sha1),
PreID: strings.ToUpper(sha1128k),
})
if err != nil {
return err
}
if resp.Status == 2 {
return nil
}
// 2. two-way verification
if utils.SliceContains([]int{6, 7, 8}, resp.Status) {
signCheck := strings.Split(resp.SignCheck, "-") // "sign_check": "2392148-2392298" means: sha1 of the bytes from offset 2392148 through 2392298, inclusive
start, err := strconv.ParseInt(signCheck[0], 10, 64)
if err != nil {
return err
}
end, err := strconv.ParseInt(signCheck[1], 10, 64)
if err != nil {
return err
}
_, err = tempF.Seek(start, io.SeekStart)
if err != nil {
return err
}
signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1))
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
Target: dstDir.GetID(),
FileID: strings.ToUpper(sha1),
PreID: strings.ToUpper(sha1128k),
SignKey: resp.SignKey,
SignVal: strings.ToUpper(signVal),
})
if err != nil {
return err
}
if resp.Status == 2 {
return nil
}
}
// 3. get upload token
tokenResp, err := d.client.UploadGetToken(ctx)
if err != nil {
return err
}
// 4. upload
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(resp.Bucket)
if err != nil {
return err
}
err = bucket.PutObject(resp.Object, tempF,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.CallbackVar))),
)
if err != nil {
return err
}
return nil
}
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }
// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }
// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }
// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// // return errs.NotImplement to use an internal archive tool
// return nil, errs.NotImplement
// }
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Open115)(nil)
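The status 6/7/8 branch above answers the server's two-way verification challenge by hashing the byte range named in sign_check. A self-contained sketch of that range hash (the helper name is illustrative, not part of the driver):

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// rangeSHA1 returns the upper-case SHA1 of f[start..end] inclusive,
// mirroring the sign_check handling in Put above.
func rangeSHA1(f io.ReadSeeker, start, end int64) (string, error) {
	if _, err := f.Seek(start, io.SeekStart); err != nil {
		return "", err
	}
	h := sha1.New()
	if _, err := io.Copy(h, io.LimitReader(f, end-start+1)); err != nil {
		return "", err
	}
	return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
	sig, err := rangeSHA1(strings.NewReader("example file content"), 2, 9)
	if err != nil {
		panic(err)
	}
	fmt.Println(sig) // SHA1 of bytes 2..9 of the reader
}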

drivers/115_open/meta.go Normal file (36 lines)
View File

@ -0,0 +1,36 @@
package _115_open
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
driver.RootID
// define other
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
AccessToken string
}
var config = driver.Config{
Name: "115 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Open115{}
})
}

drivers/115_open/types.go Normal file (59 lines)
View File

@ -0,0 +1,59 @@
package _115_open
import (
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
sdk "github.com/xhofe/115-sdk-go"
)
type Obj sdk.GetFilesResp_File
// Thumb implements model.Thumb.
func (o *Obj) Thumb() string {
return o.Thumbnail
}
// CreateTime implements model.Obj.
func (o *Obj) CreateTime() time.Time {
return time.Unix(o.UpPt, 0)
}
// GetHash implements model.Obj.
func (o *Obj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, o.Sha1)
}
// GetID implements model.Obj.
func (o *Obj) GetID() string {
return o.Fid
}
// GetName implements model.Obj.
func (o *Obj) GetName() string {
return o.Fn
}
// GetPath implements model.Obj.
func (o *Obj) GetPath() string {
return ""
}
// GetSize implements model.Obj.
func (o *Obj) GetSize() int64 {
return o.FS
}
// IsDir implements model.Obj.
func (o *Obj) IsDir() bool {
return o.Fc == "0"
}
// ModTime implements model.Obj.
func (o *Obj) ModTime() time.Time {
return time.Unix(o.Upt, 0)
}
var _ model.Obj = (*Obj)(nil)
var _ model.Thumb = (*Obj)(nil)

drivers/115_open/util.go Normal file (3 lines)
View File

@ -0,0 +1,3 @@
package _115_open
// do others that not defined in Driver interface

View File

@ -185,32 +185,35 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
}
}
func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
}()
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
etag = hex.EncodeToString(h.Sum(nil))
}
defer func() {
_ = tempFile.Close()
}()
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
etag := hex.EncodeToString(h.Sum(nil))
data := base.Json{
"driveId": 0,
"duplicate": 2, // 2->覆盖 1->重命名 0->默认
"etag": etag,
"fileName": stream.GetName(),
"fileName": file.GetName(),
"parentFileId": dstDir.GetID(),
"size": stream.GetSize(),
"size": file.GetSize(),
"type": 0,
}
var resp UploadResp
@ -225,7 +228,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return nil
}
if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
err = d.newUpload(ctx, &resp, stream, tempFile, up)
err = d.newUpload(ctx, &resp, file, up)
return err
} else {
cfg := &aws.Config{
@ -239,15 +242,21 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return err
}
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
input := &s3manager.UploadInput{
Bucket: &resp.Data.Bucket,
Key: &resp.Data.Key,
Body: tempFile,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {
return err
}
}
_, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{

View File

@ -69,7 +69,7 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
return err
}
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
chunkSize := int64(1024 * 1024 * 16)
// fetch s3 pre signed urls
chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
@ -81,6 +81,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
limited := driver.NewLimitedUploadStream(ctx, file)
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -103,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
if j == chunkCount {
curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
}
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
if err != nil {
return err
}

View File

@ -2,11 +2,13 @@ package _139
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
@ -69,29 +71,28 @@ func (d *Yun139) Init(ctx context.Context) error {
default:
return errs.NotImplement
}
// if d.ref != nil {
// return nil
// }
// decode, err := base64.StdEncoding.DecodeString(d.Authorization)
// if err != nil {
// return err
// }
// decodeStr := string(decode)
// splits := strings.Split(decodeStr, ":")
// if len(splits) < 2 {
// return fmt.Errorf("authorization is invalid, splits < 2")
// }
// d.Account = splits[1]
// _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{
// "qryUserExternInfoReq": base.Json{
// "commonAccountInfo": base.Json{
// "account": d.getAccount(),
// "accountType": 1,
// },
// },
// }, nil)
// return err
return nil
if d.ref != nil {
return nil
}
decode, err := base64.StdEncoding.DecodeString(d.Authorization)
if err != nil {
return err
}
decodeStr := string(decode)
splits := strings.Split(decodeStr, ":")
if len(splits) < 2 {
return fmt.Errorf("authorization is invalid, splits < 2")
}
d.Account = splits[1]
_, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{
"qryUserExternInfoReq": base.Json{
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
},
}, nil)
return err
}
func (d *Yun139) InitReference(storage driver.Driver) error {
@ -630,12 +631,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// Progress
p := driver.NewProgress(stream.GetSize(), up)
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// upload all parts
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(stream, partSize)
limitReader := io.LimitReader(rateLimited, partSize)
// Update Progress
r := io.TeeReader(limitReader, p)
@ -786,6 +788,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if part == 0 {
part = 1
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -797,7 +800,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
byteSize = partSize
}
limitReader := io.LimitReader(stream, byteSize)
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)

View File

@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
req.Header.Set(v[0:i], v[i+1:])
}
r, err := base.HttpClient.Do(req)
log.Debugf("%+v %+v", r, r.Request.Header)
r.Body.Close()
if err != nil {
return err
}
log.Debugf("%+v %+v", r, r.Request.Header)
_ = r.Body.Close()
up(float64(i) * 100 / float64(count))
}
fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))

View File

@ -1,8 +1,8 @@
package _189pc
import (
"container/ring"
"context"
"fmt"
"net/http"
"strconv"
"strings"
@ -14,6 +14,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
type Cloud189PC struct {
@ -29,7 +30,7 @@ type Cloud189PC struct {
uploadThread int
familyTransferFolder *ring.Ring
familyTransferFolder *Cloud189Folder
cleanFamilyTransferFile func()
storageConfig driver.Config
@ -48,9 +49,18 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
}
func (y *Cloud189PC) Init(ctx context.Context) (err error) {
// compatibility with the legacy upload API
y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
y.storageConfig = config
if y.isFamily() {
// compatibility with the legacy upload API
if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" {
y.storageConfig.NoOverwriteUpload = true
}
} else {
// family-cloud transfer does not support overwrite upload
if y.Addition.FamilyTransfer {
y.storageConfig.NoOverwriteUpload = true
}
}
// handle personal-cloud and family-cloud parameters
if y.isFamily() && y.RootFolderID == "-11" {
y.RootFolderID = ""
@ -91,13 +101,14 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
}
}
// create a transfer folder to guard against duplicate names
// create the transfer folder
if y.FamilyTransfer {
if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil {
if err := y.createFamilyTransferFolder(); err != nil {
return err
}
}
// throttle cleanup of transferred files
y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() {
if err := y.cleanFamilyTransfer(context.TODO()); err != nil {
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
@ -327,35 +338,49 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if !isFamily && y.FamilyTransfer {
// redirect the upload target to the family-cloud folder
transferDstDir := dstDir
dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder)
y.familyTransferFolder = y.familyTransferFolder.Next()
dstDir = y.familyTransferFolder
// use a temporary file name
srcName := stream.GetName()
stream = &WrapFileStreamer{
FileStreamer: stream,
Name: fmt.Sprintf("0%s.transfer", uuid.NewString()),
}
// upload via the family cloud
isFamily = true
overwrite = false
defer func() {
if newObj != nil {
// batch tasks occasionally fail to delete
y.cleanFamilyTransferFile()
// transfer the family-cloud file to the personal cloud
err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true)
task := BatchTaskInfo{
FileId: newObj.GetID(),
FileName: newObj.GetName(),
IsFolder: BoolToNumber(newObj.IsDir()),
// delete the family-cloud source file
go y.Delete(context.TODO(), y.FamilyID, newObj)
// batch tasks occasionally fail to delete
go y.cleanFamilyTransferFile()
// return the error if the transfer failed
if err != nil {
return
}
// delete the source file
if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err == nil {
y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
// permanently delete
if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil {
y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
// look up the transferred file
var file *Cloud189File
file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false)
if err != nil {
if err == errs.ObjectNotFound {
err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName())
}
return
}
newObj = nil
// rename the transferred file
newObj, err = y.Rename(context.TODO(), file, srcName)
if err != nil {
// if renaming failed, delete the source file
_ = y.Delete(context.TODO(), "", file)
}
return
}
}()
}

View File

@ -18,6 +18,7 @@ import (
"strings"
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils/random"
)
@ -208,3 +209,12 @@ func IF[V any](o bool, t V, f V) V {
}
return f
}
type WrapFileStreamer struct {
model.FileStreamer
Name string
}
func (w *WrapFileStreamer) GetName() string {
return w.Name
}
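WrapFileStreamer relies on Go's struct embedding: the anonymous model.FileStreamer field forwards every interface method, and the explicit GetName override is the only change. The family-transfer path in Put uses this to upload under a throwaway "0<uuid>.transfer" name, move the result to the personal cloud, then rename it back to the original name.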

View File

@ -2,7 +2,6 @@ package _189pc
import (
"bytes"
"container/ring"
"context"
"crypto/md5"
"encoding/base64"
@ -20,9 +19,12 @@ import (
"strings"
"time"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
}
var erron RespErr
jsoniter.Unmarshal(body, &erron)
xml.Unmarshal(body, &erron)
_ = jsoniter.Unmarshal(body, &erron)
_ = xml.Unmarshal(body, &erron)
if erron.HasError() {
return nil, &erron
}
@ -185,39 +187,9 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
return body, nil
}
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
fullUrl := API_URL
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"
res := make([]model.Obj, 0, 130)
res := make([]model.Obj, 0, 100)
for pageNum := 1; ; pageNum++ {
var resp Cloud189FilesResp
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"folderId": fileId,
"fileType": "0",
"mediaAttr": "0",
"iconOption": "5",
"pageNum": fmt.Sprint(pageNum),
"pageSize": "130",
})
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(y.OrderBy),
"descending": toDesc(y.OrderDirection),
})
} else {
r.SetQueryParams(map[string]string{
"recursive": "0",
"orderBy": y.OrderBy,
"descending": toDesc(y.OrderDirection),
})
}
}, &resp, isFamily)
resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection)
if err != nil {
return nil, err
}
@ -236,6 +208,63 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool)
return res, nil
}
func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) {
fullUrl := API_URL
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"
var resp Cloud189FilesResp
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"folderId": fileId,
"fileType": "0",
"mediaAttr": "0",
"iconOption": "5",
"pageNum": fmt.Sprint(pageNum),
"pageSize": fmt.Sprint(pageSize),
})
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(orderBy),
"descending": toDesc(orderDirection),
})
} else {
r.SetQueryParams(map[string]string{
"recursive": "0",
"orderBy": orderBy,
"descending": toDesc(orderDirection),
})
}
}, &resp, isFamily)
if err != nil {
return nil, err
}
return &resp, nil
}
func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) {
for pageNum := 1; ; pageNum++ {
resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc")
if err != nil {
return nil, err
}
// everything fetched, stop
if resp.FileListAO.Count == 0 {
return nil, errs.ObjectNotFound
}
for i := 0; i < len(resp.FileListAO.FileList); i++ {
file := resp.FileListAO.FileList[i]
if file.Name == searchName {
return &file, nil
}
}
}
}
func (y *Cloud189PC) login() (err error) {
// initialize the parameters required for login
if y.loginParam == nil {
@ -481,6 +510,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
fileMd5 := md5.New()
silceMd5 := md5.New()
@ -490,7 +520,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
byteData := make([]byte, sliceSize)
if i == count {
byteData = byteData[:lastPartSize]
@ -499,6 +531,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// read the chunk
silceMd5.Reset()
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
sem.Release(1)
return nil, err
}
@ -508,6 +541,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
threadG.Go(func(ctx context.Context) error {
defer sem.Release(1)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
@ -515,7 +549,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
if err != nil {
return err
}
@ -767,6 +802,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
if err != nil {
return nil, err
}
rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
// create the upload session
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
@ -793,7 +829,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
@ -902,8 +938,7 @@ func (y *Cloud189PC) isLogin() bool {
}
// create the family-cloud transfer folder
func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
folders := ring.New(count)
func (y *Cloud189PC) createFamilyTransferFolder() error {
var rootFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
@ -912,81 +947,61 @@ func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
})
}, &rootFolder, true)
if err != nil {
return nil, err
return err
}
folderCount := 0
// fetch existing directories
files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true)
if err != nil {
return nil, err
}
for _, file := range files {
if folder, ok := file.(*Cloud189Folder); ok {
folders.Value = folder
folders = folders.Next()
folderCount++
}
}
// create new directories
for folderCount < count {
var newFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"folderName": uuid.NewString(),
"familyId": y.FamilyID,
"parentId": rootFolder.GetID(),
})
}, &newFolder, true)
if err != nil {
return nil, err
}
folders.Value = &newFolder
folders = folders.Next()
folderCount++
}
return folders, nil
y.familyTransferFolder = &rootFolder
return nil
}
// clean up the transfer folder
func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error {
var tasks []BatchTaskInfo
r := y.familyTransferFolder
for p := r.Next(); p != r; p = p.Next() {
folder := p.Value.(*Cloud189Folder)
files, err := y.getFiles(ctx, folder.GetID(), true)
transferFolderId := y.familyTransferFolder.GetID()
for pageNum := 1; ; pageNum++ {
resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc")
if err != nil {
return err
}
for _, file := range files {
// everything fetched, stop
if resp.FileListAO.Count == 0 {
break
}
var tasks []BatchTaskInfo
for i := 0; i < len(resp.FileListAO.FolderList); i++ {
folder := resp.FileListAO.FolderList[i]
tasks = append(tasks, BatchTaskInfo{
FileId: folder.GetID(),
FileName: folder.GetName(),
IsFolder: BoolToNumber(folder.IsDir()),
})
}
for i := 0; i < len(resp.FileListAO.FileList); i++ {
file := resp.FileListAO.FileList[i]
tasks = append(tasks, BatchTaskInfo{
FileId: file.GetID(),
FileName: file.GetName(),
IsFolder: BoolToNumber(file.IsDir()),
})
}
}
if len(tasks) > 0 {
// delete
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
if err != nil {
if len(tasks) > 0 {
// delete
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err
}
return nil
}
@ -1063,6 +1078,34 @@ func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId s
}
}
// permanently delete a file
func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error {
task := BatchTaskInfo{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
}
// delete the source file
resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task)
if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// empty the recycle bin
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
if err != nil {
return err
}
return nil
}
func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {

View File

@ -63,6 +63,7 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}

View File

@ -181,25 +181,29 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream)
func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
if err != nil {
return err
}
req.Header.Set("Authorization", d.Token)
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName()))
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
req.Header.Set("Password", d.MetaPassword)
if md5 := stream.GetHash().GetHash(utils.MD5); len(md5) > 0 {
if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
req.Header.Set("X-File-Md5", md5)
}
if sha1 := stream.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
req.Header.Set("X-File-Sha1", sha1)
}
if sha256 := stream.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
req.Header.Set("X-File-Sha256", sha256)
}
req.ContentLength = stream.GetSize()
req.ContentLength = s.GetSize()
// client := base.NewHttpClient()
// client.Timeout = time.Hour * 6
res, err := base.HttpClient.Do(req)

View File

@ -14,13 +14,12 @@ import (
"os"
"time"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
}
if d.RapidUpload {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
utils.CopyWithBufferN(buf, file, 1024)
_, err := utils.CopyWithBufferN(buf, file, 1024)
if err != nil {
return err
}
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
if localFile != nil {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
file.Reader = localFile
}
rateLimited := driver.NewLimitedUploadStream(ctx, file)
for i, partInfo := range resp.PartInfoList {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload {
url = partInfo.InternalUploadUrl
}
req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
if err != nil {
return err
}
@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if err != nil {
return err
}
res.Body.Close()
_ = res.Body.Close()
if count > 0 {
up(float64(i) * 100 / float64(count))
}

View File

@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if err != nil {
return err
}
res.Body.Close()
_ = res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
return fmt.Errorf("upload status: %d", res.StatusCode)
}
@ -251,8 +251,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
rd = utils.NewMultiReadable(srd)
}
err = retry.Do(func() error {
rd.Reset()
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),

View File

@ -2,6 +2,7 @@ package drivers
import (
_ "github.com/alist-org/alist/v3/drivers/115"
_ "github.com/alist-org/alist/v3/drivers/115_open"
_ "github.com/alist-org/alist/v3/drivers/115_share"
_ "github.com/alist-org/alist/v3/drivers/123"
_ "github.com/alist-org/alist/v3/drivers/123_link"

View File

@ -12,13 +12,14 @@ import (
"strconv"
"time"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
log "github.com/sirupsen/logrus"
)
@ -187,7 +188,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
streamSize := stream.GetSize()
sliceSize := d.getSliceSize()
sliceSize := d.getSliceSize(streamSize)
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
lastBlockSize := streamSize % sliceSize
if streamSize > 0 && lastBlockSize == 0 {
@ -195,7 +196,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
// calculate md5 of the first 256KB of data
const SliceSize int64 = 256 * 1024
const SliceSize int64 = 256 * utils.KB
// calculate md5
blockList := make([]string, 0, count)
byteSize := sliceSize
@ -259,20 +260,22 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
}
// step 2: upload the parts
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread)
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
defer sem.Release(1)
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
@ -281,7 +284,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
err := d.uploadSlice(ctx, params, stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
if err != nil {
return err
}
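Here, as in the 189pc diff above, a weighted semaphore caps in-flight slice uploads at three while the error group still owns completion and error propagation. A minimal standalone sketch of the pattern using the x/sync packages (the loop body is an illustrative stand-in, not AList's own errgroup wrapper):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	sem := semaphore.NewWeighted(3) // at most 3 parts in flight

	for part := 1; part <= 10; part++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context canceled: an earlier part failed
		}
		part := part
		g.Go(func() error {
			defer sem.Release(1)
			fmt.Println("uploading part", part) // stand-in for the real upload
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}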

View File

@ -8,16 +8,17 @@ import (
type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"`
driver.RootPath
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
}
var config = driver.Config{

View File

@ -136,7 +136,7 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
return res, nil
}
func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) {
func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
var resp DownloadResp
params := map[string]string{
"method": "filemetas",
@ -164,7 +164,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
}, nil
}
func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) {
func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
var resp DownloadResp2
param := map[string]string{
"target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
@ -230,22 +230,72 @@ func joinTime(form map[string]string, ctime, mtime int64) {
const (
DefaultSliceSize int64 = 4 * utils.MB
VipSliceSize = 16 * utils.MB
SVipSliceSize = 32 * utils.MB
VipSliceSize int64 = 16 * utils.MB
SVipSliceSize int64 = 32 * utils.MB
MaxSliceNum = 2048 // the docs say 1024 (or nothing at all), but testing shows 2048
SliceStep int64 = 1 * utils.MB
)
func (d *BaiduNetdisk) getSliceSize() int64 {
if d.CustomUploadPartSize != 0 {
return d.CustomUploadPartSize
}
switch d.vipType {
case 1:
return VipSliceSize
case 2:
return SVipSliceSize
default:
func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
// non-VIP accounts are fixed at 4MB
if d.vipType == 0 {
if d.CustomUploadPartSize != 0 {
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
}
if filesize > MaxSliceNum*DefaultSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return DefaultSliceSize
}
if d.CustomUploadPartSize != 0 {
if d.CustomUploadPartSize < DefaultSliceSize {
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
return DefaultSliceSize
}
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
return VipSliceSize
}
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
return SVipSliceSize
}
return d.CustomUploadPartSize
}
maxSliceSize := DefaultSliceSize
switch d.vipType {
case 1:
maxSliceSize = VipSliceSize
case 2:
maxSliceSize = SVipSliceSize
}
// upload on low bandwidth
if d.LowBandwithUploadMode {
size := DefaultSliceSize
for size <= maxSliceSize {
if filesize <= MaxSliceNum*size {
return size
}
size += SliceStep
}
}
if filesize > MaxSliceNum*maxSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}
return maxSliceSize
}
// func encodeURIComponent(str string) string {
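A worked example of the low-bandwidth branch above: a VIP account (16MB cap) uploading a 12GiB file needs 12288MB / 2048 slices = 6MB per slice, so the loop tries 4MB (2048 slices cover only 8GiB, too small), then 5MB (10GiB, still too small), and settles on 6MB; without LowBandwithUploadMode the same upload would use the full 16MB slices.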

View File

@ -13,6 +13,8 @@ import (
"strings"
"time"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -314,10 +316,14 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
if partseq+1 == count {
@ -325,6 +331,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
}
threadG.Go(func(ctx context.Context) error {
defer sem.Release(1)
uploadParams := map[string]string{
"method": "upload",
"path": params["path"],
@ -335,7 +342,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(uploadParams)
r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
r.SetFileReader("file", stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
}, nil)
if err != nil {
return err
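The two additions above form a bounded-concurrency pattern: the weighted semaphore caps the number of chunk uploads in flight at three, while the existing retry group keeps the error handling. A self-contained sketch of the same pattern, assuming golang.org/x/sync/errgroup in place of alist's threadG helper:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(3) // at most 3 parts uploading at once
	g, ctx := errgroup.WithContext(context.Background())
	for part := 0; part < 10; part++ {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context canceled while waiting for a slot
		}
		part := part
		g.Go(func() error {
			defer sem.Release(1) // free the slot when this part finishes
			fmt.Println("uploading part", part)
			return nil
		})
	}
	_ = g.Wait()
}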

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/net"
"github.com/go-resty/resty/v2"
)
@ -26,7 +27,7 @@ func InitClient() {
NoRedirectClient.SetHeader("user-agent", UserAgent)
RestyClient = NewRestyClient()
HttpClient = NewHttpClient()
HttpClient = net.NewHttpClient()
}
func NewRestyClient() *resty.Client {
@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client {
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
return client
}
func NewHttpClient() *http.Client {
return &http.Client{
Timeout: time.Hour * 48,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
},
}
}

View File

@ -215,7 +215,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
return nil
}
func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
var resp UploadDataRsp
_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
}, &resp)
@ -227,11 +227,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
filePart, err := writer.CreateFormFile("file", stream.GetName())
filePart, err := writer.CreateFormFile("file", file.GetName())
if err != nil {
return err
}
_, err = utils.CopyWithBuffer(filePart, stream)
_, err = utils.CopyWithBuffer(filePart, file)
if err != nil {
return err
}
@ -248,7 +248,14 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
if err != nil {
return err
}
req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body)
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: body,
Size: int64(body.Len()),
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}
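This wrapper chain recurs throughout the changeset: SimpleReaderWithSize attaches a known size to a plain io.Reader, ReaderUpdatingProgress reports percentages as bytes flow through, and NewLimitedUploadStream, the outermost layer, applies the new server-side upload rate limit and honors context cancellation. The composition as it appears in these diffs (alist internal/driver types, signatures inferred from the hunks rather than checked against the package):

r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
	Reader: &driver.SimpleReaderWithSize{
		Reader: body,              // any io.Reader
		Size:   int64(body.Len()), // total size, used to compute percentages
	},
	UpdateProgress: up, // callback receiving 0-100
})

Because the limiter wraps outermost, progress updates tick at the throttled rate rather than the raw read rate.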

View File

@ -1,7 +1,9 @@
package cloudreve
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"path"
@ -147,7 +149,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
"size": stream.GetSize(),
"name": stream.GetName(),
"policy_id": r.Policy.Id,
"last_modified": stream.ModTime().Unix(),
"last_modified": stream.ModTime().UnixMilli(),
}
// fetch the upload session info
@ -173,7 +175,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
var n int
buf = make([]byte, chunkSize)
n, err = io.ReadAtLeast(stream, buf, chunkSize)
if err != nil && err != io.ErrUnexpectedEOF {
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
if err == io.EOF {
return nil
}
@ -186,7 +188,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetHeader("Content-Length", strconv.Itoa(n))
req.SetBody(buf)
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)))
}, nil)
if err != nil {
break

View File

@ -100,7 +100,7 @@ func (d *Cloudreve) login() error {
if err == nil {
break
}
if err != nil && err.Error() != "CAPTCHA not match." {
if err.Error() != "CAPTCHA not match." {
break
}
}
@ -202,19 +202,21 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData))
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
res.Body.Close()
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
@ -241,12 +243,13 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
@ -256,10 +259,10 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
data, _ := io.ReadAll(res.Body)
res.Body.Close()
_ = res.Body.Close()
return errors.New(string(data))
}
res.Body.Close()
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
// send the callback request after a successful upload
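The Content-Length change in the two hunks above is not cosmetic: for outgoing requests, Go's net/http ignores a Content-Length value set through Header.Set and only honors the request's ContentLength field (a zero value with a non-nil body triggers chunked encoding). A minimal illustration using the names from the hunk:

req, err := http.NewRequest("POST", uploadUrl, bytes.NewBuffer(byteData))
if err != nil {
	return err
}
// Header.Set("Content-Length", ...) is silently dropped for client requests;
// the struct field below is what net/http actually writes to the wire.
req.ContentLength = byteSize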

View File

@ -263,12 +263,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
rrc := remoteLink.RangeReadCloser
if len(remoteLink.URL) > 0 {
rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
if err != nil {
return nil, err
}
@ -287,8 +282,9 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
// can be returned directly; Close is not called when reading finishes, only when the connection drops
return remoteLink.MFile, nil
// keep reusing the same MFile and close it once at the end.
remoteClosers.Add(remoteLink.MFile)
return io.NopCloser(remoteLink.MFile), nil
}
return nil, errs.NotSupport
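The premature-close fix works in two steps: the MFile is registered with remoteClosers so it is closed exactly once when the whole link is torn down, and callers receive an io.NopCloser view whose Close is a no-op, so a consumer closing after its first range read no longer kills the shared handle:

// io.NopCloser(r) returns a ReadCloser whose Close does nothing, so the
// shared MFile stays open for later range reads until remoteClosers fires.
rc := io.NopCloser(remoteLink.MFile)
_ = rc.Close() // no-op; the underlying MFile is still open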
@ -304,7 +300,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
Header: remoteLink.Header,
RangeReadCloser: resultRangeReadCloser,
Expiration: remoteLink.Expiration,
}

View File

@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
}
url := d.contentBase + "/2/files/upload_session/append_v2"
reader := io.LimitReader(stream, PartSize)
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
req, err := http.NewRequest(http.MethodPost, url, reader)
if err != nil {
log.Errorf("failed to update file when append to upload session, err: %+v", err)
@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
return err
}
_ = res.Body.Close()
if count > 0 {
up(float64(i+1) * 100 / float64(count))
}
up(float64(i+1) * 100 / float64(count))
offset += byteSize
}
// 3.finish
toPath := dstDir.GetPath() + "/" + stream.GetName()

View File

@ -3,6 +3,7 @@ package febbox
import (
"encoding/json"
"errors"
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/op"
"github.com/go-resty/resty/v2"
@ -135,6 +136,9 @@ func (d *FebBox) getDownloadLink(id string, ip string) (string, error) {
if err = json.Unmarshal(res, &fileDownloadResp); err != nil {
return "", err
}
if len(fileDownloadResp.Data) == 0 {
return "", fmt.Errorf("can not get download link, code:%d, msg:%s", fileDownloadResp.Code, fileDownloadResp.Msg)
}
return fileDownloadResp.Data[0].DownloadURL, nil
}

View File

@ -114,13 +114,15 @@ func (d *FTP) Remove(ctx context.Context, obj model.Obj) error {
}
}
func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
if err := d.login(); err != nil {
return err
}
// TODO: support cancel
path := stdpath.Join(dstDir.GetPath(), stream.GetName())
return d.conn.Stor(encode(path, d.Encoding), stream)
path := stdpath.Join(dstDir.GetPath(), s.GetName())
return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}))
}
var _ driver.Driver = (*FTP)(nil)

View File

@ -3,7 +3,6 @@ package github
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
@ -12,12 +11,14 @@ import (
"sync"
"text/template"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
@ -33,6 +34,7 @@ type Github struct {
moveMsgTmpl *template.Template
isOnBranch bool
commitMutex sync.Mutex
pgpEntity *openpgp.Entity
}
func (d *Github) Config() driver.Config {
@ -84,10 +86,13 @@ func (d *Github) Init(ctx context.Context) error {
}
d.client = base.NewRestyClient().
SetHeader("Accept", "application/vnd.github.object+json").
SetHeader("Authorization", "Bearer "+d.Token).
SetHeader("X-GitHub-Api-Version", "2022-11-28").
SetLogger(log.StandardLogger()).
SetDebug(false)
token := strings.TrimSpace(d.Token)
if token != "" {
d.client = d.client.SetHeader("Authorization", "Bearer "+token)
}
if d.Ref == "" {
repo, err := d.getRepo()
if err != nil {
@ -99,6 +104,26 @@ func (d *Github) Init(ctx context.Context) error {
_, err = d.getBranchHead()
d.isOnBranch = err == nil
}
if d.GPGPrivateKey != "" {
if d.CommitterName == "" || d.AuthorName == "" {
user, e := d.getAuthenticatedUser()
if e != nil {
return e
}
if d.CommitterName == "" {
d.CommitterName = user.Name
d.CommitterEmail = user.Email
}
if d.AuthorName == "" {
d.AuthorName = user.Name
d.AuthorEmail = user.Email
}
}
d.pgpEntity, err = loadPrivateKey(d.GPGPrivateKey, d.GPGKeyPassphrase)
if err != nil {
return err
}
}
return nil
}
@ -148,8 +173,13 @@ func (d *Github) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
if obj.Type == "submodule" {
return nil, errors.New("cannot download a submodule")
}
url := obj.DownloadURL
ghProxy := strings.TrimSpace(d.Addition.GitHubProxy)
if ghProxy != "" {
url = strings.Replace(url, "https://raw.githubusercontent.com", ghProxy, 1)
}
return &model.Link{
URL: obj.DownloadURL,
URL: url,
}, nil
}
@ -166,10 +196,39 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
if parent.Entries == nil {
return errs.NotFolder
}
// if parent folder contains .gitkeep only, mark it and delete .gitkeep later
gitKeepSha := ""
subDirSha, err := d.newTree("", []interface{}{
map[string]string{
"path": ".gitkeep",
"mode": "100644",
"type": "blob",
"content": "",
},
})
if err != nil {
return err
}
newTree := make([]interface{}, 0, 2)
newTree = append(newTree, TreeObjReq{
Path: dirName,
Mode: "040000",
Type: "tree",
Sha: subDirSha,
})
if len(parent.Entries) == 1 && parent.Entries[0].Name == ".gitkeep" {
gitKeepSha = parent.Entries[0].Sha
newTree = append(newTree, TreeObjReq{
Path: ".gitkeep",
Mode: "100644",
Type: "blob",
Sha: nil,
})
}
newSha, err := d.newTree(parent.Sha, newTree)
if err != nil {
return err
}
rootSha, err := d.renewParentTrees(parentDir.GetPath(), parent.Sha, newSha, "/")
if err != nil {
return err
}
commitMessage, err := getMessage(d.mkdirMsgTmpl, &MessageTemplateVars{
@ -182,13 +241,7 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
if err != nil {
return err
}
if err = d.createGitKeep(stdpath.Join(parentDir.GetPath(), dirName), commitMessage); err != nil {
return err
}
if gitKeepSha != "" {
err = d.delete(stdpath.Join(parentDir.GetPath(), ".gitkeep"), gitKeepSha, commitMessage)
}
return err
return d.commit(commitMessage, rootSha)
}
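The reworked MakeDir performs one commit through the Git data API instead of two content-API calls: it creates a one-entry tree holding an empty .gitkeep blob, grafts that tree into the parent (removing the parent's own .gitkeep when it was the only entry, by passing a nil sha), rewrites the ancestor trees up to the root, and commits. The subtree step corresponds to GitHub's POST /repos/{owner}/{repo}/git/trees endpoint; a sketch of the payload newTree sends for it, mirroring the hunk above:

// Serializes to: {"tree":[{"path":".gitkeep","mode":"100644","type":"blob","content":""}]}
subTree := []interface{}{
	map[string]string{
		"path":    ".gitkeep",
		"mode":    "100644", // regular file
		"type":    "blob",
		"content": "", // empty blob, created implicitly by the API
	},
}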
func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -631,33 +684,15 @@ func (d *Github) get(path string) (*Object, error) {
return &resp, err
}
func (d *Github) createGitKeep(path, message string) error {
body := map[string]interface{}{
"message": message,
"content": "",
"branch": d.Ref,
}
d.addCommitterAndAuthor(&body)
res, err := d.client.R().SetBody(body).Put(d.getContentApiUrl(stdpath.Join(path, ".gitkeep")))
if err != nil {
return err
}
if res.StatusCode() != 200 && res.StatusCode() != 201 {
return toErr(res)
}
return nil
}
func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) (string, error) {
func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) {
beforeContent := "{\"encoding\":\"base64\",\"content\":\""
afterContent := "\"}"
length := int64(len(beforeContent)) + calculateBase64Length(stream.GetSize()) + int64(len(afterContent))
length := int64(len(beforeContent)) + calculateBase64Length(s.GetSize()) + int64(len(afterContent))
beforeContentReader := strings.NewReader(beforeContent)
contentReader, contentWriter := io.Pipe()
go func() {
encoder := base64.NewEncoder(base64.StdEncoding, contentWriter)
if _, err := utils.CopyWithBuffer(encoder, stream); err != nil {
if _, err := utils.CopyWithBuffer(encoder, s); err != nil {
_ = contentWriter.CloseWithError(err)
return
}
@ -667,23 +702,29 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
afterContentReader := strings.NewReader(afterContent)
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo),
&ReaderWithProgress{
Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
Length: length,
Progress: up,
})
driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
Size: length,
},
UpdateProgress: up,
}))
if err != nil {
return "", err
}
req.Header.Set("Accept", "application/vnd.github+json")
req.Header.Set("Authorization", "Bearer "+d.Token)
req.Header.Set("X-GitHub-Api-Version", "2022-11-28")
token := strings.TrimSpace(d.Token)
if token != "" {
req.Header.Set("Authorization", "Bearer "+token)
}
req.ContentLength = length
res, err := base.HttpClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
resBody, err := io.ReadAll(res.Body)
if err != nil {
return "", err
@ -703,23 +744,6 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
return resp.Sha, nil
}
func (d *Github) delete(path, sha, message string) error {
body := map[string]interface{}{
"message": message,
"sha": sha,
"branch": d.Ref,
}
d.addCommitterAndAuthor(&body)
res, err := d.client.R().SetBody(body).Delete(d.getContentApiUrl(path))
if err != nil {
return err
}
if res.StatusCode() != 200 {
return toErr(res)
}
return nil
}
func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, error) {
for path != until {
path = stdpath.Dir(path)
@ -781,11 +805,11 @@ func (d *Github) getTreeDirectly(path string) (*TreeResp, string, error) {
}
func (d *Github) newTree(baseSha string, tree []interface{}) (string, error) {
res, err := d.client.R().
SetBody(&TreeReq{
BaseTree: baseSha,
Trees: tree,
}).
body := &TreeReq{Trees: tree}
if baseSha != "" {
body.BaseTree = baseSha
}
res, err := d.client.R().SetBody(body).
Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees", d.Owner, d.Repo))
if err != nil {
return "", err
@ -808,6 +832,13 @@ func (d *Github) commit(message, treeSha string) error {
"parents": []string{oldCommit},
}
d.addCommitterAndAuthor(&body)
if d.pgpEntity != nil {
signature, e := signCommit(&body, d.pgpEntity)
if e != nil {
return e
}
body["signature"] = signature
}
res, err := d.client.R().SetBody(body).Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/commits", d.Owner, d.Repo))
if err != nil {
return err
@ -911,6 +942,21 @@ func (d *Github) getRepo() (*RepoResp, error) {
return &resp, nil
}
func (d *Github) getAuthenticatedUser() (*UserResp, error) {
res, err := d.client.R().Get("https://api.github.com/user")
if err != nil {
return nil, err
}
if res.StatusCode() != 200 {
return nil, toErr(res)
}
resp := &UserResp{}
if err = utils.Json.Unmarshal(res.Body(), resp); err != nil {
return nil, err
}
return resp, nil
}
func (d *Github) addCommitterAndAuthor(m *map[string]interface{}) {
if d.CommitterName != "" {
committer := map[string]string{

View File

@ -7,20 +7,23 @@ import (
type Addition struct {
driver.RootPath
Token string `json:"token" type:"string" required:"true"`
Owner string `json:"owner" type:"string" required:"true"`
Repo string `json:"repo" type:"string" required:"true"`
Ref string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."`
CommitterName string `json:"committer_name" type:"string"`
CommitterEmail string `json:"committer_email" type:"string"`
AuthorName string `json:"author_name" type:"string"`
AuthorEmail string `json:"author_email" type:"string"`
MkdirCommitMsg string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"`
DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"`
PutCommitMsg string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"`
RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"`
CopyCommitMsg string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"`
MoveCommitMsg string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"`
Token string `json:"token" type:"string" required:"true"`
Owner string `json:"owner" type:"string" required:"true"`
Repo string `json:"repo" type:"string" required:"true"`
Ref string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."`
GitHubProxy string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"`
GPGPrivateKey string `json:"gpg_private_key" type:"text"`
GPGKeyPassphrase string `json:"gpg_key_passphrase" type:"string"`
CommitterName string `json:"committer_name" type:"string"`
CommitterEmail string `json:"committer_email" type:"string"`
AuthorName string `json:"author_name" type:"string"`
AuthorEmail string `json:"author_email" type:"string"`
MkdirCommitMsg string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"`
DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"`
PutCommitMsg string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"`
RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"`
CopyCommitMsg string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"`
MoveCommitMsg string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"`
}
var config = driver.Config{

View File

@ -79,7 +79,7 @@ type TreeResp struct {
}
type TreeReq struct {
BaseTree string `json:"base_tree"`
BaseTree interface{} `json:"base_tree,omitempty"`
Trees []interface{} `json:"tree"`
}
@ -100,3 +100,8 @@ type UpdateRefReq struct {
type RepoResp struct {
DefaultBranch string `json:"default_branch"`
}
type UserResp struct {
Name string `json:"name"`
Email string `json:"email"`
}

View File

@ -1,32 +1,22 @@
package github
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
"text/template"
"time"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/ProtonMail/go-crypto/openpgp/armor"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"io"
"math"
"strings"
"text/template"
)
type ReaderWithProgress struct {
Reader io.Reader
Length int64
Progress func(percentage float64)
offset int64
}
func (r *ReaderWithProgress) Read(p []byte) (int, error) {
n, err := r.Reader.Read(p)
r.offset += int64(n)
r.Progress(math.Min(100.0, float64(r.offset)/float64(r.Length)*100.0))
return n, err
}
type MessageTemplateVars struct {
UserName string
ObjName string
@ -113,3 +103,65 @@ func getUsername(ctx context.Context) string {
}
return user.Username
}
func loadPrivateKey(key, passphrase string) (*openpgp.Entity, error) {
entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key))
if err != nil {
return nil, err
}
if len(entityList) < 1 {
return nil, fmt.Errorf("no keys found in key ring")
}
entity := entityList[0]
pass := []byte(passphrase)
if entity.PrivateKey != nil && entity.PrivateKey.Encrypted {
if err = entity.PrivateKey.Decrypt(pass); err != nil {
return nil, fmt.Errorf("password incorrect: %+v", err)
}
}
for _, subKey := range entity.Subkeys {
if subKey.PrivateKey != nil && subKey.PrivateKey.Encrypted {
if err = subKey.PrivateKey.Decrypt(pass); err != nil {
return nil, fmt.Errorf("password incorrect: %+v", err)
}
}
}
return entity, nil
}
func signCommit(m *map[string]interface{}, entity *openpgp.Entity) (string, error) {
var commit strings.Builder
commit.WriteString(fmt.Sprintf("tree %s\n", (*m)["tree"].(string)))
parents := (*m)["parents"].([]string)
for _, p := range parents {
commit.WriteString(fmt.Sprintf("parent %s\n", p))
}
now := time.Now()
_, offset := now.Zone()
hour := offset / 3600
author := (*m)["author"].(map[string]string)
commit.WriteString(fmt.Sprintf("author %s <%s> %d %+03d00\n", author["name"], author["email"], now.Unix(), hour))
author["date"] = now.Format(time.RFC3339)
committer := (*m)["committer"].(map[string]string)
commit.WriteString(fmt.Sprintf("committer %s <%s> %d %+03d00\n", committer["name"], committer["email"], now.Unix(), hour))
committer["date"] = now.Format(time.RFC3339)
commit.WriteString(fmt.Sprintf("\n%s", (*m)["message"].(string)))
data := commit.String()
var sigBuffer bytes.Buffer
err := openpgp.DetachSign(&sigBuffer, entity, strings.NewReader(data), nil)
if err != nil {
return "", fmt.Errorf("signing failed: %v", err)
}
var armoredSig bytes.Buffer
armorWriter, err := armor.Encode(&armoredSig, "PGP SIGNATURE", nil)
if err != nil {
return "", err
}
if _, err = io.Copy(armorWriter, &sigBuffer); err != nil {
return "", err
}
_ = armorWriter.Close()
return armoredSig.String(), nil
}
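signCommit reproduces the canonical commit object that git itself hashes and signs: a tree line, one parent line per parent, author and committer lines with Unix timestamp and UTC offset, a blank line, then the message. The detached armored signature over those exact bytes goes into the commit request's signature field. For a single-parent commit the signed payload looks like this (hypothetical SHAs and identity):

payload := `tree 9c91a1f4e...
parent 3f8b2d07a...
author Alice <alice@example.com> 1742169093 +0800
committer Alice <alice@example.com> 1742169093 +0800

alice mkdir /docs`
_ = payload

Note that the %+03d00 offset is built from whole hours only, so half-hour time zones such as +0530 would be rendered as +0500.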

View File

@ -4,8 +4,6 @@ import (
"context"
"fmt"
"net/http"
"time"
"strings"
"github.com/alist-org/alist/v3/internal/driver"
@ -18,7 +16,7 @@ type GithubReleases struct {
model.Storage
Addition
releases []Release
points []MountPoint
}
func (d *GithubReleases) Config() driver.Config {
@ -30,17 +28,11 @@ func (d *GithubReleases) GetAddition() driver.Additional {
}
func (d *GithubReleases) Init(ctx context.Context) error {
SetHeader(d.Addition.Token)
repos, err := ParseRepos(d.Addition.RepoStructure, d.Addition.ShowAllVersion)
if err != nil {
return err
}
d.releases = repos
d.ParseRepos(d.Addition.RepoStructure)
return nil
}
func (d *GithubReleases) Drop(ctx context.Context) error {
ClearCache()
return nil
}
@ -48,67 +40,83 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
files := make([]File, 0)
path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))
for _, repo := range d.releases {
if repo.Path == path { // same as the repo path
resp, err := GetRepoReleaseInfo(repo.RepoName, repo.ID, path, d.Storage.CacheExpiration)
if err != nil {
return nil, err
}
files = append(files, resp.Files...)
for i := range d.points {
point := &d.points[i]
if d.Addition.ShowReadme {
resp, err := GetGithubOtherFile(repo.RepoName, path, d.Storage.CacheExpiration)
if err != nil {
return nil, err
if !d.Addition.ShowAllVersion { // latest
point.RequestRelease(d.GetRequest, args.Refresh)
if point.Point == path { // same as the mount point
files = append(files, point.GetLatestRelease()...)
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
} else if strings.HasPrefix(point.Point, path) { // path is a parent directory of the mount point
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
continue
}
files = append(files, *resp...)
}
} else if strings.HasPrefix(repo.Path, path) { // the repo path is below this directory
nextDir := GetNextDir(repo.Path, path)
if nextDir == "" {
continue
}
if d.Addition.ShowAllVersion {
files = append(files, File{
FileName: nextDir,
Size: 0,
CreateAt: time.Time{},
UpdateAt: time.Time{},
Url: "",
Type: "dir",
Path: fmt.Sprintf("%s/%s", path, nextDir),
})
continue
}
repo, _ := GetRepoReleaseInfo(repo.RepoName, repo.Version, path, d.Storage.CacheExpiration)
hasSameDir := false
for index, file := range files {
if file.FileName == nextDir {
hasSameDir = true
files[index].Size += repo.Size
files[index].UpdateAt = func(a time.Time, b time.Time) time.Time {
if a.After(b) {
return a
}
return b
}(files[index].UpdateAt, repo.UpdateAt)
break
hasSameDir := false
for index := range files {
if files[index].GetName() == nextDir {
hasSameDir = true
files[index].Size += point.GetLatestSize()
break
}
}
if !hasSameDir {
files = append(files, File{
Path: path + "/" + nextDir,
FileName: nextDir,
Size: point.GetLatestSize(),
UpdateAt: point.Release.PublishedAt,
CreateAt: point.Release.CreatedAt,
Type: "dir",
Url: "",
})
}
}
} else { // all version
point.RequestReleases(d.GetRequest, args.Refresh)
if !hasSameDir {
files = append(files, File{
FileName: nextDir,
Size: repo.Size,
CreateAt: repo.CreateAt,
UpdateAt: repo.UpdateAt,
Url: repo.Url,
Type: "dir",
Path: fmt.Sprintf("%s/%s", path, nextDir),
})
if point.Point == path { // same as the mount point
files = append(files, point.GetAllVersion()...)
if d.Addition.ShowReadme {
files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
}
} else if strings.HasPrefix(point.Point, path) { // path is a parent directory of the mount point
nextDir := GetNextDir(point.Point, path)
if nextDir == "" {
continue
}
hasSameDir := false
for index := range files {
if files[index].GetName() == nextDir {
hasSameDir = true
files[index].Size += point.GetAllVersionSize()
break
}
}
if !hasSameDir {
files = append(files, File{
FileName: nextDir,
Path: path + "/" + nextDir,
Size: point.GetAllVersionSize(),
UpdateAt: (*point.Releases)[0].PublishedAt,
CreateAt: (*point.Releases)[0].CreatedAt,
Type: "dir",
Url: "",
})
}
} else if strings.HasPrefix(path, point.Point) { // path is a subdirectory of the mount point
tagName := GetNextDir(path, point.Point)
if tagName == "" {
continue
}
files = append(files, point.GetReleaseByTagName(tagName)...)
}
}
}
@ -119,35 +127,41 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
}
func (d *GithubReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
url := file.GetID()
gh_proxy := strings.TrimSpace(d.Addition.GitHubProxy)
if gh_proxy != "" {
url = strings.Replace(url, "https://github.com", gh_proxy, 1)
}
link := model.Link{
URL: file.GetID(),
URL: url,
Header: http.Header{},
}
return &link, nil
}
func (d *GithubReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
// TODO create folder, optional
return nil, errs.NotImplement
}
func (d *GithubReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO move obj, optional
return nil, errs.NotImplement
}
func (d *GithubReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
// TODO rename obj, optional
return nil, errs.NotImplement
}
func (d *GithubReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO copy obj, optional
return nil, errs.NotImplement
}
func (d *GithubReleases) Remove(ctx context.Context, obj model.Obj) error {
// TODO remove obj, optional
return errs.NotImplement
}
func (d *GithubReleases) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return nil, errs.NotImplement
}
var _ driver.Driver = (*GithubReleases)(nil)

View File

@ -7,10 +7,11 @@ import (
type Addition struct {
driver.RootID
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"/path/to/alist-gh:alistGo/alist\n/path/to2/alist-web-gh:AlistGo/alist-web" help:"structure:[path:]org/repo"`
RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README, LICENSE files"`
Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}
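RepoStructure takes one mount per line in the form [path:]org/repo: an optional mount path, a colon, then the repository. A hypothetical value, with one repo on the driver's default mount path and another mounted under /web:

// Two mount points, one per line; the "path:" prefix is optional.
repoStructure := "alistGo/alist\n/web:AlistGo/alist-web"
_ = repoStructure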
var config = driver.Config{

View File

@ -0,0 +1,86 @@
package github_releases
type Release struct {
Url string `json:"url"`
AssetsUrl string `json:"assets_url"`
UploadUrl string `json:"upload_url"`
HtmlUrl string `json:"html_url"`
Id int `json:"id"`
Author User `json:"author"`
NodeId string `json:"node_id"`
TagName string `json:"tag_name"`
TargetCommitish string `json:"target_commitish"`
Name string `json:"name"`
Draft bool `json:"draft"`
Prerelease bool `json:"prerelease"`
CreatedAt string `json:"created_at"`
PublishedAt string `json:"published_at"`
Assets []Asset `json:"assets"`
TarballUrl string `json:"tarball_url"`
ZipballUrl string `json:"zipball_url"`
Body string `json:"body"`
Reactions Reactions `json:"reactions"`
}
type User struct {
Login string `json:"login"`
Id int `json:"id"`
NodeId string `json:"node_id"`
AvatarUrl string `json:"avatar_url"`
GravatarId string `json:"gravatar_id"`
Url string `json:"url"`
HtmlUrl string `json:"html_url"`
FollowersUrl string `json:"followers_url"`
FollowingUrl string `json:"following_url"`
GistsUrl string `json:"gists_url"`
StarredUrl string `json:"starred_url"`
SubscriptionsUrl string `json:"subscriptions_url"`
OrganizationsUrl string `json:"organizations_url"`
ReposUrl string `json:"repos_url"`
EventsUrl string `json:"events_url"`
ReceivedEventsUrl string `json:"received_events_url"`
Type string `json:"type"`
UserViewType string `json:"user_view_type"`
SiteAdmin bool `json:"site_admin"`
}
type Asset struct {
Url string `json:"url"`
Id int `json:"id"`
NodeId string `json:"node_id"`
Name string `json:"name"`
Label string `json:"label"`
Uploader User `json:"uploader"`
ContentType string `json:"content_type"`
State string `json:"state"`
Size int64 `json:"size"`
DownloadCount int `json:"download_count"`
CreatedAt string `json:"created_at"`
UpdatedAt string `json:"updated_at"`
BrowserDownloadUrl string `json:"browser_download_url"`
}
type Reactions struct {
Url string `json:"url"`
TotalCount int `json:"total_count"`
PlusOne int `json:"+1"`
MinusOne int `json:"-1"`
Laugh int `json:"laugh"`
Hooray int `json:"hooray"`
Confused int `json:"confused"`
Heart int `json:"heart"`
Rocket int `json:"rocket"`
Eyes int `json:"eyes"`
}
type FileInfo struct {
Name string `json:"name"`
Path string `json:"path"`
Sha string `json:"sha"`
Size int64 `json:"size"`
Url string `json:"url"`
HtmlUrl string `json:"html_url"`
GitUrl string `json:"git_url"`
DownloadUrl string `json:"download_url"`
Type string `json:"type"`
}

View File

@ -1,19 +1,181 @@
package github_releases
import (
"encoding/json"
"strings"
"time"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type MountPoint struct {
Point string // mount point
Repo string // repository name, owner/repo
Release *Release // pointer to the latest release
Releases *[]Release // pointer to all releases
OtherFile *[]FileInfo // other files in the repo root
}
// request the latest release
func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) {
if m.Repo == "" {
return
}
if m.Release == nil || refresh {
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases/latest")
m.Release = new(Release)
json.Unmarshal(resp.Body(), m.Release)
}
}
// request all releases
func (m *MountPoint) RequestReleases(get func(url string) (*resty.Response, error), refresh bool) {
if m.Repo == "" {
return
}
if m.Releases == nil || refresh {
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases")
m.Releases = new([]Release)
json.Unmarshal(resp.Body(), m.Releases)
}
}
// list the latest release's assets
func (m *MountPoint) GetLatestRelease() []File {
files := make([]File, 0)
for _, asset := range m.Release.Assets {
files = append(files, File{
Path: m.Point + "/" + asset.Name,
FileName: asset.Name,
Size: asset.Size,
Type: "file",
UpdateAt: asset.UpdatedAt,
CreateAt: asset.CreatedAt,
Url: asset.BrowserDownloadUrl,
})
}
return files
}
// total size of the latest release's assets
func (m *MountPoint) GetLatestSize() int64 {
size := int64(0)
for _, asset := range m.Release.Assets {
size += asset.Size
}
return size
}
// list every release as a directory
func (m *MountPoint) GetAllVersion() []File {
files := make([]File, 0)
for _, release := range *m.Releases {
file := File{
Path: m.Point + "/" + release.TagName,
FileName: release.TagName,
Size: m.GetSizeByTagName(release.TagName),
Type: "dir",
UpdateAt: release.PublishedAt,
CreateAt: release.CreatedAt,
Url: release.HtmlUrl,
}
for _, asset := range release.Assets {
file.Size += asset.Size
}
files = append(files, file)
}
return files
}
// get a release's assets by tag name
func (m *MountPoint) GetReleaseByTagName(tagName string) []File {
for _, item := range *m.Releases {
if item.TagName == tagName {
files := make([]File, 0)
for _, asset := range item.Assets {
files = append(files, File{
Path: m.Point + "/" + tagName + "/" + asset.Name,
FileName: asset.Name,
Size: asset.Size,
Type: "file",
UpdateAt: asset.UpdatedAt,
CreateAt: asset.CreatedAt,
Url: asset.BrowserDownloadUrl,
})
}
return files
}
}
return nil
}
// get a release's total size by tag name
func (m *MountPoint) GetSizeByTagName(tagName string) int64 {
if m.Releases == nil {
return 0
}
for _, item := range *m.Releases {
if item.TagName == tagName {
size := int64(0)
for _, asset := range item.Assets {
size += asset.Size
}
return size
}
}
return 0
}
// total size across all releases
func (m *MountPoint) GetAllVersionSize() int64 {
if m.Releases == nil {
return 0
}
size := int64(0)
for _, release := range *m.Releases {
for _, asset := range release.Assets {
size += asset.Size
}
}
return size
}
func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
if m.OtherFile == nil || refresh {
resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")
m.OtherFile = new([]FileInfo)
json.Unmarshal(resp.Body(), m.OtherFile)
}
files := make([]File, 0)
defaultTime := "1970-01-01T00:00:00Z"
for _, file := range *m.OtherFile {
if strings.HasSuffix(file.Name, ".md") || strings.HasPrefix(file.Name, "LICENSE") {
files = append(files, File{
Path: m.Point + "/" + file.Name,
FileName: file.Name,
Size: file.Size,
Type: "file",
UpdateAt: defaultTime,
CreateAt: defaultTime,
Url: file.DownloadUrl,
})
}
}
return files
}
type File struct {
FileName string `json:"name"`
Size int64 `json:"size"`
CreateAt time.Time `json:"time"`
UpdateAt time.Time `json:"chtime"`
Url string `json:"url"`
Type string `json:"type"`
Path string `json:"path"`
Path string // file path
FileName string // file name
Size int64 // file size
Type string // file type
UpdateAt string // update time, e.g. "2025-01-27T16:10:16Z"
CreateAt string // create time
Url string // download URL
}
func (f File) GetHash() utils.HashInfo {
@ -33,11 +195,13 @@ func (f File) GetName() string {
}
func (f File) ModTime() time.Time {
return f.UpdateAt
t, _ := time.Parse(time.RFC3339, f.UpdateAt)
return t
}
func (f File) CreateTime() time.Time {
return f.CreateAt
t, _ := time.Parse(time.RFC3339, f.CreateAt)
return t
}
func (f File) IsDir() bool {
@ -47,22 +211,3 @@ func (f File) IsDir() bool {
func (f File) GetID() string {
return f.Url
}
func (f File) Thumb() string {
return ""
}
type ReleasesData struct {
Files []File `json:"files"`
Size int64 `json:"size"`
UpdateAt time.Time `json:"chtime"`
CreateAt time.Time `json:"time"`
Url string `json:"url"`
}
type Release struct {
Path string // mount path
RepoName string // repository name
Version string // version tag
ID string // release ID
}

View File

@ -2,28 +2,36 @@ package github_releases
import (
"fmt"
"regexp"
"path/filepath"
"strings"
"sync"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
log "github.com/sirupsen/logrus"
)
var (
cache = make(map[string]*resty.Response)
created = make(map[string]time.Time)
mu sync.Mutex
req *resty.Request
)
// send a GET request
func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
req := base.RestyClient.R()
req.SetHeader("Accept", "application/vnd.github+json")
req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
if d.Addition.Token != "" {
req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", d.Addition.Token))
}
res, err := req.Get(url)
if err != nil {
return nil, err
}
if res.StatusCode() != 200 {
log.Warn("failed to get request: ", res.StatusCode(), res.String())
}
return res, nil
}
// parse the repo list
func ParseRepos(text string, allVersion bool) ([]Release, error) {
// parse the mount structure
func (d *GithubReleases) ParseRepos(text string) ([]MountPoint, error) {
lines := strings.Split(text, "\n")
var repos []Release
points := make([]MountPoint, 0)
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" {
@ -41,177 +49,37 @@ func ParseRepos(text string, allVersion bool) ([]Release, error) {
return nil, fmt.Errorf("invalid format: %s", line)
}
if allVersion {
releases, _ := GetAllVersion(repo, path)
repos = append(repos, *releases...)
} else {
repos = append(repos, Release{
Path: path,
RepoName: repo,
Version: "latest",
ID: "latest",
})
}
points = append(points, MountPoint{
Point: path,
Repo: repo,
Release: nil,
Releases: nil,
})
}
return repos, nil
d.points = points
return points, nil
}
// get the next directory level under basePath
func GetNextDir(wholePath string, basePath string) string {
if !strings.HasSuffix(basePath, "/") {
basePath += "/"
}
basePath = fmt.Sprintf("%s/", strings.TrimRight(basePath, "/"))
if !strings.HasPrefix(wholePath, basePath) {
return ""
}
remainingPath := strings.TrimLeft(strings.TrimPrefix(wholePath, basePath), "/")
if remainingPath != "" {
parts := strings.Split(remainingPath, "/")
return parts[0]
nextDir := parts[0]
if strings.HasPrefix(wholePath, strings.TrimRight(basePath, "/")+"/"+nextDir) {
return nextDir
}
}
return ""
}
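GetNextDir returns the first path component of wholePath directly below basePath; List uses it to synthesize the intermediate directories between a listed path and each mount point. Expected results under the logic above:

fmt.Println(GetNextDir("/mnt/repo/v1.0", "/mnt"))      // "repo"
fmt.Println(GetNextDir("/mnt/repo/v1.0", "/mnt/repo")) // "v1.0"
fmt.Println(GetNextDir("/other/x", "/mnt"))            // "" (not under basePath)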
// send a GET request
func GetRequest(url string, cacheExpiration int) (*resty.Response, error) {
mu.Lock()
if res, ok := cache[url]; ok && time.Now().Before(created[url].Add(time.Duration(cacheExpiration)*time.Minute)) {
mu.Unlock()
return res, nil
}
mu.Unlock()
res, err := req.Get(url)
if err != nil {
return nil, err
}
if res.StatusCode() != 200 {
log.Warn("failed to get request: ", res.StatusCode(), res.String())
}
mu.Lock()
cache[url] = res
created[url] = time.Now()
mu.Unlock()
return res, nil
}
// fetch README, LICENSE and similar files
func GetGithubOtherFile(repo string, basePath string, cacheExpiration int) (*[]File, error) {
url := fmt.Sprintf("https://api.github.com/repos/%s/contents/", strings.Trim(repo, "/"))
res, _ := GetRequest(url, cacheExpiration)
body := jsoniter.Get(res.Body())
var files []File
for i := 0; i < body.Size(); i++ {
filename := body.Get(i, "name").ToString()
re := regexp.MustCompile(`(?i)^(.*\.md|LICENSE)$`)
if !re.MatchString(filename) {
continue
}
files = append(files, File{
FileName: filename,
Size: body.Get(i, "size").ToInt64(),
CreateAt: time.Time{},
UpdateAt: time.Now(),
Url: body.Get(i, "download_url").ToString(),
Type: body.Get(i, "type").ToString(),
Path: fmt.Sprintf("%s/%s", basePath, filename),
})
}
return &files, nil
}
// fetch detailed GitHub release info
func GetRepoReleaseInfo(repo string, version string, basePath string, cacheExpiration int) (*ReleasesData, error) {
url := fmt.Sprintf("https://api.github.com/repos/%s/releases/%s", strings.Trim(repo, "/"), version)
res, _ := GetRequest(url, cacheExpiration)
body := res.Body()
if jsoniter.Get(res.Body(), "status").ToInt64() != 0 {
return &ReleasesData{}, fmt.Errorf("%s", res.String())
}
assets := jsoniter.Get(res.Body(), "assets")
var files []File
for i := 0; i < assets.Size(); i++ {
filename := assets.Get(i, "name").ToString()
files = append(files, File{
FileName: filename,
Size: assets.Get(i, "size").ToInt64(),
Url: assets.Get(i, "browser_download_url").ToString(),
Type: assets.Get(i, "content_type").ToString(),
Path: fmt.Sprintf("%s/%s", basePath, filename),
CreateAt: func() time.Time {
t, _ := time.Parse(time.RFC3339, assets.Get(i, "created_at").ToString())
return t
}(),
UpdateAt: func() time.Time {
t, _ := time.Parse(time.RFC3339, assets.Get(i, "updated_at").ToString())
return t
}(),
})
}
return &ReleasesData{
Files: files,
Url: jsoniter.Get(body, "html_url").ToString(),
Size: func() int64 {
size := int64(0)
for _, file := range files {
size += file.Size
}
return size
}(),
UpdateAt: func() time.Time {
t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "published_at").ToString())
return t
}(),
CreateAt: func() time.Time {
t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "created_at").ToString())
return t
}(),
}, nil
}
// fetch all version tags
func GetAllVersion(repo string, path string) (*[]Release, error) {
url := fmt.Sprintf("https://api.github.com/repos/%s/releases", strings.Trim(repo, "/"))
res, _ := GetRequest(url, 0)
body := jsoniter.Get(res.Body())
releases := make([]Release, 0)
for i := 0; i < body.Size(); i++ {
version := body.Get(i, "tag_name").ToString()
releases = append(releases, Release{
Path: fmt.Sprintf("%s/%s", path, version),
Version: version,
RepoName: repo,
ID: body.Get(i, "id").ToString(),
})
}
return &releases, nil
}
func ClearCache() {
mu.Lock()
cache = make(map[string]*resty.Response)
created = make(map[string]time.Time)
mu.Unlock()
}
func SetHeader(token string) {
req = base.RestyClient.R()
if token != "" {
req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", token))
}
req.SetHeader("Accept", "application/vnd.github+json")
req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
// check whether parentDir is an ancestor of targetDir
func IsAncestorDir(parentDir string, targetDir string) bool {
absTargetDir, _ := filepath.Abs(targetDir)
absParentDir, _ := filepath.Abs(parentDir)
return strings.HasPrefix(absTargetDir, absParentDir)
}

View File

@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
putUrl := res.Header().Get("location")
if stream.GetSize() < d.ChunkSize*1024*1024 {
_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
SetBody(driver.NewLimitedUploadStream(ctx, stream))
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)

View File

@ -11,10 +11,10 @@ import (
"strconv"
"time"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/golang-jwt/jwt/v4"
@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error {
}
d.AccessToken = resp.AccessToken
return nil
}
if gdsaFileErr != nil && os.IsExist(gdsaFileErr) {
} else if os.IsExist(gdsaFileErr) {
return gdsaFileErr
}
url := "https://www.googleapis.com/oauth2/v4/token"
@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer
if err != nil {
return err
}
reader = driver.NewLimitedUploadStream(ctx, reader)
_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Length": strconv.FormatInt(chunkSize, 10),

View File

@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
}
resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(stream).SetContext(ctx)
req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
}, nil, postHeaders)
if err != nil {

View File

@ -392,10 +392,11 @@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model
if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1)
}
reader := driver.NewLimitedUploadStream(ctx, fileStream)
_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(result.Bucket),
Key: aws.String(result.Key),
Body: io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)),
Body: io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)),
})
return nil, err

View File

@ -120,7 +120,7 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
if err != nil {
return nil, err
}
ts, ts_str, err := getTimestamp(d.conf.secret)
ts, ts_str, _ := getTimestamp(d.conf.secret)
params := []string{
"uuid=" + url.QueryEscape(d.UUID),
@ -149,11 +149,17 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
u.RawQuery = strings.Join(params, "&")
realURL := u.String()
// get the url after redirect
res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
//"Origin": d.conf.site,
req := base.NoRedirectClient.R()
req.SetHeaders(map[string]string{
"Referer": d.conf.site + "/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
}).Get(realURL)
})
if d.Addition.Ip != "" {
req.SetHeader("X-Forwarded-For", d.Addition.Ip)
}
res, err := req.Get(realURL)
if err != nil {
return nil, err
}
@ -266,10 +272,10 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error {
const DefaultPartSize = 1024 * 1024 * 8
func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := stream.CacheFullInTempFile()
tempFile, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
}
@ -288,8 +294,8 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"fileId": "",
"fileName": stream.GetName(),
"fileSize": stream.GetSize()/1024 + 1,
"fileName": s.GetName(),
"fileSize": s.GetSize()/1024 + 1,
"folderId": dstDir.GetID(),
"md5": etag,
"type": 1,
@ -301,13 +307,20 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
upToken := utils.Json.Get(res, "upToken").ToString()
now := time.Now()
key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: tempFile,
Size: s.GetSize(),
},
UpdateProgress: up,
})
var token string
if stream.GetSize() <= DefaultPartSize {
res, err := d.upClient.R().SetMultipartFormData(map[string]string{
if s.GetSize() <= DefaultPartSize {
res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{
"token": upToken,
"key": key,
"fname": stream.GetName(),
}).SetMultipartField("file", stream.GetName(), stream.GetMimetype(), tempFile).
"fname": s.GetName(),
}).SetMultipartField("file", s.GetName(), s.GetMimetype(), reader).
Post("https://upload.qiniup.com/")
if err != nil {
return nil, err
@ -321,10 +334,10 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
}
uploadId := utils.Json.Get(res.Body(), "uploadId").ToString()
parts := make([]Part, 0)
partNum := (stream.GetSize() + DefaultPartSize - 1) / DefaultPartSize
partNum := (s.GetSize() + DefaultPartSize - 1) / DefaultPartSize
for i := 1; i <= int(partNum); i++ {
u := fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s/%d", d.conf.bucket, keyBase64, uploadId, i)
res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(tempFile, DefaultPartSize)).Put(u)
res, err = d.upClient.R().SetContext(ctx).SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(reader, DefaultPartSize)).Put(u)
if err != nil {
return nil, err
}
@ -335,7 +348,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
})
}
res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(base.Json{
"fnmae": stream.GetName(),
"fnmae": s.GetName(),
"parts": parts,
}).Post(fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s", d.conf.bucket, keyBase64, uploadId))
if err != nil {
@ -373,9 +386,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
ID: strconv.FormatInt(file.FileId, 10),
//Path: ,
Name: file.FileName,
Size: stream.GetSize(),
Modified: stream.ModTime(),
Ctime: stream.CreateTime(),
Size: s.GetSize(),
Modified: s.ModTime(),
Ctime: s.CreateTime(),
IsFolder: false,
HashInfo: utils.NewHashInfo(utils.MD5, etag),
}, nil

View File

@ -9,6 +9,7 @@ type Addition struct {
driver.RootID
Username string `json:"username" type:"string" required:"true"`
Password string `json:"password" type:"string" required:"true"`
Ip string `json:"ip" type:"string"`
Token string
UUID string

View File

@ -73,8 +73,13 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr
"Referer": d.conf.site + "/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
"Accept-Encoding": "gzip, deflate, br, zstd",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5",
})
if d.Addition.Ip != "" {
req.SetHeader("X-Forwarded-For", d.Addition.Ip)
}
if callback != nil {
callback(req)
}

View File

@ -108,9 +108,12 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
return d.sh.FilesRm(ctx, obj.GetPath(), true)
}
func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
// TODO upload file, optional
_, err := d.sh.Add(stream, ToFiles(stdpath.Join(dstDir.GetPath(), stream.GetName())))
_, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
return err
}

View File

@ -3,8 +3,6 @@ package kodbox
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"net/http"
"path/filepath"
"strings"
@ -12,6 +10,8 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
type KodBox struct {
@ -225,14 +225,19 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error {
return nil
}
func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var resp *CommonResp
_, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) {
req.SetFileReader("file", stream.GetName(), stream).
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req.SetFileReader("file", s.GetName(), r).
SetResult(&resp).
SetFormData(map[string]string{
"path": dstDir.GetPath(),
})
}).
SetContext(ctx)
})
if err != nil {
return nil, err
@ -244,8 +249,8 @@ func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return &model.ObjThumb{
Object: model.Object{
Path: resp.Info.(string),
Name: stream.GetName(),
Size: stream.GetSize(),
Name: s.GetName(),
Size: s.GetSize(),
IsFolder: false,
Modified: time.Now(),
Ctime: time.Now(),

View File

@ -208,18 +208,22 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
return errs.NotSupport
}
func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if d.IsCookie() || d.IsAccount() {
var resp RespText[[]FileOrFolder]
_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req.SetFormData(map[string]string{
"task": "1",
"vie": "2",
"ve": "2",
"id": "WU_FILE_0",
"name": stream.GetName(),
"name": s.GetName(),
"folder_id_bb_n": dstDir.GetID(),
}).SetFileReader("upload_file", stream.GetName(), stream).SetContext(ctx)
}).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx)
}, &resp, true)
if err != nil {
return nil, err

View File

@ -320,7 +320,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
Build()
// send the request
uploadLimit.Wait(ctx)
err := uploadLimit.Wait(ctx)
if err != nil {
return nil, err
}
resp, err := c.client.Drive.File.UploadPrepare(ctx, req)
if err != nil {
return nil, err
@ -341,7 +344,7 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
length = stream.GetSize() - int64(i*blockSize)
}
reader := io.LimitReader(stream, length)
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length))
req := larkdrive.NewUploadPartFileReqBuilder().
Body(larkdrive.NewUploadPartFileReqBodyBuilder().
@ -353,7 +356,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
Build()
// send the request
uploadLimit.Wait(ctx)
err = uploadLimit.Wait(ctx)
if err != nil {
return nil, err
}
resp, err := c.client.Drive.File.UploadPart(ctx, req)
if err != nil {
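Both hunks in this file promote uploadLimit.Wait from a fire-and-forget call to a checked one. Wait blocks until the limiter grants a token and returns a non-nil error when the context is canceled or its deadline would pass before a token becomes available, so the old code could continue into the API call with a dead context. A sketch, assuming uploadLimit is a golang.org/x/time/rate.Limiter:

// Wait returns an error on context cancellation or an unmeetable deadline;
// abort the upload instead of calling the API with a dead context.
if err := uploadLimit.Wait(ctx); err != nil {
	return nil, err
}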

View File

@ -47,7 +47,11 @@ func (f File) GetPath() string {
}
func (f File) GetSize() int64 {
return f.Size
if f.IsDir() {
return 0
} else {
return f.Size
}
}
func (f File) GetName() string {
@ -70,10 +74,6 @@ func (f File) GetID() string {
return f.GetPath()
}
func (f File) Thumb() string {
return ""
}
type Files struct {
Data struct {
List []File `json:"list"`

View File

@ -161,7 +161,7 @@ func (d *MediaTrack) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
src := "assets/" + uuid.New().String()
var resp UploadResp
_, err := d.request("https://jayce.api.mediatrack.cn/v3/storage/tokens/asset", http.MethodGet, func(req *resty.Request) {
@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
tempFile, err := stream.CacheFullInTempFile()
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}
@ -188,13 +188,19 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
_ = tempFile.Close()
}()
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
input := &s3manager.UploadInput{
Bucket: &resp.Data.Bucket,
Key: &resp.Data.Object,
Body: tempFile,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: tempFile,
Size: file.GetSize(),
},
UpdateProgress: up,
}),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {
@ -213,12 +219,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
hash := hex.EncodeToString(h.Sum(nil))
data := base.Json{
"category": 0,
"description": stream.GetName(),
"description": file.GetName(),
"hash": hash,
"mime": stream.GetMimetype(),
"size": stream.GetSize(),
"mime": file.GetMimetype(),
"size": file.GetSize(),
"src": src,
"title": stream.GetName(),
"title": file.GetName(),
"type": 0,
}
_, err = d.request(url, http.MethodPost, func(req *resty.Request) {
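
The temp file handed to the S3 uploader here is a bare reader with no size accessor, so the hunk interposes driver.SimpleReaderWithSize before the progress wrapper. A sketch of the assumed shape, giving the progress math a denominator:

package mediatracksketch

import "io"

// SimpleReaderWithSize pairs a plain reader with an explicit size so a
// progress wrapper can turn bytes read into a percentage.
type SimpleReaderWithSize struct {
	io.Reader
	Size int64
}

func (s *SimpleReaderWithSize) GetSize() int64 { return s.Size }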

View File

@ -156,6 +156,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return err
}
reader := driver.NewLimitedUploadStream(ctx, stream)
for id := 0; id < u.Chunks(); id++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -165,7 +166,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return err
}
chunk := make([]byte, chkSize)
n, err := io.ReadFull(stream, chunk)
n, err := io.ReadFull(reader, chunk)
if err != nil && err != io.EOF {
return err
}

View File

@ -64,7 +64,7 @@ func (d *Misskey) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.put(dstDir, stream, up)
return d.put(ctx, dstDir, stream, up)
}
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {

View File

@ -1,7 +1,6 @@
package misskey
import (
"bytes"
"context"
"errors"
"io"
@ -190,16 +189,16 @@ func (d *Misskey) remove(obj model.Obj) error {
}
}
func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var file MFile
fileContent, err := io.ReadAll(stream)
if err != nil {
return nil, err
}
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: stream,
UpdateProgress: up,
})
req := base.RestyClient.R().
SetFileReader("file", stream.GetName(), io.NopCloser(bytes.NewReader(fileContent))).
SetContext(ctx).
SetFileReader("file", stream.GetName(), reader).
SetFormData(map[string]string{
"folderId": handleFolderId(dstDir).(string),
"name": stream.GetName(),
@ -207,7 +206,8 @@ func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.Upd
"isSensitive": "false",
"force": "false",
}).
SetResult(&file).SetAuthToken(d.AccessToken)
SetResult(&file).
SetAuthToken(d.AccessToken)
resp, err := req.Post(d.Endpoint + "/api/drive/files/create")
if err != nil {

View File

@ -10,6 +10,8 @@ import (
"strings"
"time"
"golang.org/x/sync/semaphore"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
@ -301,6 +303,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
// step.3
parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
@ -312,6 +315,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
i, part, byteSize := i, part, initUpdload.PartSize
if part.PartNumber == uploadPartData.PartTotal {
byteSize = initUpdload.LastPartSize
@ -319,7 +325,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
// step.4
threadG.Go(func(ctx context.Context) error {
req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
defer sem.Release(1)
reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)
req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}
@ -328,7 +336,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if err != nil {
return err
}
resp.Body.Close()
_ = resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("upload err,code=%d", resp.StatusCode)
}
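
The MoPan hunk bounds concurrency with a weighted semaphore: Acquire runs in the scheduling loop, so at most three parts are in flight, and Release is deferred inside each worker. A compressed sketch of the pattern, substituting golang.org/x/sync/errgroup for the project's retrying errgroup wrapper:

package mopansketch

import (
	"context"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func uploadParts(ctx context.Context, parts []func(context.Context) error) error {
	g, ctx := errgroup.WithContext(ctx)
	sem := semaphore.NewWeighted(3) // at most 3 parts in flight
	for _, part := range parts {
		if err := sem.Acquire(ctx, 1); err != nil {
			break // context cancelled: stop scheduling further parts
		}
		part := part
		g.Go(func() error {
			defer sem.Release(1)
			return part(ctx)
		})
	}
	return g.Wait()
}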

View File

@ -88,7 +88,7 @@ func (d *NeteaseMusic) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *NeteaseMusic) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
return d.putSongStream(stream)
return d.putSongStream(ctx, stream, up)
}
func (d *NeteaseMusic) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

View File

@ -2,6 +2,7 @@ package netease_music
import (
"context"
"github.com/alist-org/alist/v3/internal/driver"
"io"
"net/http"
"strconv"
@ -71,6 +72,8 @@ func (lrc *LyricObj) getLyricLink() *model.Link {
type ReqOption struct {
crypto string
stream model.FileStreamer
up driver.UpdateProgress
ctx context.Context
data map[string]string
headers map[string]string
cookies []*http.Cookie

View File

@ -1,8 +1,10 @@
package netease_music
import (
"context"
"crypto/md5"
"encoding/hex"
"github.com/alist-org/alist/v3/internal/driver"
"io"
"net/http"
"strconv"
@ -47,9 +49,12 @@ func (u *uploader) init(stream model.FileStreamer) error {
}
h := md5.New()
utils.CopyWithBuffer(h, stream)
_, err := utils.CopyWithBuffer(h, stream)
if err != nil {
return err
}
u.md5 = hex.EncodeToString(h.Sum(nil))
_, err := u.file.Seek(0, io.SeekStart)
_, err = u.file.Seek(0, io.SeekStart)
if err != nil {
return err
}
@ -167,7 +172,7 @@ func (u *uploader) publishInfo(resourceId string) error {
return nil
}
func (u *uploader) upload(stream model.FileStreamer) error {
func (u *uploader) upload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
bucket := "jd-musicrep-privatecloud-audio-public"
token, err := u.allocToken(bucket)
if err != nil {
@ -192,6 +197,8 @@ func (u *uploader) upload(stream model.FileStreamer) error {
http.MethodPost,
ReqOption{
stream: stream,
up: up,
ctx: ctx,
headers: map[string]string{
"x-nos-token": token.token,
"Content-Type": "audio/mpeg",

View File

@ -1,7 +1,7 @@
package netease_music
import (
"io"
"context"
"net/http"
"path"
"regexp"
@ -10,6 +10,7 @@ import (
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
@ -58,20 +59,35 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error
url = "https://music.163.com/api/linux/forward"
}
if opt.ctx != nil {
req.SetContext(opt.ctx)
}
if method == http.MethodPost {
if opt.stream != nil {
if opt.up == nil {
opt.up = func(_ float64) {}
}
req.SetContentLength(true)
req.SetBody(io.ReadCloser(opt.stream))
req.SetBody(driver.NewLimitedUploadStream(opt.ctx, &driver.ReaderUpdatingProgress{
Reader: opt.stream,
UpdateProgress: opt.up,
}))
} else {
req.SetFormData(data)
}
res, err := req.Post(url)
return res.Body(), err
if err != nil {
return nil, err
}
return res.Body(), nil
}
if method == http.MethodGet {
res, err := req.Get(url)
return res.Body(), err
if err != nil {
return nil, err
}
return res.Body(), nil
}
return nil, errs.NotImplement
@ -206,7 +222,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
return err
}
func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error {
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
tmp, err := stream.CacheFullInTempFile()
if err != nil {
return err
@ -231,7 +247,7 @@ func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error {
}
if u.meta.needUpload {
err = u.upload(stream)
err = u.upload(ctx, stream, up)
if err != nil {
return err
}

View File

@ -8,7 +8,6 @@ import (
"io"
"net/http"
stdpath "path"
"strconv"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@ -152,12 +151,8 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F
// 1. upload new file
// ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online
url := d.GetMetaUrl(false, filepath) + "/content"
data, err := io.ReadAll(stream)
if err != nil {
return err
}
_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(data).SetContext(ctx)
_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
}, nil)
if err != nil {
return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err)
@ -225,12 +220,13 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
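
Both OneDrive hunks replace a hand-set Content-Length header with req.ContentLength. The distinction matters: net/http drops a user-set Content-Length on outgoing client requests and emits the header from the struct field instead. A sketch reusing the hunk's names (uploadUrl, byteSize, finish come from the diff; body and fileSize stand in for the wrapped reader and stream.GetSize()):

package onedrivesketch

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

func putChunk(ctx context.Context, uploadUrl string, body io.Reader, byteSize, finish, fileSize int64) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPut, uploadUrl, body)
	if err != nil {
		return nil, err
	}
	// The transport writes Content-Length from this field; setting the
	// header directly, as the old code did, has no effect.
	req.ContentLength = byteSize
	req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, fileSize))
	return http.DefaultClient.Do(req)
}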

View File

@ -8,7 +8,6 @@ import (
"io"
"net/http"
stdpath "path"
"strconv"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@ -140,12 +139,8 @@ func (d *OnedriveAPP) GetFile(path string) (*File, error) {
func (d *OnedriveAPP) upSmall(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) error {
url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/content"
data, err := io.ReadAll(stream)
if err != nil {
return err
}
_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(data).SetContext(ctx)
_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
}, nil)
return err
}
@ -175,12 +170,13 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)

View File

@ -255,10 +255,10 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
if stream.GetSize() <= 10*utils.MB { // files smaller than 10MB use the plain upload mode instead
return d.UploadByOSS(&params, stream, up)
return d.UploadByOSS(ctx, &params, stream, up)
}
// multipart upload
return d.UploadByMultipart(&params, stream.GetSize(), stream, up)
return d.UploadByMultipart(ctx, &params, stream.GetSize(), stream, up)
}
// offline download of a file

View File

@ -2,6 +2,7 @@ package pikpak
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/hex"
@ -19,6 +20,7 @@ import (
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/alist-org/alist/v3/drivers/base"
@ -417,7 +419,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err
return nil
}
func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.FileStreamer, up driver.UpdateProgress) error {
ossClient, err := oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret)
if err != nil {
return err
@ -427,14 +429,17 @@ func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up dri
return err
}
err = bucket.PutObject(params.Key, stream, OssOption(params)...)
err = bucket.PutObject(params.Key, driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}), OssOption(params)...)
if err != nil {
return err
}
return nil
}
func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
@ -444,7 +449,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
err error
)
tmpF, err := stream.CacheFullInTempFile()
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return err
}
@ -488,6 +493,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
quit <- struct{}{}
}()
completedNum := atomic.Int32{}
// consumers
for i := 0; i < ThreadsNum; i++ {
go func(threadId int) {
@ -500,6 +506,8 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
var part oss.UploadPart // on error, keep retrying; 3 attempts in total
for retry := 0; retry < 3; retry++ {
select {
case <-ctx.Done():
break
case <-ticker.C:
errCh <- errors.Wrap(err, "ossToken expired")
default:
@ -510,13 +518,16 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
continue
}
b := bytes.NewBuffer(buf)
b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf))
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
break
}
}
if err != nil {
errCh <- errors.Wrap(err, fmt.Sprintf("error uploading %s part %d: %v", stream.GetName(), chunk.Number, err))
errCh <- errors.Wrap(err, fmt.Sprintf("error uploading %s part %d: %v", s.GetName(), chunk.Number, err))
} else {
num := completedNum.Add(1)
up(float64(num) * 100.0 / float64(len(chunks)))
}
UploadedPartsCh <- part
}
@ -547,7 +558,7 @@ LOOP:
// the EOF error comes from xml Unmarshal; the response is actually JSON, so the upload did in fact succeed
if _, err = bucket.CompleteMultipartUpload(imur, parts, OssOption(params)...); err != nil && !errors.Is(err, io.EOF) {
// when the filename contains either & or <, parsing the XML response errors out even though the upload succeeded
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
if filename := filepath.Base(s.GetName()); !strings.ContainsAny(filename, "&<") {
return err
}
}
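
The PikPak workers now share an atomic counter so any goroutine can report progress after finishing a chunk. A sketch of that accounting, assuming up is the driver.UpdateProgress callback (func(float64)):

package pikpaksketch

import "sync/atomic"

// reportPart is called once per successfully uploaded chunk; the atomic
// counter makes it safe from any worker goroutine.
func reportPart(completed *atomic.Int32, totalChunks int, up func(float64)) {
	num := completed.Add(1)
	up(float64(num) * 100.0 / float64(totalChunks))
}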

View File

@ -1,6 +1,7 @@
package quark
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
@ -178,7 +179,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
// part up
partSize := pre.Metadata.PartSize
var bytes []byte
var part []byte
md5s := make([]string, 0)
defaultBytes := make([]byte, partSize)
total := stream.GetSize()
@ -189,17 +190,18 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
return ctx.Err()
}
if left > int64(partSize) {
bytes = defaultBytes
part = defaultBytes
} else {
bytes = make([]byte, left)
part = make([]byte, left)
}
_, err := io.ReadFull(tempFile, bytes)
_, err := io.ReadFull(tempFile, part)
if err != nil {
return err
}
left -= int64(len(bytes))
left -= int64(len(part))
log.Debugf("left: %d", left)
m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, bytes)
reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)
//m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
if err != nil {
return err

View File

@ -6,6 +6,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
@ -119,7 +120,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) {
return resp.Data.Finish, err
}
func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes []byte) (string, error) {
func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) {
//func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
timeStr := time.Now().UTC().Format(http.TimeFormat)
data := base.Json{
@ -163,10 +164,13 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
"partNumber": strconv.Itoa(partNumber),
"uploadId": pre.Data.UploadId,
}).SetBody(bytes).Put(u)
if err != nil {
return "", err
}
if res.StatusCode() != 200 {
return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
}
return res.Header().Get("ETag"), nil
return res.Header().Get("Etag"), nil
}
func (d *QuarkOrUC) upCommit(pre UpPreResp, md5s []string) error {
@ -230,6 +234,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
SetQueryParams(map[string]string{
"uploadId": pre.Data.UploadId,
}).SetBody(body).Post(u)
if err != nil {
return err
}
if res.StatusCode() != 200 {
return fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
}
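
The return value changed from Get("ETag") to Get("Etag"), which is behavior-neutral: http.Header canonicalizes keys MIME-style, so lookups are case-insensitive either way, as this small check shows:

package headersketch

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func demo() {
	h := http.Header{}
	h.Set("ETag", `"abc123"`)
	fmt.Println(textproto.CanonicalMIMEHeaderKey("ETag")) // Etag
	fmt.Println(h.Get("etag") == h.Get("ETag"))           // true
}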

View File

@ -3,6 +3,7 @@ package quqi
import (
"bytes"
"context"
"errors"
"io"
"strconv"
"strings"
@ -385,20 +386,34 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
}
uploader := s3manager.NewUploader(s)
buf := make([]byte, 1024*1024*2)
fup := &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: f,
Size: int64(len(buf)),
},
UpdateProgress: up,
}
for partNumber := int64(1); ; partNumber++ {
n, err := io.ReadFull(f, buf)
if err != nil && err != io.ErrUnexpectedEOF {
n, err := io.ReadFull(fup, buf)
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
if err == io.EOF {
break
}
return nil, err
}
reader := bytes.NewReader(buf[:n])
_, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{
UploadId: &uploadInitResp.Data.UploadID,
Key: &uploadInitResp.Data.Key,
Bucket: &uploadInitResp.Data.Bucket,
PartNumber: aws.Int64(partNumber),
Body: bytes.NewReader(buf[:n]),
Body: struct {
*driver.RateLimitReader
io.Seeker
}{
RateLimitReader: driver.NewLimitedUploadStream(ctx, reader),
Seeker: reader,
},
})
if err != nil {
return nil, err
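
The anonymous struct above exists because s3.UploadPartInput.Body must be an io.ReadSeeker (the SDK may rewind and retry a part), while the rate limiter only exposes Read. Embedding both routes Read through the limiter and Seek to the underlying bytes.Reader, and after a rewind the limiter keeps reading the same reader from its new offset. A reduced sketch, with limit as a hypothetical stand-in for driver.NewLimitedUploadStream:

package quqisketch

import (
	"bytes"
	"context"
	"io"
)

// limit is a hypothetical stand-in for driver.NewLimitedUploadStream.
func limit(ctx context.Context, r io.Reader) io.Reader { return r }

func seekableBody(ctx context.Context, buf []byte) io.ReadSeeker {
	reader := bytes.NewReader(buf)
	return struct {
		io.Reader // Read goes through the rate limiter
		io.Seeker // Seek goes straight to the same bytes.Reader
	}{
		Reader: limit(ctx, reader),
		Seeker: reader,
	}
}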

View File

@ -304,10 +304,6 @@ func (d *Quqi) linkFromCDN(id string) (*model.Link, error) {
}
return &model.Link{
Header: http.Header{
"Origin": []string{"https://quqi.com"},
"Cookie": []string{d.Cookie},
},
RangeReadCloser: &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers},
Expiration: &expiration,
}, nil

View File

@ -4,18 +4,17 @@ import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/server/common"
"io"
"net/url"
stdpath "path"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/server/common"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
@ -163,18 +162,21 @@ func (d *S3) Remove(ctx context.Context, obj model.Obj) error {
return d.removeFile(obj.GetPath())
}
func (d *S3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
uploader := s3manager.NewUploader(d.Session)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if s.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = s.GetSize() / (s3manager.MaxUploadParts - 1)
}
key := getKey(stdpath.Join(dstDir.GetPath(), stream.GetName()), false)
contentType := stream.GetMimetype()
key := getKey(stdpath.Join(dstDir.GetPath(), s.GetName()), false)
contentType := s.GetMimetype()
log.Debugln("key:", key)
input := &s3manager.UploadInput{
Bucket: &d.Bucket,
Key: &key,
Body: stream,
Bucket: &d.Bucket,
Key: &key,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}),
ContentType: &contentType,
}
_, err := uploader.UploadWithContext(ctx, input)

View File

@ -199,7 +199,7 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
dstKey := getKey(dst, false)
input := &s3.CopyObjectInput{
Bucket: &d.Bucket,
CopySource: aws.String(url.PathEscape("/" + d.Bucket + "/" + srcKey)),
CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)),
Key: &dstKey,
}
_, err := d.client.CopyObject(input)
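
This one-character fix works because url.PathEscape escapes "/" as %2F, so the old leading slash survived as a literal %2F in front of the bucket name in CopySource, which some S3 endpoints reject:

package s3sketch

import (
	"fmt"
	"net/url"
)

func demo() {
	fmt.Println(url.PathEscape("/bucket/dir/key")) // %2Fbucket%2Fdir%2Fkey
	fmt.Println(url.PathEscape("bucket/dir/key"))  // bucket%2Fdir%2Fkey
}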

View File

@ -197,7 +197,7 @@ func (d *Seafile) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
repo, path, err := d.getRepoAndPath(dstDir.GetPath())
if err != nil {
return err
@ -214,11 +214,16 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
u := string(res)
u = u[1 : len(u)-1] // remove quotes
_, err = d.request(http.MethodPost, u, func(req *resty.Request) {
req.SetFileReader("file", stream.GetName(), stream).
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req.SetFileReader("file", s.GetName(), r).
SetFormData(map[string]string{
"parent_dir": path,
"replace": "1",
})
}).
SetContext(ctx)
})
return err
}

View File

@ -111,7 +111,7 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
defer func() {
_ = dstFile.Close()
}()
err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up)
err = utils.CopyWithCtx(ctx, dstFile, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
return err
}

View File

@ -186,7 +186,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
_ = d.fs.Remove(fullPath)
}
}()
err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up)
err = utils.CopyWithCtx(ctx, out, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
if err != nil {
return err
}

View File

@ -148,7 +148,7 @@ func (d *Teambition) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
var newFile *FileUpload
if stream.GetSize() <= 20971520 {
// post upload
newFile, err = d.upload(ctx, stream, token)
newFile, err = d.upload(ctx, stream, token, up)
} else {
// chunk upload
//err = base.ErrNotImplement

View File

@ -1,6 +1,7 @@
package teambition
import (
"bytes"
"context"
"errors"
"fmt"
@ -120,11 +121,15 @@ func (d *Teambition) getFiles(parentId string) ([]model.Obj, error) {
return files, nil
}
func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string) (*FileUpload, error) {
func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string, up driver.UpdateProgress) (*FileUpload, error) {
prefix := "tcs"
if d.isInternational() {
prefix = "us-tcs"
}
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
})
var newFile FileUpload
res, err := base.RestyClient.R().
SetContext(ctx).
@ -134,7 +139,8 @@ func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token
"type": file.GetMimetype(),
"size": strconv.FormatInt(file.GetSize(), 10),
"lastModifiedDate": time.Now().Format("Mon Jan 02 2006 15:04:05 GMT+0800 (中国标准时间)"),
}).SetMultipartField("file", file.GetName(), file.GetMimetype(), file).
}).
SetMultipartField("file", file.GetName(), file.GetMimetype(), reader).
Post(fmt.Sprintf("https://%s.teambition.net/upload", prefix))
if err != nil {
return nil, err
@ -183,10 +189,9 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
"Authorization": token,
"Content-Type": "application/octet-stream",
"Referer": referer,
}).SetBody(chunkData).Post(u)
if err != nil {
return nil, err
}
}).
SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(chunkData))).
Post(u)
if err != nil {
return nil, err
}
@ -252,7 +257,10 @@ func (d *Teambition) newUpload(ctx context.Context, dstDir model.Obj, stream mod
Key: &uploadToken.Upload.Key,
ContentDisposition: &uploadToken.Upload.ContentDisposition,
ContentType: &uploadToken.Upload.ContentType,
Body: stream,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: stream,
UpdateProgress: up,
}),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {

View File

@ -66,11 +66,33 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
return errs.NotImplement
}
func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *Template) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// TODO upload file, optional
return nil, errs.NotImplement
}
func (d *Template) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Template) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Template) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

View File

@ -228,7 +228,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
res, err := base.RestyClient.R().
SetContext(ctx).
SetQueryParams(params).
SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
SetFileReader("file", stream.GetName(), driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))).
SetHeader("Cookie", d.Cookie).
Post(u)
if err != nil {

View File

@ -332,16 +332,16 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
hi := stream.GetHash()
func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
hi := file.GetHash()
gcid := hi.GetHash(hash_extend.GCID)
if len(gcid) < hash_extend.GCID.Width {
tFile, err := stream.CacheFullInTempFile()
tFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}
gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
if err != nil {
return err
}
@ -353,8 +353,8 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.
r.SetBody(&base.Json{
"kind": FILE,
"parent_id": dstDir.GetID(),
"name": stream.GetName(),
"size": stream.GetSize(),
"name": file.GetName(),
"size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
})
@ -375,14 +375,17 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.
return err
}
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
Body: stream,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}),
})
return err
}

View File

@ -508,7 +508,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
Body: io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)),
Body: driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))),
})
return err
}

View File

@ -363,16 +363,16 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
hi := stream.GetHash()
func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
hi := file.GetHash()
gcid := hi.GetHash(hash_extend.GCID)
if len(gcid) < hash_extend.GCID.Width {
tFile, err := stream.CacheFullInTempFile()
tFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}
gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
if err != nil {
return err
}
@ -384,8 +384,8 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
r.SetBody(&base.Json{
"kind": FILE,
"parent_id": dstDir.GetID(),
"name": stream.GetName(),
"size": stream.GetSize(),
"name": file.GetName(),
"size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
})
@ -406,14 +406,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
return err
}
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
Body: stream,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}),
})
return err
}

View File

@ -58,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return nil, err
}
var jsonData any
json.Unmarshal(data, &jsonData)
err = json.Unmarshal(data, &jsonData)
if err != nil {
return nil, err
}
@ -114,23 +114,18 @@ func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/")
query := &url.Values{}
query.Add("q", strings.Split(dstDir.GetID(), "_")[1])
query.Add("guid", guid)
query.Add("name", url.QueryEscape(local2provider(stream.GetName(), false)+"."))
query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
endpoint.RawQuery = query.Encode()
var total int64
total = 0
progressReader := &ProgressReader{
stream,
func(byteNum int) {
total += int64(byteNum)
up(float64(total) / float64(stream.GetSize()) * 100)
},
}
req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader)
progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
if err != nil {
return err
}

View File

@ -13,17 +13,6 @@ import (
"github.com/alist-org/alist/v3/internal/model"
)
type ProgressReader struct {
io.Reader
reporter func(byteNum int)
}
func (progressReader *ProgressReader) Read(data []byte) (int, error) {
byteNum, err := progressReader.Reader.Read(data)
progressReader.reporter(byteNum)
return byteNum, err
}
func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {

View File

@ -3,7 +3,6 @@ package url_tree
import (
"context"
"errors"
"github.com/alist-org/alist/v3/internal/op"
stdpath "path"
"strings"
"sync"
@ -11,6 +10,7 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)

View File

@ -3,6 +3,7 @@ package uss
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"net/url"
"path"
"strings"
@ -122,11 +123,13 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error {
})
}
func (d *USS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
// TODO not support cancel??
func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
return d.client.Put(&upyun.PutObjectConfig{
Path: getKey(path.Join(dstDir.GetPath(), stream.GetName()), false),
Reader: stream,
Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false),
Reader: driver.NewLimitedUploadStream(ctx, &stream.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}),
})
}

View File

@ -278,7 +278,8 @@ func (d *Vtencent) FileUpload(ctx context.Context, dstDir model.Obj, stream mode
input := &s3manager.UploadInput{
Bucket: aws.String(fmt.Sprintf("%s-%d", params.StorageBucket, params.StorageAppID)),
Key: &params.Video.StoragePath,
Body: io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up))),
Body: driver.NewLimitedUploadStream(ctx,
io.TeeReader(stream, io.MultiWriter(hash, driver.NewProgress(stream.GetSize(), up)))),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {

View File

@ -93,13 +93,16 @@ func (d *WebDav) Remove(ctx context.Context, obj model.Obj) error {
return d.client.RemoveAll(getPath(obj))
}
func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
callback := func(r *http.Request) {
r.Header.Set("Content-Type", stream.GetMimetype())
r.ContentLength = stream.GetSize()
r.Header.Set("Content-Type", s.GetMimetype())
r.ContentLength = s.GetSize()
}
// TODO: support cancel
err := d.client.WriteStream(path.Join(dstDir.GetPath(), stream.GetName()), stream, 0644, callback)
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), reader, 0644, callback)
return err
}

View File

@ -7,6 +7,7 @@ import (
"math"
"net/http"
"strconv"
"sync/atomic"
"time"
"github.com/alist-org/alist/v3/drivers/base"
@ -69,7 +70,7 @@ func (d *WeiYun) Init(ctx context.Context) error {
if d.client.LoginType() == 1 {
d.cron = cron.NewCron(time.Minute * 5)
d.cron.Do(func() {
d.client.KeepAlive()
_ = d.client.KeepAlive()
})
}
@ -311,77 +312,83 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// NOTE:
// instant upload ("秒传") needs the final SHA-1 state, but SHA-1 cannot be run backwards, so the whole file must be read (or can it??)
// the server supports resuming upload progress, so no extra implementation is needed
if folder, ok := dstDir.(*Folder); ok {
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
var folder *Folder
var ok bool
if folder, ok = dstDir.(*Folder); !ok {
return nil, errs.NotSupport
}
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
// step 1.
preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
PdirKey: folder.GetPKey(),
DirKey: folder.DirKey,
// step 1.
preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
PdirKey: folder.GetPKey(),
DirKey: folder.DirKey,
FileName: stream.GetName(),
FileSize: stream.GetSize(),
File: file,
FileName: stream.GetName(),
FileSize: stream.GetSize(),
File: file,
ChannelCount: 4,
FileExistOption: 1,
})
if err != nil {
return nil, err
}
ChannelCount: 4,
FileExistOption: 1,
})
if err != nil {
return nil, err
}
// not fast upload
if !preData.FileExist {
// step.2 add upload channels
if len(preData.ChannelList) < d.uploadThread {
newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
if err != nil {
return nil, err
}
preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
}
// step.3 upload
threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for _, channel := range preData.ChannelList {
if utils.IsCanceled(upCtx) {
break
}
var channel = channel
threadG.Go(func(ctx context.Context) error {
for {
channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
if err != nil {
return err
}
// upload finished
if upData.UploadState != 1 {
return nil
}
channel = upData.Channel
}
})
}
if err = threadG.Wait(); err != nil {
// not fast upload
if !preData.FileExist {
// step.2 add upload channels
if len(preData.ChannelList) < d.uploadThread {
newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
if err != nil {
return nil, err
}
preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
}
// step.3 upload
threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
return &File{
PFolder: folder,
File: preData.File,
}, nil
total := atomic.Int64{}
for _, channel := range preData.ChannelList {
if utils.IsCanceled(upCtx) {
break
}
var channel = channel
threadG.Go(func(ctx context.Context) error {
for {
channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
len64 := int64(channel.Len)
upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(file, channel.Offset, len64)))
if err != nil {
return err
}
cur := total.Add(len64)
up(float64(cur) * 100.0 / float64(stream.GetSize()))
// upload finished
if upData.UploadState != 1 {
return nil
}
channel = upData.Channel
}
})
}
if err = threadG.Wait(); err != nil {
return nil, err
}
}
return nil, errs.NotSupport
return &File{
PFolder: folder,
File: preData.File,
}, nil
}
// func (d *WeiYun) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
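
Unlike the chunk-count progress used elsewhere in this compare, the WeiYun channels report bytes: each completed section adds its length to a shared atomic.Int64 and the percentage is cumulative bytes over the file size. A sketch of that accounting:

package weiyunsketch

import "sync/atomic"

// addSection is called by an upload channel after it finishes a section.
func addSection(total *atomic.Int64, sectionLen, fileSize int64, up func(float64)) {
	cur := total.Add(sectionLen)
	up(float64(cur) * 100.0 / float64(fileSize))
}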

View File

@ -155,12 +155,13 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
_, err := d.client.Upload2C(d.getSpaceType(), wopan.Upload2CFile{
Name: stream.GetName(),
Size: stream.GetSize(),
Content: stream,
Content: driver.NewLimitedUploadStream(ctx, stream),
ContentType: stream.GetMimetype(),
}, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{
OnProgress: func(current, total int64) {
up(100 * float64(current) / float64(total))
},
Ctx: ctx,
})
return err
}

View File

@ -106,25 +106,31 @@ func (d *YandexDisk) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
var resp UploadResp
_, err := d.request("/upload", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"path": path.Join(dstDir.GetPath(), stream.GetName()),
"path": path.Join(dstDir.GetPath(), s.GetName()),
"overwrite": "true",
})
}, &resp)
if err != nil {
return err
}
req, err := http.NewRequest(resp.Method, resp.Href, stream)
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, reader)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.FormatInt(stream.GetSize(), 10))
req.Header.Set("Content-Length", strconv.FormatInt(s.GetSize(), 10))
req.Header.Set("Content-Type", "application/octet-stream")
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
return err
}

go.mod
View File

@ -1,12 +1,11 @@
module github.com/alist-org/alist/v3
go 1.23
toolchain go1.23.1
go 1.23.4
require (
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
github.com/KirCute/sftpd-alist v0.0.12
github.com/ProtonMail/go-crypto v1.0.0
github.com/SheltonZhu/115driver v1.0.34
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
@ -62,14 +61,14 @@ require (
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
github.com/xhofe/tache v0.1.3
github.com/xhofe/tache v0.1.5
github.com/xhofe/wopan-sdk-go v0.1.3
github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
golang.org/x/crypto v0.31.0
golang.org/x/crypto v0.36.0
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
golang.org/x/image v0.19.0
golang.org/x/net v0.28.0
golang.org/x/net v0.37.0
golang.org/x/oauth2 v0.22.0
golang.org/x/time v0.8.0
google.golang.org/appengine v1.6.8
@ -82,6 +81,7 @@ require (
require (
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/blevesearch/go-faiss v1.0.20 // indirect
github.com/blevesearch/zapx/v16 v16.1.5 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
@ -90,11 +90,13 @@ require (
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/charmbracelet/x/ansi v0.2.3 // indirect
github.com/charmbracelet/x/term v0.2.0 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/fclairamb/go-log v0.5.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hekmon/cunits/v2 v2.1.0 // indirect
@ -102,12 +104,17 @@ require (
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
github.com/microcosm-cc/bluemonday v1.0.27
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/xhofe/115-sdk-go v0.1.1
github.com/yuin/goldmark v1.7.8
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
resty.dev/v3 v3.0.0-beta.2 // indirect
)
require (
@ -170,7 +177,6 @@ require (
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.5.5 // indirect
github.com/jaevor/go-nanoid v1.3.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
@ -240,10 +246,10 @@ require (
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/bbolt v1.3.8 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.28.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0
golang.org/x/sync v0.12.0
golang.org/x/sys v0.31.0 // indirect
golang.org/x/term v0.30.0 // indirect
golang.org/x/text v0.23.0
golang.org/x/tools v0.24.0 // indirect
google.golang.org/api v0.169.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
@ -255,3 +261,5 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.1.7 // indirect
)
// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go

go.sum
View File

@ -28,6 +28,8 @@ github.com/KirCute/sftpd-alist v0.0.12 h1:GNVM5QLbQLAfXP4wGUlXFA2IO6fVek0n0IsGnO
github.com/KirCute/sftpd-alist v0.0.12/go.mod h1:2wNK7yyW2XfjyJq10OY6xB4COLac64hOwfV6clDJn6s=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
@ -66,6 +68,8 @@ github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiE
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8=
github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -118,6 +122,7 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=
github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
@ -147,6 +152,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e h1:GLC8iDDcbt1H8+RkNao2nRGjyNTIo81e1rAJT9/uWYA=
github.com/city404/v6-public-rpc-proto/go v0.0.0-20240817070657-90f8e24b653e/go.mod h1:ln9Whp+wVY/FTbn2SK0ag+SKD2fC0yQCF/Lqowc1LmU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA=
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
@ -297,6 +305,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA=
github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
@ -337,8 +347,6 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jaevor/go-nanoid v1.3.0 h1:nD+iepesZS6pr3uOVf20vR9GdGgJW1HPaR46gtrxzkg=
github.com/jaevor/go-nanoid v1.3.0/go.mod h1:SI+jFaPuddYkqkVQoNGHs81navCtH388TcrH0RqFKgY=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
@ -403,6 +411,8 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@ -418,6 +428,8 @@ github.com/meilisearch/meilisearch-go v0.27.2 h1:3G21dJ5i208shnLPDsIEZ0L0Geg/5oe
github.com/meilisearch/meilisearch-go v0.27.2/go.mod h1:SxuSqDcPBIykjWz1PX+KzsYzArNLSCadQodWs8extS0=
github.com/mholt/archives v0.1.0 h1:FacgJyrjiuyomTuNA92X5GyRBRZjE43Y/lrzKIlF35Q=
github.com/mholt/archives v0.1.0/go.mod h1:j/Ire/jm42GN7h90F5kzj6hf6ZFzEH66de+hmjEKu+I=
github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/minio/sio v0.4.0 h1:u4SWVEm5lXSqU42ZWawV0D9I5AZ5YMmo2RXpEQ/kRhc=
@ -594,10 +606,12 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhofe/115-sdk-go v0.1.1 h1:eMQIuCyhWZHQApqdCIt7bTA3S5MYQnANeLJbWYSDv6A=
github.com/xhofe/115-sdk-go v0.1.1/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE=
github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=
github.com/xhofe/tache v0.1.5/go.mod h1:PYt6I/XUKliSg1uHlgsk6ha+le/f6PAvjUtFZAVl3a8=
github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A=
github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
@ -607,6 +621,8 @@ github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9/go.mod h1:9BnoKCcgJ/+SLhf
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.8 h1:iERMLn0/QJeHFhxSt3p6PeN9mGnvIKSpG9YYorDMnic=
github.com/yuin/goldmark v1.7.8/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22 h1:X+lHsNTlbatQ1cErXIbtyrh+3MTWxqQFS+sBP/wpFXo=
@ -643,12 +659,14 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -706,15 +724,17 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -734,8 +754,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -764,6 +784,8 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -773,20 +795,22 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -797,13 +821,14 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
@@ -930,6 +955,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
resty.dev/v3 v3.0.0-beta.2 h1:xu4mGAdbCLuc3kbk7eddWfWm4JfhwDtdapwss5nCjnQ=
resty.dev/v3 v3.0.0-beta.2/go.mod h1:OgkqiPvTDtOuV4MGZuUDhwOpkY8enjOsjjMzeOHefy4=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=

View File

@@ -59,7 +59,7 @@ func _decompress(file *zip.File, targetPath, password string, up model.UpdatePro
 		return err
 	}
 	defer rc.Close()
-	f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+	f, err := os.OpenFile(stdpath.Join(targetPath, decodeName(file.FileInfo().Name())), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
 	if err != nil {
 		return err
 	}
@@ -87,12 +87,27 @@ func filterPassword(err error) error {
 func decodeName(name string) string {
 	b := []byte(name)
 	detector := chardet.NewTextDetector()
-	result, err := detector.DetectBest(b)
+	results, err := detector.DetectAll(b)
 	if err != nil {
 		return name
 	}
-	enc := getEncoding(result.Charset)
-	if enc == nil {
+	var ce, re, enc encoding.Encoding
+	for _, r := range results {
+		if r.Confidence > 30 {
+			ce = getCommonEncoding(r.Charset)
+			if ce != nil {
+				break
+			}
+		}
+		if re == nil {
+			re = getEncoding(r.Charset)
+		}
+	}
+	if ce != nil {
+		enc = ce
+	} else if re != nil {
+		enc = re
+	} else {
 		return name
 	}
 	i := bytes.NewReader(b)
@@ -101,8 +116,30 @@
 	return string(content)
 }
 
+func getCommonEncoding(name string) (enc encoding.Encoding) {
+	switch name {
+	case "UTF-8":
+		enc = unicode.UTF8
+	case "UTF-16LE":
+		enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
+	case "Shift_JIS":
+		enc = japanese.ShiftJIS
+	case "GB-18030":
+		enc = simplifiedchinese.GB18030
+	case "EUC-KR":
+		enc = korean.EUCKR
+	case "Big5":
+		enc = traditionalchinese.Big5
+	default:
+		enc = nil
+	}
+	return
+}
+
 func getEncoding(name string) (enc encoding.Encoding) {
 	switch name {
 	case "UTF-8":
 		enc = unicode.UTF8
 	case "UTF-16BE":
 		enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
 	case "UTF-16LE":

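For context on the hunks above: the patch replaces a single DetectBest call with a two-tier fallback, preferring any candidate charset with confidence above 30 that maps to a common encoding, and otherwise taking the first candidate that maps to any known encoding. Below is a minimal standalone sketch of the same detect-then-decode pattern, assuming the saintfish/chardet and golang.org/x/text packages this module already pulls in; decodeGBKName is a hypothetical helper for illustration, not code from this patch.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/saintfish/chardet"
	"golang.org/x/text/encoding/simplifiedchinese"
	"golang.org/x/text/transform"
)

// decodeGBKName mirrors the fallback idea above in miniature: run the
// detector over all candidates, accept a confident match against one
// common charset, and fall back to the raw name when nothing fits.
func decodeGBKName(name string) string {
	b := []byte(name)
	results, err := chardet.NewTextDetector().DetectAll(b)
	if err != nil {
		return name
	}
	for _, r := range results {
		// Only trust reasonably confident guesses, as the patch does (> 30).
		if r.Confidence > 30 && r.Charset == "GB-18030" {
			decoded, derr := io.ReadAll(transform.NewReader(bytes.NewReader(b), simplifiedchinese.GB18030.NewDecoder()))
			if derr == nil {
				return string(decoded)
			}
		}
	}
	return name
}

func main() {
	// GBK bytes for "测试.txt", as a legacy zip tool might store them.
	gbk := []byte{0xb2, 0xe2, 0xca, 0xd4, 0x2e, 0x74, 0x78, 0x74}
	fmt.Println(decodeGBKName(string(gbk)))
}

With inputs this short the detector may not reach the confidence threshold, in which case the raw bytes come back unchanged, which is exactly the patch's fallback behavior.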
View File

@@ -35,7 +35,6 @@ func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.Ar
 	for _, file := range zipReader.File {
 		if file.IsEncrypted() {
 			encrypted = true
-			break
 		}
 		name := strings.TrimPrefix(decodeName(file.Name), "/")
@@ -70,6 +69,7 @@ func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.Ar
 			dirObj.IsFolder = true
 			dirObj.Name = stdpath.Base(dir)
 			dirObj.Modified = file.ModTime()
+			dirObj.Children = make([]model.ObjTree, 0)
 		}
 		if isNewFolder {
 			// add the folder to its parent folder

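One plausible reason for the added dirObj.Children initialization above, an assumption rather than something stated in the diff: if the tree is serialized with encoding/json, a nil slice renders as null while an initialized empty slice renders as [], so API consumers always see an array for folders. A tiny sketch, with node as a hypothetical stand-in for model.ObjTree:

package main

import (
	"encoding/json"
	"fmt"
)

// node is a stand-in for model.ObjTree (hypothetical, for illustration).
type node struct {
	Name     string `json:"name"`
	Children []node `json:"children"`
}

func main() {
	implicit, _ := json.Marshal(node{Name: "dir"})                     // Children left nil
	explicit, _ := json.Marshal(node{Name: "dir", Children: []node{}}) // Children initialized
	fmt.Println(string(implicit)) // {"name":"dir","children":null}
	fmt.Println(string(explicit)) // {"name":"dir","children":[]}
}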
View File

@@ -1,6 +1,8 @@
 package data
 
 import (
+	"strconv"
+
 	"github.com/alist-org/alist/v3/cmd/flags"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/db"
@@ -114,7 +116,7 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.VideoTypes, Value: "mp4,mkv,avi,mov,rmvb,webm,flv,m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
 		{Key: conf.ImageTypes, Value: "jpg,tiff,jpeg,png,gif,bmp,svg,ico,swf,webp", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
 		//{Key: conf.OfficeTypes, Value: "doc,docx,xls,xlsx,ppt,pptx", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
-		{Key: conf.ProxyTypes, Value: "m3u8", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
+		{Key: conf.ProxyTypes, Value: "m3u8,url", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
 		{Key: conf.ProxyIgnoreHeaders, Value: "authorization,referer", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE},
 		{Key: "external_previews", Value: `{}`, Type: conf.TypeText, Group: model.PREVIEW},
 		{Key: "iframe_previews", Value: `{
@@ -139,6 +141,9 @@ func InitialSettings() []model.SettingItem {
 		{Key: "audio_cover", Value: "https://jsd.nn.ci/gh/alist-org/logo@main/logo.svg", Type: conf.TypeString, Group: model.PREVIEW},
 		{Key: conf.AudioAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
 		{Key: conf.VideoAutoplay, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+		{Key: conf.PreviewArchivesByDefault, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+		{Key: conf.ReadMeAutoRender, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
+		{Key: conf.FilterReadMeScripts, Value: "true", Type: conf.TypeBool, Group: model.PREVIEW},
 		// global settings
 		{Key: conf.HideFiles, Value: "/\\/README.md/i", Type: conf.TypeText, Group: model.GLOBAL},
 		{Key: "package_download", Value: "true", Type: conf.TypeBool, Group: model.GLOBAL},
@@ -191,12 +196,12 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.LdapDefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.LDAP, Flag: model.PRIVATE},
 		{Key: conf.LdapLoginTips, Value: "login with ldap", Type: conf.TypeString, Group: model.LDAP, Flag: model.PUBLIC},
-		//s3 settings
+		// s3 settings
 		{Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
 		{Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
 		{Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
-		//ftp settings
+		// ftp settings
 		{Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " +
@@ -205,6 +210,18 @@ func InitialSettings() []model.SettingItem {
 		{Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
 		{Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
+		// traffic settings
+		{Key: conf.TaskOfflineDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Download.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskOfflineDownloadTransferThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Transfer.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Upload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskCopyThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Copy.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskDecompressDownloadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.Decompress.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.TaskDecompressUploadThreadsNum, Value: strconv.Itoa(conf.Conf.Tasks.DecompressUpload.Workers), Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxClientDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxClientUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxServerDownloadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
+		{Key: conf.StreamMaxServerUploadSpeed, Value: "-1", Type: conf.TypeNumber, Group: model.TRAFFIC, Flag: model.PRIVATE},
 	}
 	initialSettingItems = append(initialSettingItems, tool.Tools.Items()...)
 	if flags.Dev {

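The new TRAFFIC block above seeds the worker-count settings from the static config (strconv.Itoa of conf.Conf.Tasks.*.Workers) and uses -1 as the "unlimited" sentinel for the four stream-speed settings. A minimal sketch of that store-as-string, parse-on-read pattern; the keys and the getInt helper here are hypothetical stand-ins for the real setting keys and setting.GetInt:

package main

import (
	"fmt"
	"strconv"
)

// getInt parses a stored string value back to an int, falling back to the
// given default, roughly what setting.GetInt does against the settings table.
func getInt(items map[string]string, key string, def int) int {
	if v, ok := items[key]; ok {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

func main() {
	uploadWorkers := 5 // would come from conf.Conf.Tasks.Upload.Workers
	items := map[string]string{
		"task_upload_threads_num":          strconv.Itoa(uploadWorkers),
		"stream_max_client_download_speed": "-1", // -1 means unlimited
	}
	fmt.Println(getInt(items, "task_upload_threads_num", 1))          // 5
	fmt.Println(getInt(items, "stream_max_client_download_speed", 0)) // -1
}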
View File

@@ -0,0 +1,53 @@
+package bootstrap
+
+import (
+	"context"
+	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"golang.org/x/time/rate"
+)
+
+type blockBurstLimiter struct {
+	*rate.Limiter
+}
+
+func (l blockBurstLimiter) WaitN(ctx context.Context, total int) error {
+	for total > 0 {
+		n := l.Burst()
+		if l.Limiter.Limit() == rate.Inf || n > total {
+			n = total
+		}
+		err := l.Limiter.WaitN(ctx, n)
+		if err != nil {
+			return err
+		}
+		total -= n
+	}
+	return nil
+}
+
+func streamFilterNegative(limit int) (rate.Limit, int) {
+	if limit < 0 {
+		return rate.Inf, 0
+	}
+	return rate.Limit(limit) * 1024.0, limit * 1024
+}
+
+func initLimiter(limiter *stream.Limiter, s string) {
+	clientDownLimit, burst := streamFilterNegative(setting.GetInt(s, -1))
+	*limiter = blockBurstLimiter{Limiter: rate.NewLimiter(clientDownLimit, burst)}
+	op.RegisterSettingChangingCallback(func() {
+		newLimit, newBurst := streamFilterNegative(setting.GetInt(s, -1))
+		(*limiter).SetLimit(newLimit)
+		(*limiter).SetBurst(newBurst)
+	})
+}
+
+func InitStreamLimit() {
+	initLimiter(&stream.ClientDownloadLimit, conf.StreamMaxClientDownloadSpeed)
+	initLimiter(&stream.ClientUploadLimit, conf.StreamMaxClientUploadSpeed)
+	initLimiter(&stream.ServerDownloadLimit, conf.StreamMaxServerDownloadSpeed)
+	initLimiter(&stream.ServerUploadLimit, conf.StreamMaxServerUploadSpeed)
+}

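On the new file above: blockBurstLimiter.WaitN loops in burst-sized chunks because rate.Limiter.WaitN fails outright whenever a single request exceeds the limiter's burst, and streamFilterNegative converts a KB/s setting into bytes/s (or rate.Inf for the -1 sentinel). Below is a sketch of how such a limiter might gate an io.Reader, assuming only golang.org/x/time/rate; rateLimitedReader and waitChunked are illustrative names, not part of the patch:

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"

	"golang.org/x/time/rate"
)

// waitChunked splits a large token request into burst-sized pieces,
// mirroring blockBurstLimiter.WaitN above: a single WaitN larger than
// the burst would return an error instead of blocking.
func waitChunked(ctx context.Context, lim *rate.Limiter, total int) error {
	for total > 0 {
		n := lim.Burst()
		if lim.Limit() == rate.Inf || n > total {
			n = total
		}
		if err := lim.WaitN(ctx, n); err != nil {
			return err
		}
		total -= n
	}
	return nil
}

// rateLimitedReader charges the limiter one token per byte read.
type rateLimitedReader struct {
	ctx context.Context
	lim *rate.Limiter
	r   io.Reader
}

func (l *rateLimitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		if werr := waitChunked(l.ctx, l.lim, n); werr != nil {
			return n, werr
		}
	}
	return n, err
}

func main() {
	// 16 bytes/s with a 4-byte burst: copying 32 bytes takes roughly
	// (32-4)/16 ≈ 1.75s, even though io.Copy reads in one large chunk.
	lim := rate.NewLimiter(16, 4)
	r := &rateLimitedReader{ctx: context.Background(), lim: lim, r: strings.NewReader(strings.Repeat("x", 32))}
	start := time.Now()
	n, _ := io.Copy(io.Discard, r)
	fmt.Printf("copied %d bytes in %s\n", n, time.Since(start).Round(100*time.Millisecond))
}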
Some files were not shown because too many files have changed in this diff.