Compare commits
49 Commits
SHA1
0cde4e73d6
7b62dcb88c
c38dc6df7c
5668e4a4ea
1335f80362
704d3854df
44cc71d354
9a9aee9ac6
4fcc3a187e
10a76c701d
6e13923225
32890da29f
758554a40f
4563aea47e
35d6f3b8fc
b4e6ab12d9
3499c4db87
d20f41d687
d16ba65f42
c82e632ee1
04f5525f20
28b61a93fd
0126af4de0
7579d44517
5dfea714d8
370a6c15a9
2570707a06
4145734c18
646c7bcd21
cdc41595bc
79bef0be9e
c230f24ebe
30d8c20756
3b71500f23
399336b33c
36b4204623
f25be154c6
ec3fc945a3
3f9bed3d5f
b9ad18bd0a
0219c4e15a
d983a4ebcb
f795807753
6164e4577b
39bde328ee
779c293f04
b9f397d29f
d53eecc229
f88fd83d4a
.github/workflows/beta_release.yml (1 change)

@@ -94,7 +94,6 @@ jobs:
out-dir: build
x-flags: |
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
github.com/alist-org/alist/v3/internal/conf.GoVersion=$go_version
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
github.com/alist-org/alist/v3/internal/conf.Version=$tag
.github/workflows/build.yml (43 changes)

@@ -15,14 +15,17 @@ jobs:
strategy:
matrix:
platform: [ubuntu-latest]
go-version: [ '1.21' ]
target:
- darwin-amd64
- darwin-arm64
- windows-amd64
- linux-arm64-musl
- linux-amd64-musl
- windows-arm64
- android-arm64
name: Build
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}

- name: Checkout
uses: actions/checkout@v4

@@ -30,19 +33,29 @@ jobs:
- uses: benjlevesque/short-sha@v3.0
id: short-sha

- name: Install dependencies
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
go install github.com/crazy-max/xgo@latest
sudo apt install upx
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '1.22'

- name: Setup web
run: bash build.sh dev web

- name: Build
run: |
bash build.sh dev
uses: go-cross/cgo-actions@v1
with:
targets: ${{ matrix.target }}
musl-target-format: $os-$musl-$arch
out-dir: build
x-flags: |
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
github.com/alist-org/alist/v3/internal/conf.Version=$tag
github.com/alist-org/alist/v3/internal/conf.WebVersion=dev

- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: alist_${{ env.SHA }}
path: dist
name: alist_${{ env.SHA }}_${{ matrix.target }}
path: build/*
build.sh (2 changes)

@@ -1,6 +1,5 @@
appName="alist"
builtAt="$(date +'%F %T %z')"
goVersion=$(go version | sed 's/go version //')
gitAuthor="Xhofe <i@nn.ci>"
gitCommit=$(git log --pretty=format:"%h" -1)

@@ -22,7 +21,6 @@ echo "frontend version: $webVersion"
ldflags="\
-w -s \
-X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
-X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
-X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
-X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \
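For readers unfamiliar with the mechanism: -X rewrites package-level string variables at link time, so the values above land in internal/conf without any code generation. A minimal sketch of the receiving side, assuming declarations along these lines (the actual conf package may differ in detail):

// internal/conf/var.go (sketch, not the repository's exact file)
package conf

// Zero-valued strings that the linker overwrites via
//   go build -ldflags "-X 'github.com/alist-org/alist/v3/internal/conf.Version=v3.x.y'"
var (
	BuiltAt    string
	GitAuthor  string
	GitCommit  string
	Version    string = "dev"
	WebVersion string
)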
@@ -17,6 +17,7 @@ func Init() {
bootstrap.Log()
bootstrap.InitDB()
data.InitData()
bootstrap.InitStreamLimit()
bootstrap.InitIndex()
bootstrap.InitUpgradePatch()
}
@@ -12,6 +12,7 @@ import (
"strings"

_ "github.com/alist-org/alist/v3/drivers"
"github.com/alist-org/alist/v3/internal/bootstrap"
"github.com/alist-org/alist/v3/internal/bootstrap/data"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/op"

@@ -137,6 +138,7 @@ var LangCmd = &cobra.Command{
Use: "lang",
Short: "Generate language json file",
Run: func(cmd *cobra.Command, args []string) {
bootstrap.InitConfig()
err := os.MkdirAll("lang", 0777)
if err != nil {
utils.Log.Fatalf("failed create folder: %s", err.Error())
@@ -6,6 +6,7 @@ package cmd
import (
"fmt"
"os"
"runtime"

"github.com/alist-org/alist/v3/internal/conf"
"github.com/spf13/cobra"

@@ -16,14 +17,15 @@ var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show current version of AList",
Run: func(cmd *cobra.Command, args []string) {
goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)

fmt.Printf(`Built At: %s
Go Version: %s
Author: %s
Commit ID: %s
Version: %s
WebVersion: %s
`,
conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
os.Exit(0)
},
}
@@ -215,12 +215,12 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var uploadResult *UploadResult
// rapid upload failed, upload normally
if stream.GetSize() <= 10*utils.MB { // files no larger than 10MB fall back to the simple upload mode
if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil {
if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
return nil, err
}
} else {
// multipart upload
if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil {
if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
return nil, err
}
}
@@ -2,6 +2,7 @@ package _115

import (
"bytes"
"context"
"crypto/md5"
"crypto/tls"
"encoding/hex"
@@ -13,9 +14,11 @@ import (
"strconv"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
@@ -140,7 +143,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
return nil, err
}

bytes, err := crypto.Decode(string(result.EncodedData), key)
b, err := crypto.Decode(string(result.EncodedData), key)
if err != nil {
return nil, err
}
@@ -148,7 +151,7 @@ func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
downloadInfo := struct {
Url string `json:"url"`
}{}
if err := utils.Json.Unmarshal(bytes, &downloadInfo); err != nil {
if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
return nil, err
}

@@ -271,7 +274,7 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
}

// UploadByOSS use aliyun sdk to upload
func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) {
func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
ossToken, err := c.client.GetOSSToken()
if err != nil {
return nil, err
@@ -286,6 +289,10 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
}

var bodyBytes []byte
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
if err = bucket.PutObject(params.Object, r, append(
driver115.OssOption(params, ossToken),
oss.CallbackResult(&bodyBytes),
@@ -301,7 +308,8 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
}

// UploadByMultipart upload by multipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
@@ -313,7 +321,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
err error
)

tmpF, err := stream.CacheFullInTempFile()
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
}
@@ -372,6 +380,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
quit <- struct{}{}
}()

completedNum := atomic.Int32{}
// consumers
for i := 0; i < options.ThreadsNum; i++ {
go func(threadId int) {
@@ -384,24 +393,28 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
var part oss.UploadPart // keep retrying on error, up to 3 attempts
for retry := 0; retry < 3; retry++ {
select {
case <-ctx.Done():
break
case <-ticker.C:
if ossToken, err = d.client.GetOSSToken(); err != nil { // refresh the ossToken when the ticker fires
errCh <- errors.Wrap(err, "刷新token时出现错误")
}
default:
}

buf := make([]byte, chunk.Size)
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
continue
}

if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)),
chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}
}
if err != nil {
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
} else {
num := completedNum.Add(1)
up(float64(num) * 100.0 / float64(len(chunks)))
}
UploadedPartsCh <- part
}
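The pattern recurring throughout this compare is the same composition: a progress-reporting reader wrapped in a rate-limited reader, handed to the SDK call. A minimal sketch of that wrap, inferred from the call sites above (any helper name other than driver.NewLimitedUploadStream and driver.ReaderUpdatingProgress is hypothetical):

package upload

import (
	"context"
	"io"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
)

// wrapUploadBody wraps an upload body so it reports progress as it is
// consumed and respects the server-wide limiter installed by
// bootstrap.InitStreamLimit().
func wrapUploadBody(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) io.Reader {
	// ReaderUpdatingProgress invokes up() as bytes are read;
	// NewLimitedUploadStream throttles those reads against the global limit.
	return driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
		Reader:         s,
		UpdateProgress: up,
	})
}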
drivers/115_open/driver.go (new file, 299 lines)

@@ -0,0 +1,299 @@
package _115_open

import (
"context"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"

"github.com/alist-org/alist/v3/cmd/flags"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
sdk "github.com/xhofe/115-sdk-go"
)

type Open115 struct {
model.Storage
Addition
client *sdk.Client
}

func (d *Open115) Config() driver.Config {
return config
}

func (d *Open115) GetAddition() driver.Additional {
return &d.Addition
}

func (d *Open115) Init(ctx context.Context) error {
d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken),
sdk.WithAccessToken(d.Addition.AccessToken),
sdk.WithOnRefreshToken(func(s1, s2 string) {
d.Addition.AccessToken = s1
d.Addition.RefreshToken = s2
op.MustSaveDriverStorage(d)
}))
if flags.Debug || flags.Dev {
d.client.SetDebug(true)
}
_, err := d.client.UserInfo(ctx)
if err != nil {
return err
}
return nil
}

func (d *Open115) Drop(ctx context.Context) error {
return nil
}

func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var res []model.Obj
pageSize := int64(200)
offset := int64(0)
for {
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
CID: dir.GetID(),
Limit: pageSize,
Offset: offset,
ASC: d.Addition.OrderDirection == "asc",
O: d.Addition.OrderBy,
// Cur: 1,
ShowDir: true,
})
if err != nil {
return nil, err
}
res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
obj := Obj(src)
return &obj
})...)
if len(res) >= int(resp.Count) {
break
}
offset += pageSize
}
return res, nil
}

func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var ua string
if args.Header != nil {
ua = args.Header.Get("User-Agent")
}
if ua == "" {
ua = base.UserAgent
}
obj, ok := file.(*Obj)
if !ok {
return nil, fmt.Errorf("can't convert obj")
}
pc := obj.Pc
resp, err := d.client.DownURL(ctx, pc, ua)
if err != nil {
return nil, err
}
u, ok := resp[obj.GetID()]
if !ok {
return nil, fmt.Errorf("can't get link")
}
return &model.Link{
URL: u.URL.URL,
Header: http.Header{
"User-Agent": []string{ua},
},
}, nil
}

func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
if err != nil {
return nil, err
}
return &Obj{
Fid: resp.FileID,
Pid: parentDir.GetID(),
Fn: dirName,
Fc: "0",
Upt: time.Now().Unix(),
Uet: time.Now().Unix(),
UpPt: time.Now().Unix(),
}, nil
}

func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
_, err := d.client.Move(ctx, &sdk.MoveReq{
FileIDs: srcObj.GetID(),
ToCid: dstDir.GetID(),
})
if err != nil {
return nil, err
}
return srcObj, nil
}

func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
FileID: srcObj.GetID(),
FileNma: newName,
})
if err != nil {
return nil, err
}
obj, ok := srcObj.(*Obj)
if ok {
obj.Fn = newName
}
return srcObj, nil
}

func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
_, err := d.client.Copy(ctx, &sdk.CopyReq{
PID: dstDir.GetID(),
FileID: srcObj.GetID(),
NoDupli: "1",
})
if err != nil {
return nil, err
}
return srcObj, nil
}

func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
_obj, ok := obj.(*Obj)
if !ok {
return fmt.Errorf("can't convert obj")
}
_, err := d.client.DelFile(ctx, &sdk.DelFileReq{
FileIDs: _obj.GetID(),
ParentID: _obj.Pid,
})
if err != nil {
return err
}
return nil
}

func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
tempF, err := file.CacheFullInTempFile()
if err != nil {
return err
}
// calculate full sha1
sha1, err := utils.HashReader(utils.SHA1, tempF)
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
// pre 128k sha1
sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
// 1. Init
resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
Target: dstDir.GetID(),
FileID: strings.ToUpper(sha1),
PreID: strings.ToUpper(sha1128k),
})
if err != nil {
return err
}
if resp.Status == 2 {
return nil
}
// 2. two way verify
if utils.SliceContains([]int{6, 7, 8}, resp.Status) {
signCheck := strings.Split(resp.SignCheck, "-") // "sign_check": "2392148-2392298" - the sha1 of bytes 2392148 through 2392298, both ends inclusive
start, err := strconv.ParseInt(signCheck[0], 10, 64)
if err != nil {
return err
}
end, err := strconv.ParseInt(signCheck[1], 10, 64)
if err != nil {
return err
}
_, err = tempF.Seek(start, io.SeekStart)
if err != nil {
return err
}
signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1))
if err != nil {
return err
}
_, err = tempF.Seek(0, io.SeekStart)
if err != nil {
return err
}
resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
Target: dstDir.GetID(),
FileID: strings.ToUpper(sha1),
PreID: strings.ToUpper(sha1128k),
SignKey: resp.SignKey,
SignVal: strings.ToUpper(signVal),
})
if err != nil {
return err
}
if resp.Status == 2 {
return nil
}
}
// 3. get upload token
tokenResp, err := d.client.UploadGetToken(ctx)
if err != nil {
return err
}
// 4. upload
err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp)
if err != nil {
return err
}
return nil
}

// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }

// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }

// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// return nil, errs.NotImplement
// }

// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// // return errs.NotImplement to use an internal archive tool
// return nil, errs.NotImplement
// }

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

var _ driver.Driver = (*Open115)(nil)
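The two-way verification in Put above hashes the inclusive byte range that the server names in sign_check (e.g. "2392148-2392298"). A standalone sketch of just that step, assuming any io.ReadSeeker as input; rangeSHA1 is a hypothetical helper, not part of the driver:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// rangeSHA1 hashes the inclusive byte range "start-end" of f,
// mirroring the sign_check step in Open115.Put.
func rangeSHA1(f io.ReadSeeker, signCheck string) (string, error) {
	parts := strings.Split(signCheck, "-")
	start, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return "", err
	}
	end, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return "", err
	}
	if _, err := f.Seek(start, io.SeekStart); err != nil {
		return "", err
	}
	h := sha1.New()
	// end is inclusive, hence the +1.
	if _, err := io.CopyN(h, f, end-start+1); err != nil {
		return "", err
	}
	return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
	r := strings.NewReader("example payload")
	sum, _ := rangeSHA1(r, "0-6") // hashes the bytes "example"
	fmt.Println(sum)
}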
drivers/115_open/meta.go (new file, 36 lines)

@@ -0,0 +1,36 @@
package _115_open

import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
// Usually one of two
driver.RootID
// define other
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
AccessToken string
}

var config = driver.Config{
Name: "115 Open",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: false,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}

func init() {
op.RegisterDriver(func() driver.Driver {
return &Open115{}
})
}
drivers/115_open/types.go (new file, 59 lines)

@@ -0,0 +1,59 @@
package _115_open

import (
"time"

"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
sdk "github.com/xhofe/115-sdk-go"
)

type Obj sdk.GetFilesResp_File

// Thumb implements model.Thumb.
func (o *Obj) Thumb() string {
return o.Thumbnail
}

// CreateTime implements model.Obj.
func (o *Obj) CreateTime() time.Time {
return time.Unix(o.UpPt, 0)
}

// GetHash implements model.Obj.
func (o *Obj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, o.Sha1)
}

// GetID implements model.Obj.
func (o *Obj) GetID() string {
return o.Fid
}

// GetName implements model.Obj.
func (o *Obj) GetName() string {
return o.Fn
}

// GetPath implements model.Obj.
func (o *Obj) GetPath() string {
return ""
}

// GetSize implements model.Obj.
func (o *Obj) GetSize() int64 {
return o.FS
}

// IsDir implements model.Obj.
func (o *Obj) IsDir() bool {
return o.Fc == "0"
}

// ModTime implements model.Obj.
func (o *Obj) ModTime() time.Time {
return time.Unix(o.Upt, 0)
}

var _ model.Obj = (*Obj)(nil)
var _ model.Thumb = (*Obj)(nil)
drivers/115_open/upload.go (new file, 140 lines)

@@ -0,0 +1,140 @@
package _115_open

import (
"context"
"encoding/base64"
"io"
"time"

"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
sdk "github.com/xhofe/115-sdk-go"
)

func calPartSize(fileSize int64) int64 {
var partSize int64 = 20 * utils.MB
if fileSize > partSize {
if fileSize > 1*utils.TB { // file size over 1TB
partSize = 5 * utils.GB // file part size 5GB
} else if fileSize > 768*utils.GB { // over 768GB
partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 parts
} else if fileSize > 512*utils.GB { // over 512GB
partSize = 82463373 // ≈ 78.6432MB
} else if fileSize > 384*utils.GB { // over 384GB
partSize = 54975582 // ≈ 52.4288MB
} else if fileSize > 256*utils.GB { // over 256GB
partSize = 41231687 // ≈ 39.3216MB
} else if fileSize > 128*utils.GB { // over 128GB
partSize = 27487791 // ≈ 26.2144MB
}
}
return partSize
}

func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(initResp.Bucket)
if err != nil {
return err
}

err = bucket.PutObject(initResp.Object, tempF,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
)

return err
}

// type CallbackResult struct {
// State bool `json:"state"`
// Code int `json:"code"`
// Message string `json:"message"`
// Data struct {
// PickCode string `json:"pick_code"`
// FileName string `json:"file_name"`
// FileSize int64 `json:"file_size"`
// FileID string `json:"file_id"`
// ThumbURL string `json:"thumb_url"`
// Sha1 string `json:"sha1"`
// Aid int `json:"aid"`
// Cid string `json:"cid"`
// } `json:"data"`
// }

func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)

ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(initResp.Bucket)
if err != nil {
return err
}

imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential())
if err != nil {
return err
}

partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
for i := int64(1); i <= partNum; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}

partSize := chunkSize
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
}
parts[i-1] = part
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
if err != nil {
return err
}

if i == partNum {
offset = fileSize
} else {
offset += partSize
}
up(float64(offset) / float64(fileSize))
}

// callbackRespBytes := make([]byte, 1024)
_, err = bucket.CompleteMultipartUpload(
imur,
parts,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
// oss.CallbackResult(&callbackRespBytes),
)
if err != nil {
return err
}

return nil
}
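The magic constants in calPartSize keep the part count at or below OSS's 10,000-part ceiling: for example 109951163 ≈ ceil(1 TiB / 10,000) = ceil(1099511627776 / 10000). A quick check of that arithmetic (the tier bounds below restate the comparisons in calPartSize):

package main

import "fmt"

// Verify that each calPartSize tier keeps the largest file it must cover
// within OSS's 10,000-part limit (all sizes in bytes).
func main() {
	const gb = int64(1 << 30)
	tiers := []struct {
		upperBound int64 // largest file size the tier must cover
		partSize   int64 // constant used in calPartSize
	}{
		{1024 * gb, 109951163}, // the >768GB tier must cover files up to 1TB
		{768 * gb, 82463373},
		{512 * gb, 54975582},
		{384 * gb, 41231687},
		{256 * gb, 27487791},
	}
	for _, t := range tiers {
		parts := (t.upperBound + t.partSize - 1) / t.partSize // ceiling division
		fmt.Printf("partSize=%d -> %d parts for %d bytes\n", t.partSize, parts, t.upperBound)
	}
	// Each line prints exactly 10000 parts.
}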
drivers/115_open/util.go (new file, 3 lines)

@@ -0,0 +1,3 @@
package _115_open

// do other things not defined in the Driver interface
@@ -185,32 +185,35 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
}
}

func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
etag := file.GetHash().GetHash(utils.MD5)
if len(etag) < utils.MD5.Width {
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
}()
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
etag = hex.EncodeToString(h.Sum(nil))
}
defer func() {
_ = tempFile.Close()
}()
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
etag := hex.EncodeToString(h.Sum(nil))
data := base.Json{
"driveId": 0,
"duplicate": 2, // 2 -> overwrite, 1 -> rename, 0 -> default
"etag": etag,
"fileName": stream.GetName(),
"fileName": file.GetName(),
"parentFileId": dstDir.GetID(),
"size": stream.GetSize(),
"size": file.GetSize(),
"type": 0,
}
var resp UploadResp
@@ -225,7 +228,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return nil
}
if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
err = d.newUpload(ctx, &resp, stream, tempFile, up)
err = d.newUpload(ctx, &resp, file, up)
return err
} else {
cfg := &aws.Config{
@@ -239,15 +242,21 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return err
}
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
input := &s3manager.UploadInput{
Bucket: &resp.Data.Bucket,
Key: &resp.Data.Key,
Body: tempFile,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {
return err
}
}
_, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
@@ -69,7 +69,7 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
return err
}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
chunkSize := int64(1024 * 1024 * 16)
// fetch s3 pre signed urls
chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
@@ -81,6 +81,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
batchSize = 10
getS3UploadUrl = d.getS3PreSignedUrls
}
limited := driver.NewLimitedUploadStream(ctx, file)
for i := 1; i <= chunkCount; i += batchSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -103,7 +104,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
if j == chunkCount {
curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
}
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
if err != nil {
return err
}
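The change above threads every chunk through one shared rate-limited reader instead of the raw source; each io.LimitReader(limited, chunkSize) then carves the next 16 MiB window off that single sequential stream. A minimal illustration of the carving pattern (the data source here is made up):

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("abcdefghij") // stands in for the upload stream
	const chunkSize = 4
	for i := 1; ; i++ {
		chunk, err := io.ReadAll(io.LimitReader(src, chunkSize))
		if err != nil || len(chunk) == 0 {
			break
		}
		// Each LimitReader consumes the next window of the same reader,
		// so chunks come out in order: "abcd", "efgh", "ij".
		fmt.Printf("chunk %d: %q\n", i, chunk)
	}
}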
@@ -2,11 +2,13 @@ package _139

import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"

"github.com/alist-org/alist/v3/drivers/base"
@@ -69,29 +71,28 @@ func (d *Yun139) Init(ctx context.Context) error {
default:
return errs.NotImplement
}
// if d.ref != nil {
// return nil
// }
// decode, err := base64.StdEncoding.DecodeString(d.Authorization)
// if err != nil {
// return err
// }
// decodeStr := string(decode)
// splits := strings.Split(decodeStr, ":")
// if len(splits) < 2 {
// return fmt.Errorf("authorization is invalid, splits < 2")
// }
// d.Account = splits[1]
// _, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{
// "qryUserExternInfoReq": base.Json{
// "commonAccountInfo": base.Json{
// "account": d.getAccount(),
// "accountType": 1,
// },
// },
// }, nil)
// return err
return nil
if d.ref != nil {
return nil
}
decode, err := base64.StdEncoding.DecodeString(d.Authorization)
if err != nil {
return err
}
decodeStr := string(decode)
splits := strings.Split(decodeStr, ":")
if len(splits) < 2 {
return fmt.Errorf("authorization is invalid, splits < 2")
}
d.Account = splits[1]
_, err = d.post("/orchestration/personalCloud/user/v1.0/qryUserExternInfo", base.Json{
"qryUserExternInfoReq": base.Json{
"commonAccountInfo": base.Json{
"account": d.getAccount(),
"accountType": 1,
},
},
}, nil)
return err
}

func (d *Yun139) InitReference(storage driver.Driver) error {
@@ -630,12 +631,13 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// Progress
p := driver.NewProgress(stream.GetSize(), up)

rateLimited := driver.NewLimitedUploadStream(ctx, stream)
// upload all parts
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(stream, partSize)
limitReader := io.LimitReader(rateLimited, partSize)

// Update Progress
r := io.TeeReader(limitReader, p)
@@ -786,6 +788,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if part == 0 {
part = 1
}
rateLimited := driver.NewLimitedUploadStream(ctx, stream)
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -797,7 +800,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
byteSize = partSize
}

limitReader := io.LimitReader(stream, byteSize)
limitReader := io.LimitReader(rateLimited, byteSize)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("POST", resp.Data.UploadResult.RedirectionURL, r)
@@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
log.Debugf("uploadData: %+v", uploadData)
requestURL := uploadData.RequestURL
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
if err != nil {
return err
}
@@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
req.Header.Set(v[0:i], v[i+1:])
}
r, err := base.HttpClient.Do(req)
log.Debugf("%+v %+v", r, r.Request.Header)
r.Body.Close()
if err != nil {
return err
}
log.Debugf("%+v %+v", r, r.Request.Header)
_ = r.Body.Close()
up(float64(i) * 100 / float64(count))
}
fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
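Besides rate limiting, this hunk fixes an ordering bug: the response and its body were touched before err was checked, which panics when the request fails and r is nil. The corrected shape in general form (a standalone sketch, not the driver's code):

package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetch shows the ordering the hunk above enforces: check the error from
// the HTTP call before touching the response, then close the body once.
func fetch(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err // resp may be nil here; dereferencing it would panic
	}
	defer resp.Body.Close()
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}

func main() {
	fmt.Println(fetch("https://example.com"))
}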
@@ -1,8 +1,8 @@
package _189pc

import (
"container/ring"
"context"
"fmt"
"net/http"
"strconv"
"strings"
@@ -14,6 +14,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)

type Cloud189PC struct {
@@ -29,7 +30,7 @@ type Cloud189PC struct {

uploadThread int

familyTransferFolder *ring.Ring
familyTransferFolder *Cloud189Folder
cleanFamilyTransferFile func()

storageConfig driver.Config
@@ -48,9 +49,18 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
}

func (y *Cloud189PC) Init(ctx context.Context) (err error) {
// compatibility with the old upload API
y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")

y.storageConfig = config
if y.isFamily() {
// compatibility with the old upload API
if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" {
y.storageConfig.NoOverwriteUpload = true
}
} else {
// family-cloud transfer does not support overwrite upload
if y.Addition.FamilyTransfer {
y.storageConfig.NoOverwriteUpload = true
}
}
// handle personal-cloud and family-cloud parameters
if y.isFamily() && y.RootFolderID == "-11" {
y.RootFolderID = ""
@@ -91,13 +101,14 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
}
}

// create the transfer folder to avoid name collisions
// create the transfer folder
if y.FamilyTransfer {
if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil {
if err := y.createFamilyTransferFolder(); err != nil {
return err
}
}

// throttle cleanup of transferred files
y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() {
if err := y.cleanFamilyTransfer(context.TODO()); err != nil {
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
@@ -327,35 +338,49 @@ func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if !isFamily && y.FamilyTransfer {
// redirect the upload target to the family-cloud folder
transferDstDir := dstDir
dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder)
y.familyTransferFolder = y.familyTransferFolder.Next()
dstDir = y.familyTransferFolder

// use a temporary file name
srcName := stream.GetName()
stream = &WrapFileStreamer{
FileStreamer: stream,
Name: fmt.Sprintf("0%s.transfer", uuid.NewString()),
}

// upload via the family cloud
isFamily = true
overwrite = false

defer func() {
if newObj != nil {
// batch tasks occasionally fail to delete
y.cleanFamilyTransferFile()

// transfer the family-cloud file to the personal cloud
err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true)

task := BatchTaskInfo{
FileId: newObj.GetID(),
FileName: newObj.GetName(),
IsFolder: BoolToNumber(newObj.IsDir()),
// delete the source file in the family cloud
go y.Delete(context.TODO(), y.FamilyID, newObj)
// batch tasks occasionally fail to delete
go y.cleanFamilyTransferFile()
// return the error if the transfer failed
if err != nil {
return
}

// delete the source file
if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err == nil {
y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
// permanently delete
if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil {
y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
// look up the transferred file
var file *Cloud189File
file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false)
if err != nil {
if err == errs.ObjectNotFound {
err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName())
}
return
}
newObj = nil

// rename the transferred file
newObj, err = y.Rename(context.TODO(), file, srcName)
if err != nil {
// if renaming failed, delete the transferred file
_ = y.Delete(context.TODO(), "", file)
}
return
}
}()
}
@@ -18,6 +18,7 @@ import (
"strings"
"time"

"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils/random"
)

@@ -208,3 +209,12 @@ func IF[V any](o bool, t V, f V) V {
}
return f
}

type WrapFileStreamer struct {
model.FileStreamer
Name string
}

func (w *WrapFileStreamer) GetName() string {
return w.Name
}
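WrapFileStreamer exists so Put can upload under a throwaway name and rename afterwards: embedding model.FileStreamer forwards every method of the original stream, and the single override swaps only the name. The idiom, reduced to a self-contained sketch (the Namer interface and types here are stand-ins, not the driver's API):

package main

import "fmt"

// Namer stands in for the naming side of model.FileStreamer.
type Namer interface{ GetName() string }

type file struct{ name string }

func (f file) GetName() string { return f.name }

// wrap embeds the original and overrides only GetName - the same idiom
// WrapFileStreamer applies to model.FileStreamer.
type wrap struct {
	Namer
	Name string
}

func (w wrap) GetName() string { return w.Name }

func main() {
	var n Namer = file{name: "photo.jpg"}
	n = wrap{Namer: n, Name: "0tmp-1234.transfer"}
	fmt.Println(n.GetName()) // prints the temporary name
}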
@@ -2,7 +2,6 @@ package _189pc

import (
"bytes"
"container/ring"
"context"
"crypto/md5"
"encoding/base64"
@@ -20,9 +19,12 @@ import (
"strings"
"time"

"golang.org/x/sync/semaphore"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
@@ -174,8 +176,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
}

var erron RespErr
jsoniter.Unmarshal(body, &erron)
xml.Unmarshal(body, &erron)
_ = jsoniter.Unmarshal(body, &erron)
_ = xml.Unmarshal(body, &erron)
if erron.HasError() {
return nil, &erron
}
@@ -185,39 +187,9 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
return body, nil
}
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
fullUrl := API_URL
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"

res := make([]model.Obj, 0, 130)
res := make([]model.Obj, 0, 100)
for pageNum := 1; ; pageNum++ {
var resp Cloud189FilesResp
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"folderId": fileId,
"fileType": "0",
"mediaAttr": "0",
"iconOption": "5",
"pageNum": fmt.Sprint(pageNum),
"pageSize": "130",
})
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(y.OrderBy),
"descending": toDesc(y.OrderDirection),
})
} else {
r.SetQueryParams(map[string]string{
"recursive": "0",
"orderBy": y.OrderBy,
"descending": toDesc(y.OrderDirection),
})
}
}, &resp, isFamily)
resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection)
if err != nil {
return nil, err
}
@@ -236,6 +208,63 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool)
return res, nil
}

func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) {
fullUrl := API_URL
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"

var resp Cloud189FilesResp
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"folderId": fileId,
"fileType": "0",
"mediaAttr": "0",
"iconOption": "5",
"pageNum": fmt.Sprint(pageNum),
"pageSize": fmt.Sprint(pageSize),
})
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(orderBy),
"descending": toDesc(orderDirection),
})
} else {
r.SetQueryParams(map[string]string{
"recursive": "0",
"orderBy": orderBy,
"descending": toDesc(orderDirection),
})
}
}, &resp, isFamily)
if err != nil {
return nil, err
}
return &resp, nil
}

func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) {
for pageNum := 1; ; pageNum++ {
resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc")
if err != nil {
return nil, err
}
// stop once everything has been fetched
if resp.FileListAO.Count == 0 {
return nil, errs.ObjectNotFound
}
for i := 0; i < len(resp.FileListAO.FileList); i++ {
file := resp.FileListAO.FileList[i]
if file.Name == searchName {
return &file, nil
}
}
}
}

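findFileByName pages through getFilesWithPage in small batches until either the name matches or a page comes back empty. The loop's shape, reduced to its essentials (fetch and the types here are stand-ins, not the driver's API):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// findByName mirrors findFileByName: request page after page and stop on
// the first empty page or the first name match.
func findByName(fetch func(page int) []string, name string) (string, error) {
	for page := 1; ; page++ {
		items := fetch(page)
		if len(items) == 0 {
			return "", errNotFound // exhausted all pages
		}
		for _, it := range items {
			if it == name {
				return it, nil
			}
		}
	}
}

func main() {
	pages := [][]string{{"a", "b"}, {"c"}}
	fetch := func(p int) []string {
		if p > len(pages) {
			return nil
		}
		return pages[p-1]
	}
	fmt.Println(findByName(fetch, "c"))
}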
func (y *Cloud189PC) login() (err error) {
// initialize the parameters required for login
if y.loginParam == nil {
@@ -481,6 +510,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)

fileMd5 := md5.New()
silceMd5 := md5.New()
@@ -490,7 +520,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
if utils.IsCanceled(upCtx) {
break
}

byteData := make([]byte, sliceSize)
if i == count {
byteData = byteData[:lastPartSize]
@@ -499,6 +528,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// read the chunk
silceMd5.Reset()
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
sem.Release(1)
return nil, err
}

@@ -508,6 +538,10 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
@@ -515,7 +549,8 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo

// step 4: upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
if err != nil {
return err
}
@@ -767,6 +802,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
if err != nil {
return nil, err
}
rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))

// create an upload session
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
@@ -793,7 +829,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}

_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
@@ -902,8 +938,7 @@ func (y *Cloud189PC) isLogin() bool {
}

// create the family-cloud transfer folder
func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
folders := ring.New(count)
func (y *Cloud189PC) createFamilyTransferFolder() error {
var rootFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
@@ -912,81 +947,61 @@ func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
})
}, &rootFolder, true)
if err != nil {
return nil, err
return err
}

folderCount := 0

// fetch existing folders
files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true)
if err != nil {
return nil, err
}
for _, file := range files {
if folder, ok := file.(*Cloud189Folder); ok {
folders.Value = folder
folders = folders.Next()
folderCount++
}
}

// create new folders
for folderCount < count {
var newFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"folderName": uuid.NewString(),
"familyId": y.FamilyID,
"parentId": rootFolder.GetID(),
})
}, &newFolder, true)
if err != nil {
return nil, err
}
folders.Value = &newFolder
folders = folders.Next()
folderCount++
}
return folders, nil
y.familyTransferFolder = &rootFolder
return nil
}

// clean up the transfer folder
func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error {
var tasks []BatchTaskInfo
r := y.familyTransferFolder
for p := r.Next(); p != r; p = p.Next() {
folder := p.Value.(*Cloud189Folder)

files, err := y.getFiles(ctx, folder.GetID(), true)
transferFolderId := y.familyTransferFolder.GetID()
for pageNum := 1; ; pageNum++ {
resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc")
if err != nil {
return err
}
for _, file := range files {
// stop once everything has been fetched
if resp.FileListAO.Count == 0 {
break
}

var tasks []BatchTaskInfo
for i := 0; i < len(resp.FileListAO.FolderList); i++ {
folder := resp.FileListAO.FolderList[i]
tasks = append(tasks, BatchTaskInfo{
FileId: folder.GetID(),
FileName: folder.GetName(),
IsFolder: BoolToNumber(folder.IsDir()),
})
}
for i := 0; i < len(resp.FileListAO.FileList); i++ {
file := resp.FileListAO.FileList[i]
tasks = append(tasks, BatchTaskInfo{
FileId: file.GetID(),
FileName: file.GetName(),
IsFolder: BoolToNumber(file.IsDir()),
})
}
}

if len(tasks) > 0 {
// delete
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
if err != nil {
if len(tasks) > 0 {
// delete
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err
}
return nil
}
@@ -1063,6 +1078,34 @@ func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId s
}
}

// permanently delete a file
func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error {
task := BatchTaskInfo{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
}
// delete the source file
resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task)
if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// empty the recycle bin
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
if err != nil {
return err
}
return nil
}

func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
@ -3,6 +3,7 @@ package alias
import (
"context"
"errors"
stdpath "path"
"strings"

"github.com/alist-org/alist/v3/internal/driver"
@ -126,8 +127,46 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, errs.ObjectNotFound
}

func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, parentDir, true)
if err == nil {
return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName))
}
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot make sub-dir")
}
return err
}

func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be moved")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be moved to")
}
if err != nil {
return err
}
return fs.Move(ctx, *srcPath, *dstPath)
}

func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
reqPath, err := d.getReqPath(ctx, srcObj)
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, srcObj, false)
if err == nil {
return fs.Rename(ctx, *reqPath, newName)
}
@ -137,8 +176,33 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
return err
}

func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be copied")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be copied to")
}
if err != nil {
return err
}
_, err = fs.Copy(ctx, *srcPath, *dstPath)
return err
}

func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
reqPath, err := d.getReqPath(ctx, obj)
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, obj, false)
if err == nil {
return fs.Remove(ctx, *reqPath)
}
@ -148,4 +212,110 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
return fs.PutDirectly(ctx, *reqPath, s)
}
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be Put")
}
return err
}

func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
return fs.PutURL(ctx, *reqPath, name, url)
}
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot offline download")
}
return err
}

func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
meta, err := d.getArchiveMeta(ctx, dst, sub, args)
if err == nil {
return meta, nil
}
}
return nil, errs.NotImplement
}

func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
l, err := d.listArchive(ctx, dst, sub, args)
if err == nil {
return l, nil
}
}
return nil, errs.NotImplement
}

func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// an alias can point at two drivers, one that supports driver extraction and one that does not; how do we stay compatible?
// if the archive lives on a driver without driver extraction, GetArchiveMeta returns errs.NotImplement, the extract URL is prefixed with /ae, and Extract is never called
// if the archive lives on a driver with driver extraction, GetArchiveMeta returns a valid value, the extract URL is prefixed with /ad, and Extract is called
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
if !args.Redirect && len(link.URL) > 0 {
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
}
}
return nil, errs.NotImplement
}

func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be decompressed")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be decompressed to")
}
if err != nil {
return err
}
_, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args)
return err
}

var _ driver.Driver = (*Alias)(nil)
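
Every new write method in the Alias driver follows the same shape: refuse early unless Writable is set, resolve the alias to a concrete path with getReqPath, and map errs.NotImplement (the ambiguous same-name case) to a readable error. A condensed sketch of that shape, with hypothetical resolve/do callbacks standing in for getReqPath and the fs call:

package sketch

import "errors"

var (
	errPermissionDenied = errors.New("permission denied") // stands in for errs.PermissionDenied
	errNotImplement     = errors.New("not implement")     // stands in for errs.NotImplement
)

// guardedWrite shows the gate-resolve-map shape shared by MakeDir, Move,
// Copy, Put, etc. resolve and do are hypothetical stand-ins.
func guardedWrite(writable bool, resolve func() (string, error), do func(path string) error) error {
	if !writable {
		return errPermissionDenied
	}
	path, err := resolve()
	if errors.Is(err, errNotImplement) {
		return errors.New("same-name targets are ambiguous; refusing to write")
	}
	if err != nil {
		return err
	}
	return do(path)
}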
@ -13,13 +13,14 @@ type Addition struct {
ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"`
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
Writable bool `json:"writable" type:"bool" default:"false"`
}

var config = driver.Config{
Name: "Alias",
LocalSort: true,
NoCache: true,
NoUpload: true,
NoUpload: false,
DefaultRoot: "/",
ProxyRangeOption: true,
}
@ -3,9 +3,11 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"

"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
@ -63,6 +65,7 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob
Size: obj.GetSize(),
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
HashInfo: obj.GetHash(),
}, nil
}

@ -124,9 +127,9 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs)
return link, err
}

func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) {
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) {
root, sub := d.getRootAndPath(obj.GetPath())
if sub == "" {
if sub == "" && !isParent {
return nil, errs.NotSupport
}
dsts, ok := d.pathMap[root]
@ -155,3 +158,68 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error)
}
return reqPath, nil
}

func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{
ArchiveArgs: args,
Refresh: true,
})
}
return nil, errs.NotImplement
}

func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{
ArchiveInnerArgs: args,
Refresh: true,
})
}
return nil, errs.NotImplement
}

func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
if _, ok := storage.(*Alias); !ok && !args.Redirect {
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
if common.ShouldProxy(storage, stdpath.Base(sub)) {
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(args.HttpReq),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
if args.HttpReq != nil && d.ProxyRange {
link.RangeReadCloser = common.NoProxyRange
}
return link, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
return nil, errs.NotImplement
}
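
getArchiveMeta, listArchive, and extract all branch on whether the underlying storage implements the optional driver.ArchiveReader interface, using a Go type assertion. A self-contained sketch of that optional-capability pattern; the interface and names here are illustrative, not the project's real ones:

package sketch

import "errors"

// Storage is the minimal capability every backend has.
type Storage interface {
	Name() string
}

// ArchiveReader is an optional capability; illustrative stand-in for
// the project's driver.ArchiveReader.
type ArchiveReader interface {
	ListArchive(path string) ([]string, error)
}

var errNotImplement = errors.New("not implement")

// listArchive dispatches to the optional interface when the concrete
// storage implements it, and falls back to a sentinel error otherwise.
func listArchive(s Storage, path string) ([]string, error) {
	if ar, ok := s.(ArchiveReader); ok {
		return ar.ListArchive(path)
	}
	return nil, errNotImplement
}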
@ -5,12 +5,14 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
@ -34,7 +36,7 @@ func (d *AListV3) GetAddition() driver.Additional {
func (d *AListV3) Init(ctx context.Context) error {
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
var resp common.Resp[MeResp]
_, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
_, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
req.SetResult(&resp)
})
if err != nil {
@ -48,15 +50,15 @@ func (d *AListV3) Init(ctx context.Context) error {
}
}
// re-get the user info
_, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
_, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
req.SetResult(&resp)
})
if err != nil {
return err
}
if resp.Data.Role == model.GUEST {
url := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(url)
u := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(u)
if err != nil {
return err
}
@ -74,7 +76,7 @@ func (d *AListV3) Drop(ctx context.Context) error {

func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var resp common.Resp[FsListResp]
_, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ListReq{
PageReq: model.PageReq{
Page: 1,
@ -116,7 +118,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
userAgent = base.UserAgent
}
}
_, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(FsGetReq{
Path: file.GetPath(),
Password: d.MetaPassword,
@ -131,7 +133,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}

func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
req.SetBody(MkdirOrLinkReq{
Path: path.Join(parentDir.GetPath(), dirName),
})
@ -140,7 +142,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
}

func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(MoveCopyReq{
SrcDir: path.Dir(srcObj.GetPath()),
DstDir: dstDir.GetPath(),
@ -151,7 +153,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}

func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
_, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
req.SetBody(RenameReq{
Path: srcObj.GetPath(),
Name: newName,
@ -161,7 +163,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string)
}

func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(MoveCopyReq{
SrcDir: path.Dir(srcObj.GetPath()),
DstDir: dstDir.GetPath(),
@ -172,7 +174,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
}

func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
_, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
req.SetBody(RemoveReq{
Dir: path.Dir(obj.GetPath()),
Names: []string{obj.GetName()},
@ -181,25 +183,29 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream)
func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
if err != nil {
return err
}
req.Header.Set("Authorization", d.Token)
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName()))
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
req.Header.Set("Password", d.MetaPassword)
if md5 := stream.GetHash().GetHash(utils.MD5); len(md5) > 0 {
if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
req.Header.Set("X-File-Md5", md5)
}
if sha1 := stream.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
req.Header.Set("X-File-Sha1", sha1)
}
if sha256 := stream.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
req.Header.Set("X-File-Sha256", sha256)
}

req.ContentLength = stream.GetSize()
req.ContentLength = s.GetSize()
// client := base.NewHttpClient()
// client.Timeout = time.Hour * 6
res, err := base.HttpClient.Do(req)
@ -228,6 +234,127 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
return nil
}
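
Put now wraps the upload body before handing it to http.NewRequestWithContext: an inner reader reports progress as bytes flow, and an outer reader applies the global upload rate limit. A standalone sketch of the progress-reporting half; the type here is illustrative, not the project's driver.ReaderUpdatingProgress:

package sketch

import (
	"io"
	"strings"
)

// progressReader reports cumulative progress as a percentage while the
// HTTP client drains the body.
type progressReader struct {
	r      io.Reader
	total  int64
	read   int64
	update func(percent float64)
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.read += int64(n)
	if p.total > 0 {
		p.update(float64(p.read) * 100 / float64(p.total))
	}
	return n, err
}

// wrap shows the usage: every Read on the returned reader fires update.
func wrap(body string, update func(float64)) io.Reader {
	return &progressReader{r: strings.NewReader(body), total: int64(len(body)), update: update}
}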
func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveMetaResp]
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var tree []model.ObjTree
if resp.Data.Content != nil {
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
for _, content := range resp.Data.Content {
tree = append(tree, &content)
}
}
return &model.ArchiveMetaInfo{
Comment: resp.Data.Comment,
Encrypted: resp.Data.Encrypted,
Tree: tree,
}, nil
}

func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveListResp]
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveListReq{
ArchiveMetaReq: ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
},
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
InnerPath: args.InnerPath,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}

func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotSupport
}
var resp common.Resp[ArchiveMetaResp]
_, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
})
})
if err != nil {
return nil, err
}
return &model.Link{
URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s",
resp.Data.RawURL,
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
resp.Data.Sign),
}, nil
}

func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
if !d.ForwardArchiveReq {
return errs.NotImplement
}
dir, name := path.Split(srcObj.GetPath())
_, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) {
req.SetBody(DecompressReq{
ArchivePass: args.Password,
CacheFull: args.CacheFull,
DstDir: dstDir.GetPath(),
InnerPath: args.InnerPath,
Name: []string{name},
PutIntoNewDir: args.PutIntoNewDir,
SrcDir: dir,
})
})
return err
}

//func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
@ -7,12 +7,13 @@ import (

type Addition struct {
driver.RootPath
Address string `json:"url" required:"true"`
MetaPassword string `json:"meta_password"`
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
Address string `json:"url" required:"true"`
MetaPassword string `json:"meta_password"`
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}

var config = driver.Config{
@ -4,6 +4,7 @@ import (
"time"

"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
)

type ListReq struct {
@ -81,3 +82,89 @@ type MeResp struct {
SsoId string `json:"sso_id"`
Otp bool `json:"otp"`
}

type ArchiveMetaReq struct {
ArchivePass string `json:"archive_pass"`
Password string `json:"password"`
Path string `json:"path"`
Refresh bool `json:"refresh"`
}

type TreeResp struct {
ObjResp
Children []TreeResp `json:"children"`
hashCache *utils.HashInfo
}

func (t *TreeResp) GetSize() int64 {
return t.Size
}

func (t *TreeResp) GetName() string {
return t.Name
}

func (t *TreeResp) ModTime() time.Time {
return t.Modified
}

func (t *TreeResp) CreateTime() time.Time {
return t.Created
}

func (t *TreeResp) IsDir() bool {
return t.ObjResp.IsDir
}

func (t *TreeResp) GetHash() utils.HashInfo {
return utils.FromString(t.HashInfo)
}

func (t *TreeResp) GetID() string {
return ""
}

func (t *TreeResp) GetPath() string {
return ""
}

func (t *TreeResp) GetChildren() []model.ObjTree {
ret := make([]model.ObjTree, 0, len(t.Children))
for _, child := range t.Children {
ret = append(ret, &child)
}
return ret
}

func (t *TreeResp) Thumb() string {
return t.ObjResp.Thumb
}

type ArchiveMetaResp struct {
Comment string `json:"comment"`
Encrypted bool `json:"encrypted"`
Content []TreeResp `json:"content"`
RawURL string `json:"raw_url"`
Sign string `json:"sign"`
}

type ArchiveListReq struct {
model.PageReq
ArchiveMetaReq
InnerPath string `json:"inner_path"`
}

type ArchiveListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
}

type DecompressReq struct {
ArchivePass string `json:"archive_pass"`
CacheFull bool `json:"cache_full"`
DstDir string `json:"dst_dir"`
InnerPath string `json:"inner_path"`
Name []string `json:"name"`
PutIntoNewDir bool `json:"put_into_new_dir"`
SrcDir string `json:"src_dir"`
}
@ -17,7 +17,7 @@ func (d *AListV3) login() error {
return nil
}
var resp common.Resp[LoginResp]
_, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(base.Json{
"username": d.Username,
"password": d.Password,
@ -31,7 +31,7 @@ func (d *AListV3) login() error {
return nil
}

func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) {
url := d.Address + "/api" + api
req := base.RestyClient.R()
req.SetHeader("Authorization", d.Token)
@ -40,22 +40,26 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
}
res, err := req.Execute(method, url)
if err != nil {
return nil, err
code := 0
if res != nil {
code = res.StatusCode()
}
return nil, code, err
}
log.Debugf("[alist_v3] response body: %s", res.String())
if res.StatusCode() >= 400 {
return nil, fmt.Errorf("request failed, status: %s", res.Status())
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
}
code := utils.Json.Get(res.Body(), "code").ToInt()
if code != 200 {
if (code == 401 || code == 403) && !utils.IsBool(retry...) {
err = d.login()
if err != nil {
return nil, err
return nil, code, err
}
return d.request(api, method, callback, true)
}
return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
}
return res.Body(), nil
return res.Body(), 200, nil
}
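
Widening request to return (body, code, error) is what lets GetArchiveMeta and ListArchive special-case code 202 as errs.WrongArchivePassword without parsing the error string. A minimal sketch of the same three-value pattern against net/http; the names here are illustrative:

package sketch

import (
	"errors"
	"io"
	"net/http"
)

var errWrongArchivePassword = errors.New("wrong archive password")

// request returns the body, the status code, and any error, so callers
// can branch on the code even when err is non-nil.
func request(url string) ([]byte, int, error) {
	res, err := http.Get(url)
	if err != nil {
		return nil, 0, err
	}
	defer res.Body.Close()
	body, err := io.ReadAll(res.Body)
	return body, res.StatusCode, err
}

// getArchiveMeta maps the 202 code to a typed sentinel error.
func getArchiveMeta(url string) ([]byte, error) {
	body, code, err := request(url)
	if code == 202 {
		return nil, errWrongArchivePassword
	}
	return body, err
}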
@ -14,13 +14,12 @@ import (
"os"
"time"

"github.com/alist-org/alist/v3/internal/stream"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
@ -194,7 +193,10 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
}
if d.RapidUpload {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
utils.CopyWithBufferN(buf, file, 1024)
_, err := utils.CopyWithBufferN(buf, file, 1024)
if err != nil {
return err
}
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
if localFile != nil {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
@ -286,6 +288,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
file.Reader = localFile
}

rateLimited := driver.NewLimitedUploadStream(ctx, file)
for i, partInfo := range resp.PartInfoList {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -294,7 +297,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if d.InternalUpload {
url = partInfo.InternalUploadUrl
}
req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
if err != nil {
return err
}
@ -303,7 +306,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
if err != nil {
return err
}
res.Body.Close()
_ = res.Body.Close()
if count > 0 {
up(float64(i) * 100 / float64(count))
}
@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
if err != nil {
return err
}
res.Body.Close()
_ = res.Body.Close()
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
return fmt.Errorf("upload status: %d", res.StatusCode)
}
@ -251,8 +251,9 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
rd = utils.NewMultiReadable(srd)
}
err = retry.Do(func() error {
rd.Reset()
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
@ -2,6 +2,7 @@ package drivers

import (
_ "github.com/alist-org/alist/v3/drivers/115"
_ "github.com/alist-org/alist/v3/drivers/115_open"
_ "github.com/alist-org/alist/v3/drivers/115_share"
_ "github.com/alist-org/alist/v3/drivers/123"
_ "github.com/alist-org/alist/v3/drivers/123_link"
@ -21,6 +22,7 @@ import (
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/doubao"
_ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/febbox"
_ "github.com/alist-org/alist/v3/drivers/ftp"
@ -12,6 +12,8 @@ import (
"strconv"
"time"

"golang.org/x/sync/semaphore"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -187,7 +189,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}

streamSize := stream.GetSize()
sliceSize := d.getSliceSize()
sliceSize := d.getSliceSize(streamSize)
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
lastBlockSize := streamSize % sliceSize
if streamSize > 0 && lastBlockSize == 0 {
@ -195,7 +197,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}

//cal md5 for first 256k data
const SliceSize int64 = 256 * 1024
const SliceSize int64 = 256 * utils.KB
// cal md5
blockList := make([]string, 0, count)
byteSize := sliceSize
@ -260,9 +262,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
// step 2: upload the slices
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(3),
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
@ -273,6 +276,10 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
@ -281,7 +288,8 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
err := d.uploadSlice(ctx, params, stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
if err != nil {
return err
}
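
The upload loop above layers a weighted semaphore on top of the error group so at most three slices are in flight at once, regardless of d.uploadThread. A self-contained sketch of that bounded-concurrency pattern using golang.org/x/sync's errgroup rather than the project's internal pkg/errgroup; the upload callback is a placeholder:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

// uploadAll starts one goroutine per part but lets only three hold the
// semaphore at a time, mirroring the driver's slice-upload loop.
func uploadAll(ctx context.Context, parts []int, upload func(context.Context, int) error) error {
	g, ctx := errgroup.WithContext(ctx)
	sem := semaphore.NewWeighted(3)
	for _, p := range parts {
		p := p // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			if err := sem.Acquire(ctx, 1); err != nil {
				return err
			}
			defer sem.Release(1)
			return upload(ctx, p)
		})
	}
	return g.Wait()
}

func main() {
	_ = uploadAll(context.Background(), []int{0, 1, 2, 3}, func(_ context.Context, p int) error {
		fmt.Println("uploading part", p)
		return nil
	})
}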
@ -8,16 +8,17 @@ import (
type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"`
driver.RootPath
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
}

var config = driver.Config{
@ -136,7 +136,7 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
return res, nil
}

func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) {
func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
var resp DownloadResp
params := map[string]string{
"method": "filemetas",
@ -164,7 +164,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
}, nil
}

func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) {
func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
var resp DownloadResp2
param := map[string]string{
"target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
@ -230,22 +230,72 @@ func joinTime(form map[string]string, ctime, mtime int64) {

const (
DefaultSliceSize int64 = 4 * utils.MB
VipSliceSize = 16 * utils.MB
SVipSliceSize = 32 * utils.MB
VipSliceSize int64 = 16 * utils.MB
SVipSliceSize int64 = 32 * utils.MB

MaxSliceNum = 2048 // the docs say 1024 (or say nothing), but testing shows 2048
SliceStep int64 = 1 * utils.MB
)

func (d *BaiduNetdisk) getSliceSize() int64 {
if d.CustomUploadPartSize != 0 {
return d.CustomUploadPartSize
}
switch d.vipType {
case 1:
return VipSliceSize
case 2:
return SVipSliceSize
default:
func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
// non-vip accounts are fixed at 4MB
if d.vipType == 0 {
if d.CustomUploadPartSize != 0 {
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
}
if filesize > MaxSliceNum*DefaultSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}

return DefaultSliceSize
}

if d.CustomUploadPartSize != 0 {
if d.CustomUploadPartSize < DefaultSliceSize {
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
return DefaultSliceSize
}

if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
return VipSliceSize
}

if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
return SVipSliceSize
}

return d.CustomUploadPartSize
}

maxSliceSize := DefaultSliceSize

switch d.vipType {
case 1:
maxSliceSize = VipSliceSize
case 2:
maxSliceSize = SVipSliceSize
}

// upload on low bandwidth
if d.LowBandwithUploadMode {
size := DefaultSliceSize

for size <= maxSliceSize {
if filesize <= MaxSliceNum*size {
return size
}

size += SliceStep
}
}

if filesize > MaxSliceNum*maxSliceSize {
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
}

return maxSliceSize
}

// func encodeURIComponent(str string) string {
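
In low-bandwidth mode the function grows the slice size in 1 MB steps from the 4 MB default until MaxSliceNum slices can cover the file, instead of jumping straight to the account tier's maximum. A small standalone calculation of the same search; the constants are copied from the diff, the demo file size is made up:

package main

import "fmt"

const (
	kb int64 = 1 << 10
	mb       = 1024 * kb

	defaultSliceSize = 4 * mb
	vipSliceSize     = 16 * mb
	maxSliceNum      = 2048
	sliceStep        = 1 * mb
)

// lowBandwidthSliceSize mirrors the loop in getSliceSize: pick the
// smallest slice size (in 1MB steps) that still fits the file into
// maxSliceNum slices.
func lowBandwidthSliceSize(filesize, maxSliceSize int64) int64 {
	for size := defaultSliceSize; size <= maxSliceSize; size += sliceStep {
		if filesize <= maxSliceNum*size {
			return size
		}
	}
	return maxSliceSize
}

func main() {
	// A 10 GiB file on a VIP account needs only 5 MB slices, not 16 MB.
	fmt.Println(lowBandwidthSliceSize(10<<30, vipSliceSize) / mb) // prints 5
}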
@ -13,6 +13,8 @@ import (
"strings"
"time"

"golang.org/x/sync/semaphore"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -314,6 +316,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
@ -325,6 +328,10 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
}

threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
uploadParams := map[string]string{
"method": "upload",
"path": params["path"],
@ -335,7 +342,8 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(uploadParams)
r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
r.SetFileReader("file", stream.GetName(),
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
}, nil)
if err != nil {
return err
@ -6,6 +6,7 @@ import (
"time"

"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/net"
"github.com/go-resty/resty/v2"
)

@ -26,7 +27,7 @@ func InitClient() {
NoRedirectClient.SetHeader("user-agent", UserAgent)

RestyClient = NewRestyClient()
HttpClient = NewHttpClient()
HttpClient = net.NewHttpClient()
}

func NewRestyClient() *resty.Client {
@ -38,13 +39,3 @@ func NewRestyClient() *resty.Client {
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
return client
}

func NewHttpClient() *http.Client {
return &http.Client{
Timeout: time.Hour * 48,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
},
}
}
@ -215,7 +215,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
return nil
}

func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
var resp UploadDataRsp
_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
}, &resp)
@ -227,11 +227,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
filePart, err := writer.CreateFormFile("file", stream.GetName())
filePart, err := writer.CreateFormFile("file", file.GetName())
if err != nil {
return err
}
_, err = utils.CopyWithBuffer(filePart, stream)
_, err = utils.CopyWithBuffer(filePart, file)
if err != nil {
return err
}
@ -248,7 +248,14 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
if err != nil {
return err
}
req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body)
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: body,
Size: int64(body.Len()),
},
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
if err != nil {
return err
}
@ -5,7 +5,6 @@ import (
"io"
"net/http"
"path"
"strconv"
"strings"

"github.com/alist-org/alist/v3/drivers/base"
@ -147,7 +146,7 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
"size": stream.GetSize(),
"name": stream.GetName(),
"policy_id": r.Policy.Id,
"last_modified": stream.ModTime().Unix(),
"last_modified": stream.ModTime().UnixMilli(),
}

// fetch the upload session info
@ -166,39 +165,13 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
case "remote": // slave node storage
err = d.upRemote(ctx, stream, u, up)
case "local": // local storage
var chunkSize = u.ChunkSize
var buf []byte
var chunk int
for {
var n int
buf = make([]byte, chunkSize)
n, err = io.ReadAtLeast(stream, buf, chunkSize)
if err != nil && err != io.ErrUnexpectedEOF {
if err == io.EOF {
return nil
}
return err
}
if n == 0 {
break
}
buf = buf[:n]
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetHeader("Content-Length", strconv.Itoa(n))
req.SetBody(buf)
}, nil)
if err != nil {
break
}
chunk++
}
err = d.upLocal(ctx, stream, u, up)
default:
err = errs.NotImplement
}
if err != nil {
// delete the failed upload session
err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
_ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
return err
}
return nil
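
The extracted upLocal helper (see the util.go hunk further down) replaces the old ReadAtLeast loop with a fixed-size chunk loop driven by the stream's known size: each iteration reads min(chunkSize, remaining) bytes and posts them as chunk N. A minimal standalone sketch of that loop; the postChunk callback is a placeholder for the driver's request call:

package sketch

import (
	"bytes"
	"io"
)

// uploadChunks reads a stream of known total size in fixed-size chunks
// and hands each chunk, with its index, to postChunk; mirrors upLocal.
func uploadChunks(r io.Reader, total, chunkSize int64, postChunk func(chunk int, data []byte) error) error {
	var finish int64
	for chunk := 0; finish < total; chunk++ {
		size := chunkSize
		if left := total - finish; left < size {
			size = left
		}
		buf := make([]byte, size)
		if _, err := io.ReadFull(r, buf); err != nil {
			return err
		}
		if err := postChunk(chunk, buf); err != nil {
			return err
		}
		finish += size
	}
	return nil
}

// Example: split 10 bytes into 4-byte chunks (4, 4, 2).
var _ = uploadChunks(bytes.NewReader(make([]byte, 10)), 10, 4, func(int, []byte) error { return nil })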
@ -27,17 +27,20 @@ import (

const loginPath = "/user/session"

func (d *Cloudreve) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
}
return base.UserAgent
}

func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
u := d.Address + "/api/v3" + path
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "application/json, text/plain, */*",
"User-Agent": ua,
"User-Agent": d.getUA(),
})

var r Resp
@ -100,7 +103,7 @@ func (d *Cloudreve) login() error {
if err == nil {
break
}
if err != nil && err.Error() != "CAPTCHA not match." {
if err.Error() != "CAPTCHA not match." {
break
}
}
@ -161,15 +164,11 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
if !d.Addition.EnableThumbAndFolderSize {
return model.Thumbnail{}, nil
}
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.NoRedirectClient.R()
req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"User-Agent": ua,
"User-Agent": d.getUA(),
})
resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
if err != nil {
@ -180,6 +179,43 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
}, nil
}

func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetHeader("User-Agent", d.getUA())
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
}, nil)
if err != nil {
break
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
return nil
}

func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
uploadUrl := u.UploadURLs[0]
credential := u.Credential
@ -202,19 +238,22 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
if err != nil {
return err
}
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData))
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
res.Body.Close()
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
@ -241,13 +280,15 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
@ -256,10 +297,10 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
data, _ := io.ReadAll(res.Body)
res.Body.Close()
_ = res.Body.Close()
return errors.New(string(data))
}
res.Body.Close()
_ = res.Body.Close()
up(float64(finish) * 100 / float64(stream.GetSize()))
}
// send the callback request after a successful upload
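
upOneDrive now sets req.ContentLength directly instead of a Content-Length header (Go's net/http ignores that header on outgoing requests) and advertises each chunk's position with a Content-Range header. A small sketch of composing that header value for sequential chunks; the demo numbers are illustrative:

package main

import "fmt"

// contentRange renders the header value OneDrive upload sessions expect
// for a chunk: "bytes start-end/total" with an inclusive end offset.
func contentRange(offset, size, total int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", offset, offset+size-1, total)
}

func main() {
	// Second 4 MiB chunk of a 10 MiB file.
	fmt.Println(contentRange(4<<20, 4<<20, 10<<20)) // bytes 4194304-8388607/10485760
}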
@ -263,12 +263,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}
rrc := remoteLink.RangeReadCloser
if len(remoteLink.URL) > 0 {

rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
if err != nil {
return nil, err
}
@ -287,8 +282,9 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
// safe to return directly; Close is not called when reading finishes, only when the connection is dropped
return remoteLink.MFile, nil
// keep reusing the same MFile and close it at the end
remoteClosers.Add(remoteLink.MFile)
return io.NopCloser(remoteLink.MFile), nil
}

return nil, errs.NotSupport
@ -304,7 +300,6 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (

resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
Header: remoteLink.Header,
RangeReadCloser: resultRangeReadCloser,
Expiration: remoteLink.Expiration,
}
174
drivers/doubao/driver.go
Normal file
@ -0,0 +1,174 @@
package doubao

import (
"context"
"errors"
"time"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)

type Doubao struct {
model.Storage
Addition
}

func (d *Doubao) Config() driver.Config {
return config
}

func (d *Doubao) GetAddition() driver.Additional {
return &d.Addition
}

func (d *Doubao) Init(ctx context.Context) error {
// TODO login / refresh token
//op.MustSaveDriverStorage(d)
return nil
}

func (d *Doubao) Drop(ctx context.Context) error {
return nil
}

func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var files []model.Obj
var r NodeInfoResp
_, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_id": dir.GetID(),
"need_full_path": false,
})
}, &r)
if err != nil {
return nil, err
}

for _, child := range r.Data.Children {
files = append(files, &Object{
Object: model.Object{
ID: child.ID,
Path: child.ParentID,
Name: child.Name,
Size: int64(child.Size),
Modified: time.Unix(int64(child.UpdateTime), 0),
Ctime: time.Unix(int64(child.CreateTime), 0),
IsFolder: child.NodeType == 1,
},
Key: child.Key,
})
}
return files, nil
}

func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if u, ok := file.(*Object); ok {
var r GetFileUrlResp
_, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{u.Key},
"type": "file",
})
}, &r)
if err != nil {
return nil, err
}
return &model.Link{
URL: r.Data.FileUrls[0].MainURL,
}, nil
}
return nil, errors.New("can't convert obj to URL")
}

func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{
"local_id": uuid.New().String(),
"name": dirName,
"parent_id": parentDir.GetID(),
"node_type": 1,
},
},
})
}, &r)
return err
}

func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
var r UploadNodeResp
_, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{"id": srcObj.GetID()},
},
"current_parent_id": srcObj.GetPath(),
"target_parent_id": dstDir.GetID(),
})
}, &r)
return err
}

func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
var r BaseResp
_, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_id": srcObj.GetID(),
"node_name": newName,
})
}, &r)
return err
}

func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO copy obj, optional
return nil, errs.NotImplement
}

func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
var r BaseResp
_, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
}, &r)
return err
}

func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// TODO upload file, optional
return nil, errs.NotImplement
}

func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}

func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}

func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}

func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}

//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

var _ driver.Driver = (*Doubao)(nil)
34 drivers/doubao/meta.go Normal file
@@ -0,0 +1,34 @@
package doubao

import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
    // Usually one of two
    // driver.RootPath
    driver.RootID
    // define other
    Cookie string `json:"cookie" type:"text"`
}

var config = driver.Config{
    Name:              "Doubao",
    LocalSort:         true,
    OnlyLocal:         false,
    OnlyProxy:         false,
    NoCache:           false,
    NoUpload:          true,
    NeedMs:            false,
    DefaultRoot:       "0",
    CheckStatus:       false,
    Alert:             "",
    NoOverwriteUpload: false,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Doubao{}
    })
}
64 drivers/doubao/types.go Normal file
@@ -0,0 +1,64 @@
package doubao

import "github.com/alist-org/alist/v3/internal/model"

type BaseResp struct {
    Code int    `json:"code"`
    Msg  string `json:"msg"`
}

type NodeInfoResp struct {
    BaseResp
    Data struct {
        NodeInfo   NodeInfo   `json:"node_info"`
        Children   []NodeInfo `json:"children"`
        NextCursor string     `json:"next_cursor"`
        HasMore    bool       `json:"has_more"`
    } `json:"data"`
}

type NodeInfo struct {
    ID                  string `json:"id"`
    Name                string `json:"name"`
    Key                 string `json:"key"`
    NodeType            int    `json:"node_type"` // 0: file, 1: folder
    Size                int    `json:"size"`
    Source              int    `json:"source"`
    NameReviewStatus    int    `json:"name_review_status"`
    ContentReviewStatus int    `json:"content_review_status"`
    RiskReviewStatus    int    `json:"risk_review_status"`
    ConversationID      string `json:"conversation_id"`
    ParentID            string `json:"parent_id"`
    CreateTime          int    `json:"create_time"`
    UpdateTime          int    `json:"update_time"`
}

type GetFileUrlResp struct {
    BaseResp
    Data struct {
        FileUrls []struct {
            URI     string `json:"uri"`
            MainURL string `json:"main_url"`
            BackURL string `json:"back_url"`
        } `json:"file_urls"`
    } `json:"data"`
}

type UploadNodeResp struct {
    BaseResp
    Data struct {
        NodeList []struct {
            LocalID  string `json:"local_id"`
            ID       string `json:"id"`
            ParentID string `json:"parent_id"`
            Name     string `json:"name"`
            Key      string `json:"key"`
            NodeType int    `json:"node_type"` // 0: file, 1: folder
        } `json:"node_list"`
    } `json:"data"`
}

type Object struct {
    model.Object
    Key string
}
38 drivers/doubao/util.go Normal file
@@ -0,0 +1,38 @@
package doubao

import (
    "errors"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/pkg/utils"
    log "github.com/sirupsen/logrus"
)

// do other things not defined in the Driver interface
func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    url := "https://www.doubao.com" + path
    req := base.RestyClient.R()
    req.SetHeader("Cookie", d.Cookie)
    if callback != nil {
        callback(req)
    }
    var r BaseResp
    req.SetResult(&r)
    res, err := req.Execute(method, url)
    log.Debugln(res.String())
    if err != nil {
        return nil, err
    }

    // check the business status code first (it takes precedence over the HTTP status code)
    if r.Code != 0 {
        return res.Body(), errors.New(r.Msg)
    }
    if resp != nil {
        err = utils.Json.Unmarshal(res.Body(), resp)
        if err != nil {
            return nil, err
        }
    }
    return res.Body(), nil
}
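For context, a minimal sketch of how driver methods consume this helper: a typed response struct is passed as resp, and the helper unmarshals the body into it after the business-code check. The endpoint and request body below are illustrative placeholders only, not part of this changeset:

    // Illustrative sketch (hypothetical endpoint and request shape):
    var r NodeInfoResp
    _, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) {
        req.SetBody(base.Json{"node_id": dirID}) // assumed request body
    }, &r)
    if err != nil {
        return nil, err
    }
    // r.Data.Children now holds the decoded NodeInfo entries.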
@@ -191,7 +191,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
    }

    url := d.contentBase + "/2/files/upload_session/append_v2"
    reader := io.LimitReader(stream, PartSize)
    reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, PartSize))
    req, err := http.NewRequest(http.MethodPost, url, reader)
    if err != nil {
        log.Errorf("failed to update file when append to upload session, err: %+v", err)
@@ -219,13 +219,8 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
            return err
        }
        _ = res.Body.Close()

        if count > 0 {
            up(float64(i+1) * 100 / float64(count))
        }

        up(float64(i+1) * 100 / float64(count))
        offset += byteSize

    }
    // 3.finish
    toPath := dstDir.GetPath() + "/" + stream.GetName()
@@ -3,6 +3,7 @@ package febbox
import (
    "encoding/json"
    "errors"
    "fmt"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/go-resty/resty/v2"
@@ -135,6 +136,9 @@ func (d *FebBox) getDownloadLink(id string, ip string) (string, error) {
    if err = json.Unmarshal(res, &fileDownloadResp); err != nil {
        return "", err
    }
    if len(fileDownloadResp.Data) == 0 {
        return "", fmt.Errorf("can not get download link, code:%d, msg:%s", fileDownloadResp.Code, fileDownloadResp.Msg)
    }

    return fileDownloadResp.Data[0].DownloadURL, nil
}
@@ -114,13 +114,15 @@ func (d *FTP) Remove(ctx context.Context, obj model.Obj) error {
        }
    }

func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
    if err := d.login(); err != nil {
        return err
    }
    // TODO: support cancel
    path := stdpath.Join(dstDir.GetPath(), stream.GetName())
    return d.conn.Stor(encode(path, d.Encoding), stream)
    path := stdpath.Join(dstDir.GetPath(), s.GetName())
    return d.conn.Stor(encode(path, d.Encoding), driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
        Reader:         s,
        UpdateProgress: up,
    }))
}

var _ driver.Driver = (*FTP)(nil)
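A pattern worth noting, since it recurs throughout this changeset (Dropbox, FTP, GitHub, Google Drive, HalalCloud, ILanZou): upload bodies are wrapped in driver.NewLimitedUploadStream for context-aware, rate-limited reads, and in driver.ReaderUpdatingProgress when a progress callback is available. A minimal sketch, assuming only the wrapper types already used in the hunks above:

    // src is any io.Reader; size is the total byte count; up is the
    // driver.UpdateProgress callback. SimpleReaderWithSize supplies the
    // size that ReaderUpdatingProgress needs for percentage math.
    reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
        Reader: &driver.SimpleReaderWithSize{
            Reader: src,
            Size:   size,
        },
        UpdateProgress: up,
    })
    // reader is then handed to the transport, e.g. http.NewRequest(method, url, reader).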
@@ -3,7 +3,6 @@ package github

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "io"
    "net/http"
@@ -12,12 +11,14 @@ import (
    "sync"
    "text/template"

    "github.com/ProtonMail/go-crypto/openpgp"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/go-resty/resty/v2"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

@@ -33,6 +34,7 @@ type Github struct {
    moveMsgTmpl *template.Template
    isOnBranch  bool
    commitMutex sync.Mutex
    pgpEntity   *openpgp.Entity
}

func (d *Github) Config() driver.Config {
@@ -84,10 +86,13 @@ func (d *Github) Init(ctx context.Context) error {
    }
    d.client = base.NewRestyClient().
        SetHeader("Accept", "application/vnd.github.object+json").
        SetHeader("Authorization", "Bearer "+d.Token).
        SetHeader("X-GitHub-Api-Version", "2022-11-28").
        SetLogger(log.StandardLogger()).
        SetDebug(false)
    token := strings.TrimSpace(d.Token)
    if token != "" {
        d.client = d.client.SetHeader("Authorization", "Bearer "+token)
    }
    if d.Ref == "" {
        repo, err := d.getRepo()
        if err != nil {
@@ -99,6 +104,26 @@ func (d *Github) Init(ctx context.Context) error {
        _, err = d.getBranchHead()
        d.isOnBranch = err == nil
    }
    if d.GPGPrivateKey != "" {
        if d.CommitterName == "" || d.AuthorName == "" {
            user, e := d.getAuthenticatedUser()
            if e != nil {
                return e
            }
            if d.CommitterName == "" {
                d.CommitterName = user.Name
                d.CommitterEmail = user.Email
            }
            if d.AuthorName == "" {
                d.AuthorName = user.Name
                d.AuthorEmail = user.Email
            }
        }
        d.pgpEntity, err = loadPrivateKey(d.GPGPrivateKey, d.GPGKeyPassphrase)
        if err != nil {
            return err
        }
    }
    return nil
}

@@ -148,8 +173,13 @@ func (d *Github) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
    if obj.Type == "submodule" {
        return nil, errors.New("cannot download a submodule")
    }
    url := obj.DownloadURL
    ghProxy := strings.TrimSpace(d.Addition.GitHubProxy)
    if ghProxy != "" {
        url = strings.Replace(url, "https://raw.githubusercontent.com", ghProxy, 1)
    }
    return &model.Link{
        URL: obj.DownloadURL,
        URL: url,
    }, nil
}

@@ -166,10 +196,39 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
    if parent.Entries == nil {
        return errs.NotFolder
    }
    // if parent folder contains .gitkeep only, mark it and delete .gitkeep later
    gitKeepSha := ""
    subDirSha, err := d.newTree("", []interface{}{
        map[string]string{
            "path":    ".gitkeep",
            "mode":    "100644",
            "type":    "blob",
            "content": "",
        },
    })
    if err != nil {
        return err
    }
    newTree := make([]interface{}, 0, 2)
    newTree = append(newTree, TreeObjReq{
        Path: dirName,
        Mode: "040000",
        Type: "tree",
        Sha:  subDirSha,
    })
    if len(parent.Entries) == 1 && parent.Entries[0].Name == ".gitkeep" {
        gitKeepSha = parent.Entries[0].Sha
        newTree = append(newTree, TreeObjReq{
            Path: ".gitkeep",
            Mode: "100644",
            Type: "blob",
            Sha:  nil,
        })
    }
    newSha, err := d.newTree(parent.Sha, newTree)
    if err != nil {
        return err
    }
    rootSha, err := d.renewParentTrees(parentDir.GetPath(), parent.Sha, newSha, "/")
    if err != nil {
        return err
    }

    commitMessage, err := getMessage(d.mkdirMsgTmpl, &MessageTemplateVars{
@@ -182,13 +241,7 @@ func (d *Github) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
    if err != nil {
        return err
    }
    if err = d.createGitKeep(stdpath.Join(parentDir.GetPath(), dirName), commitMessage); err != nil {
        return err
    }
    if gitKeepSha != "" {
        err = d.delete(stdpath.Join(parentDir.GetPath(), ".gitkeep"), gitKeepSha, commitMessage)
    }
    return err
    return d.commit(commitMessage, rootSha)
}

func (d *Github) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -631,33 +684,15 @@ func (d *Github) get(path string) (*Object, error) {
    return &resp, err
}

func (d *Github) createGitKeep(path, message string) error {
    body := map[string]interface{}{
        "message": message,
        "content": "",
        "branch":  d.Ref,
    }
    d.addCommitterAndAuthor(&body)

    res, err := d.client.R().SetBody(body).Put(d.getContentApiUrl(stdpath.Join(path, ".gitkeep")))
    if err != nil {
        return err
    }
    if res.StatusCode() != 200 && res.StatusCode() != 201 {
        return toErr(res)
    }
    return nil
}

func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) (string, error) {
func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) {
    beforeContent := "{\"encoding\":\"base64\",\"content\":\""
    afterContent := "\"}"
    length := int64(len(beforeContent)) + calculateBase64Length(stream.GetSize()) + int64(len(afterContent))
    length := int64(len(beforeContent)) + calculateBase64Length(s.GetSize()) + int64(len(afterContent))
    beforeContentReader := strings.NewReader(beforeContent)
    contentReader, contentWriter := io.Pipe()
    go func() {
        encoder := base64.NewEncoder(base64.StdEncoding, contentWriter)
        if _, err := utils.CopyWithBuffer(encoder, stream); err != nil {
        if _, err := utils.CopyWithBuffer(encoder, s); err != nil {
            _ = contentWriter.CloseWithError(err)
            return
        }
@@ -667,23 +702,29 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
    afterContentReader := strings.NewReader(afterContent)
    req, err := http.NewRequestWithContext(ctx, http.MethodPost,
        fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo),
        &ReaderWithProgress{
            Reader:   io.MultiReader(beforeContentReader, contentReader, afterContentReader),
            Length:   length,
            Progress: up,
        })
        driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
            Reader: &driver.SimpleReaderWithSize{
                Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
                Size:   length,
            },
            UpdateProgress: up,
        }))
    if err != nil {
        return "", err
    }
    req.Header.Set("Accept", "application/vnd.github+json")
    req.Header.Set("Authorization", "Bearer "+d.Token)
    req.Header.Set("X-GitHub-Api-Version", "2022-11-28")
    token := strings.TrimSpace(d.Token)
    if token != "" {
        req.Header.Set("Authorization", "Bearer "+token)
    }
    req.ContentLength = length

    res, err := base.HttpClient.Do(req)
    if err != nil {
        return "", err
    }
    defer res.Body.Close()
    resBody, err := io.ReadAll(res.Body)
    if err != nil {
        return "", err
@@ -703,23 +744,6 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
    return resp.Sha, nil
}

func (d *Github) delete(path, sha, message string) error {
    body := map[string]interface{}{
        "message": message,
        "sha":     sha,
        "branch":  d.Ref,
    }
    d.addCommitterAndAuthor(&body)
    res, err := d.client.R().SetBody(body).Delete(d.getContentApiUrl(path))
    if err != nil {
        return err
    }
    if res.StatusCode() != 200 {
        return toErr(res)
    }
    return nil
}

func (d *Github) renewParentTrees(path, prevSha, curSha, until string) (string, error) {
    for path != until {
        path = stdpath.Dir(path)
@@ -781,11 +805,11 @@ func (d *Github) getTreeDirectly(path string) (*TreeResp, string, error) {
}

func (d *Github) newTree(baseSha string, tree []interface{}) (string, error) {
    res, err := d.client.R().
        SetBody(&TreeReq{
            BaseTree: baseSha,
            Trees:    tree,
        }).
    body := &TreeReq{Trees: tree}
    if baseSha != "" {
        body.BaseTree = baseSha
    }
    res, err := d.client.R().SetBody(body).
        Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/trees", d.Owner, d.Repo))
    if err != nil {
        return "", err
@@ -808,6 +832,13 @@ func (d *Github) commit(message, treeSha string) error {
        "parents": []string{oldCommit},
    }
    d.addCommitterAndAuthor(&body)
    if d.pgpEntity != nil {
        signature, e := signCommit(&body, d.pgpEntity)
        if e != nil {
            return e
        }
        body["signature"] = signature
    }
    res, err := d.client.R().SetBody(body).Post(fmt.Sprintf("https://api.github.com/repos/%s/%s/git/commits", d.Owner, d.Repo))
    if err != nil {
        return err
@@ -911,6 +942,21 @@ func (d *Github) getRepo() (*RepoResp, error) {
    return &resp, nil
}

func (d *Github) getAuthenticatedUser() (*UserResp, error) {
    res, err := d.client.R().Get("https://api.github.com/user")
    if err != nil {
        return nil, err
    }
    if res.StatusCode() != 200 {
        return nil, toErr(res)
    }
    resp := &UserResp{}
    if err = utils.Json.Unmarshal(res.Body(), resp); err != nil {
        return nil, err
    }
    return resp, nil
}

func (d *Github) addCommitterAndAuthor(m *map[string]interface{}) {
    if d.CommitterName != "" {
        committer := map[string]string{
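For reference, the reworked MakeDir above replaces the old two-request contents-API flow (create .gitkeep, then delete the parent's .gitkeep) with a single commit built through the Git data API: a subtree containing only an empty .gitkeep blob is created, grafted onto the parent tree, and ancestor trees are rewritten up to the root. The trees request it sends looks roughly like this (SHAs illustrative; per the Git Trees API, a null sha deletes an entry, which is how the parent's own .gitkeep is dropped):

    POST /repos/{owner}/{repo}/git/trees
    {
      "base_tree": "<parent tree sha>",
      "tree": [
        {"path": "newdir", "mode": "040000", "type": "tree", "sha": "<subtree sha>"},
        {"path": ".gitkeep", "mode": "100644", "type": "blob", "sha": null}
      ]
    }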
@@ -7,20 +7,23 @@ import (

type Addition struct {
    driver.RootPath
    Token           string `json:"token" type:"string" required:"true"`
    Owner           string `json:"owner" type:"string" required:"true"`
    Repo            string `json:"repo" type:"string" required:"true"`
    Ref             string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."`
    CommitterName   string `json:"committer_name" type:"string"`
    CommitterEmail  string `json:"committer_email" type:"string"`
    AuthorName      string `json:"author_name" type:"string"`
    AuthorEmail     string `json:"author_email" type:"string"`
    MkdirCommitMsg  string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"`
    DeleteCommitMsg string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"`
    PutCommitMsg    string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"`
    RenameCommitMsg string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"`
    CopyCommitMsg   string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"`
    MoveCommitMsg   string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"`
    Token            string `json:"token" type:"string" required:"true"`
    Owner            string `json:"owner" type:"string" required:"true"`
    Repo             string `json:"repo" type:"string" required:"true"`
    Ref              string `json:"ref" type:"string" help:"A branch, a tag or a commit SHA, main branch by default."`
    GitHubProxy      string `json:"gh_proxy" type:"string" help:"GitHub proxy, e.g. https://ghproxy.net/raw.githubusercontent.com or https://gh-proxy.com/raw.githubusercontent.com"`
    GPGPrivateKey    string `json:"gpg_private_key" type:"text"`
    GPGKeyPassphrase string `json:"gpg_key_passphrase" type:"string"`
    CommitterName    string `json:"committer_name" type:"string"`
    CommitterEmail   string `json:"committer_email" type:"string"`
    AuthorName       string `json:"author_name" type:"string"`
    AuthorEmail      string `json:"author_email" type:"string"`
    MkdirCommitMsg   string `json:"mkdir_commit_message" type:"text" default:"{{.UserName}} mkdir {{.ObjPath}}"`
    DeleteCommitMsg  string `json:"delete_commit_message" type:"text" default:"{{.UserName}} remove {{.ObjPath}}"`
    PutCommitMsg     string `json:"put_commit_message" type:"text" default:"{{.UserName}} upload {{.ObjPath}}"`
    RenameCommitMsg  string `json:"rename_commit_message" type:"text" default:"{{.UserName}} rename {{.ObjPath}} to {{.TargetName}}"`
    CopyCommitMsg    string `json:"copy_commit_message" type:"text" default:"{{.UserName}} copy {{.ObjPath}} to {{.TargetPath}}"`
    MoveCommitMsg    string `json:"move_commit_message" type:"text" default:"{{.UserName}} move {{.ObjPath}} to {{.TargetPath}}"`
}

var config = driver.Config{
@@ -79,7 +79,7 @@ type TreeResp struct {
}

type TreeReq struct {
    BaseTree string        `json:"base_tree"`
    BaseTree interface{}   `json:"base_tree,omitempty"`
    Trees    []interface{} `json:"tree"`
}

@@ -100,3 +100,8 @@ type UpdateRefReq struct {
type RepoResp struct {
    DefaultBranch string `json:"default_branch"`
}

type UserResp struct {
    Name  string `json:"name"`
    Email string `json:"email"`
}
@@ -1,32 +1,22 @@
package github

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "strings"
    "text/template"
    "time"

    "github.com/ProtonMail/go-crypto/openpgp"
    "github.com/ProtonMail/go-crypto/openpgp/armor"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/go-resty/resty/v2"
    "io"
    "math"
    "strings"
    "text/template"
)

type ReaderWithProgress struct {
    Reader   io.Reader
    Length   int64
    Progress func(percentage float64)
    offset   int64
}

func (r *ReaderWithProgress) Read(p []byte) (int, error) {
    n, err := r.Reader.Read(p)
    r.offset += int64(n)
    r.Progress(math.Min(100.0, float64(r.offset)/float64(r.Length)*100.0))
    return n, err
}

type MessageTemplateVars struct {
    UserName string
    ObjName  string
@@ -113,3 +103,65 @@ func getUsername(ctx context.Context) string {
    }
    return user.Username
}

func loadPrivateKey(key, passphrase string) (*openpgp.Entity, error) {
    entityList, err := openpgp.ReadArmoredKeyRing(strings.NewReader(key))
    if err != nil {
        return nil, err
    }
    if len(entityList) < 1 {
        return nil, fmt.Errorf("no keys found in key ring")
    }
    entity := entityList[0]

    pass := []byte(passphrase)
    if entity.PrivateKey != nil && entity.PrivateKey.Encrypted {
        if err = entity.PrivateKey.Decrypt(pass); err != nil {
            return nil, fmt.Errorf("password incorrect: %+v", err)
        }
    }
    for _, subKey := range entity.Subkeys {
        if subKey.PrivateKey != nil && subKey.PrivateKey.Encrypted {
            if err = subKey.PrivateKey.Decrypt(pass); err != nil {
                return nil, fmt.Errorf("password incorrect: %+v", err)
            }
        }
    }
    return entity, nil
}

func signCommit(m *map[string]interface{}, entity *openpgp.Entity) (string, error) {
    var commit strings.Builder
    commit.WriteString(fmt.Sprintf("tree %s\n", (*m)["tree"].(string)))
    parents := (*m)["parents"].([]string)
    for _, p := range parents {
        commit.WriteString(fmt.Sprintf("parent %s\n", p))
    }
    now := time.Now()
    _, offset := now.Zone()
    hour := offset / 3600
    author := (*m)["author"].(map[string]string)
    commit.WriteString(fmt.Sprintf("author %s <%s> %d %+03d00\n", author["name"], author["email"], now.Unix(), hour))
    author["date"] = now.Format(time.RFC3339)
    committer := (*m)["committer"].(map[string]string)
    commit.WriteString(fmt.Sprintf("committer %s <%s> %d %+03d00\n", committer["name"], committer["email"], now.Unix(), hour))
    committer["date"] = now.Format(time.RFC3339)
    commit.WriteString(fmt.Sprintf("\n%s", (*m)["message"].(string)))
    data := commit.String()

    var sigBuffer bytes.Buffer
    err := openpgp.DetachSign(&sigBuffer, entity, strings.NewReader(data), nil)
    if err != nil {
        return "", fmt.Errorf("signing failed: %v", err)
    }
    var armoredSig bytes.Buffer
    armorWriter, err := armor.Encode(&armoredSig, "PGP SIGNATURE", nil)
    if err != nil {
        return "", err
    }
    if _, err = io.Copy(armorWriter, &sigBuffer); err != nil {
        return "", err
    }
    _ = armorWriter.Close()
    return armoredSig.String(), nil
}
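The string signCommit assembles mirrors git's raw commit object layout, which is what GitHub verifies the detached signature against; the armored result is then sent in the commit body's signature field, as the commit() hunk earlier shows. An illustrative payload (values made up):

    tree 5c1f3a9e...
    parent 9d2e44b1...
    author Alice <alice@example.com> 1738000000 +0800
    committer Alice <alice@example.com> 1738000000 +0800

    Alice upload /docs/readme.md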
@@ -4,8 +4,6 @@ import (
    "context"
    "fmt"
    "net/http"
    "time"

    "strings"

    "github.com/alist-org/alist/v3/internal/driver"
@@ -18,7 +16,7 @@ type GithubReleases struct {
    model.Storage
    Addition

    releases []Release
    points []MountPoint
}

func (d *GithubReleases) Config() driver.Config {
@@ -30,17 +28,11 @@ func (d *GithubReleases) GetAddition() driver.Additional {
}

func (d *GithubReleases) Init(ctx context.Context) error {
    SetHeader(d.Addition.Token)
    repos, err := ParseRepos(d.Addition.RepoStructure, d.Addition.ShowAllVersion)
    if err != nil {
        return err
    }
    d.releases = repos
    d.ParseRepos(d.Addition.RepoStructure)
    return nil
}

func (d *GithubReleases) Drop(ctx context.Context) error {
    ClearCache()
    return nil
}

@@ -48,67 +40,83 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
    files := make([]File, 0)
    path := fmt.Sprintf("/%s", strings.Trim(dir.GetPath(), "/"))

    for _, repo := range d.releases {
        if repo.Path == path { // same as the mount point path
            resp, err := GetRepoReleaseInfo(repo.RepoName, repo.ID, path, d.Storage.CacheExpiration)
            if err != nil {
                return nil, err
            }
            files = append(files, resp.Files...)
    for i := range d.points {
        point := &d.points[i]

            if d.Addition.ShowReadme {
                resp, err := GetGithubOtherFile(repo.RepoName, path, d.Storage.CacheExpiration)
                if err != nil {
                    return nil, err
        if !d.Addition.ShowAllVersion { // latest
            point.RequestRelease(d.GetRequest, args.Refresh)

            if point.Point == path { // same as the mount point path
                files = append(files, point.GetLatestRelease()...)
                if d.Addition.ShowReadme {
                    files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
                }
            } else if strings.HasPrefix(point.Point, path) { // parent directory of the mount point
                nextDir := GetNextDir(point.Point, path)
                if nextDir == "" {
                    continue
                }
                files = append(files, *resp...)
            }

        } else if strings.HasPrefix(repo.Path, path) { // the repo path is a subdirectory of this directory
            nextDir := GetNextDir(repo.Path, path)
            if nextDir == "" {
                continue
            }
            if d.Addition.ShowAllVersion {
                files = append(files, File{
                    FileName: nextDir,
                    Size:     0,
                    CreateAt: time.Time{},
                    UpdateAt: time.Time{},
                    Url:      "",
                    Type:     "dir",
                    Path:     fmt.Sprintf("%s/%s", path, nextDir),
                })
                continue
            }

            repo, _ := GetRepoReleaseInfo(repo.RepoName, repo.Version, path, d.Storage.CacheExpiration)

            hasSameDir := false
            for index, file := range files {
                if file.FileName == nextDir {
                    hasSameDir = true
                    files[index].Size += repo.Size
                    files[index].UpdateAt = func(a time.Time, b time.Time) time.Time {
                        if a.After(b) {
                            return a
                        }
                        return b
                    }(files[index].UpdateAt, repo.UpdateAt)
                    break
                hasSameDir := false
                for index := range files {
                    if files[index].GetName() == nextDir {
                        hasSameDir = true
                        files[index].Size += point.GetLatestSize()
                        break
                    }
                }
                if !hasSameDir {
                    files = append(files, File{
                        Path:     path + "/" + nextDir,
                        FileName: nextDir,
                        Size:     point.GetLatestSize(),
                        UpdateAt: point.Release.PublishedAt,
                        CreateAt: point.Release.CreatedAt,
                        Type:     "dir",
                        Url:      "",
                    })
                }
            }
        } else { // all versions
            point.RequestReleases(d.GetRequest, args.Refresh)

            if !hasSameDir {
                files = append(files, File{
                    FileName: nextDir,
                    Size:     repo.Size,
                    CreateAt: repo.CreateAt,
                    UpdateAt: repo.UpdateAt,
                    Url:      repo.Url,
                    Type:     "dir",
                    Path:     fmt.Sprintf("%s/%s", path, nextDir),
                })
            if point.Point == path { // same as the mount point path
                files = append(files, point.GetAllVersion()...)
                if d.Addition.ShowReadme {
                    files = append(files, point.GetOtherFile(d.GetRequest, args.Refresh)...)
                }
            } else if strings.HasPrefix(point.Point, path) { // parent directory of the mount point
                nextDir := GetNextDir(point.Point, path)
                if nextDir == "" {
                    continue
                }

                hasSameDir := false
                for index := range files {
                    if files[index].GetName() == nextDir {
                        hasSameDir = true
                        files[index].Size += point.GetAllVersionSize()
                        break
                    }
                }
                if !hasSameDir {
                    files = append(files, File{
                        FileName: nextDir,
                        Path:     path + "/" + nextDir,
                        Size:     point.GetAllVersionSize(),
                        UpdateAt: (*point.Releases)[0].PublishedAt,
                        CreateAt: (*point.Releases)[0].CreatedAt,
                        Type:     "dir",
                        Url:      "",
                    })
                }
            } else if strings.HasPrefix(path, point.Point) { // subdirectory of the mount point
                tagName := GetNextDir(path, point.Point)
                if tagName == "" {
                    continue
                }

                files = append(files, point.GetReleaseByTagName(tagName)...)
            }
        }
    }
@@ -119,35 +127,41 @@ func (d *GithubReleases) List(ctx context.Context, dir model.Obj, args model.Lis
}

func (d *GithubReleases) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    url := file.GetID()
    gh_proxy := strings.TrimSpace(d.Addition.GitHubProxy)

    if gh_proxy != "" {
        url = strings.Replace(url, "https://github.com", gh_proxy, 1)
    }

    link := model.Link{
        URL: file.GetID(),
        URL:    url,
        Header: http.Header{},
    }
    return &link, nil
}

func (d *GithubReleases) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    // TODO create folder, optional
    return nil, errs.NotImplement
}

func (d *GithubReleases) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    // TODO move obj, optional
    return nil, errs.NotImplement
}

func (d *GithubReleases) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    // TODO rename obj, optional
    return nil, errs.NotImplement
}

func (d *GithubReleases) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    // TODO copy obj, optional
    return nil, errs.NotImplement
}

func (d *GithubReleases) Remove(ctx context.Context, obj model.Obj) error {
    // TODO remove obj, optional
    return errs.NotImplement
}

func (d *GithubReleases) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    return nil, errs.NotImplement
}

var _ driver.Driver = (*GithubReleases)(nil)
@@ -7,10 +7,11 @@ import (

type Addition struct {
    driver.RootID
    RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"/path/to/alist-gh:alistGo/alist\n/path/to2/alist-web-gh:AlistGo/alist-web" help:"structure:[path:]org/repo"`
    RepoStructure string `json:"repo_structure" type:"text" required:"true" default:"alistGo/alist" help:"structure:[path:]org/repo"`
    ShowReadme bool `json:"show_readme" type:"bool" default:"true" help:"show README、LICENSE file"`
    Token string `json:"token" type:"string" required:"false" help:"GitHub token, if you want to access private repositories or increase the rate limit"`
    ShowAllVersion bool `json:"show_all_version" type:"bool" default:"false" help:"show all versions"`
    GitHubProxy string `json:"gh_proxy" type:"string" default:"" help:"GitHub proxy, e.g. https://ghproxy.net/github.com or https://gh-proxy.com/github.com "`
}

var config = driver.Config{
86 drivers/github_releases/models.go Normal file
@@ -0,0 +1,86 @@
package github_releases

type Release struct {
    Url             string    `json:"url"`
    AssetsUrl       string    `json:"assets_url"`
    UploadUrl       string    `json:"upload_url"`
    HtmlUrl         string    `json:"html_url"`
    Id              int       `json:"id"`
    Author          User      `json:"author"`
    NodeId          string    `json:"node_id"`
    TagName         string    `json:"tag_name"`
    TargetCommitish string    `json:"target_commitish"`
    Name            string    `json:"name"`
    Draft           bool      `json:"draft"`
    Prerelease      bool      `json:"prerelease"`
    CreatedAt       string    `json:"created_at"`
    PublishedAt     string    `json:"published_at"`
    Assets          []Asset   `json:"assets"`
    TarballUrl      string    `json:"tarball_url"`
    ZipballUrl      string    `json:"zipball_url"`
    Body            string    `json:"body"`
    Reactions       Reactions `json:"reactions"`
}

type User struct {
    Login             string `json:"login"`
    Id                int    `json:"id"`
    NodeId            string `json:"node_id"`
    AvatarUrl         string `json:"avatar_url"`
    GravatarId        string `json:"gravatar_id"`
    Url               string `json:"url"`
    HtmlUrl           string `json:"html_url"`
    FollowersUrl      string `json:"followers_url"`
    FollowingUrl      string `json:"following_url"`
    GistsUrl          string `json:"gists_url"`
    StarredUrl        string `json:"starred_url"`
    SubscriptionsUrl  string `json:"subscriptions_url"`
    OrganizationsUrl  string `json:"organizations_url"`
    ReposUrl          string `json:"repos_url"`
    EventsUrl         string `json:"events_url"`
    ReceivedEventsUrl string `json:"received_events_url"`
    Type              string `json:"type"`
    UserViewType      string `json:"user_view_type"`
    SiteAdmin         bool   `json:"site_admin"`
}

type Asset struct {
    Url                string `json:"url"`
    Id                 int    `json:"id"`
    NodeId             string `json:"node_id"`
    Name               string `json:"name"`
    Label              string `json:"label"`
    Uploader           User   `json:"uploader"`
    ContentType        string `json:"content_type"`
    State              string `json:"state"`
    Size               int64  `json:"size"`
    DownloadCount      int    `json:"download_count"`
    CreatedAt          string `json:"created_at"`
    UpdatedAt          string `json:"updated_at"`
    BrowserDownloadUrl string `json:"browser_download_url"`
}

type Reactions struct {
    Url        string `json:"url"`
    TotalCount int    `json:"total_count"`
    PlusOne    int    `json:"+1"`
    MinusOne   int    `json:"-1"`
    Laugh      int    `json:"laugh"`
    Hooray     int    `json:"hooray"`
    Confused   int    `json:"confused"`
    Heart      int    `json:"heart"`
    Rocket     int    `json:"rocket"`
    Eyes       int    `json:"eyes"`
}

type FileInfo struct {
    Name        string `json:"name"`
    Path        string `json:"path"`
    Sha         string `json:"sha"`
    Size        int64  `json:"size"`
    Url         string `json:"url"`
    HtmlUrl     string `json:"html_url"`
    GitUrl      string `json:"git_url"`
    DownloadUrl string `json:"download_url"`
    Type        string `json:"type"`
}
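These structs map directly onto GitHub's REST release payload; of the full response, this driver mainly consumes tag_name, created_at/published_at, and each asset's name, size, and browser_download_url. An abridged example of what gets unmarshaled (values made up):

    {
      "tag_name": "v1.2.3",
      "created_at": "2025-01-27T16:10:16Z",
      "published_at": "2025-01-27T16:20:00Z",
      "assets": [
        {
          "name": "app-linux-amd64.tar.gz",
          "size": 12345678,
          "browser_download_url": "https://github.com/owner/repo/releases/download/v1.2.3/app-linux-amd64.tar.gz"
        }
      ]
    }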
@@ -1,19 +1,181 @@
package github_releases

import (
    "encoding/json"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/go-resty/resty/v2"
)

type MountPoint struct {
    Point     string      // mount point
    Repo      string      // repository name, owner/repo
    Release   *Release    // pointer to the latest Release
    Releases  *[]Release  // pointer to all Releases
    OtherFile *[]FileInfo // other files in the repo root
}

// fetch the latest release
func (m *MountPoint) RequestRelease(get func(url string) (*resty.Response, error), refresh bool) {
    if m.Repo == "" {
        return
    }

    if m.Release == nil || refresh {
        resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases/latest")
        m.Release = new(Release)
        json.Unmarshal(resp.Body(), m.Release)
    }
}

// fetch all releases
func (m *MountPoint) RequestReleases(get func(url string) (*resty.Response, error), refresh bool) {
    if m.Repo == "" {
        return
    }

    if m.Releases == nil || refresh {
        resp, _ := get("https://api.github.com/repos/" + m.Repo + "/releases")
        m.Releases = new([]Release)
        json.Unmarshal(resp.Body(), m.Releases)
    }
}

// list the files of the latest release
func (m *MountPoint) GetLatestRelease() []File {
    files := make([]File, 0)
    for _, asset := range m.Release.Assets {
        files = append(files, File{
            Path:     m.Point + "/" + asset.Name,
            FileName: asset.Name,
            Size:     asset.Size,
            Type:     "file",
            UpdateAt: asset.UpdatedAt,
            CreateAt: asset.CreatedAt,
            Url:      asset.BrowserDownloadUrl,
        })
    }
    return files
}

// total size of the latest release
func (m *MountPoint) GetLatestSize() int64 {
    size := int64(0)
    for _, asset := range m.Release.Assets {
        size += asset.Size
    }
    return size
}

// list all versions as directories
func (m *MountPoint) GetAllVersion() []File {
    files := make([]File, 0)
    for _, release := range *m.Releases {
        file := File{
            Path:     m.Point + "/" + release.TagName,
            FileName: release.TagName,
            Size:     m.GetSizeByTagName(release.TagName),
            Type:     "dir",
            UpdateAt: release.PublishedAt,
            CreateAt: release.CreatedAt,
            Url:      release.HtmlUrl,
        }
        for _, asset := range release.Assets {
            file.Size += asset.Size
        }
        files = append(files, file)
    }
    return files
}

// get a release's files by tag name
func (m *MountPoint) GetReleaseByTagName(tagName string) []File {
    for _, item := range *m.Releases {
        if item.TagName == tagName {
            files := make([]File, 0)
            for _, asset := range item.Assets {
                files = append(files, File{
                    Path:     m.Point + "/" + tagName + "/" + asset.Name,
                    FileName: asset.Name,
                    Size:     asset.Size,
                    Type:     "file",
                    UpdateAt: asset.UpdatedAt,
                    CreateAt: asset.CreatedAt,
                    Url:      asset.BrowserDownloadUrl,
                })
            }
            return files
        }
    }
    return nil
}

// get a release's size by tag name
func (m *MountPoint) GetSizeByTagName(tagName string) int64 {
    if m.Releases == nil {
        return 0
    }
    for _, item := range *m.Releases {
        if item.TagName == tagName {
            size := int64(0)
            for _, asset := range item.Assets {
                size += asset.Size
            }
            return size
        }
    }
    return 0
}

// total size across all versions
func (m *MountPoint) GetAllVersionSize() int64 {
    if m.Releases == nil {
        return 0
    }
    size := int64(0)
    for _, release := range *m.Releases {
        for _, asset := range release.Assets {
            size += asset.Size
        }
    }
    return size
}

func (m *MountPoint) GetOtherFile(get func(url string) (*resty.Response, error), refresh bool) []File {
    if m.OtherFile == nil || refresh {
        resp, _ := get("https://api.github.com/repos/" + m.Repo + "/contents")
        m.OtherFile = new([]FileInfo)
        json.Unmarshal(resp.Body(), m.OtherFile)
    }

    files := make([]File, 0)
    defaultTime := "1970-01-01T00:00:00Z"
    for _, file := range *m.OtherFile {
        if strings.HasSuffix(file.Name, ".md") || strings.HasPrefix(file.Name, "LICENSE") {
            files = append(files, File{
                Path:     m.Point + "/" + file.Name,
                FileName: file.Name,
                Size:     file.Size,
                Type:     "file",
                UpdateAt: defaultTime,
                CreateAt: defaultTime,
                Url:      file.DownloadUrl,
            })
        }
    }
    return files
}

type File struct {
    FileName string `json:"name"`
    Size int64 `json:"size"`
    CreateAt time.Time `json:"time"`
    UpdateAt time.Time `json:"chtime"`
    Url string `json:"url"`
    Type string `json:"type"`
    Path string `json:"path"`
    Path     string // file path
    FileName string // file name
    Size     int64  // file size
    Type     string // file type
    UpdateAt string // update time, e.g. "2025-01-27T16:10:16Z"
    CreateAt string // create time
    Url      string // download URL
}

func (f File) GetHash() utils.HashInfo {
@@ -33,11 +195,13 @@ func (f File) GetName() string {
}

func (f File) ModTime() time.Time {
    return f.UpdateAt
    t, _ := time.Parse(time.RFC3339, f.CreateAt)
    return t
}

func (f File) CreateTime() time.Time {
    return f.CreateAt
    t, _ := time.Parse(time.RFC3339, f.CreateAt)
    return t
}

func (f File) IsDir() bool {
@@ -47,22 +211,3 @@ func (f File) IsDir() bool {
func (f File) GetID() string {
    return f.Url
}

func (f File) Thumb() string {
    return ""
}

type ReleasesData struct {
    Files    []File    `json:"files"`
    Size     int64     `json:"size"`
    UpdateAt time.Time `json:"chtime"`
    CreateAt time.Time `json:"time"`
    Url      string    `json:"url"`
}

type Release struct {
    Path     string // mount path
    RepoName string // repository name
    Version  string // version (tag)
    ID       string // release ID
}
@@ -2,28 +2,36 @@ package github_releases

import (
    "fmt"
    "regexp"
    "path/filepath"
    "strings"
    "sync"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/go-resty/resty/v2"
    jsoniter "github.com/json-iterator/go"
    log "github.com/sirupsen/logrus"
)

var (
    cache   = make(map[string]*resty.Response)
    created = make(map[string]time.Time)
    mu      sync.Mutex
    req     *resty.Request
)
// send a GET request
func (d *GithubReleases) GetRequest(url string) (*resty.Response, error) {
    req := base.RestyClient.R()
    req.SetHeader("Accept", "application/vnd.github+json")
    req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
    if d.Addition.Token != "" {
        req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", d.Addition.Token))
    }
    res, err := req.Get(url)
    if err != nil {
        return nil, err
    }
    if res.StatusCode() != 200 {
        log.Warn("failed to get request: ", res.StatusCode(), res.String())
    }
    return res, nil
}

// parse the repository list
func ParseRepos(text string, allVersion bool) ([]Release, error) {
// parse the mount structure
func (d *GithubReleases) ParseRepos(text string) ([]MountPoint, error) {
    lines := strings.Split(text, "\n")
    var repos []Release
    points := make([]MountPoint, 0)
    for _, line := range lines {
        line = strings.TrimSpace(line)
        if line == "" {
@@ -41,177 +49,37 @@ func ParseRepos(text string, allVersion bool) ([]Release, error) {
            return nil, fmt.Errorf("invalid format: %s", line)
        }

        if allVersion {
            releases, _ := GetAllVersion(repo, path)
            repos = append(repos, *releases...)
        } else {
            repos = append(repos, Release{
                Path:     path,
                RepoName: repo,
                Version:  "latest",
                ID:       "latest",
            })
        }

        points = append(points, MountPoint{
            Point:    path,
            Repo:     repo,
            Release:  nil,
            Releases: nil,
        })
    }
    return repos, nil
    d.points = points
    return points, nil
}

// get the next directory level
func GetNextDir(wholePath string, basePath string) string {
    if !strings.HasSuffix(basePath, "/") {
        basePath += "/"
    }
    basePath = fmt.Sprintf("%s/", strings.TrimRight(basePath, "/"))
    if !strings.HasPrefix(wholePath, basePath) {
        return ""
    }
    remainingPath := strings.TrimLeft(strings.TrimPrefix(wholePath, basePath), "/")
    if remainingPath != "" {
        parts := strings.Split(remainingPath, "/")
        return parts[0]
        nextDir := parts[0]
        if strings.HasPrefix(wholePath, strings.TrimRight(basePath, "/")+"/"+nextDir) {
            return nextDir
        }
    }
    return ""
}

// send a GET request
func GetRequest(url string, cacheExpiration int) (*resty.Response, error) {
    mu.Lock()
    if res, ok := cache[url]; ok && time.Now().Before(created[url].Add(time.Duration(cacheExpiration)*time.Minute)) {
        mu.Unlock()
        return res, nil
    }
    mu.Unlock()

    res, err := req.Get(url)
    if err != nil {
        return nil, err
    }
    if res.StatusCode() != 200 {
        log.Warn("failed to get request: ", res.StatusCode(), res.String())
    }

    mu.Lock()
    cache[url] = res
    created[url] = time.Now()
    mu.Unlock()

    return res, nil
}

// fetch README, LICENSE and similar files
func GetGithubOtherFile(repo string, basePath string, cacheExpiration int) (*[]File, error) {
    url := fmt.Sprintf("https://api.github.com/repos/%s/contents/", strings.Trim(repo, "/"))
    res, _ := GetRequest(url, cacheExpiration)
    body := jsoniter.Get(res.Body())
    var files []File
    for i := 0; i < body.Size(); i++ {
        filename := body.Get(i, "name").ToString()

        re := regexp.MustCompile(`(?i)^(.*\.md|LICENSE)$`)

        if !re.MatchString(filename) {
            continue
        }

        files = append(files, File{
            FileName: filename,
            Size:     body.Get(i, "size").ToInt64(),
            CreateAt: time.Time{},
            UpdateAt: time.Now(),
            Url:      body.Get(i, "download_url").ToString(),
            Type:     body.Get(i, "type").ToString(),
            Path:     fmt.Sprintf("%s/%s", basePath, filename),
        })
    }
    return &files, nil
}

// fetch GitHub release details
func GetRepoReleaseInfo(repo string, version string, basePath string, cacheExpiration int) (*ReleasesData, error) {
    url := fmt.Sprintf("https://api.github.com/repos/%s/releases/%s", strings.Trim(repo, "/"), version)
    res, _ := GetRequest(url, cacheExpiration)
    body := res.Body()

    if jsoniter.Get(res.Body(), "status").ToInt64() != 0 {
        return &ReleasesData{}, fmt.Errorf("%s", res.String())
    }

    assets := jsoniter.Get(res.Body(), "assets")
    var files []File

    for i := 0; i < assets.Size(); i++ {
        filename := assets.Get(i, "name").ToString()

        files = append(files, File{
            FileName: filename,
            Size:     assets.Get(i, "size").ToInt64(),
            Url:      assets.Get(i, "browser_download_url").ToString(),
            Type:     assets.Get(i, "content_type").ToString(),
            Path:     fmt.Sprintf("%s/%s", basePath, filename),

            CreateAt: func() time.Time {
                t, _ := time.Parse(time.RFC3339, assets.Get(i, "created_at").ToString())
                return t
            }(),
            UpdateAt: func() time.Time {
                t, _ := time.Parse(time.RFC3339, assets.Get(i, "updated_at").ToString())
                return t
            }(),
        })
    }

    return &ReleasesData{
        Files: files,
        Url:   jsoniter.Get(body, "html_url").ToString(),

        Size: func() int64 {
            size := int64(0)
            for _, file := range files {
                size += file.Size
            }
            return size
        }(),
        UpdateAt: func() time.Time {
            t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "published_at").ToString())
            return t
        }(),
        CreateAt: func() time.Time {
            t, _ := time.Parse(time.RFC3339, jsoniter.Get(body, "created_at").ToString())
            return t
        }(),
    }, nil
}

// fetch all version tags
func GetAllVersion(repo string, path string) (*[]Release, error) {
    url := fmt.Sprintf("https://api.github.com/repos/%s/releases", strings.Trim(repo, "/"))
    res, _ := GetRequest(url, 0)
    body := jsoniter.Get(res.Body())
    releases := make([]Release, 0)
    for i := 0; i < body.Size(); i++ {
        version := body.Get(i, "tag_name").ToString()
        releases = append(releases, Release{
            Path:     fmt.Sprintf("%s/%s", path, version),
            Version:  version,
            RepoName: repo,
            ID:       body.Get(i, "id").ToString(),
        })
    }
    return &releases, nil
}

func ClearCache() {
    mu.Lock()
    cache = make(map[string]*resty.Response)
    created = make(map[string]time.Time)
    mu.Unlock()
}

func SetHeader(token string) {
    req = base.RestyClient.R()
    if token != "" {
        req.SetHeader("Authorization", fmt.Sprintf("Bearer %s", token))
    }
    req.SetHeader("Accept", "application/vnd.github+json")
    req.SetHeader("X-GitHub-Api-Version", "2022-11-28")
// report whether parentDir is an ancestor directory of targetDir
func IsAncestorDir(parentDir string, targetDir string) bool {
    absTargetDir, _ := filepath.Abs(targetDir)
    absParentDir, _ := filepath.Abs(parentDir)
    return strings.HasPrefix(absTargetDir, absParentDir)
}
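As the help text in meta.go states, each RepoStructure line is [path:]org/repo, and ParseRepos turns every non-empty line into one MountPoint. An illustrative config (paths made up):

    /packages/alist:alistGo/alist
    AlistGo/alist-web

The first line mounts alistGo/alist releases under /packages/alist; the second omits the path prefix, in which case the mount point is derived by the code handling the omitted-path case (not shown in this hunk).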
@@ -158,7 +158,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
    putUrl := res.Header().Get("location")
    if stream.GetSize() < d.ChunkSize*1024*1024 {
        _, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
            req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
            req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
                SetBody(driver.NewLimitedUploadStream(ctx, stream))
        }, nil)
    } else {
        err = d.chunkUpload(ctx, stream, putUrl)
@@ -11,10 +11,10 @@ import (
    "strconv"
    "time"

    "github.com/alist-org/alist/v3/pkg/http_range"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/go-resty/resty/v2"
    "github.com/golang-jwt/jwt/v4"
@@ -126,8 +126,7 @@ func (d *GoogleDrive) refreshToken() error {
    }
    d.AccessToken = resp.AccessToken
    return nil
}
if gdsaFileErr != nil && os.IsExist(gdsaFileErr) {
} else if os.IsExist(gdsaFileErr) {
    return gdsaFileErr
}
url := "https://www.googleapis.com/oauth2/v4/token"
@@ -229,6 +228,7 @@ func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer
    if err != nil {
        return err
    }
    reader = driver.NewLimitedUploadStream(ctx, reader)
    _, err = d.request(url, http.MethodPut, func(req *resty.Request) {
        req.SetHeaders(map[string]string{
            "Content-Length": strconv.FormatInt(chunkSize, 10),
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
    }

    resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
        req.SetBody(stream).SetContext(ctx)
        req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
    }, nil, postHeaders)

    if err != nil {
@@ -392,10 +392,11 @@ func (d *HalalCloud) put(ctx context.Context, dstDir model.Obj, fileStream model
    if fileStream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
        uploader.PartSize = fileStream.GetSize() / (s3manager.MaxUploadParts - 1)
    }
    reader := driver.NewLimitedUploadStream(ctx, fileStream)
    _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
        Bucket: aws.String(result.Bucket),
        Key:    aws.String(result.Key),
        Body:   io.TeeReader(fileStream, driver.NewProgress(fileStream.GetSize(), up)),
        Body:   io.TeeReader(reader, driver.NewProgress(fileStream.GetSize(), up)),
    })
    return nil, err
@ -120,7 +120,7 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ts, ts_str, err := getTimestamp(d.conf.secret)
|
||||
ts, ts_str, _ := getTimestamp(d.conf.secret)
|
||||
|
||||
params := []string{
|
||||
"uuid=" + url.QueryEscape(d.UUID),
|
||||
@ -149,11 +149,17 @@ func (d *ILanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
||||
u.RawQuery = strings.Join(params, "&")
|
||||
realURL := u.String()
|
||||
// get the url after redirect
|
||||
res, err := base.NoRedirectClient.R().SetHeaders(map[string]string{
|
||||
//"Origin": d.conf.site,
|
||||
req := base.NoRedirectClient.R()
|
||||
|
||||
req.SetHeaders(map[string]string{
|
||||
"Referer": d.conf.site + "/",
|
||||
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
|
||||
}).Get(realURL)
|
||||
})
|
||||
if d.Addition.Ip != "" {
|
||||
req.SetHeader("X-Forwarded-For", d.Addition.Ip)
|
||||
}
|
||||
|
||||
res, err := req.Get(realURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -266,10 +272,10 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error {

const DefaultPartSize = 1024 * 1024 * 8

func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := stream.CacheFullInTempFile()
tempFile, err := s.CacheFullInTempFile()
if err != nil {
return nil, err
}

@ -288,8 +294,8 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"fileId": "",
"fileName": stream.GetName(),
"fileSize": stream.GetSize()/1024 + 1,
"fileName": s.GetName(),
"fileSize": s.GetSize()/1024 + 1,
"folderId": dstDir.GetID(),
"md5": etag,
"type": 1,

@ -301,13 +307,20 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
upToken := utils.Json.Get(res, "upToken").ToString()
now := time.Now()
key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: tempFile,
Size: s.GetSize(),
},
UpdateProgress: up,
})
var token string
if stream.GetSize() <= DefaultPartSize {
res, err := d.upClient.R().SetMultipartFormData(map[string]string{
if s.GetSize() <= DefaultPartSize {
res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{
"token": upToken,
"key": key,
"fname": stream.GetName(),
}).SetMultipartField("file", stream.GetName(), stream.GetMimetype(), tempFile).
"fname": s.GetName(),
}).SetMultipartField("file", s.GetName(), s.GetMimetype(), reader).
Post("https://upload.qiniup.com/")
if err != nil {
return nil, err

@ -321,10 +334,10 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
}
uploadId := utils.Json.Get(res.Body(), "uploadId").ToString()
parts := make([]Part, 0)
partNum := (stream.GetSize() + DefaultPartSize - 1) / DefaultPartSize
partNum := (s.GetSize() + DefaultPartSize - 1) / DefaultPartSize
for i := 1; i <= int(partNum); i++ {
u := fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s/%d", d.conf.bucket, keyBase64, uploadId, i)
res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(tempFile, DefaultPartSize)).Put(u)
res, err = d.upClient.R().SetContext(ctx).SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(reader, DefaultPartSize)).Put(u)
if err != nil {
return nil, err
}

@ -335,7 +348,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
})
}
res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(base.Json{
"fnmae": stream.GetName(),
"fnmae": s.GetName(),
"parts": parts,
}).Post(fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s", d.conf.bucket, keyBase64, uploadId))
if err != nil {

@ -373,9 +386,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
ID: strconv.FormatInt(file.FileId, 10),
//Path: ,
Name: file.FileName,
Size: stream.GetSize(),
Modified: stream.ModTime(),
Ctime: stream.CreateTime(),
Size: s.GetSize(),
Modified: s.ModTime(),
Ctime: s.CreateTime(),
IsFolder: false,
HashInfo: utils.NewHashInfo(utils.MD5, etag),
}, nil
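
Nearly every hunk in this compare applies the same two wrappers to the upload body: driver.NewLimitedUploadStream, which makes reads context-aware (and, in alist, subject to the server-side upload rate limit), and driver.ReaderUpdatingProgress, which feeds the UpdateProgress callback as bytes flow through. The wrappers' implementations are not part of this diff; what follows is a minimal, self-contained sketch of the pattern, where all type bodies are assumptions rather than the project's actual code.

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// UpdateProgress mirrors the driver.UpdateProgress callback seen above.
type UpdateProgress func(percentage float64)

// ReaderUpdatingProgress reports cumulative progress as it is read.
type ReaderUpdatingProgress struct {
	Reader         io.Reader
	Size           int64 // total bytes expected
	read           int64
	UpdateProgress UpdateProgress
}

func (r *ReaderUpdatingProgress) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	r.read += int64(n)
	if n > 0 && r.Size > 0 {
		r.UpdateProgress(float64(r.read) * 100 / float64(r.Size))
	}
	return n, err
}

// LimitedUploadStream fails reads once the context is cancelled, so an
// aborted upload stops consuming the source instead of running to EOF.
type LimitedUploadStream struct {
	ctx context.Context
	r   io.Reader
}

func (s *LimitedUploadStream) Read(p []byte) (int, error) {
	if err := s.ctx.Err(); err != nil {
		return 0, err
	}
	return s.r.Read(p)
}

func main() {
	src := strings.NewReader("example payload")
	up := func(p float64) { fmt.Printf("progress: %.0f%%\n", p) }
	body := &LimitedUploadStream{ctx: context.Background(), r: &ReaderUpdatingProgress{
		Reader: src, Size: int64(src.Len()), UpdateProgress: up,
	}}
	_, _ = io.Copy(io.Discard, body)
}

Because both wrappers are plain io.Readers, they compose with resty's SetFileReader/SetBody, the AWS SDK uploaders, and raw http.NewRequest bodies alike, which is why the same change shape recurs across so many drivers below.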

@ -9,6 +9,7 @@ type Addition struct {
driver.RootID
Username string `json:"username" type:"string" required:"true"`
Password string `json:"password" type:"string" required:"true"`
Ip string `json:"ip" type:"string"`

Token string
UUID string

@ -73,8 +73,13 @@ func (d *ILanZou) request(pathname, method string, callback base.ReqCallback, pr
"Referer": d.conf.site + "/",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
"Accept-Encoding": "gzip, deflate, br, zstd",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5",
})

if d.Addition.Ip != "" {
req.SetHeader("X-Forwarded-For", d.Addition.Ip)
}

if callback != nil {
callback(req)
}

@ -4,13 +4,13 @@ import (
"context"
"fmt"
"net/url"
stdpath "path"
"path/filepath"
"strings"

shell "github.com/ipfs/go-ipfs-api"

"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
shell "github.com/ipfs/go-ipfs-api"
)

type IPFS struct {

@ -44,27 +44,32 @@ func (d *IPFS) Drop(ctx context.Context) error {

func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
if path[len(path):] != "/" {
path += "/"
switch d.Mode {
case "ipfs":
path, _ = url.JoinPath("/ipfs", path)
case "ipns":
path, _ = url.JoinPath("/ipns", path)
case "mfs":
fileStat, err := d.sh.FilesStat(ctx, path)
if err != nil {
return nil, err
}
path, _ = url.JoinPath("/ipfs", fileStat.Hash)
default:
return nil, fmt.Errorf("mode error")
}

path_cid, err := d.sh.FilesStat(ctx, path)
if err != nil {
return nil, err
}

dirs, err := d.sh.List(path_cid.Hash)
dirs, err := d.sh.List(path)
if err != nil {
return nil, err
}

objlist := []model.Obj{}
for _, file := range dirs {
gateurl := *d.gateURL
gateurl.Path = "ipfs/" + file.Hash
gateurl := *d.gateURL.JoinPath("/ipfs/" + file.Hash)
gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
objlist = append(objlist, &model.ObjectURL{
Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
Object: model.Object{ID: "/ipfs/" + file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
Url: model.Url{Url: gateurl.String()},
})
}

@ -73,11 +78,15 @@ func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
}

func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName())
return &model.Link{URL: link}, nil
gateurl := d.gateURL.JoinPath(file.GetID())
gateurl.RawQuery = "filename=" + url.PathEscape(file.GetName())
return &model.Link{URL: gateurl.String()}, nil
}

func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
path := parentDir.GetPath()
if path[len(path):] != "/" {
path += "/"

@ -86,39 +95,48 @@ func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string)
}

func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
}

func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName
return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
}

func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// TODO copy obj, optional
fmt.Println(srcObj.GetPath())
fmt.Println(dstDir.GetPath())
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath())
fmt.Println(newFileName)
return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
}

func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
// TODO remove obj, optional
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
return d.sh.FilesRm(ctx, obj.GetPath(), true)
}

func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
// TODO upload file, optional
_, err := d.sh.Add(stream, ToFiles(stdpath.Join(dstDir.GetPath(), stream.GetName())))
return err
}

func ToFiles(dstDir string) shell.AddOpts {
return func(rb *shell.RequestBuilder) error {
rb.Option("to-files", dstDir)
return nil
func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}))
if err != nil {
return err
}
err = d.sh.FilesCp(ctx, "/ipfs/"+outHash, dstDir.GetPath()+"/"+strings.ReplaceAll(s.GetName(), "\\", "/"))
return err
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
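
The rewritten IPFS Put above uploads in two steps: pin the bytes into IPFS with Add (yielding a content hash), then graft that immutable /ipfs/<hash> object into the mutable MFS tree with files/cp. A rough standalone illustration using the same go-ipfs-api client, assuming a local daemon on the default API port and an already existing MFS target directory:

package main

import (
	"context"
	"fmt"
	"strings"

	shell "github.com/ipfs/go-ipfs-api"
)

func main() {
	sh := shell.NewShell("http://127.0.0.1:5001") // assumed daemon address
	ctx := context.Background()

	// Step 1: add the content to IPFS and get its hash back.
	hash, err := sh.Add(strings.NewReader("hello from MFS"))
	if err != nil {
		panic(err)
	}

	// Step 2: link the immutable /ipfs/<hash> object into the MFS tree.
	// The parent directory ("/uploads" here) is hypothetical and must exist.
	if err := sh.FilesCp(ctx, "/ipfs/"+hash, "/uploads/hello.txt"); err != nil {
		panic(err)
	}
	fmt.Println("stored as", hash)
}

This also explains why the diff gates every mutating method behind d.Mode != "mfs": /ipfs and /ipns paths are read-only by construction, so only the MFS view can accept writes.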

@ -8,14 +8,16 @@ import (
type Addition struct {
// Usually one of two
driver.RootPath
Mode string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"`
Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"`
Gateway string `json:"gateway" default:"https://ipfs.io"`
Gateway string `json:"gateway" default:"http://127.0.0.1:8080"`
}

var config = driver.Config{
Name: "IPFS API",
DefaultRoot: "/",
LocalSort: true,
OnlyProxy: false,
}

func init() {

@ -3,8 +3,6 @@ package kodbox

import (
"context"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"net/http"
"path/filepath"
"strings"

@ -12,6 +10,8 @@ import (

"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)

type KodBox struct {

@ -225,14 +225,19 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error {
return nil
}

func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var resp *CommonResp
_, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) {
req.SetFileReader("file", stream.GetName(), stream).
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req.SetFileReader("file", s.GetName(), r).
SetResult(&resp).
SetFormData(map[string]string{
"path": dstDir.GetPath(),
})
}).
SetContext(ctx)
})
if err != nil {
return nil, err

@ -244,8 +249,8 @@ func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return &model.ObjThumb{
Object: model.Object{
Path: resp.Info.(string),
Name: stream.GetName(),
Size: stream.GetSize(),
Name: s.GetName(),
Size: s.GetSize(),
IsFolder: false,
Modified: time.Now(),
Ctime: time.Now(),

@ -208,18 +208,22 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
return errs.NotSupport
}

func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if d.IsCookie() || d.IsAccount() {
var resp RespText[[]FileOrFolder]
_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req.SetFormData(map[string]string{
"task": "1",
"vie": "2",
"ve": "2",
"id": "WU_FILE_0",
"name": stream.GetName(),
"name": s.GetName(),
"folder_id_bb_n": dstDir.GetID(),
}).SetFileReader("upload_file", stream.GetName(), stream).SetContext(ctx)
}).SetFileReader("upload_file", s.GetName(), reader).SetContext(ctx)
}, &resp, true)
if err != nil {
return nil, err

@ -320,7 +320,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
Build()

// send the request
uploadLimit.Wait(ctx)
err := uploadLimit.Wait(ctx)
if err != nil {
return nil, err
}
resp, err := c.client.Drive.File.UploadPrepare(ctx, req)
if err != nil {
return nil, err

@ -341,7 +344,7 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
length = stream.GetSize() - int64(i*blockSize)
}

reader := io.LimitReader(stream, length)
reader := driver.NewLimitedUploadStream(ctx, io.LimitReader(stream, length))

req := larkdrive.NewUploadPartFileReqBuilder().
Body(larkdrive.NewUploadPartFileReqBodyBuilder().

@ -353,7 +356,10 @@ func (c *Lark) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
Build()

// send the request
uploadLimit.Wait(ctx)
err = uploadLimit.Wait(ctx)
if err != nil {
return nil, err
}
resp, err := c.client.Drive.File.UploadPart(ctx, req)

if err != nil {
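
Both Lark hunks stop discarding the error from uploadLimit.Wait(ctx). Presumably uploadLimit is a golang.org/x/time/rate limiter (its declaration is not shown in this diff); Wait blocks until a token is available and returns an error when the context is cancelled or its deadline cannot be met, so the old code let a dead upload keep issuing requests. A small sketch of the behavior now being handled:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Hypothetical stand-in for the driver's uploadLimit: one token per second.
	uploadLimit := rate.NewLimiter(rate.Every(time.Second), 1)

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	for i := 0; i < 3; i++ {
		// Wait fails fast once ctx is done or the deadline cannot be met,
		// which is exactly the error the hunks above now propagate.
		if err := uploadLimit.Wait(ctx); err != nil {
			fmt.Println("aborting upload:", err)
			return
		}
		fmt.Println("part", i, "sent")
	}
}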

@ -47,7 +47,11 @@ func (f File) GetPath() string {
}

func (f File) GetSize() int64 {
return f.Size
if f.IsDir() {
return 0
} else {
return f.Size
}
}

func (f File) GetName() string {

@ -70,10 +74,6 @@ func (f File) GetID() string {
return f.GetPath()
}

func (f File) Thumb() string {
return ""
}

type Files struct {
Data struct {
List []File `json:"list"`

@ -161,7 +161,7 @@ func (d *MediaTrack) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
src := "assets/" + uuid.New().String()
var resp UploadResp
_, err := d.request("https://jayce.api.mediatrack.cn/v3/storage/tokens/asset", http.MethodGet, func(req *resty.Request) {

@ -180,7 +180,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
tempFile, err := stream.CacheFullInTempFile()
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}

@ -188,13 +188,19 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
_ = tempFile.Close()
}()
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
input := &s3manager.UploadInput{
Bucket: &resp.Data.Bucket,
Key: &resp.Data.Object,
Body: tempFile,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: tempFile,
Size: file.GetSize(),
},
UpdateProgress: up,
}),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {

@ -213,12 +219,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
hash := hex.EncodeToString(h.Sum(nil))
data := base.Json{
"category": 0,
"description": stream.GetName(),
"description": file.GetName(),
"hash": hash,
"mime": stream.GetMimetype(),
"size": stream.GetSize(),
"mime": file.GetMimetype(),
"size": file.GetSize(),
"src": src,
"title": stream.GetName(),
"title": file.GetName(),
"type": 0,
}
_, err = d.request(url, http.MethodPost, func(req *resty.Request) {

@ -156,6 +156,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return err
}

reader := driver.NewLimitedUploadStream(ctx, stream)
for id := 0; id < u.Chunks(); id++ {
if utils.IsCanceled(ctx) {
return ctx.Err()

@ -165,7 +166,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return err
}
chunk := make([]byte, chkSize)
n, err := io.ReadFull(stream, chunk)
n, err := io.ReadFull(reader, chunk)
if err != nil && err != io.EOF {
return err
}

@ -64,7 +64,7 @@ func (d *Misskey) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Misskey) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.put(dstDir, stream, up)
return d.put(ctx, dstDir, stream, up)
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {

@ -1,7 +1,6 @@
package misskey

import (
"bytes"
"context"
"errors"
"io"

@ -190,16 +189,16 @@ func (d *Misskey) remove(obj model.Obj) error {
}
}

func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *Misskey) put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var file MFile

fileContent, err := io.ReadAll(stream)
if err != nil {
return nil, err
}

reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: stream,
UpdateProgress: up,
})
req := base.RestyClient.R().
SetFileReader("file", stream.GetName(), io.NopCloser(bytes.NewReader(fileContent))).
SetContext(ctx).
SetFileReader("file", stream.GetName(), reader).
SetFormData(map[string]string{
"folderId": handleFolderId(dstDir).(string),
"name": stream.GetName(),

@ -207,7 +206,8 @@ func (d *Misskey) put(dstDir model.Obj, stream model.FileStreamer, up driver.Upd
"isSensitive": "false",
"force": "false",
}).
SetResult(&file).SetAuthToken(d.AccessToken)
SetResult(&file).
SetAuthToken(d.AccessToken)

resp, err := req.Post(d.Endpoint + "/api/drive/files/create")
if err != nil {

@ -10,6 +10,8 @@ import (
"strings"
"time"

"golang.org/x/sync/semaphore"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"

@ -301,6 +303,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)

// step.3
parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)

@ -319,7 +322,12 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre

// step.4
threadG.Go(func(ctx context.Context) error {
req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)
req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader))
if err != nil {
return err
}

@ -328,7 +336,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if err != nil {
return err
}
resp.Body.Close()
_ = resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("upload err,code=%d", resp.StatusCode)
}

@ -88,7 +88,7 @@ func (d *NeteaseMusic) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *NeteaseMusic) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
return d.putSongStream(stream)
return d.putSongStream(ctx, stream, up)
}

func (d *NeteaseMusic) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

@ -2,6 +2,7 @@ package netease_music

import (
"context"
"github.com/alist-org/alist/v3/internal/driver"
"io"
"net/http"
"strconv"

@ -71,6 +72,8 @@ func (lrc *LyricObj) getLyricLink() *model.Link {
type ReqOption struct {
crypto string
stream model.FileStreamer
up driver.UpdateProgress
ctx context.Context
data map[string]string
headers map[string]string
cookies []*http.Cookie

@ -1,8 +1,10 @@
package netease_music

import (
"context"
"crypto/md5"
"encoding/hex"
"github.com/alist-org/alist/v3/internal/driver"
"io"
"net/http"
"strconv"

@ -47,9 +49,12 @@ func (u *uploader) init(stream model.FileStreamer) error {
}

h := md5.New()
utils.CopyWithBuffer(h, stream)
_, err := utils.CopyWithBuffer(h, stream)
if err != nil {
return err
}
u.md5 = hex.EncodeToString(h.Sum(nil))
_, err := u.file.Seek(0, io.SeekStart)
_, err = u.file.Seek(0, io.SeekStart)
if err != nil {
return err
}

@ -167,7 +172,7 @@ func (u *uploader) publishInfo(resourceId string) error {
return nil
}

func (u *uploader) upload(stream model.FileStreamer) error {
func (u *uploader) upload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
bucket := "jd-musicrep-privatecloud-audio-public"
token, err := u.allocToken(bucket)
if err != nil {

@ -192,6 +197,8 @@ func (u *uploader) upload(stream model.FileStreamer) error {
http.MethodPost,
ReqOption{
stream: stream,
up: up,
ctx: ctx,
headers: map[string]string{
"x-nos-token": token.token,
"Content-Type": "audio/mpeg",
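
The init fix above now checks the error from utils.CopyWithBuffer and reuses err for the subsequent Seek. The underlying pattern is hash-then-rewind: hashing consumes the reader, so the file offset must be reset before the same handle serves as the upload body. A standalone sketch of that pattern, with plain io.Copy standing in for utils.CopyWithBuffer:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	_, _ = f.WriteString("song bytes")
	_, _ = f.Seek(0, io.SeekStart)

	// Hashing reads the file to EOF...
	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	// ...so rewind before handing the same handle to the uploader.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	fmt.Println("md5:", hex.EncodeToString(h.Sum(nil)))
}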

@ -1,7 +1,7 @@
package netease_music

import (
"io"
"context"
"net/http"
"path"
"regexp"

@ -10,6 +10,7 @@ import (
"time"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"

@ -58,20 +59,35 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error
url = "https://music.163.com/api/linux/forward"
}

if opt.ctx != nil {
req.SetContext(opt.ctx)
}
if method == http.MethodPost {
if opt.stream != nil {
if opt.up == nil {
opt.up = func(_ float64) {}
}
req.SetContentLength(true)
req.SetBody(io.ReadCloser(opt.stream))
req.SetBody(driver.NewLimitedUploadStream(opt.ctx, &driver.ReaderUpdatingProgress{
Reader: opt.stream,
UpdateProgress: opt.up,
}))
} else {
req.SetFormData(data)
}
res, err := req.Post(url)
return res.Body(), err
if err != nil {
return nil, err
}
return res.Body(), nil
}

if method == http.MethodGet {
res, err := req.Get(url)
return res.Body(), err
if err != nil {
return nil, err
}
return res.Body(), nil
}

return nil, errs.NotImplement

@ -206,7 +222,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
return err
}

func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error {
func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
tmp, err := stream.CacheFullInTempFile()
if err != nil {
return err

@ -231,7 +247,7 @@ func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error {
}

if u.meta.needUpload {
err = u.upload(stream)
err = u.upload(ctx, stream, up)
if err != nil {
return err
}

@ -8,7 +8,6 @@ import (
"io"
"net/http"
stdpath "path"
"strconv"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"

@ -152,12 +151,8 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F
// 1. upload new file
// ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online
url := d.GetMetaUrl(false, filepath) + "/content"
data, err := io.ReadAll(stream)
if err != nil {
return err
}
_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(data).SetContext(ctx)
_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
}, nil)
if err != nil {
return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err)

@ -225,12 +220,13 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)
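
The upBig change swaps the manually set Content-Length header for the request's ContentLength field. That distinction matters: net/http ignores a Content-Length header set on an outgoing request and derives the framing from req.ContentLength, so the old header call was a no-op. A sketch of building one chunk request for an upload session (the session URL is hypothetical; OneDrive issues the real one via its upload-session API):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	chunk := make([]byte, 4)
	var offset, total int64 = 0, 16

	req, err := http.NewRequest(http.MethodPut,
		"https://example.invalid/upload-session", bytes.NewReader(chunk))
	if err != nil {
		panic(err)
	}
	// The ContentLength field, not the header map, controls what is sent.
	req.ContentLength = int64(len(chunk))
	req.Header.Set("Content-Range",
		fmt.Sprintf("bytes %d-%d/%d", offset, offset+int64(len(chunk))-1, total))

	fmt.Println(req.ContentLength, req.Header.Get("Content-Range"))
	// A real driver would now send it with an *http.Client's Do(req).
}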

@ -8,7 +8,6 @@ import (
"io"
"net/http"
stdpath "path"
"strconv"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"

@ -140,12 +139,8 @@ func (d *OnedriveAPP) GetFile(path string) (*File, error) {

func (d *OnedriveAPP) upSmall(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) error {
url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/content"
data, err := io.ReadAll(stream)
if err != nil {
return err
}
_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(data).SetContext(ctx)
_, err := d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(driver.NewLimitedUploadStream(ctx, stream)).SetContext(ctx)
}, nil)
return err
}

@ -175,12 +170,13 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
if err != nil {
return err
}
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
finish += byteSize
res, err := base.HttpClient.Do(req)

@ -255,10 +255,10 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}

if stream.GetSize() <= 10*utils.MB { // files of 10MB or less are uploaded in plain (non-multipart) mode
return d.UploadByOSS(&params, stream, up)
return d.UploadByOSS(ctx, &params, stream, up)
}
// multipart upload
return d.UploadByMultipart(&params, stream.GetSize(), stream, up)
return d.UploadByMultipart(ctx, &params, stream.GetSize(), stream, up)
}

// offline download file

@ -2,6 +2,7 @@ package pikpak

import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"
"encoding/hex"

@ -19,6 +20,7 @@ import (
"regexp"
"strings"
"sync"
"sync/atomic"
"time"

"github.com/alist-org/alist/v3/drivers/base"

@ -417,7 +419,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err
return nil
}

func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.FileStreamer, up driver.UpdateProgress) error {
ossClient, err := oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret)
if err != nil {
return err

@ -427,14 +429,17 @@ func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up dri
return err
}

err = bucket.PutObject(params.Key, stream, OssOption(params)...)
err = bucket.PutObject(params.Key, driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}), OssOption(params)...)
if err != nil {
return err
}
return nil
}

func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
var (
chunks []oss.FileChunk
parts []oss.UploadPart

@ -444,7 +449,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
err error
)

tmpF, err := stream.CacheFullInTempFile()
tmpF, err := s.CacheFullInTempFile()
if err != nil {
return err
}

@ -488,6 +493,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
quit <- struct{}{}
}()

completedNum := atomic.Int32{}
// consumers
for i := 0; i < ThreadsNum; i++ {
go func(threadId int) {

@ -500,6 +506,8 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
var part oss.UploadPart // keep retrying on error, 3 attempts in total
for retry := 0; retry < 3; retry++ {
select {
case <-ctx.Done():
break
case <-ticker.C:
errCh <- errors.Wrap(err, "ossToken 过期")
default:

@ -510,13 +518,16 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
continue
}

b := bytes.NewBuffer(buf)
b := driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf))
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
break
}
}
if err != nil {
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
} else {
num := completedNum.Add(1)
up(float64(num) * 100.0 / float64(len(chunks)))
}
UploadedPartsCh <- part
}

@ -547,7 +558,7 @@ LOOP:
// the EOF error comes from the xml Unmarshal; the response is actually JSON, so the upload in fact succeeded
if _, err = bucket.CompleteMultipartUpload(imur, parts, OssOption(params)...); err != nil && !errors.Is(err, io.EOF) {
// when the file name contains either & or <, parsing the XML response fails, but the upload actually succeeded
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
if filename := filepath.Base(s.GetName()); !strings.ContainsAny(filename, "&<") {
return err
}
}

@ -1,6 +1,7 @@
package quark

import (
"bytes"
"context"
"crypto/md5"
"crypto/sha1"

@ -178,7 +179,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
// part up
partSize := pre.Metadata.PartSize
var bytes []byte
var part []byte
md5s := make([]string, 0)
defaultBytes := make([]byte, partSize)
total := stream.GetSize()

@ -189,17 +190,18 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
return ctx.Err()
}
if left > int64(partSize) {
bytes = defaultBytes
part = defaultBytes
} else {
bytes = make([]byte, left)
part = make([]byte, left)
}
_, err := io.ReadFull(tempFile, bytes)
_, err := io.ReadFull(tempFile, part)
if err != nil {
return err
}
left -= int64(len(bytes))
left -= int64(len(part))
log.Debugf("left: %d", left)
m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, bytes)
reader := driver.NewLimitedUploadStream(ctx, bytes.NewReader(part))
m, err := d.upPart(ctx, pre, stream.GetMimetype(), partNumber, reader)
//m, err := driver.UpPart(pre, file.GetMIMEType(), partNumber, bytes, account, md5Str, sha1Str)
if err != nil {
return err

@ -6,6 +6,7 @@ import (
"encoding/base64"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"

@ -119,7 +120,7 @@ func (d *QuarkOrUC) upHash(md5, sha1, taskId string) (bool, error) {
return resp.Data.Finish, err
}

func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes []byte) (string, error) {
func (d *QuarkOrUC) upPart(ctx context.Context, pre UpPreResp, mineType string, partNumber int, bytes io.Reader) (string, error) {
//func (driver QuarkOrUC) UpPart(pre UpPreResp, mineType string, partNumber int, bytes []byte, account *model.Account, md5Str, sha1Str string) (string, error) {
timeStr := time.Now().UTC().Format(http.TimeFormat)
data := base.Json{

@ -163,10 +164,13 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
"partNumber": strconv.Itoa(partNumber),
"uploadId": pre.Data.UploadId,
}).SetBody(bytes).Put(u)
if err != nil {
return "", err
}
if res.StatusCode() != 200 {
return "", fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
}
return res.Header().Get("ETag"), nil
return res.Header().Get("Etag"), nil
}

func (d *QuarkOrUC) upCommit(pre UpPreResp, md5s []string) error {

@ -230,6 +234,9 @@ x-oss-user-agent:aliyun-sdk-js/6.6.1 Chrome 98.0.4758.80 on Windows 10 64-bit
SetQueryParams(map[string]string{
"uploadId": pre.Data.UploadId,
}).SetBody(body).Post(u)
if err != nil {
return err
}
if res.StatusCode() != 200 {
return fmt.Errorf("up status: %d, error: %s", res.StatusCode(), res.String())
}

@ -3,6 +3,7 @@ package quqi

import (
"bytes"
"context"
"errors"
"io"
"strconv"
"strings"

@ -315,7 +316,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
// if the file already exists in Quqi server, there is no need to actually upload it
if uploadInitResp.Data.Exist {
// the file name returned by Quqi does not include the extension name
nodeName, nodeExt := uploadInitResp.Data.NodeName, rawExt(stream.GetName())
nodeName, nodeExt := uploadInitResp.Data.NodeName, utils.Ext(stream.GetName())
if nodeExt != "" {
nodeName = nodeName + "." + nodeExt
}

@ -385,20 +386,34 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
}
uploader := s3manager.NewUploader(s)
buf := make([]byte, 1024*1024*2)
fup := &driver.ReaderUpdatingProgress{
Reader: &driver.SimpleReaderWithSize{
Reader: f,
Size: int64(len(buf)),
},
UpdateProgress: up,
}
for partNumber := int64(1); ; partNumber++ {
n, err := io.ReadFull(f, buf)
if err != nil && err != io.ErrUnexpectedEOF {
n, err := io.ReadFull(fup, buf)
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
if err == io.EOF {
break
}
return nil, err
}
reader := bytes.NewReader(buf[:n])
_, err = uploader.S3.UploadPartWithContext(ctx, &s3.UploadPartInput{
UploadId: &uploadInitResp.Data.UploadID,
Key: &uploadInitResp.Data.Key,
Bucket: &uploadInitResp.Data.Bucket,
PartNumber: aws.Int64(partNumber),
Body: bytes.NewReader(buf[:n]),
Body: struct {
*driver.RateLimitReader
io.Seeker
}{
RateLimitReader: driver.NewLimitedUploadStream(ctx, reader),
Seeker: reader,
},
})
if err != nil {
return nil, err

@ -417,7 +432,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return nil, err
}
// the file name returned by Quqi does not include the extension name
nodeName, nodeExt := uploadFinishResp.Data.NodeName, rawExt(stream.GetName())
nodeName, nodeExt := uploadFinishResp.Data.NodeName, utils.Ext(stream.GetName())
if nodeExt != "" {
nodeName = nodeName + "." + nodeExt
}
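
The Quqi part-upload hunk wraps the buffer in the rate limiter but still hands the AWS SDK something it can seek: UploadPartInput.Body is an io.ReadSeeker because the SDK rewinds the body when it retries a part. The anonymous struct recombines the limited Reader with the original Seeker. A dependency-free sketch of the same trick, with rateLimited standing in for driver.RateLimitReader:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// rateLimited is a stand-in wrapper that only implements io.Reader.
type rateLimited struct{ r io.Reader }

func (l *rateLimited) Read(p []byte) (int, error) { return l.r.Read(p) }

func main() {
	part := bytes.NewReader([]byte("part payload"))

	// Embedding promotes Read from the wrapper and Seek from the original
	// reader, so the composite value satisfies io.ReadSeeker.
	body := struct {
		io.Reader
		io.Seeker
	}{
		Reader: &rateLimited{r: part},
		Seeker: part,
	}

	var rs io.ReadSeeker = body
	b, _ := io.ReadAll(rs)
	fmt.Printf("%s\n", b)
}

Since the wrapper delegates reads to the same underlying bytes.Reader that the Seeker points at, seeking and reading stay consistent, which is what makes the SDK's retry path safe here.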

@ -9,7 +9,6 @@ import (
"io"
"net/http"
"net/url"
stdpath "path"
"strings"
"time"

@ -115,16 +114,6 @@ func (d *Quqi) checkLogin() bool {
return true
}

// rawExt keeps the extension name's original case
func rawExt(name string) string {
ext := stdpath.Ext(name)
if strings.HasPrefix(ext, ".") {
ext = ext[1:]
}

return ext
}

// decryptKey obtains the password
func decryptKey(encodeKey string) []byte {
// remove illegal characters

@ -304,10 +293,6 @@ func (d *Quqi) linkFromCDN(id string) (*model.Link, error) {
}

return &model.Link{
Header: http.Header{
"Origin": []string{"https://quqi.com"},
"Cookie": []string{d.Cookie},
},
RangeReadCloser: &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers},
Expiration: &expiration,
}, nil

@ -4,18 +4,17 @@ import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/server/common"
"io"
"net/url"
stdpath "path"
"strings"
"time"

"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"

"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/server/common"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"

@ -163,18 +162,21 @@ func (d *S3) Remove(ctx context.Context, obj model.Obj) error {
return d.removeFile(obj.GetPath())
}

func (d *S3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
uploader := s3manager.NewUploader(d.Session)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if s.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = s.GetSize() / (s3manager.MaxUploadParts - 1)
}
key := getKey(stdpath.Join(dstDir.GetPath(), stream.GetName()), false)
contentType := stream.GetMimetype()
key := getKey(stdpath.Join(dstDir.GetPath(), s.GetName()), false)
contentType := s.GetMimetype()
log.Debugln("key:", key)
input := &s3manager.UploadInput{
Bucket: &d.Bucket,
Key: &key,
Body: stream,
Bucket: &d.Bucket,
Key: &key,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}),
ContentType: &contentType,
}
_, err := uploader.UploadWithContext(ctx, input)

@ -199,7 +199,7 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
dstKey := getKey(dst, false)
input := &s3.CopyObjectInput{
Bucket: &d.Bucket,
CopySource: aws.String(url.PathEscape("/" + d.Bucket + "/" + srcKey)),
CopySource: aws.String(url.PathEscape(d.Bucket + "/" + srcKey)),
Key: &dstKey,
}
_, err := d.client.CopyObject(input)
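
The PartSize adjustment that recurs here (and in the MediaTrack and Thunder hunks) exists because s3manager refuses uploads that would need more than MaxUploadParts parts, so for oversized files the part size must grow. Dividing by MaxUploadParts - 1 leaves headroom for the final short part. A sketch of just that computation against the v1 SDK constants:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	size := int64(120) << 30 // hypothetical 120 GiB object

	partSize := int64(s3manager.DefaultUploadPartSize)
	if size > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
		// Same formula as the drivers above: stretch parts so the whole
		// object fits within the part-count limit.
		partSize = size / (s3manager.MaxUploadParts - 1)
	}
	fmt.Printf("upload in parts of %d bytes\n", partSize)
}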

@ -197,7 +197,7 @@ func (d *Seafile) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
repo, path, err := d.getRepoAndPath(dstDir.GetPath())
if err != nil {
return err

@ -214,11 +214,16 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
u := string(res)
u = u[1 : len(u)-1] // remove quotes
_, err = d.request(http.MethodPost, u, func(req *resty.Request) {
req.SetFileReader("file", stream.GetName(), stream).
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req.SetFileReader("file", s.GetName(), r).
SetFormData(map[string]string{
"parent_dir": path,
"replace": "1",
})
}).
SetContext(ctx)
})
return err
}

@ -111,7 +111,7 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
defer func() {
_ = dstFile.Close()
}()
err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up)
err = utils.CopyWithCtx(ctx, dstFile, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
return err
}

@ -186,7 +186,7 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream
_ = d.fs.Remove(fullPath)
}
}()
err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up)
err = utils.CopyWithCtx(ctx, out, driver.NewLimitedUploadStream(ctx, stream), stream.GetSize(), up)
if err != nil {
return err
}

@ -148,7 +148,7 @@ func (d *Teambition) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
var newFile *FileUpload
if stream.GetSize() <= 20971520 {
// post upload
newFile, err = d.upload(ctx, stream, token)
newFile, err = d.upload(ctx, stream, token, up)
} else {
// chunk upload
//err = base.ErrNotImplement

@ -1,6 +1,7 @@
package teambition

import (
"bytes"
"context"
"errors"
"fmt"

@ -120,11 +121,15 @@ func (d *Teambition) getFiles(parentId string) ([]model.Obj, error) {
return files, nil
}

func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string) (*FileUpload, error) {
func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token string, up driver.UpdateProgress) (*FileUpload, error) {
prefix := "tcs"
if d.isInternational() {
prefix = "us-tcs"
}
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
})
var newFile FileUpload
res, err := base.RestyClient.R().
SetContext(ctx).

@ -134,7 +139,8 @@ func (d *Teambition) upload(ctx context.Context, file model.FileStreamer, token
"type": file.GetMimetype(),
"size": strconv.FormatInt(file.GetSize(), 10),
"lastModifiedDate": time.Now().Format("Mon Jan 02 2006 15:04:05 GMT+0800 (中国标准时间)"),
}).SetMultipartField("file", file.GetName(), file.GetMimetype(), file).
}).
SetMultipartField("file", file.GetName(), file.GetMimetype(), reader).
Post(fmt.Sprintf("https://%s.teambition.net/upload", prefix))
if err != nil {
return nil, err

@ -183,10 +189,9 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
"Authorization": token,
"Content-Type": "application/octet-stream",
"Referer": referer,
}).SetBody(chunkData).Post(u)
if err != nil {
return nil, err
}
}).
SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(chunkData))).
Post(u)
if err != nil {
return nil, err
}

@ -252,7 +257,10 @@ func (d *Teambition) newUpload(ctx context.Context, dstDir model.Obj, stream mod
Key: &uploadToken.Upload.Key,
ContentDisposition: &uploadToken.Upload.ContentDisposition,
ContentType: &uploadToken.Upload.ContentType,
Body: stream,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: stream,
UpdateProgress: up,
}),
}
_, err = uploader.UploadWithContext(ctx, input)
if err != nil {

@ -66,11 +66,33 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
return errs.NotImplement
}

func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (d *Template) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// TODO upload file, optional
return nil, errs.NotImplement
}

func (d *Template) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}

func (d *Template) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}

func (d *Template) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}

func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}

@ -228,7 +228,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
res, err := base.RestyClient.R().
SetContext(ctx).
SetQueryParams(params).
SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
SetFileReader("file", stream.GetName(), driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData))).
SetHeader("Cookie", d.Cookie).
Post(u)
if err != nil {

@ -332,16 +332,16 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
hi := stream.GetHash()
func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
hi := file.GetHash()
gcid := hi.GetHash(hash_extend.GCID)
if len(gcid) < hash_extend.GCID.Width {
tFile, err := stream.CacheFullInTempFile()
tFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}

gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
if err != nil {
return err
}

@ -353,8 +353,8 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.
r.SetBody(&base.Json{
"kind": FILE,
"parent_id": dstDir.GetID(),
"name": stream.GetName(),
"size": stream.GetSize(),
"name": file.GetName(),
"size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
})

@ -375,14 +375,17 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.
return err
}
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
Body: stream,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}),
})
return err
}

@ -508,7 +508,7 @@ func (xc *XunLeiBrowserCommon) Put(ctx context.Context, dstDir model.Obj, stream
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
Body: io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up)),
Body: driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, driver.NewProgress(stream.GetSize(), up))),
})
return err
}

@ -363,16 +363,16 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
hi := stream.GetHash()
func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
hi := file.GetHash()
gcid := hi.GetHash(hash_extend.GCID)
if len(gcid) < hash_extend.GCID.Width {
tFile, err := stream.CacheFullInTempFile()
tFile, err := file.CacheFullInTempFile()
if err != nil {
return err
}

gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
if err != nil {
return err
}

@ -384,8 +384,8 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
r.SetBody(&base.Json{
"kind": FILE,
"parent_id": dstDir.GetID(),
"name": stream.GetName(),
"size": stream.GetSize(),
"name": file.GetName(),
"size": file.GetSize(),
"hash": gcid,
"upload_type": UPLOAD_TYPE_RESUMABLE,
})

@ -406,14 +406,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model
return err
}
uploader := s3manager.NewUploader(s)
if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
}
_, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
Bucket: aws.String(param.Bucket),
Key: aws.String(param.Key),
Expires: aws.Time(param.Expiration),
Body: stream,
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: file,
UpdateProgress: up,
}),
})
return err
}

@ -58,7 +58,7 @@ func (d *Trainbit) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return nil, err
}
var jsonData any
json.Unmarshal(data, &jsonData)
err = json.Unmarshal(data, &jsonData)
if err != nil {
return nil, err
}

@ -114,23 +114,18 @@ func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error {
return err
}

func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/")
query := &url.Values{}
query.Add("q", strings.Split(dstDir.GetID(), "_")[1])
query.Add("guid", guid)
query.Add("name", url.QueryEscape(local2provider(stream.GetName(), false)+"."))
query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
endpoint.RawQuery = query.Encode()
var total int64
total = 0
progressReader := &ProgressReader{
stream,
func(byteNum int) {
total += int64(byteNum)
up(float64(total) / float64(stream.GetSize()) * 100)
},
}
req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader)
progressReader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
})
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
if err != nil {
return err
}

@ -13,17 +13,6 @@ import (
"github.com/alist-org/alist/v3/internal/model"
)

type ProgressReader struct {
io.Reader
reporter func(byteNum int)
}

func (progressReader *ProgressReader) Read(data []byte) (int, error) {
byteNum, err := progressReader.Reader.Read(data)
progressReader.reporter(byteNum)
return byteNum, err
}

func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) {
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {

@ -3,7 +3,6 @@ package url_tree

import (
"context"
"errors"
"github.com/alist-org/alist/v3/internal/op"
stdpath "path"
"strings"
"sync"

@ -11,6 +10,7 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)

Some files were not shown because too many files have changed in this diff.