Compare commits

...

21 Commits

Author SHA1 Message Date
4cbbda8832 fix(baidu): custom upload part size (close #5757) 2024-05-02 22:30:00 +08:00
Mmx
7bf5014417 ci: cache musl library in docker build workflow (#6392)
* ci: add musl libs into action cache

* build: update Dockerfile.ci
2024-05-02 22:28:13 +08:00
b704bba444 fix(115): disable NoOverwriteUpload (#6409 close #6251)
closed #6251
2024-05-02 22:27:55 +08:00
eecea3febd fix(onedrive): fix Ctime/Mtime (#6397) 2024-05-02 22:27:31 +08:00
0e246a7b0c chore: replace link of vidhub [skip ci] 2024-04-30 14:22:26 +08:00
Mmx
b95df1d745 perf: use io copy with buffer pool (#6389)
* feat: add io methods with buffer

* chore: move io.Copy calls to utils.CopyWithBuffer
2024-04-25 20:11:15 +08:00
ec08ecdf6c fix(baidu_netdisk): cached Ctime/Mtime (#6373 close #6370)
(cherry picked from commit 23542541e4f343d484de1f83ee5c928d2ab6753c)
2024-04-25 20:08:20 +08:00
479fc6d466 fix(webdav): make sure Mtime after Ctime (#6372 close #6371)
* fix(server/webdav) make sure Mtime >= Ctime

* fix(server/webdav) avoid variable 'stream' collides with imported package name
2024-04-24 17:13:30 +08:00
32ddab9b01 feat(123_share): add access token (#6357) 2024-04-24 14:54:01 +08:00
0c9dcec9cd fix: init storages in order (#6346) 2024-04-19 17:22:16 +08:00
793a4ea6ca fix(cloudreve): add domain to the download url if not exists (#6339 close #6265)
* fix: correct the download url got by Cloudreve driver

* fix: add a condition to the correction
2024-04-12 21:45:16 +08:00
c3c5181847 feat(Seafile): add token login (#6324 close #5302) 2024-04-10 21:50:30 +08:00
Mix
cd5a8a011d fix: typo about env of Meilisearch (#6316) 2024-04-08 18:35:23 +08:00
1756036a21 fix(authn): subfolder api is considered as a wrong origin (closes #6294 in #6301) 2024-04-03 14:33:19 +08:00
58c3cb3cf6 fix(s3): don't bind s3 port if s3 is not enabled (#6291) 2024-04-03 10:09:48 +08:00
d8e190406a feat(189pc): add family transfer upload (#6288)
* feat(189pc): add family transfer upload

* fix(189): family transfer file delete
2024-04-02 16:51:02 +08:00
2880ed70ce fix: some typos (#6283)
Signed-off-by: guoguangwu <guoguangwug@gmail.com>
2024-04-02 16:50:30 +08:00
0e86036874 fix(doge): reget client after refresh session (#6277) 2024-03-29 14:56:49 +08:00
e37465e67e feat(crypt): force stream upload for supported drivers (#6270) 2024-03-29 14:42:01 +08:00
d517adde71 docs: use width instead of height for image in Readme (#6282)
* Update README.md

* Update README_cn.md

* Update README_ja.md
2024-03-29 14:40:43 +08:00
8a18f47e68 fix(doge): the temporary access key is only valid for two hours (#6273)
* feat: add doge driver

* doc: supplement the readme documentation

* fix: align the meta information

* fix: adjust the struct names to stay consistent with the driver

* perf: merge to s3

* Rename goge.go to doge.go

* fix: resolve the DogeCloud temporary key expiring after two hours

* fix: stop the scheduled task in Drop

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2024-03-27 14:22:26 +08:00
59 changed files with 756 additions and 257 deletions

View File

@ -32,10 +32,21 @@ jobs:
flavor: |
suffix=-ffmpeg,onlatest=true
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: Cache Musl
id: cache-musl
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
run: bash build.sh prepare docker-multiplatform
- name: Build go binary
run: bash build.sh dev docker-multiplatform

View File

@ -13,10 +13,21 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-go@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'
- name: Cache Musl
id: cache-musl
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
run: bash build.sh prepare docker-multiplatform
- name: Build go binary
run: bash build.sh release docker-multiplatform

View File

@ -3,7 +3,7 @@ ARG TARGETPLATFORM
LABEL MAINTAINER="i@nn.ci"
VOLUME /opt/alist/data/
WORKDIR /opt/alist/
COPY /${TARGETPLATFORM}/alist ./
COPY /build/${TARGETPLATFORM}/alist ./
COPY entrypoint.sh /entrypoint.sh
RUN apk update && \
apk upgrade --no-cache && \

View File

@ -1,5 +1,5 @@
<div align="center">
<a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@ -115,7 +115,7 @@ https://alist.nn.ci/guide/sponsor.html
### Special sponsors
- [VidHub](https://okaapps.com/product/1659622164?ref=alist) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](https://zhaoziyuan.pw/) - 阿里云盘资源搜索引擎

View File

@ -1,5 +1,5 @@
<div align="center">
<a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@ -113,7 +113,7 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我
### 特别赞助
- [VidHub](https://zh.okaapps.com/product/1659622164?ref=alist) - 苹果生态下优雅的网盘视频播放器,iPhone、iPad、Mac、Apple TV 全平台支持。
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器,iPhone、iPad、Mac、Apple TV 全平台支持。
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
- [找资源](https://zhaoziyuan.pw/) - 阿里云盘资源搜索引擎

View File

@ -1,5 +1,5 @@
<div align="center">
<a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
<p><em>🗂Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
<div>
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
@ -115,7 +115,7 @@ https://alist.nn.ci/guide/sponsor.html
### スペシャルスポンサー
- [VidHub](https://okaapps.com/product/1659622164?ref=alist) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](https://zhaoziyuan.pw/) - 阿里云盘资源搜索引擎

View File

@ -96,17 +96,24 @@ BuildDocker() {
go build -o ./bin/alist -ldflags="$ldflags" -tags=jsoniter .
}
BuildDockerMultiplatform() {
PrepareBuildDocker
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://musl.cc/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
curl -L -o "${i}.tgz" "${url}"
sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
rm -f "${i}.tgz"
lib_tgz="build/${i}.tgz"
curl -L -o "${lib_tgz}" "${url}"
tar xf "${lib_tgz}" --strip-components 1 -C build/musl-libs
rm -f "${lib_tgz}"
done
}
BuildDockerMultiplatform() {
PrepareBuildDocker
# run PrepareBuildDockerMusl before build
export PATH=$PATH:$PWD/build/musl-libs/bin
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
@ -122,7 +129,7 @@ BuildDockerMultiplatform() {
export GOARCH=$arch
export CC=${cgo_cc}
echo "building for $os_arch"
go build -o ./$os/$arch/alist -ldflags="$docker_lflags" -tags=jsoniter .
go build -o build/$os/$arch/alist -ldflags="$docker_lflags" -tags=jsoniter .
done
DOCKER_ARM_ARCHES=(linux-arm/v6 linux-arm/v7)
@ -136,7 +143,7 @@ BuildDockerMultiplatform() {
export GOARM=${GO_ARM[$i]}
export CC=${cgo_cc}
echo "building for $docker_arch"
go build -o ./${docker_arch%%-*}/${docker_arch##*-}/alist -ldflags="$docker_lflags" -tags=jsoniter .
go build -o build/${docker_arch%%-*}/${docker_arch##*-}/alist -ldflags="$docker_lflags" -tags=jsoniter .
done
}
@ -289,6 +296,10 @@ elif [ "$1" = "release" ]; then
BuildRelease
MakeRelease "md5.txt"
fi
elif [ "$1" = "prepare" ]; then
if [ "$2" = "docker-multiplatform" ]; then
PrepareBuildDockerMusl
fi
else
echo -e "Parameter error"
fi

View File

@ -91,10 +91,10 @@ the address is defined in config file`,
}
}()
}
s3r := gin.New()
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
if conf.Conf.S3.Port != -1 {
if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
s3r := gin.New()
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
server.InitS3(s3r)
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
utils.Log.Infof("start S3 server @ %s", s3Base)
go func() {

View File

@ -19,7 +19,7 @@ var config = driver.Config{
DefaultRoot: "0",
//OnlyProxy: true,
//OnlyLocal: true,
NoOverwriteUpload: true,
//NoOverwriteUpload: true,
}
func init() {

View File

@ -194,7 +194,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
defer func() {
_ = tempFile.Close()
}()
if _, err = io.Copy(h, tempFile); err != nil {
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return err
}
_, err = tempFile.Seek(0, io.SeekStart)
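Throughout this series, hot `io.Copy`/`io.CopyN` call sites are swapped for `utils.CopyWithBuffer`/`utils.CopyWithBufferN` (per the "perf: use io copy with buffer pool" commit above). A minimal sketch of what such a helper can look like — only the names and signatures come from the diff; the `sync.Pool` layout and the 32 KiB buffer size are assumptions:

```go
package utils

import (
	"io"
	"sync"
)

// copyBufPool recycles copy buffers across calls; the 32 KiB size is assumed.
var copyBufPool = sync.Pool{
	New: func() any {
		buf := make([]byte, 32*1024)
		return &buf
	},
}

// CopyWithBuffer behaves like io.Copy but supplies a pooled buffer.
// Note: io.CopyBuffer bypasses the buffer when src implements io.WriterTo
// or dst implements io.ReaderFrom.
func CopyWithBuffer(dst io.Writer, src io.Reader) (int64, error) {
	bufPtr := copyBufPool.Get().(*[]byte)
	defer copyBufPool.Put(bufPtr)
	return io.CopyBuffer(dst, src, *bufPtr)
}

// CopyWithBufferN mirrors io.CopyN on top of the pooled copy.
func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
	written, err = CopyWithBuffer(dst, io.LimitReader(src, n))
	if written == n {
		return n, nil
	}
	if written < n && err == nil {
		// src stopped before the limit was reached, so report EOF like io.CopyN
		err = io.EOF
	}
	return
}
```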

View File

@ -4,8 +4,11 @@ import (
"context"
"encoding/base64"
"fmt"
"golang.org/x/time/rate"
"net/http"
"net/url"
"sync"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@ -19,6 +22,7 @@ import (
type Pan123Share struct {
model.Storage
Addition
apiRateLimit sync.Map
}
func (d *Pan123Share) Config() driver.Config {
@ -146,4 +150,11 @@ func (d *Pan123Share) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
// return nil, errs.NotSupport
//}
func (d *Pan123Share) APIRateLimit(api string) bool {
limiter, _ := d.apiRateLimit.LoadOrStore(api,
rate.NewLimiter(rate.Every(time.Millisecond*700), 1))
ins := limiter.(*rate.Limiter)
return ins.Allow()
}
var _ driver.Driver = (*Pan123Share)(nil)
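The `APIRateLimit` helper above keeps one `rate.Limiter` per endpoint (one token every 700 ms) in a `sync.Map`; `getFiles`, later in this diff, polls it and sleeps 200 ms between attempts. A self-contained sketch of that pattern — the endpoint URL and intervals mirror the diff, the rest is scaffolding:

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

var apiRateLimit sync.Map

// allow mirrors APIRateLimit: one limiter per endpoint, refilling a
// single token every 700 ms.
func allow(api string) bool {
	limiter, _ := apiRateLimit.LoadOrStore(api,
		rate.NewLimiter(rate.Every(time.Millisecond*700), 1))
	return limiter.(*rate.Limiter).Allow()
}

func main() {
	const fileList = "https://www.123pan.com/b/api/share/get"
	for page := 1; page <= 3; page++ {
		// poll with a short sleep until a token frees up, as getFiles does
		for !allow(fileList) {
			time.Sleep(time.Millisecond * 200)
		}
		fmt.Println("fetching page", page)
	}
}
```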

View File

@ -7,10 +7,11 @@ import (
type Addition struct {
ShareKey string `json:"sharekey" required:"true"`
SharePwd string `json:"sharepassword" required:"true"`
SharePwd string `json:"sharepassword"`
driver.RootID
OrderBy string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
AccessToken string `json:"accesstoken" type:"text"`
}
var config = driver.Config{

View File

@ -2,8 +2,15 @@ package _123Share
import (
"errors"
"fmt"
"hash/crc32"
"math"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
@ -15,20 +22,45 @@ const (
Api = "https://www.123pan.com/api"
AApi = "https://www.123pan.com/a/api"
BApi = "https://www.123pan.com/b/api"
MainApi = Api
MainApi = BApi
FileList = MainApi + "/share/get"
DownloadInfo = MainApi + "/share/download/info"
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)
func signPath(path string, os string, version string) (k string, v string) {
table := []byte{'a', 'd', 'e', 'f', 'g', 'h', 'l', 'm', 'y', 'i', 'j', 'n', 'o', 'p', 'k', 'q', 'r', 's', 't', 'u', 'b', 'c', 'v', 'w', 's', 'z'}
random := fmt.Sprintf("%.f", math.Round(1e7*rand.Float64()))
now := time.Now().In(time.FixedZone("CST", 8*3600))
timestamp := fmt.Sprint(now.Unix())
nowStr := []byte(now.Format("200601021504"))
for i := 0; i < len(nowStr); i++ {
nowStr[i] = table[nowStr[i]-48]
}
timeSign := fmt.Sprint(crc32.ChecksumIEEE(nowStr))
data := strings.Join([]string{timestamp, random, path, os, version, timeSign}, "|")
dataSign := fmt.Sprint(crc32.ChecksumIEEE([]byte(data)))
return timeSign, strings.Join([]string{timestamp, random, dataSign}, "-")
}
func GetApi(rawUrl string) string {
u, _ := url.Parse(rawUrl)
query := u.Query()
query.Add(signPath(u.Path, "web", "3"))
u.RawQuery = query.Encode()
return u.String()
}
func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"user-agent": "Dart/2.19(dart:io)",
"platform": "android",
"app-version": "36",
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
"platform": "web",
"app-version": "3",
//"user-agent": base.UserAgent,
})
if callback != nil {
callback(req)
@ -36,7 +68,7 @@ func (d *Pan123Share) request(url string, method string, callback base.ReqCallba
if resp != nil {
req.SetResult(resp)
}
res, err := req.Execute(method, url)
res, err := req.Execute(method, GetApi(url))
if err != nil {
return nil, err
}
@ -52,6 +84,10 @@ func (d *Pan123Share) getFiles(parentId string) ([]File, error) {
page := 1
res := make([]File, 0)
for {
if !d.APIRateLimit(FileList) {
time.Sleep(time.Millisecond * 200)
continue
}
var resp Files
query := map[string]string{
"limit": "100",

View File

@ -1,6 +1,7 @@
package _189pc
import (
"container/ring"
"context"
"net/http"
"strconv"
@ -28,6 +29,9 @@ type Cloud189PC struct {
uploadThread int
familyTransferFolder *ring.Ring
cleanFamilyTransferFile func()
storageConfig driver.Config
}
@ -52,7 +56,6 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
}
if !y.isFamily() && y.RootFolderID == "" {
y.RootFolderID = "-11"
y.FamilyID = ""
}
// Limit the number of upload threads
@ -79,11 +82,24 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
}
// Resolve the family cloud ID
if y.isFamily() && y.FamilyID == "" {
if y.FamilyID == "" {
if y.FamilyID, err = y.getFamilyID(); err != nil {
return err
}
}
// Create transfer folders to avoid filename collisions
if y.FamilyTransfer {
if y.familyTransferFolder, err = y.createFamilyTransferFolder(32); err != nil {
return err
}
}
y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() {
if err := y.cleanFamilyTransfer(context.TODO()); err != nil {
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
}
})
return
}
@ -92,7 +108,7 @@ func (y *Cloud189PC) Drop(ctx context.Context) error {
}
func (y *Cloud189PC) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
return y.getFiles(ctx, dir.GetID())
return y.getFiles(ctx, dir.GetID(), y.isFamily())
}
func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
@ -100,8 +116,9 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
URL string `json:"fileDownloadUrl"`
}
isFamily := y.isFamily()
fullUrl := API_URL
if y.isFamily() {
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/getFileDownloadUrl.action"
@ -109,7 +126,7 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
_, err := y.get(fullUrl, func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParam("fileId", file.GetID())
if y.isFamily() {
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
})
@ -119,7 +136,7 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
"flag": "1",
})
}
}, &downloadUrl)
}, &downloadUrl, isFamily)
if err != nil {
return nil, err
}
@ -156,8 +173,9 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
}
func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
isFamily := y.isFamily()
fullUrl := API_URL
if y.isFamily() {
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/createFolder.action"
@ -169,7 +187,7 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
"folderName": dirName,
"relativePath": "",
})
if y.isFamily() {
if isFamily {
req.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"parentId": parentDir.GetID(),
@ -179,7 +197,7 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
"parentFolderId": parentDir.GetID(),
})
}
}, &newFolder)
}, &newFolder, isFamily)
if err != nil {
return nil, err
}
@ -187,27 +205,14 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
}
func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
"type": "MOVE",
"taskInfos": MustString(utils.Json.MarshalToString(
[]BatchTaskInfo{
{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
},
})),
"targetFolderId": dstDir.GetID(),
})
if y.isFamily() {
req.SetFormData(map[string]string{
"familyId": y.FamilyID,
})
}
}, &resp)
isFamily := y.isFamily()
other := map[string]string{"targetFileName": dstDir.GetName()}
resp, err := y.CreateBatchTask("MOVE", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, BatchTaskInfo{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
})
if err != nil {
return nil, err
}
@ -218,10 +223,11 @@ func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.
}
func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
isFamily := y.isFamily()
queryParam := make(map[string]string)
fullUrl := API_URL
method := http.MethodPost
if y.isFamily() {
if isFamily {
fullUrl += "/family/file"
method = http.MethodGet
queryParam["familyId"] = y.FamilyID
@ -245,7 +251,7 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
_, err := y.request(fullUrl, method, func(req *resty.Request) {
req.SetContext(ctx).SetQueryParams(queryParam)
}, nil, newObj)
}, nil, newObj, isFamily)
if err != nil {
return nil, err
}
@ -253,28 +259,15 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
}
func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
"type": "COPY",
"taskInfos": MustString(utils.Json.MarshalToString(
[]BatchTaskInfo{
{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
},
})),
"targetFolderId": dstDir.GetID(),
"targetFileName": dstDir.GetName(),
})
if y.isFamily() {
req.SetFormData(map[string]string{
"familyId": y.FamilyID,
})
}
}, &resp)
isFamily := y.isFamily()
other := map[string]string{"targetFileName": dstDir.GetName()}
resp, err := y.CreateBatchTask("COPY", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, BatchTaskInfo{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
})
if err != nil {
return err
}
@ -282,27 +275,13 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
"type": "DELETE",
"taskInfos": MustString(utils.Json.MarshalToString(
[]*BatchTaskInfo{
{
FileId: obj.GetID(),
FileName: obj.GetName(),
IsFolder: BoolToNumber(obj.IsDir()),
},
})),
})
isFamily := y.isFamily()
if y.isFamily() {
req.SetFormData(map[string]string{
"familyId": y.FamilyID,
})
}
}, &resp)
resp, err := y.CreateBatchTask("DELETE", IF(isFamily, y.FamilyID, ""), "", nil, BatchTaskInfo{
FileId: obj.GetID(),
FileName: obj.GetName(),
IsFolder: BoolToNumber(obj.IsDir()),
})
if err != nil {
return err
}
@ -310,25 +289,73 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
}
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (newObj model.Obj, err error) {
overwrite := true
isFamily := y.isFamily()
// Slow to respond; enable on demand
if y.Addition.RapidUpload {
if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
if y.Addition.RapidUpload && !stream.IsForceStreamUpload() {
if newObj, err := y.RapidUpload(ctx, dstDir, stream, isFamily, overwrite); err == nil {
return newObj, nil
}
}
switch y.UploadMethod {
case "old":
return y.OldUpload(ctx, dstDir, stream, up)
uploadMethod := y.UploadMethod
if stream.IsForceStreamUpload() {
uploadMethod = "stream"
}
// The legacy upload is limited on the family cloud as well
if uploadMethod == "old" {
return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)
}
// Family cloud transfer enabled
if !isFamily && y.FamilyTransfer {
// Redirect the upload target to a family cloud folder
transferDstDir := dstDir
dstDir = (y.familyTransferFolder.Value).(*Cloud189Folder)
y.familyTransferFolder = y.familyTransferFolder.Next()
isFamily = true
overwrite = false
defer func() {
if newObj != nil {
// Batch tasks occasionally fail to delete files
y.cleanFamilyTransferFile()
// Save the family cloud file to the personal cloud
err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true)
task := BatchTaskInfo{
FileId: newObj.GetID(),
FileName: newObj.GetName(),
IsFolder: BoolToNumber(newObj.IsDir()),
}
// Delete the source file
if resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, task); err == nil {
y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
// Permanently delete it
if resp, err := y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, task); err == nil {
y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
}
}
newObj = nil
}
}()
}
switch uploadMethod {
case "rapid":
return y.FastUpload(ctx, dstDir, stream, up)
return y.FastUpload(ctx, dstDir, stream, up, isFamily, overwrite)
case "stream":
if stream.GetSize() == 0 {
return y.FastUpload(ctx, dstDir, stream, up)
return y.FastUpload(ctx, dstDir, stream, up, isFamily, overwrite)
}
fallthrough
default:
return y.StreamUpload(ctx, dstDir, stream, up)
return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
}
}
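The family-transfer branch above rotates through pre-created transfer folders with `container/ring`: each upload takes the current folder's value and advances the ring, so files that would collide by name land in different folders. A runnable sketch of just the rotation, with strings standing in for the driver's `*Cloud189Folder` values:

```go
package main

import (
	"container/ring"
	"fmt"
)

func main() {
	// Init: pre-create the transfer folders, one per ring slot
	// (the driver uses createFamilyTransferFolder(32)).
	folders := ring.New(4)
	for i := 0; i < 4; i++ {
		folders.Value = fmt.Sprintf("transfer-folder-%d", i)
		folders = folders.Next()
	}

	// Put: take the current folder, then rotate, mirroring
	// y.familyTransferFolder = y.familyTransferFolder.Next().
	for upload := 0; upload < 6; upload++ {
		dst := folders.Value.(string)
		folders = folders.Next()
		fmt.Printf("upload %d -> %s\n", upload, dst)
	}
}
```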

View File

@ -192,3 +192,19 @@ func partSize(size int64) int64 {
}
return DEFAULT
}
func isBool(bs ...bool) bool {
for _, b := range bs {
if b {
return true
}
}
return false
}
func IF[V any](o bool, t V, f V) V {
if o {
return t
}
return f
}
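`isBool` collapses the variadic `isFamily ...bool` arguments threaded through `util.go`, and the generic `IF` is a small ternary substitute the driver uses, e.g., to pick the `opertype` form value. A brief usage sketch (the "3"/"1" values mirror the diff, where "3" requests overwrite and "1" is the non-overwrite mode; the `main` wrapper is scaffolding):

```go
package main

import "fmt"

func IF[V any](o bool, t V, f V) V {
	if o {
		return t
	}
	return f
}

func main() {
	for _, overwrite := range []bool{true, false} {
		// "3" requests overwrite; "1" is the non-overwrite mode (per the diff)
		fmt.Println("opertype =", IF(overwrite, "3", "1"))
	}
}
```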

View File

@ -16,6 +16,7 @@ type Addition struct {
FamilyID string `json:"family_id"`
UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
FamilyTransfer bool `json:"family_transfer"`
RapidUpload bool `json:"rapid_upload"`
NoUseOcr bool `json:"no_use_ocr"`
}

View File

@ -3,10 +3,11 @@ package _189pc
import (
"encoding/xml"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"sort"
"strings"
"time"
"github.com/alist-org/alist/v3/pkg/utils"
)
// Surprisingly, there are four response formats
@ -242,7 +243,12 @@ type BatchTaskInfo struct {
// IsFolder whether this is a folder (0: no, 1: yes)
IsFolder int `json:"isFolder"`
// SrcParentId ID of the file's parent directory
//SrcParentId string `json:"srcParentId"`
SrcParentId string `json:"srcParentId,omitempty"`
/* Conflict handling */
// 1 -> skip, 2 -> keep, 3 -> overwrite
DealWay int `json:"dealWay,omitempty"`
IsConflict int `json:"isConflict,omitempty"`
}
/* Upload section */
@ -355,6 +361,14 @@ type BatchTaskStateResp struct {
TaskStatus int `json:"taskStatus"` // 1: initializing, 2: conflict, 3: running, 4: done
}
type BatchTaskConflictTaskInfoResp struct {
SessionKey string `json:"sessionKey"`
TargetFolderID int `json:"targetFolderId"`
TaskID string `json:"taskId"`
TaskInfos []BatchTaskInfo
TaskType int `json:"taskType"`
}
/* Encrypted query parameters */
type Params map[string]string

View File

@ -2,6 +2,7 @@ package _189pc
import (
"bytes"
"container/ring"
"context"
"crypto/md5"
"encoding/base64"
@ -54,11 +55,11 @@ const (
CHANNEL_ID = "web_cloud.189.cn"
)
func (y *Cloud189PC) SignatureHeader(url, method, params string) map[string]string {
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
dateOfGmt := getHttpDateStr()
sessionKey := y.tokenInfo.SessionKey
sessionSecret := y.tokenInfo.SessionSecret
if y.isFamily() {
if isFamily {
sessionKey = y.tokenInfo.FamilySessionKey
sessionSecret = y.tokenInfo.FamilySessionSecret
}
@ -72,9 +73,9 @@ func (y *Cloud189PC) SignatureHeader(url, method, params string) map[string]stri
return header
}
func (y *Cloud189PC) EncryptParams(params Params) string {
func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
sessionSecret := y.tokenInfo.SessionSecret
if y.isFamily() {
if isFamily {
sessionSecret = y.tokenInfo.FamilySessionSecret
}
if params != nil {
@ -83,17 +84,17 @@ func (y *Cloud189PC) EncryptParams(params Params) string {
return ""
}
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}) ([]byte, error) {
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
req := y.client.R().SetQueryParams(clientSuffix())
// Set params
paramsData := y.EncryptParams(params)
paramsData := y.EncryptParams(params, isBool(isFamily...))
if paramsData != "" {
req.SetQueryParam("params", paramsData)
}
// Signature
req.SetHeaders(y.SignatureHeader(url, method, paramsData))
req.SetHeaders(y.SignatureHeader(url, method, paramsData, isBool(isFamily...)))
var erron RespErr
req.SetError(&erron)
@ -129,15 +130,15 @@ func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, para
return res.Body(), nil
}
func (y *Cloud189PC) get(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
return y.request(url, http.MethodGet, callback, nil, resp)
func (y *Cloud189PC) get(url string, callback base.ReqCallback, resp interface{}, isFamily ...bool) ([]byte, error) {
return y.request(url, http.MethodGet, callback, nil, resp, isFamily...)
}
func (y *Cloud189PC) post(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
return y.request(url, http.MethodPost, callback, nil, resp)
func (y *Cloud189PC) post(url string, callback base.ReqCallback, resp interface{}, isFamily ...bool) ([]byte, error) {
return y.request(url, http.MethodPost, callback, nil, resp, isFamily...)
}
func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]string, sign bool, file io.Reader) ([]byte, error) {
func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]string, sign bool, file io.Reader, isFamily bool) ([]byte, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, file)
if err != nil {
return nil, err
@ -154,7 +155,7 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
}
if sign {
for key, value := range y.SignatureHeader(url, http.MethodPut, "") {
for key, value := range y.SignatureHeader(url, http.MethodPut, "", isFamily) {
req.Header.Add(key, value)
}
}
@ -181,9 +182,9 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
}
return body, nil
}
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string) ([]model.Obj, error) {
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
fullUrl := API_URL
if y.isFamily() {
if isFamily {
fullUrl += "/family/file"
}
fullUrl += "/listFiles.action"
@ -201,7 +202,7 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string) ([]model.Obj,
"pageNum": fmt.Sprint(pageNum),
"pageSize": "130",
})
if y.isFamily() {
if isFamily {
r.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"orderBy": toFamilyOrderBy(y.OrderBy),
@ -214,7 +215,7 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string) ([]model.Obj,
"descending": toDesc(y.OrderDirection),
})
}
}, &resp)
}, &resp, isFamily)
if err != nil {
return nil, err
}
@ -437,7 +438,7 @@ func (y *Cloud189PC) refreshSession() (err error) {
// Regular upload
// Zero-size files cannot be uploaded this way
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
var sliceSize = partSize(file.GetSize())
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
lastPartSize := file.GetSize() % sliceSize
@ -454,7 +455,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
}
fullUrl := UPLOAD_URL
if y.isFamily() {
if isFamily {
params.Set("familyId", y.FamilyID)
fullUrl += "/family"
} else {
@ -466,7 +467,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
var initMultiUpload InitMultiUploadResp
_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, params, &initMultiUpload)
}, params, &initMultiUpload, isFamily)
if err != nil {
return nil, err
}
@ -502,14 +503,14 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
threadG.Go(func(ctx context.Context) error {
uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
// step.4 upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData), isFamily)
if err != nil {
return err
}
@ -538,21 +539,21 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
"sliceMd5": sliceMd5Hex,
"lazyCheck": "1",
"isLog": "0",
"opertype": "3",
}, &resp)
"opertype": IF(overwrite, "3", "1"),
}, &resp, isFamily)
if err != nil {
return nil, err
}
return resp.toFile(), nil
}
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
fileMd5 := stream.GetHash().GetHash(utils.MD5)
if len(fileMd5) < utils.MD5.Width {
return nil, errors.New("invalid hash")
}
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()), isFamily)
if err != nil {
return nil, err
}
@ -561,11 +562,11 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
return nil, errors.New("rapid upload fail")
}
return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId, isFamily, overwrite)
}
// Fast upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
@ -594,7 +595,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
silceMd5.Reset()
if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
if _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
return nil, err
}
md5Byte := silceMd5.Sum(nil)
@ -609,7 +610,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
fullUrl := UPLOAD_URL
if y.isFamily() {
if isFamily {
fullUrl += "/family"
} else {
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
@ -628,13 +629,13 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
"sliceSize": fmt.Sprint(sliceSize),
"sliceMd5": sliceMd5Hex,
}
if y.isFamily() {
if isFamily {
params.Set("familyId", y.FamilyID)
}
var uploadInfo InitMultiUploadResp
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, params, &uploadInfo)
}, params, &uploadInfo, isFamily)
if err != nil {
return nil, err
}
@ -659,7 +660,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
i, uploadPart := i, uploadPart
threadG.Go(func(ctx context.Context) error {
// step.3 fetch the upload URLs
uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, uploadInfo.UploadFileID, uploadPart)
if err != nil {
return err
}
@ -671,7 +672,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}
// step.4 upload the slice
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize), isFamily)
if err != nil {
return err
}
@ -698,8 +699,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
}, Params{
"uploadFileId": uploadInfo.UploadFileID,
"isLog": "0",
"opertype": "3",
}, &resp)
"opertype": IF(overwrite, "3", "1"),
}, &resp, isFamily)
if err != nil {
return nil, err
}
@ -708,9 +709,9 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
// Fetch upload slice info
// The HTTP body has a size limit; too many part entries cause an error
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
fullUrl := UPLOAD_URL
if y.isFamily() {
if isFamily {
fullUrl += "/family"
} else {
fullUrl += "/person"
@ -723,7 +724,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string
}, Params{
"uploadFileId": uploadFileId,
"partInfo": strings.Join(partInfo, ","),
}, &uploadUrlsResp)
}, &uploadUrlsResp, isFamily)
if err != nil {
return nil, err
}
@ -752,7 +753,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string
}
// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
@ -763,7 +764,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
}
// Create an upload session
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
if err != nil {
return nil, err
}
@ -780,14 +781,14 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
"Expect": "100-continue",
}
if y.isFamily() {
if isFamily {
header["FamilyId"] = fmt.Sprint(y.FamilyID)
header["UploadFileId"] = fmt.Sprint(status.UploadFileId)
} else {
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
}
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile), isFamily)
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return nil, err
}
@ -802,10 +803,10 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
"uploadFileId": fmt.Sprint(status.UploadFileId),
"resumePolicy": "1",
})
if y.isFamily() {
if isFamily {
req.SetQueryParam("familyId", fmt.Sprint(y.FamilyID))
}
}, &status)
}, &status, isFamily)
if err != nil {
return nil, err
}
@ -815,20 +816,20 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
}
return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId, isFamily, overwrite)
}
// Create an upload session
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string, isFamily bool) (*CreateUploadFileResp, error) {
var uploadInfo CreateUploadFileResp
fullUrl := API_URL + "/createUploadFile.action"
if y.isFamily() {
if isFamily {
fullUrl = API_URL + "/family/file/createFamilyFile.action"
}
_, err := y.post(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
if y.isFamily() {
if isFamily {
req.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"parentId": parentID,
@ -849,7 +850,7 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
"isLog": "0",
})
}
}, &uploadInfo)
}, &uploadInfo, isFamily)
if err != nil {
return nil, err
@ -858,11 +859,11 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
}
// Commit the uploaded file
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64, isFamily bool, overwrite bool) (model.Obj, error) {
var resp OldCommitUploadFileResp
_, err := y.post(fileCommitUrl, func(req *resty.Request) {
req.SetContext(ctx)
if y.isFamily() {
if isFamily {
req.SetHeaders(map[string]string{
"ResumePolicy": "1",
"UploadFileId": fmt.Sprint(uploadFileID),
@ -870,13 +871,13 @@ func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string,
})
} else {
req.SetFormData(map[string]string{
"opertype": "3",
"opertype": IF(overwrite, "3", "1"),
"resumePolicy": "1",
"uploadFileId": fmt.Sprint(uploadFileID),
"isLog": "0",
})
}
}, &resp)
}, &resp, isFamily)
if err != nil {
return nil, err
}
@ -895,10 +896,100 @@ func (y *Cloud189PC) isLogin() bool {
return err == nil
}
// Create the family cloud transfer folders
func (y *Cloud189PC) createFamilyTransferFolder(count int) (*ring.Ring, error) {
folders := ring.New(count)
var rootFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"folderName": "FamilyTransferFolder",
"familyId": y.FamilyID,
})
}, &rootFolder, true)
if err != nil {
return nil, err
}
folderCount := 0
// Fetch existing folders
files, err := y.getFiles(context.TODO(), rootFolder.GetID(), true)
if err != nil {
return nil, err
}
for _, file := range files {
if folder, ok := file.(*Cloud189Folder); ok {
folders.Value = folder
folders = folders.Next()
folderCount++
}
}
// Create new folders
for folderCount < count {
var newFolder Cloud189Folder
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"folderName": uuid.NewString(),
"familyId": y.FamilyID,
"parentId": rootFolder.GetID(),
})
}, &newFolder, true)
if err != nil {
return nil, err
}
folders.Value = &newFolder
folders = folders.Next()
folderCount++
}
return folders, nil
}
// Clean up the transfer folders
func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error {
var tasks []BatchTaskInfo
r := y.familyTransferFolder
for p := r.Next(); p != r; p = p.Next() {
folder := p.Value.(*Cloud189Folder)
files, err := y.getFiles(ctx, folder.GetID(), true)
if err != nil {
return err
}
for _, file := range files {
tasks = append(tasks, BatchTaskInfo{
FileId: file.GetID(),
FileName: file.GetName(),
IsFolder: BoolToNumber(file.IsDir()),
})
}
}
if len(tasks) > 0 {
// Delete
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
if err != nil {
return err
}
// Permanently delete
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
if err != nil {
return err
}
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
return err
}
return nil
}
// Fetch info on all family cloud members
func (y *Cloud189PC) getFamilyInfoList() ([]FamilyInfoResp, error) {
var resp FamilyInfoListResp
_, err := y.get(API_URL+"/family/manage/getFamilyList.action", nil, &resp)
_, err := y.get(API_URL+"/family/manage/getFamilyList.action", nil, &resp, true)
if err != nil {
return nil, err
}
@ -922,6 +1013,73 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
return fmt.Sprint(infos[0].FamilyID), nil
}
// Save a family cloud file to the personal cloud
func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId string, srcObj, dstDir model.Obj, overwrite bool) error {
// _, err := y.post(API_URL+"/family/file/saveFileToMember.action", func(req *resty.Request) {
// req.SetQueryParams(map[string]string{
// "channelId": "home",
// "familyId": familyId,
// "destParentId": destParentId,
// "fileIdList": familyFileId,
// })
// }, nil)
// return err
task := BatchTaskInfo{
FileId: srcObj.GetID(),
FileName: srcObj.GetName(),
IsFolder: BoolToNumber(srcObj.IsDir()),
}
resp, err := y.CreateBatchTask("COPY", familyId, dstDir.GetID(), map[string]string{
"groupId": "null",
"copyType": "2",
"shareId": "null",
}, task)
if err != nil {
return err
}
for {
state, err := y.CheckBatchTask("COPY", resp.TaskID)
if err != nil {
return err
}
switch state.TaskStatus {
case 2:
task.DealWay = IF(overwrite, 3, 2)
// Overwrite the file on conflict
if err := y.ManageBatchTask("COPY", resp.TaskID, dstDir.GetID(), task); err != nil {
return err
}
case 4:
return nil
}
time.Sleep(time.Millisecond * 400)
}
}
func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetFormData(map[string]string{
"type": aType,
"taskInfos": MustString(utils.Json.MarshalToString(taskInfos)),
})
if targetFolderId != "" {
req.SetFormData(map[string]string{"targetFolderId": targetFolderId})
}
if familyID != "" {
req.SetFormData(map[string]string{"familyId": familyID})
}
req.SetFormData(other)
}, &resp, familyID != "")
if err != nil {
return nil, err
}
return &resp, nil
}
// Check the task status
func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
var resp BatchTaskStateResp
_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
@ -936,6 +1094,37 @@ func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStat
return &resp, nil
}
// Fetch info about the conflicting task
func (y *Cloud189PC) GetConflictTaskInfo(aType string, taskID string) (*BatchTaskConflictTaskInfoResp, error) {
var resp BatchTaskConflictTaskInfoResp
_, err := y.post(API_URL+"/batch/getConflictTaskInfo.action", func(req *resty.Request) {
req.SetFormData(map[string]string{
"type": aType,
"taskId": taskID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
// Resolve conflicts
func (y *Cloud189PC) ManageBatchTask(aType string, taskID string, targetFolderId string, taskInfos ...BatchTaskInfo) error {
_, err := y.post(API_URL+"/batch/manageBatchTask.action", func(req *resty.Request) {
req.SetFormData(map[string]string{
"targetFolderId": targetFolderId,
"type": aType,
"taskId": taskID,
"taskInfos": MustString(utils.Json.MarshalToString(taskInfos)),
})
}, nil)
return err
}
var ErrIsConflict = errors.New("there is a conflict with the target object")
// Wait for the task to finish
func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
for {
state, err := y.CheckBatchTask(aType, taskID)
@ -944,7 +1133,7 @@ func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration)
}
switch state.TaskStatus {
case 2:
return errors.New("there is a conflict with the target object")
return ErrIsConflict
case 4:
return nil
}

View File

@ -194,7 +194,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
}
if d.RapidUpload {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
io.CopyN(buf, file, 1024)
utils.CopyWithBufferN(buf, file, 1024)
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
if localFile != nil {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {

View File

@ -136,7 +136,7 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
if err != nil {
return "", err
}
_, err = io.CopyN(buf, reader, length)
_, err = utils.CopyWithBufferN(buf, reader, length)
if err != nil {
return "", err
}
@ -164,7 +164,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
createData["part_info_list"] = makePartInfos(count)
// rapid upload
rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
rapidUpload := !stream.IsForceStreamUpload() && stream.GetSize() > 100*utils.KB && d.RapidUpload
if rapidUpload {
log.Debugf("[aliyundrive_open] start cal pre_hash")
// read 1024 bytes to calculate pre hash
@ -242,13 +242,16 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if rapidUpload {
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
rd = utils.NewMultiReadable(srd)
}
err = retry.Do(func() error {
//rd.Reset()
rd.Reset()
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
},
retry.Attempts(3),
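The rewritten loop above gives every part a resettable reader (`utils.NewMultiReadable`) and calls `rd.Reset()` at the top of each retry, so a failed `uploadPart` re-sends the part from its beginning. A hedged sketch of the same retry-with-rewind idea, using `bytes.Reader` in place of alist's `MultiReadable`; the `upload` callback and payload are invented:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/avast/retry-go"
)

func uploadPartWithRetry(data []byte, upload func(io.Reader) error) error {
	rd := bytes.NewReader(data)
	return retry.Do(
		func() error {
			// rewind before every attempt, mirroring rd.Reset() in the diff
			if _, err := rd.Seek(0, io.SeekStart); err != nil {
				return err
			}
			return upload(rd)
		},
		retry.Attempts(3),
	)
}

func main() {
	attempts := 0
	err := uploadPartWithRetry([]byte("part payload"), func(r io.Reader) error {
		attempts++
		if attempts == 1 {
			return errors.New("transient failure") // first attempt fails
		}
		n, _ := io.Copy(io.Discard, r)
		fmt.Printf("uploaded %d bytes on attempt %d\n", n, attempts)
		return nil
	})
	fmt.Println("err:", err)
}
```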

View File

@ -165,9 +165,16 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
if err != nil {
return nil, err
}
// Fix the timestamps; see the **Note** on the Put method for the reason
newFile.Ctime = stream.CreateTime().Unix()
newFile.Mtime = stream.ModTime().Unix()
return fileToObj(newFile), nil
}
// Put
//
// **Note**: as of 2024/04/20 the Baidu Netdisk API always returns the current time rather than the file time.
// The drive itself does store the file time, so the times are overwritten here to keep the cache consistent with the drive.
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// rapid upload
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
@ -204,7 +211,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
if i == count {
byteSize = lastBlockSize
}
_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
if err != nil && err != io.EOF {
return nil, err
}
@ -245,9 +252,9 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
log.Debugf("%+v", precreateResp)
if precreateResp.ReturnType == 2 {
// rapid upload, since we got an MD5 match from the Baidu server
if err != nil {
return nil, err
}
// Fix the timestamps; see the **Note** on the Put method for the reason
precreateResp.File.Ctime = ctime
precreateResp.File.Mtime = mtime
return fileToObj(precreateResp.File), nil
}
}
@ -298,6 +305,9 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
if err != nil {
return nil, err
}
// Fix the timestamps; see the **Note** on the Put method for the reason
newFile.Ctime = ctime
newFile.Mtime = mtime
return fileToObj(newFile), nil
}

View File

@ -8,15 +8,16 @@ import (
type Addition struct {
RefreshToken string `json:"refresh_token" required:"true"`
driver.RootPath
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" default:"0" help:"0 for auto"`
}
var config = driver.Config{

View File

@ -249,6 +249,9 @@ const (
)
func (d *BaiduNetdisk) getSliceSize() int64 {
if d.CustomUploadPartSize != 0 {
return d.CustomUploadPartSize
}
switch d.vipType {
case 1:
return VipSliceSize
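With the new `custom_upload_part_size` option (see the meta.go hunk above), a non-zero value bypasses the tier-based defaults entirely. A compact sketch of the resulting selection order — the constant values and the exact tier mapping are assumptions; only the override-first behavior comes from the diff:

```go
package main

import "fmt"

const (
	DefaultSliceSize int64 = 4 << 20  // assumed non-VIP part size (4 MiB)
	VipSliceSize     int64 = 16 << 20 // assumed VIP part size (16 MiB)
)

// sliceSize mirrors getSliceSize: a user override wins, otherwise the
// part size falls back to the account tier.
func sliceSize(custom int64, vipType int) int64 {
	if custom != 0 {
		return custom // custom_upload_part_size
	}
	if vipType >= 1 {
		return VipSliceSize
	}
	return DefaultSliceSize
}

func main() {
	fmt.Println(sliceSize(0, 0))     // 4194304: tier default
	fmt.Println(sliceSize(8<<20, 1)) // 8388608: the override wins
}
```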

View File

@ -261,7 +261,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if i == count {
byteSize = lastBlockSize
}
_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
if err != nil && err != io.EOF {
return nil, err
}

View File

@ -229,7 +229,7 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
if err != nil {
return err
}
_, err = io.Copy(filePart, stream)
_, err = utils.CopyWithBuffer(filePart, stream)
if err != nil {
return err
}

View File

@ -71,6 +71,9 @@ func (d *Cloudreve) Link(ctx context.Context, file model.Obj, args model.LinkArg
if err != nil {
return nil, err
}
if strings.HasPrefix(dUrl, "/api") {
dUrl = d.Address + dUrl
}
return &model.Link{
URL: dUrl,
}, nil

View File

@ -3,7 +3,6 @@ package crypt
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
stdpath "path"
"regexp"
@ -14,6 +13,7 @@ import (
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
@ -160,7 +160,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
// discarding hash as it's encrypted
}
if d.Thumbnail && thumb == "" {
thumb = utils.EncodePath(common.GetApiUrl(nil) + stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
thumb = utils.EncodePath(common.GetApiUrl(nil)+stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
}
if !ok && !d.Thumbnail {
result = append(result, &objRes)
@ -389,10 +389,11 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
Modified: streamer.ModTime(),
IsFolder: streamer.IsDir(),
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
Exist: streamer.GetExist(),
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
ForceStreamUpload: true,
Exist: streamer.GetExist(),
}
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
if err != nil {

View File

@ -271,7 +271,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
defer func() {
_ = tempFile.Close()
}()
if _, err = io.Copy(h, tempFile); err != nil {
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
return nil, err
}
_, err = tempFile.Seek(0, io.SeekStart)

View File

@ -206,7 +206,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
return err
}
h := md5.New()
_, err = io.Copy(h, tempFile)
_, err = utils.CopyWithBuffer(h, tempFile)
if err != nil {
return err
}

View File

@ -118,6 +118,7 @@ func (d *Onedrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName str
"folder": base.Json{},
"@microsoft.graph.conflictBehavior": "rename",
}
// TODO: fix folder ctime/mtime; OneDrive accepts a fileSystemInfo field in the data, but this endpoint does not expose ctime/mtime
_, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
}, nil)

View File

@ -24,12 +24,12 @@ type RespErr struct {
}
type File struct {
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
LastModifiedDateTime time.Time `json:"lastModifiedDateTime"`
Url string `json:"@microsoft.graph.downloadUrl"`
File *struct {
Id string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"`
Url string `json:"@microsoft.graph.downloadUrl"`
File *struct {
MimeType string `json:"mimeType"`
} `json:"file"`
Thumbnails []struct {
@ -58,7 +58,7 @@ func fileToObj(f File, parentID string) *Object {
ID: f.Id,
Name: f.Name,
Size: f.Size,
Modified: f.LastModifiedDateTime,
Modified: f.FileSystemInfo.LastModifiedDateTime,
IsFolder: f.File == nil,
},
Thumbnail: model.Thumbnail{Thumbnail: thumb},
@ -72,3 +72,20 @@ type Files struct {
Value []File `json:"value"`
NextLink string `json:"@odata.nextLink"`
}
// Metadata represents a request to update Metadata.
// It includes only the writeable properties.
// omitempty is intentionally included for all, per https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update?view=odsp-graph-online#request-body
type Metadata struct {
Description string `json:"description,omitempty"` // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal. Undocumented limit of 1024 characters.
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
}
// FileSystemInfoFacet contains properties that are reported by the
// device's local file system for the local version of an item. This
// facet can be used to specify the last modified date or created date
// of the item as it was on the local device.
type FileSystemInfoFacet struct {
CreatedDateTime time.Time `json:"createdDateTime,omitempty"` // The UTC date and time the file was created on a client.
LastModifiedDateTime time.Time `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
}

View File

@ -127,7 +127,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
func (d *Onedrive) getFiles(path string) ([]File, error) {
var res []File
nextLink := d.GetMetaUrl(false, path) + "/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,lastModifiedDateTime,content.downloadUrl,file,parentReference"
nextLink := d.GetMetaUrl(false, path) + "/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference"
for nextLink != "" {
var files Files
_, err := d.Request(nextLink, http.MethodGet, nil, &files)
@ -148,7 +148,10 @@ func (d *Onedrive) GetFile(path string) (*File, error) {
}
func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) error {
url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/content"
filepath := stdpath.Join(dstDir.GetPath(), stream.GetName())
// 1. upload new file
// ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content?view=odsp-graph-online
url := d.GetMetaUrl(false, filepath) + "/content"
data, err := io.ReadAll(stream)
if err != nil {
return err
@ -156,12 +159,50 @@ func (d *Onedrive) upSmall(ctx context.Context, dstDir model.Obj, stream model.F
_, err = d.Request(url, http.MethodPut, func(req *resty.Request) {
req.SetBody(data).SetContext(ctx)
}, nil)
if err != nil {
return fmt.Errorf("onedrive: Failed to upload new file(path=%v): %w", filepath, err)
}
// 2. update metadata
err = d.updateMetadata(ctx, stream, filepath)
if err != nil {
return fmt.Errorf("onedrive: Failed to update file(path=%v) metadata: %w", filepath, err)
}
return nil
}
func (d *Onedrive) updateMetadata(ctx context.Context, stream model.FileStreamer, filepath string) error {
url := d.GetMetaUrl(false, filepath)
metadata := toAPIMetadata(stream)
// ApiDoc: https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update?view=odsp-graph-online
_, err := d.Request(url, http.MethodPatch, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
return err
}
func toAPIMetadata(stream model.FileStreamer) Metadata {
metadata := Metadata{
FileSystemInfo: &FileSystemInfoFacet{},
}
if !stream.ModTime().IsZero() {
metadata.FileSystemInfo.LastModifiedDateTime = stream.ModTime()
}
if !stream.CreateTime().IsZero() {
metadata.FileSystemInfo.CreatedDateTime = stream.CreateTime()
}
if stream.CreateTime().IsZero() && !stream.ModTime().IsZero() {
	// fall back to ModTime when the stream has no CreateTime
	metadata.FileSystemInfo.CreatedDateTime = stream.ModTime()
}
return metadata
}
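One encoding/json subtlety makes the fallback above matter: omitempty never treats a struct as empty, so a zero time.Time would still be serialized as 0001-01-01T00:00:00Z rather than dropped. Copying ModTime into CreatedDateTime when CreateTime is unknown avoids sending that bogus creation time whenever a usable ModTime exists. A sketch with a hypothetical stream s that reports only a ModTime:

m := toAPIMetadata(s) // s.CreateTime().IsZero() == true
// m.FileSystemInfo.LastModifiedDateTime == s.ModTime()
// m.FileSystemInfo.CreatedDateTime      == s.ModTime() (fallback)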
func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
url := d.GetMetaUrl(false, stdpath.Join(dstDir.GetPath(), stream.GetName())) + "/createUploadSession"
res, err := d.Request(url, http.MethodPost, nil, nil)
metadata := map[string]interface{}{"item": toAPIMetadata(stream)}
res, err := d.Request(url, http.MethodPost, func(req *resty.Request) {
req.SetBody(metadata).SetContext(ctx)
}, nil)
if err != nil {
return err
}

View File

@ -4,6 +4,7 @@ import (
"crypto/sha1"
"encoding/hex"
"errors"
"github.com/alist-org/alist/v3/pkg/utils"
"io"
"net/http"
@ -141,7 +142,7 @@ func getGcid(r io.Reader, size int64) (string, error) {
readSize := calcBlockSize(size)
for {
hash2.Reset()
if n, err := io.CopyN(hash2, r, readSize); err != nil && n == 0 {
if n, err := utils.CopyWithBufferN(hash2, r, readSize); err != nil && n == 0 {
if err != io.EOF {
return "", err
}

View File

@ -143,7 +143,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
_ = tempFile.Close()
}()
m := md5.New()
_, err = io.Copy(m, tempFile)
_, err = utils.CopyWithBuffer(m, tempFile)
if err != nil {
return err
}
@ -153,7 +153,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
}
md5Str := hex.EncodeToString(m.Sum(nil))
s := sha1.New()
_, err = io.Copy(s, tempFile)
_, err = utils.CopyWithBuffer(s, tempFile)
if err != nil {
return err
}

View File

@ -17,6 +17,7 @@ type TmpTokenResponse struct {
}
type TmpTokenResponseData struct {
Credentials Credentials `json:"Credentials"`
ExpiredAt int `json:"ExpiredAt"`
}
type Credentials struct {
AccessKeyId string `json:"accessKeyId,omitempty"`

View File

@ -11,6 +11,7 @@ import (
"time"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
@ -28,6 +29,7 @@ type S3 struct {
linkClient *s3.S3
config driver.Config
cron *cron.Cron
}
func (d *S3) Config() driver.Config {
@ -42,6 +44,18 @@ func (d *S3) Init(ctx context.Context) error {
if d.Region == "" {
d.Region = "alist"
}
if d.config.Name == "Doge" {
// Doge Cloud temporary credentials are only valid for 2 hours, so regenerate them every 118 minutes
d.cron = cron.NewCron(time.Minute * 118)
d.cron.Do(func() {
err := d.initSession()
if err != nil {
log.Errorln("Doge init session error:", err)
}
d.client = d.getClient(false)
d.linkClient = d.getClient(true)
})
}
err := d.initSession()
if err != nil {
return err
@ -52,6 +66,9 @@ func (d *S3) Init(ctx context.Context) error {
}
func (d *S3) Drop(ctx context.Context) error {
if d.cron != nil {
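// stop the Doge credential-refresh cron so its goroutine does not outlive the storage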
d.cron.Stop()
}
return nil
}

View File

@ -9,8 +9,9 @@ type Addition struct {
driver.RootPath
Address string `json:"address" required:"true"`
UserName string `json:"username" required:"true"`
Password string `json:"password" required:"true"`
UserName string `json:"username" required:"false"`
Password string `json:"password" required:"false"`
Token string `json:"token" required:"false"`
RepoId string `json:"repoId" required:"false"`
RepoPwd string `json:"repoPwd" required:"false"`
}

View File

@ -14,6 +14,10 @@ import (
)
func (d *Seafile) getToken() error {
if d.Token != "" {
d.authorization = fmt.Sprintf("Token %s", d.Token)
return nil
}
var authResp AuthTokenResp
res, err := base.RestyClient.R().
SetResult(&authResp).

View File

@ -1,7 +1,7 @@
package smb
import (
"io"
"github.com/alist-org/alist/v3/pkg/utils"
"io/fs"
"net"
"os"
@ -74,7 +74,7 @@ func (d *SMB) CopyFile(src, dst string) error {
}
defer dstfd.Close()
if _, err = io.Copy(dstfd, srcfd); err != nil {
if _, err = utils.CopyWithBuffer(dstfd, srcfd); err != nil {
return err
}
if srcinfo, err = d.fs.Stat(src); err != nil {

View File

@ -190,7 +190,7 @@ func getGcid(r io.Reader, size int64) (string, error) {
readSize := calcBlockSize(size)
for {
hash2.Reset()
if n, err := io.CopyN(hash2, r, readSize); err != nil && n == 0 {
if n, err := utils.CopyWithBufferN(hash2, r, readSize); err != nil && n == 0 {
if err != io.EOF {
return "", err
}

View File

@ -1,6 +1,7 @@
package authn
import (
"fmt"
"net/http"
"net/url"
@ -19,7 +20,7 @@ func NewAuthnInstance(r *http.Request) (*webauthn.WebAuthn, error) {
RPDisplayName: setting.GetStr(conf.SiteTitle),
RPID: siteUrl.Hostname(),
//RPOrigin: siteUrl.String(),
RPOrigins: []string{siteUrl.String()},
RPOrigins: []string{fmt.Sprintf("%s://%s", siteUrl.Scheme, siteUrl.Host)},
// RPOrigin: "http://localhost:5173"
})
}
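The distinction only shows when AList is served from a subfolder (hypothetical URL below; assumes fmt and net/url are imported): siteUrl.String() keeps the path, which never matches the origin the browser reports, while scheme://host does:

u, _ := url.Parse("https://pan.example.com/alist") // hypothetical site_url
_ = u.String()                                // "https://pan.example.com/alist" — rejected as a WebAuthn origin
_ = fmt.Sprintf("%s://%s", u.Scheme, u.Host)  // "https://pan.example.com" — matches what browsers send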

View File

@ -21,8 +21,8 @@ func LoadStorages() {
if err != nil {
utils.Log.Errorf("failed get enabled storages: %+v", err)
} else {
utils.Log.Infof("success load storage: [%s], driver: [%s]",
storages[i].MountPath, storages[i].Driver)
utils.Log.Infof("success load storage: [%s], driver: [%s], order: [%d]",
storages[i].MountPath, storages[i].Driver, storages[i].Order)
}
}
conf.StoragesLoaded = true

View File

@ -77,7 +77,7 @@ type Config struct {
JwtSecret string `json:"jwt_secret" env:"JWT_SECRET"`
TokenExpiresIn int `json:"token_expires_in" env:"TOKEN_EXPIRES_IN"`
Database Database `json:"database" envPrefix:"DB_"`
Meilisearch Meilisearch `json:"meilisearch" env:"MEILISEARCH"`
Meilisearch Meilisearch `json:"meilisearch" envPrefix:"MEILISEARCH_"`
Scheme Scheme `json:"scheme"`
TempDir string `json:"temp_dir" env:"TEMP_DIR"`
BleveDir string `json:"bleve_dir" env:"BLEVE_DIR"`
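With envPrefix, each field of the nested Meilisearch struct resolves under the MEILISEARCH_ prefix instead of one flat MEILISEARCH variable. The field below is an assumption for illustration, not the actual struct definition:

// hypothetical sketch of the nested struct
type Meilisearch struct {
	Host string `json:"host" env:"HOST"` // read from MEILISEARCH_HOST via envPrefix
}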

View File

@ -2,6 +2,7 @@ package db
import (
"fmt"
"sort"
"github.com/alist-org/alist/v3/internal/model"
"github.com/pkg/errors"
@ -65,5 +66,8 @@ func GetEnabledStorages() ([]model.Storage, error) {
if err := db.Where(fmt.Sprintf("%s = ?", columnName("disabled")), false).Find(&storages).Error; err != nil {
return nil, errors.WithStack(err)
}
sort.Slice(storages, func(i, j int) bool {
return storages[i].Order < storages[j].Order
})
return storages, nil
}

View File

@ -41,6 +41,7 @@ type FileStreamer interface {
GetMimetype() string
//SetReader(io.Reader)
NeedStore() bool
IsForceStreamUpload() bool
GetExist() Obj
SetExist(Obj)
//for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullInTempFile still works

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"io"
"math"
"net/http"
@ -271,7 +272,7 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
}
}
n, err := io.Copy(ch.buf, resp.Body)
n, err := utils.CopyWithBuffer(ch.buf, resp.Body)
if err != nil {
return n, &errReadingBody{err: err}

View File

@ -162,7 +162,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
pw.CloseWithError(err)
return
}
if _, err := io.CopyN(part, reader, ra.Length); err != nil {
if _, err := utils.CopyWithBufferN(part, reader, ra.Length); err != nil {
pw.CloseWithError(err)
return
}
@ -182,7 +182,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
w.WriteHeader(code)
if r.Method != "HEAD" {
written, err := io.CopyN(w, sendContent, sendSize)
written, err := utils.CopyWithBufferN(w, sendContent, sendSize)
if err != nil {
log.Warnf("ServeHttp error. err: %s ", err)
if written != sendSize {

View File

@ -2,6 +2,7 @@ package net
import (
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"io"
"math"
"mime/multipart"
@ -327,10 +328,10 @@ func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.Rea
length_int = int(length)
if offset > 100*1024*1024 {
log.Warnf("offset is more than 100MB, if loading data from internet, high-latency and wasting of bandwith is expected")
log.Warnf("offset is more than 100MB, if loading data from internet, high-latency and wasting of bandwidth is expected")
}
if _, err := io.Copy(io.Discard, io.LimitReader(readCloser, offset)); err != nil {
if _, err := utils.CopyWithBuffer(io.Discard, io.LimitReader(readCloser, offset)); err != nil {
return nil, err
}

View File

@ -18,9 +18,10 @@ type FileStream struct {
Ctx context.Context
model.Obj
io.Reader
Mimetype string
WebPutAsTask bool
Exist model.Obj //the file already existing at the destination; we can reuse some info since we will overwrite it
Mimetype string
WebPutAsTask bool
ForceStreamUpload bool
Exist model.Obj //the file already existing at the destination; we can reuse some info since we will overwrite it
utils.Closers
tmpFile *os.File //if present, tmpFile has full content, it will be deleted at last
peekBuff *bytes.Reader
@ -43,6 +44,11 @@ func (f *FileStream) GetMimetype() string {
func (f *FileStream) NeedStore() bool {
return f.WebPutAsTask
}
func (f *FileStream) IsForceStreamUpload() bool {
return f.ForceStreamUpload
}
func (f *FileStream) Close() error {
var err1, err2 error
err1 = f.Closers.Close()
@ -98,7 +104,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
bufSize := utils.Min(httpRange.Length, f.GetSize())
newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
n, err := io.CopyN(newBuf, f.Reader, bufSize)
n, err := utils.CopyWithBufferN(newBuf, f.Reader, bufSize)
if err != nil {
return nil, err
}

View File

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"io"
"net/http"
"net/url"
@ -419,7 +420,7 @@ func (c *Client) ReadStreamRange(path string, offset, length int64) (io.ReadClos
// stream in rs.Body
if rs.StatusCode == 200 {
// discard first 'offset' bytes.
if _, err := io.Copy(io.Discard, io.LimitReader(rs.Body, offset)); err != nil {
if _, err := utils.CopyWithBuffer(io.Discard, io.LimitReader(rs.Body, offset)); err != nil {
return nil, newPathErrorErr("ReadStreamRange", path, err)
}

View File

@ -32,7 +32,7 @@ func CopyFile(src, dst string) error {
}
defer dstfd.Close()
if _, err = io.Copy(dstfd, srcfd); err != nil {
if _, err = CopyWithBuffer(dstfd, srcfd); err != nil {
return err
}
if srcinfo, err = os.Stat(src); err != nil {
@ -121,7 +121,7 @@ func CreateTempFile(r io.Reader, size int64) (*os.File, error) {
if err != nil {
return nil, err
}
readBytes, err := io.Copy(f, r)
readBytes, err := CopyWithBuffer(f, r)
if err != nil {
_ = os.Remove(f.Name())
return nil, errs.NewErr(err, "CreateTempFile failed")

View File

@ -96,7 +96,7 @@ func HashData(hashType *HashType, data []byte, params ...any) string {
// HashReader get hash of one hashType from a reader
func HashReader(hashType *HashType, reader io.Reader, params ...any) (string, error) {
h := hashType.NewFunc(params...)
_, err := io.Copy(h, reader)
_, err := CopyWithBuffer(h, reader)
if err != nil {
return "", errs.NewErr(err, "HashReader error")
}

View File

@ -4,7 +4,6 @@ import (
"bytes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"io"
"testing"
)
@ -36,7 +35,7 @@ var hashTestSet = []hashTest{
func TestMultiHasher(t *testing.T) {
for _, test := range hashTestSet {
mh := NewMultiHasher([]*HashType{MD5, SHA1, SHA256})
n, err := io.Copy(mh, bytes.NewBuffer(test.input))
n, err := CopyWithBuffer(mh, bytes.NewBuffer(test.input))
require.NoError(t, err)
assert.Len(t, test.input, int(n))
hashInfo := mh.GetHashInfo()

View File

@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"sync"
"time"
"golang.org/x/exp/constraints"
@ -29,7 +30,7 @@ func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, p
// possible in the call process.
var finish int64 = 0
s := size / 100
_, err := io.Copy(out, readerFunc(func(p []byte) (int, error) {
_, err := CopyWithBuffer(out, readerFunc(func(p []byte) (int, error) {
// golang non-blocking channel: https://gobyexample.com/non-blocking-channel-operations
select {
// if context has been canceled
@ -204,3 +205,31 @@ func Max[T constraints.Ordered](a, b T) T {
}
return a
}
var IoBuffPool = &sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024*2) // twice the 32 KiB default buffer size io.Copy uses
},
}
func CopyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
buff := IoBuffPool.Get().([]byte)
defer IoBuffPool.Put(buff)
written, err = io.CopyBuffer(dst, src, buff)
if err != nil {
return
}
return written, nil
}
func CopyWithBufferN(dst io.Writer, src io.Reader, n int64) (written int64, err error) {
written, err = CopyWithBuffer(dst, io.LimitReader(src, n))
if written == n {
return n, nil
}
if written < n && err == nil {
// src stopped early; must have been EOF.
err = io.EOF
}
return
}
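A minimal usage sketch (function name and imports of crypto/sha256, encoding/hex, and io are assumptions): callers share the pooled 64 KiB buffers instead of io.Copy allocating 32 KiB per call. Note that io.CopyBuffer still bypasses the buffer when src implements io.WriterTo or dst implements io.ReaderFrom, so the pool only pays off on plain reader/writer pairs.

// hashFirstN hashes up to n bytes of r using the pooled buffer (sketch).
func hashFirstN(r io.Reader, n int64) (string, error) {
	h := sha256.New()
	if _, err := CopyWithBufferN(h, r, n); err != nil && err != io.EOF {
		return "", err // io.EOF just means r was shorter than n
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}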

View File

@ -37,3 +37,28 @@ func NewDebounce2(interval time.Duration, f func()) func() {
(*time.Timer)(timer).Reset(interval)
}
}
func NewThrottle(interval time.Duration) func(func()) {
var lastCall time.Time
return func(fn func()) {
now := time.Now()
if now.Sub(lastCall) < interval {
return
}
time.AfterFunc(interval, fn)
lastCall = now
}
}
func NewThrottle2(interval time.Duration, fn func()) func() {
var lastCall time.Time
return func() {
now := time.Now()
if now.Sub(lastCall) < interval {
return
}
time.AfterFunc(interval, fn)
lastCall = now
}
}
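A usage sketch (persist, process, chunks, and the interval are hypothetical): within any interval-long window only the first call is accepted, and the accepted callback fires interval later via time.AfterFunc, so this coalesces bursts rather than running the callback immediately:

save := NewThrottle2(5*time.Second, persist)
for chunk := range chunks {
	process(chunk)
	save() // cheap to call every iteration; calls inside the 5s window are dropped
}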

View File

@ -398,7 +398,7 @@ func SSOLoginCallback(c *gin.Context) {
}
userID := utils.Json.Get(resp.Body(), idField).ToString()
if utils.SliceContains([]string{"", "0"}, userID) {
common.ErrorResp(c, errors.New("error occured"), 400)
common.ErrorResp(c, errors.New("error occurred"), 400)
return
}
if argument == "get_sso_id" {

View File

@ -34,12 +34,6 @@ func S3(g *gin.RouterGroup) {
}
func S3Server(g *gin.RouterGroup) {
if !conf.Conf.S3.Enable {
g.Any("/*path", func(c *gin.Context) {
common.ErrorStrResp(c, "S3 server is not enabled", 403)
})
return
}
h, _ := s3.NewServer(context.Background())
g.Any("/*path", gin.WrapH(h))
}

View File

@ -8,16 +8,21 @@ import (
)
func (h *Handler) getModTime(r *http.Request) time.Time {
return h.getHeaderTime(r, "X-OC-Mtime")
return h.getHeaderTime(r, "X-OC-Mtime", "")
}
// owncloud/ nextcloud haven't impl this, but we can add the support since rclone may support this soon
// ownCloud/Nextcloud haven't implemented this, but we can add support since rclone may support it soon.
// Falls back to ModTime when CreateTime is not found in the header.
func (h *Handler) getCreateTime(r *http.Request) time.Time {
return h.getHeaderTime(r, "X-OC-Ctime")
return h.getHeaderTime(r, "X-OC-Ctime", "X-OC-Mtime")
}
func (h *Handler) getHeaderTime(r *http.Request, header string) time.Time {
func (h *Handler) getHeaderTime(r *http.Request, header, alternative string) time.Time {
hVal := r.Header.Get(header)
// try alternative
if hVal == "" && alternative != "" {
hVal = r.Header.Get(alternative)
}
if hVal != "" {
modTimeUnix, err := strconv.ParseInt(hVal, 10, 64)
if err == nil {

View File

@ -331,21 +331,21 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int,
Modified: h.getModTime(r),
Ctime: h.getCreateTime(r),
}
stream := &stream.FileStream{
fsStream := &stream.FileStream{
Obj: &obj,
Reader: r.Body,
Mimetype: r.Header.Get("Content-Type"),
}
if stream.Mimetype == "" {
stream.Mimetype = utils.GetMimeType(reqPath)
if fsStream.Mimetype == "" {
fsStream.Mimetype = utils.GetMimeType(reqPath)
}
err = fs.PutDirectly(ctx, path.Dir(reqPath), stream)
err = fs.PutDirectly(ctx, path.Dir(reqPath), fsStream)
if errs.IsNotFoundError(err) {
return http.StatusNotFound, err
}
_ = r.Body.Close()
_ = stream.Close()
_ = fsStream.Close()
// TODO(rost): Returning 405 Method Not Allowed might not be appropriate.
if err != nil {
return http.StatusMethodNotAllowed, err