Compare commits

...

23 Commits

Author SHA1 Message Date
28d2367a87 fix(ci): no space left on device 2024-11-17 22:24:06 +08:00
a4ad98ee3e fix(pikpak): domain block and change to NET (#7350) 2024-11-17 20:03:04 +08:00
1c01dc6839 fix(storage): delete storage fails if a panic occurred during initialization (#7501)
* fix(storage): store storages map when init storage panic

* fix(drivers): add nil check to drop method
2024-11-16 13:20:49 +08:00
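A minimal sketch of the defensive pattern this fix introduces in a driver's Drop method (the ChaoXing diff later in this compare view shows the real change); the driver type and the cron import below are placeholders for illustration only:

package somedriver

import (
	"context"

	"github.com/robfig/cron/v3" // placeholder; AList drivers use their own cron helper
)

type SomeDriver struct {
	cron *cron.Cron // may still be nil if Init panicked before creating it
}

// Drop releases driver resources. Guarding the Stop call keeps storage
// deletion from panicking a second time after a failed initialization.
func (d *SomeDriver) Drop(ctx context.Context) error {
	if d.cron != nil {
		d.cron.Stop()
	}
	return nil
}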
c3c5843dce fix(terabox): panic due to slice out of range (#7499 close #7487) 2024-11-16 13:19:59 +08:00
6c38c5972d fix(terabox): big file upload issue (#7498 close #7490) 2024-11-16 13:18:49 +08:00
0a46979c51 feat(115): enhance cache (#7479) 2024-11-08 22:08:50 +08:00
67c93eed2b feat(baidu_netdisk,baidu_photo): add and fix hashinfo (#7469) 2024-11-08 22:08:25 +08:00
f58de9923a refactor(aliyunopen,config): Modify default properties (#7476) 2024-11-08 22:07:35 +08:00
2671c876f1 revert: "fix(115): enforce 20GB file size limit on uploadev"
This reverts commit 216e3909f3.
2024-11-02 21:08:19 +08:00
e707fa38f1 ci: remove specific tag for freebsd action 2024-11-02 17:05:00 +08:00
b803b0070e fix(115): 20GB file upload restriction (#7452)
* fix(115): multipart upload error

* feat(115): Modify default page size

* fix(115): Replace temporary repair scheme
2024-11-02 16:41:33 +08:00
64ceb5afb6 feat: support general users view and cancel own tasks (#7416 close #7398)
* feat: support general users view and cancel own tasks

Add a creator attribute to the upload, copy and offline download
tasks, so that a GENERAL task creator can view and cancel them.

BREAKING CHANGE:

1. A new internal package `task` is created, containing the struct
   `TaskWithCreator` which embeds `tache.Base`; all previous dependencies on
   `tache.Task` are transferred to this package.
2. The API `/admin/task` can now also be accessed via `/task`, and the
   old endpoint is retained to ensure compatibility with legacy
   automation scripts.

Closes #7398

* fix(deps): update github.com/xhofe/tache to v0.1.3
2024-11-01 23:32:26 +08:00
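A rough sketch of the struct described in the breaking-change note above; the exact field name and type are not given in this compare view, so the Creator field below is an assumption for illustration:

package task

import (
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/xhofe/tache"
)

// TaskWithCreator embeds tache.Base and records which user created the task,
// so a general (non-admin) user can list and cancel only their own tasks.
type TaskWithCreator struct {
	tache.Base
	Creator model.User // assumed field; the commit only states that a creator attribute is added
}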
10c7ebb1c0 fix(local): cross-device file move (#7430) 2024-11-01 23:31:33 +08:00
d0cda62703 ci: add freebsd release build (#7344) 2024-11-01 21:37:53 +08:00
ce0b99a510 fix(cloudreve): path not exist when moving/copying files (#7432)
Co-authored-by: 马建军 <1432318228@qq.com>
2024-11-01 21:12:29 +08:00
Mmx
34a148c83d feat(local): thumbnail token bucket smooth migration (#7425)
* feat(local): allow to migrate static token buckets

* improve(local): token bucket migration boundary handling
2024-11-01 20:58:53 +08:00
Mmx
4955d8cec8 ci(docker): support riscv64 and ppc64le (#7426)
* ci(docker): bump cache key of musl library

* build(docker): add new arches to build script

* ci(docker): add new arches to buildx platforms
2024-11-01 20:53:53 +08:00
216e3909f3 fix(115): enforce 20GB file size limit on uploadev (#7447 close #7413)
- Introduce a file size restriction to handle uploads more securely.
- Provide an informative error for uploads that exceed the new limit.
2024-11-01 20:52:19 +08:00
a701432b8b ci: add freebsd to beta release 2024-10-21 00:05:56 +08:00
a2dc45a80b fix(ilanzou): fix upload failure for small files (#7368 close #7250) 2024-10-20 23:53:56 +08:00
48ac23c8de fix(ilanzou): fix infinite loop when getting file list (#7366 close #7357) 2024-10-20 23:53:40 +08:00
2830575490 perf(123pan): change domain of login (#7325)
* Update driver.go

* 1

* Update util.go

* New 123 login API

* Revert "Update util.go"

This reverts commit a13a58f8a86c7c36d4fd7d91137229a7667f1fb5.

* Update driver.go

* Update util.go

* Update util.go
2024-10-15 19:45:30 +08:00
e8538bd215 feat: add febbox driver (#7304 close #7293) 2024-10-14 22:44:20 +08:00
54 changed files with 1470 additions and 368 deletions

View File

@ -8,6 +8,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions:
contents: write
jobs:
changelog:
strategy:
@ -54,7 +57,7 @@ jobs:
strategy:
matrix:
include:
- target: '!(*musl*|*windows-arm64*|*android*)' # xgo
- target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo
hash: "md5"
- target: 'linux-!(arm*)-musl*' #musl-not-arm
hash: "md5-linux-musl"
@ -64,6 +67,9 @@ jobs:
hash: "md5-windows-arm64"
- target: 'android-*' #android
hash: "md5-android"
- target: 'freebsd-*' #freebsd
hash: "md5-freebsd"
name: Beta Release
runs-on: ubuntu-latest
steps:

View File

@ -53,7 +53,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
key: docker-musl-libs-v2
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
@ -84,7 +84,7 @@ jobs:
push: ${{ github.event_name == 'push' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
- name: Build and push with ffmpeg
id: docker_build_ffmpeg
@ -96,7 +96,7 @@ jobs:
tags: ${{ steps.meta-ffmpeg.outputs.tags }}
labels: ${{ steps.meta-ffmpeg.outputs.labels }}
build-args: INSTALL_FFMPEG=true
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
build_docker_with_aria2:
needs: build_docker

View File

@ -13,6 +13,23 @@ jobs:
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
@ -32,7 +49,6 @@ jobs:
- name: Install dependencies
run: |
sudo snap install zig --classic --beta
docker pull crazymax/xgo:latest
go install github.com/crazy-max/xgo@latest
sudo apt install upx

View File

@ -22,7 +22,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
key: docker-musl-libs-v2
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
@ -58,7 +58,7 @@ jobs:
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
- name: Docker meta with ffmpeg
id: meta-ffmpeg
@ -79,7 +79,7 @@ jobs:
tags: ${{ steps.meta-ffmpeg.outputs.tags }}
labels: ${{ steps.meta-ffmpeg.outputs.labels }}
build-args: INSTALL_FFMPEG=true
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
release_docker_with_aria2:
needs: release_docker

34
.github/workflows/release_freebsd.yml vendored Normal file
View File

@ -0,0 +1,34 @@
name: release_freebsd
on:
release:
types: [ published ]
jobs:
release_freebsd:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release freebsd
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*

View File

@ -93,7 +93,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://musl.cc/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross)
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz"
@ -112,8 +112,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc)
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@ -233,6 +233,29 @@ BuildReleaseAndroid() {
done
}
BuildReleaseFreeBSD() {
rm -rf .git/
mkdir -p "build/freebsd"
OS_ARCHES=(amd64 arm64 i386)
GO_ARCHES=(amd64 arm64 386)
CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
echo building for freebsd-${os_arch}
sudo mkdir -p "/opt/freebsd/${os_arch}"
wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
rm base.txz
export GOOS=freebsd
export GOARCH=${GO_ARCHES[$i]}
export CC=${cgo_cc}
export CGO_ENABLED=1
export CGO_LDFLAGS="-fuse-ld=lld"
go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter .
done
}
MakeRelease() {
cd build
mkdir compress
@ -251,6 +274,11 @@ MakeRelease() {
tar -czvf compress/"$i".tar.gz alist
rm -f alist
done
for i in $(find . -type f -name "$appName-freebsd-*"); do
cp "$i" alist
tar -czvf compress/"$i".tar.gz alist
rm -f alist
done
for i in $(find . -type f -name "$appName-windows-*"); do
cp "$i" alist.exe
zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe
@ -288,6 +316,9 @@ elif [ "$1" = "release" ]; then
elif [ "$2" = "android" ]; then
BuildReleaseAndroid
MakeRelease "md5-android.txt"
elif [ "$2" = "freebsd" ]; then
BuildReleaseFreeBSD
MakeRelease "md5-freebsd.txt"
elif [ "$2" = "web" ]; then
echo "web only"
else

View File

@ -79,28 +79,60 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
return link, nil
}
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
return err
result := driver115.MkdirResp{}
form := map[string]string{
"pid": parentDir.GetID(),
"cname": dirName,
}
return nil
req := d.client.NewRequest().
SetFormData(form).
SetResult(&result).
ForceContentType("application/json;charset=UTF-8")
resp, err := req.Post(driver115.ApiDirAdd)
err = driver115.CheckErr(err, &result, resp)
if err != nil {
return nil, err
}
f, err := d.getNewFile(result.FileID)
if err != nil {
return nil, nil
}
return f, nil
}
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
return d.client.Move(dstDir.GetID(), srcObj.GetID())
if err := d.client.Move(dstDir.GetID(), srcObj.GetID()); err != nil {
return nil, err
}
f, err := d.getNewFile(srcObj.GetID())
if err != nil {
return nil, nil
}
return f, nil
}
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
return d.client.Rename(srcObj.GetID(), newName)
if err := d.client.Rename(srcObj.GetID(), newName); err != nil {
return nil, err
}
f, err := d.getNewFile((srcObj.GetID()))
if err != nil {
return nil, nil
}
return f, nil
}
func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -117,9 +149,9 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
return d.client.Delete(obj.GetID())
}
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
var (
@ -128,10 +160,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
)
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
return err
return nil, err
}
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
return driver115.ErrUploadTooLarge
return nil, driver115.ErrUploadTooLarge
}
//if digest, err = d.client.GetDigestResult(stream); err != nil {
// return err
@ -144,22 +176,22 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
if err != nil {
return err
return nil, err
}
preHash, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return err
return nil, err
}
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) <= 0 {
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
return nil, err
}
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return err
return nil, err
}
}
fullHash = strings.ToUpper(fullHash)
@ -168,20 +200,36 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// note that 115 add timeout for rapid-upload,
// and "sig invalid" err is thrown even when the hash is correct after timeout.
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
return err
return nil, err
}
if matched, err := fastInfo.Ok(); err != nil {
return err
return nil, err
} else if matched {
return nil
f, err := d.getNewFileByPickCode(fastInfo.PickCode)
if err != nil {
return nil, nil
}
return f, nil
}
var uploadResult *UploadResult
// Rapid (instant) upload failed, fall back to a regular upload
if stream.GetSize() <= utils.KB { // files smaller than 1KB are uploaded in normal mode
return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
if stream.GetSize() <= 10*utils.MB { // files smaller than 10MB are uploaded in normal mode
if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil {
return nil, err
}
} else {
// multipart upload
if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil {
return nil, err
}
}
// multipart upload
return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
file, err := d.getNewFile(uploadResult.Data.FileID)
if err != nil {
return nil, nil
}
return file, nil
}
func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) {

View File

@ -9,8 +9,8 @@ type Addition struct {
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
PageSize int64 `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate ([limit]r/1s)"`
driver.RootID
}

View File

@ -1,10 +1,11 @@
package _115
import (
"time"
"github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
var _ model.Obj = (*FileObj)(nil)
@ -20,3 +21,18 @@ func (f *FileObj) CreateTime() time.Time {
func (f *FileObj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.Sha1)
}
type UploadResult struct {
driver.BasicResp
Data struct {
PickCode string `json:"pick_code"`
FileSize int `json:"file_size"`
FileID string `json:"file_id"`
ThumbURL string `json:"thumb_url"`
Sha1 string `json:"sha1"`
Aid int `json:"aid"`
FileName string `json:"file_name"`
Cid string `json:"cid"`
IsVideo int `json:"is_video"`
} `json:"data"`
}

View File

@ -10,7 +10,6 @@ import (
"io"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
@ -75,6 +74,34 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
return res, nil
}
func (d *Pan115) getNewFile(fileId string) (*FileObj, error) {
file, err := d.client.GetFile(fileId)
if err != nil {
return nil, err
}
return &FileObj{*file}, nil
}
func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) {
result := driver115.GetFileInfoResponse{}
req := d.client.NewRequest().
SetQueryParam("pick_code", pickCode).
ForceContentType("application/json;charset=UTF-8").
SetResult(&result)
resp, err := req.Get(driver115.ApiFileInfo)
if err := driver115.CheckErr(err, &result, resp); err != nil {
return nil, err
}
if len(result.Files) == 0 {
return nil, errors.New("not get file info")
}
fileInfo := result.Files[0]
f := &FileObj{}
f.From(fileInfo)
return f, nil
}
func (d *Pan115) getUA() string {
return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer)
}
@ -245,8 +272,38 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
return
}
// UploadByOSS use aliyun sdk to upload
func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) {
ossToken, err := c.client.GetOSSToken()
if err != nil {
return nil, err
}
ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret)
if err != nil {
return nil, err
}
bucket, err := ossClient.Bucket(params.Bucket)
if err != nil {
return nil, err
}
var bodyBytes []byte
if err = bucket.PutObject(params.Object, r, append(
driver115.OssOption(params, ossToken),
oss.CallbackResult(&bodyBytes),
)...); err != nil {
return nil, err
}
var uploadResult UploadResult
if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
return nil, err
}
return &uploadResult, uploadResult.Err(string(bodyBytes))
}
// UploadByMultipart uploads in multipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
@ -254,12 +311,13 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
ossClient *oss.Client
bucket *oss.Bucket
ossToken *driver115.UploadOSSTokenResp
bodyBytes []byte
err error
)
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
return nil, err
}
options := driver115.DefalutUploadMultipartOptions()
@ -268,17 +326,19 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
f(options)
}
}
// with OSS Sequential enabled, parts must be uploaded in order
options.ThreadsNum = 1
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
return nil, err
}
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
return err
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil {
return nil, err
}
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
return err
return nil, err
}
// the ossToken expires after one hour, so refresh it every 50 minutes
@ -288,14 +348,15 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
timeout := time.NewTimer(options.Timeout)
if chunks, err = SplitFile(fileSize); err != nil {
return err
return nil, err
}
if imur, err = bucket.InitiateMultipartUpload(params.Object,
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
oss.UserAgentHeader(driver115.OSSUserAgent),
oss.EnableSha1(), oss.Sequential(),
); err != nil {
return err
return nil, err
}
wg := sync.WaitGroup{}
@ -337,8 +398,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
continue
}
b := bytes.NewBuffer(buf)
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}
}
@ -362,25 +422,31 @@ LOOP:
case <-ticker.C:
// when the ticker fires, fetch a fresh ossToken
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
return nil, err
}
case <-quit:
break LOOP
case <-errCh:
return err
return nil, err
case <-timeout.C:
return fmt.Errorf("time out")
return nil, fmt.Errorf("time out")
}
}
// The EOF error comes from xml.Unmarshal: the response is actually JSON, so the upload in fact succeeded
if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
// If the file name contains either of the characters & or <, parsing the response as XML fails, but the upload actually succeeded
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
return err
}
// For unknown reasons OSS does not compute the SHA1 for multipart uploads, which makes the 115 server-side verification fail
// params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1)
if _, err := bucket.CompleteMultipartUpload(imur, parts, append(
driver115.OssOption(params, ossToken),
oss.CallbackResult(&bodyBytes),
)...); err != nil {
return nil, err
}
return d.checkUploadStatus(dirID, params.SHA1)
var uploadResult UploadResult
if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
return nil, err
}
return &uploadResult, uploadResult.Err(string(bodyBytes))
}
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
@ -389,27 +455,6 @@ func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
}
}
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
// verify whether the upload succeeded
req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
opts := []driver115.GetFileOptions{
driver115.WithOrder(driver115.FileOrderByTime),
driver115.WithShowDirEnable(false),
driver115.WithAsc(false),
driver115.WithLimit(500),
}
fResp, err := driver115.GetFiles(req, dirID, opts...)
if err != nil {
return err
}
for _, fileInfo := range fResp.Files {
if fileInfo.Sha1 == sha1 {
return nil
}
}
return driver115.ErrUploadFailed
}
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
for i := int64(1); i < 10; i++ {
if fileSize < i*utils.GB { // files smaller than i GB are split into i*1000 chunks

View File

@ -9,7 +9,7 @@ type Addition struct {
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
PageSize int64 `json:"page_size" type:"number" default:"20" help:"list api per page size of 115 driver"`
PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"`
ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"`

View File

@ -82,6 +82,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
"type": f.Type,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
}, nil)
if err != nil {

View File

@ -26,8 +26,9 @@ const (
Api = "https://www.123pan.com/api"
AApi = "https://www.123pan.com/a/api"
BApi = "https://www.123pan.com/b/api"
LoginApi = "https://login.123pan.com/api"
MainApi = BApi
SignIn = MainApi + "/user/sign_in"
SignIn = LoginApi + "/user/sign_in"
Logout = MainApi + "/user/logout"
UserInfo = MainApi + "/user/info"
FileList = MainApi + "/file/list/new"

View File

@ -6,7 +6,7 @@ import (
)
type Addition struct {
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"resource"`
driver.RootID
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`

View File

@ -22,6 +22,7 @@ import (
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/febbox"
_ "github.com/alist-org/alist/v3/drivers/ftp"
_ "github.com/alist-org/alist/v3/drivers/google_drive"
_ "github.com/alist-org/alist/v3/drivers/google_photo"

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
)
type TokenErrResp struct {
@ -72,7 +73,7 @@ func fileToObj(f File) *model.ObjThumb {
IsFolder: f.Isdir == 1,
// the MD5 returned directly by the API is wrong
// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
HashInfo: utils.NewHashInfo(utils.MD5, DecryptMd5(f.Md5)),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
}

View File

@ -1,11 +1,14 @@
package baidu_netdisk
import (
"encoding/hex"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"unicode"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/errs"
@ -153,8 +156,6 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
u = res.Header().Get("location")
//}
updateObjMd5(file, "pan.baidu.com", u)
return &model.Link{
URL: u,
Header: http.Header{
@ -178,8 +179,6 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
return nil, err
}
updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
return &model.Link{
URL: resp.Info[0].Dlink,
Header: http.Header{
@ -229,19 +228,6 @@ func joinTime(form map[string]string, ctime, mtime int64) {
form["local_ctime"] = strconv.FormatInt(ctime, 10)
}
func updateObjMd5(obj model.Obj, userAgent, u string) {
object := model.GetRawObject(obj)
if object != nil {
req, _ := http.NewRequest(http.MethodHead, u, nil)
req.Header.Add("User-Agent", userAgent)
resp, _ := base.HttpClient.Do(req)
if resp != nil {
contentMd5 := resp.Header.Get("Content-Md5")
object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
}
}
}
const (
DefaultSliceSize int64 = 4 * utils.MB
VipSliceSize = 16 * utils.MB
@ -267,3 +253,40 @@ func (d *BaiduNetdisk) getSliceSize() int64 {
// r = strings.ReplaceAll(r, "+", "%20")
// return r
// }
func DecryptMd5(encryptMd5 string) string {
if _, err := hex.DecodeString(encryptMd5); err == nil {
return encryptMd5
}
var out strings.Builder
out.Grow(len(encryptMd5))
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
if i == 9 {
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
} else {
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
}
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
}
encryptMd5 = out.String()
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
}
func EncryptMd5(originalMd5 string) string {
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
var out strings.Builder
out.Grow(len(reversed))
for i, n := 0, int64(0); i < len(reversed); i++ {
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
n ^= int64(15 & i)
if i == 9 {
out.WriteRune(rune(n) + 'g')
} else {
out.WriteString(strconv.FormatInt(n, 16))
}
}
return out.String()
}
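The two helpers above are inverses on a 32-character lowercase hex digest, which gives an easy sanity check; the example below is an illustrative sketch, not code from the diff, and the sample digest is arbitrary:

package baidu_netdisk

import "fmt"

// ExampleDecryptMd5 round-trips an MD5 through the obfuscation used by Baidu:
// EncryptMd5 block-swaps and XOR-scrambles the digest, DecryptMd5 undoes it.
func ExampleDecryptMd5() {
	realMd5 := "0123456789abcdef0123456789abcdef" // arbitrary sample digest
	obfuscated := EncryptMd5(realMd5)
	fmt.Println(DecryptMd5(obfuscated) == realMd5)
	// Output: true
}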

View File

@ -72,7 +72,7 @@ func (c *File) Thumb() string {
}
func (c *File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, c.Md5)
return utils.NewHashInfo(utils.MD5, DecryptMd5(c.Md5))
}
/*相册部分*/

View File

@ -2,8 +2,12 @@ package baiduphoto
import (
"context"
"encoding/hex"
"fmt"
"net/http"
"strconv"
"strings"
"unicode"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/errs"
@ -476,3 +480,40 @@ func (d *BaiduPhoto) uInfo() (*UInfo, error) {
}
return &info, nil
}
func DecryptMd5(encryptMd5 string) string {
if _, err := hex.DecodeString(encryptMd5); err == nil {
return encryptMd5
}
var out strings.Builder
out.Grow(len(encryptMd5))
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
if i == 9 {
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
} else {
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
}
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
}
encryptMd5 = out.String()
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
}
func EncryptMd5(originalMd5 string) string {
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
var out strings.Builder
out.Grow(len(reversed))
for i, n := 0, int64(0); i < len(reversed); i++ {
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
n ^= int64(15 & i)
if i == 9 {
out.WriteRune(rune(n) + 'g')
} else {
out.WriteString(strconv.FormatInt(n, 16))
}
}
return out.String()
}

View File

@ -67,7 +67,9 @@ func (d *ChaoXing) Init(ctx context.Context) error {
}
func (d *ChaoXing) Drop(ctx context.Context) error {
d.cron.Stop()
if d.cron != nil {
d.cron.Stop()
}
return nil
}

View File

@ -4,6 +4,7 @@ import (
"context"
"io"
"net/http"
"path"
"strconv"
"strings"
@ -90,7 +91,7 @@ func (d *Cloudreve) MakeDir(ctx context.Context, parentDir model.Obj, dirName st
func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
body := base.Json{
"action": "move",
"src_dir": srcObj.GetPath(),
"src_dir": path.Dir(srcObj.GetPath()),
"dst": dstDir.GetPath(),
"src": convertSrc(srcObj),
}
@ -112,7 +113,7 @@ func (d *Cloudreve) Rename(ctx context.Context, srcObj model.Obj, newName string
func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
body := base.Json{
"src_dir": srcObj.GetPath(),
"src_dir": path.Dir(srcObj.GetPath()),
"dst": dstDir.GetPath(),
"src": convertSrc(srcObj),
}

132
drivers/febbox/driver.go Normal file
View File

@ -0,0 +1,132 @@
package febbox
import (
"context"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
)
type FebBox struct {
model.Storage
Addition
accessToken string
oauth2Token oauth2.TokenSource
}
func (d *FebBox) Config() driver.Config {
return config
}
func (d *FebBox) GetAddition() driver.Additional {
return &d.Addition
}
func (d *FebBox) Init(ctx context.Context) error {
// initialize the oauth2 config
oauth2Config := &clientcredentials.Config{
ClientID: d.ClientID,
ClientSecret: d.ClientSecret,
AuthStyle: oauth2.AuthStyleInParams,
TokenURL: "https://api.febbox.com/oauth/token",
}
d.initializeOAuth2Token(ctx, oauth2Config, d.Addition.RefreshToken)
token, err := d.oauth2Token.Token()
if err != nil {
return err
}
d.accessToken = token.AccessToken
d.Addition.RefreshToken = token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *FebBox) Drop(ctx context.Context) error {
return nil
}
func (d *FebBox) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files, err := d.getFilesList(dir.GetID())
if err != nil {
return nil, err
}
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
return fileToObj(src), nil
})
}
func (d *FebBox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var ip string
if d.Addition.UserIP != "" {
ip = d.Addition.UserIP
} else {
ip = args.IP
}
url, err := d.getDownloadLink(file.GetID(), ip)
if err != nil {
return nil, err
}
return &model.Link{
URL: url,
}, nil
}
func (d *FebBox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
err := d.makeDir(parentDir.GetID(), dirName)
if err != nil {
return nil, err
}
return nil, nil
}
func (d *FebBox) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
err := d.move(srcObj.GetID(), dstDir.GetID())
if err != nil {
return nil, err
}
return nil, nil
}
func (d *FebBox) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
err := d.rename(srcObj.GetID(), newName)
if err != nil {
return nil, err
}
return nil, nil
}
func (d *FebBox) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
err := d.copy(srcObj.GetID(), dstDir.GetID())
if err != nil {
return nil, err
}
return nil, nil
}
func (d *FebBox) Remove(ctx context.Context, obj model.Obj) error {
err := d.remove(obj.GetID())
if err != nil {
return err
}
return nil
}
func (d *FebBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return nil, errs.NotImplement
}
var _ driver.Driver = (*FebBox)(nil)

36
drivers/febbox/meta.go Normal file
View File

@ -0,0 +1,36 @@
package febbox
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
driver.RootID
ClientID string `json:"client_id" required:"true" default:""`
ClientSecret string `json:"client_secret" required:"true" default:""`
RefreshToken string
SortRule string `json:"sort_rule" required:"true" type:"select" options:"size_asc,size_desc,name_asc,name_desc,update_asc,update_desc,ext_asc,ext_desc" default:"name_asc"`
PageSize int64 `json:"page_size" required:"true" type:"number" default:"100" help:"list api per page size of FebBox driver"`
UserIP string `json:"user_ip" default:"" help:"user ip address for download link which can speed up the download"`
}
var config = driver.Config{
Name: "FebBox",
LocalSort: false,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &FebBox{}
})
}

88
drivers/febbox/oauth2.go Normal file
View File

@ -0,0 +1,88 @@
package febbox
import (
"context"
"encoding/json"
"errors"
"net/http"
"net/url"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
type customTokenSource struct {
config *clientcredentials.Config
ctx context.Context
refreshToken string
}
func (c *customTokenSource) Token() (*oauth2.Token, error) {
v := url.Values{}
if c.refreshToken != "" {
v.Set("grant_type", "refresh_token")
v.Set("refresh_token", c.refreshToken)
} else {
v.Set("grant_type", "client_credentials")
}
v.Set("client_id", c.config.ClientID)
v.Set("client_secret", c.config.ClientSecret)
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, errors.New("oauth2: cannot fetch token")
}
var tokenResp struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data struct {
AccessToken string `json:"access_token"`
ExpiresIn int64 `json:"expires_in"`
TokenType string `json:"token_type"`
Scope string `json:"scope"`
RefreshToken string `json:"refresh_token"`
} `json:"data"`
}
if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
return nil, err
}
if tokenResp.Code != 1 {
return nil, errors.New("oauth2: server response error")
}
c.refreshToken = tokenResp.Data.RefreshToken
token := &oauth2.Token{
AccessToken: tokenResp.Data.AccessToken,
TokenType: tokenResp.Data.TokenType,
RefreshToken: tokenResp.Data.RefreshToken,
Expiry: time.Now().Add(time.Duration(tokenResp.Data.ExpiresIn) * time.Second),
}
return token, nil
}
func (d *FebBox) initializeOAuth2Token(ctx context.Context, oauth2Config *clientcredentials.Config, refreshToken string) {
d.oauth2Token = oauth2.ReuseTokenSource(nil, &customTokenSource{
config: oauth2Config,
ctx: ctx,
refreshToken: refreshToken,
})
}

123
drivers/febbox/types.go Normal file
View File

@ -0,0 +1,123 @@
package febbox
import (
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
"strconv"
"time"
)
type ErrResp struct {
ErrorCode int64 `json:"code"`
ErrorMsg string `json:"msg"`
ServerRunTime float64 `json:"server_runtime"`
ServerName string `json:"server_name"`
}
func (e *ErrResp) IsError() bool {
return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ServerRunTime != 0 || e.ServerName != ""
}
func (e *ErrResp) Error() string {
return fmt.Sprintf("ErrorCode: %d ,Error: %s ,ServerRunTime: %f ,ServerName: %s", e.ErrorCode, e.ErrorMsg, e.ServerRunTime, e.ServerName)
}
type FileListResp struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data struct {
FileList []File `json:"file_list"`
ShowType string `json:"show_type"`
} `json:"data"`
}
type Rules struct {
AllowCopy int64 `json:"allow_copy"`
AllowDelete int64 `json:"allow_delete"`
AllowDownload int64 `json:"allow_download"`
AllowComment int64 `json:"allow_comment"`
HideLocation int64 `json:"hide_location"`
}
type File struct {
Fid int64 `json:"fid"`
UID int64 `json:"uid"`
FileSize int64 `json:"file_size"`
Path string `json:"path"`
FileName string `json:"file_name"`
Ext string `json:"ext"`
AddTime int64 `json:"add_time"`
FileCreateTime int64 `json:"file_create_time"`
FileUpdateTime int64 `json:"file_update_time"`
ParentID int64 `json:"parent_id"`
UpdateTime int64 `json:"update_time"`
LastOpenTime int64 `json:"last_open_time"`
IsDir int64 `json:"is_dir"`
Epub int64 `json:"epub"`
IsMusicList int64 `json:"is_music_list"`
OssFid int64 `json:"oss_fid"`
Faststart int64 `json:"faststart"`
HasVideoQuality int64 `json:"has_video_quality"`
TotalDownload int64 `json:"total_download"`
Status int64 `json:"status"`
Remark string `json:"remark"`
OldHash string `json:"old_hash"`
Hash string `json:"hash"`
HashType string `json:"hash_type"`
FromUID int64 `json:"from_uid"`
FidOrg int64 `json:"fid_org"`
ShareID int64 `json:"share_id"`
InvitePermission int64 `json:"invite_permission"`
ThumbSmall string `json:"thumb_small"`
ThumbSmallWidth int64 `json:"thumb_small_width"`
ThumbSmallHeight int64 `json:"thumb_small_height"`
Thumb string `json:"thumb"`
ThumbWidth int64 `json:"thumb_width"`
ThumbHeight int64 `json:"thumb_height"`
ThumbBig string `json:"thumb_big"`
ThumbBigWidth int64 `json:"thumb_big_width"`
ThumbBigHeight int64 `json:"thumb_big_height"`
IsCustomThumb int64 `json:"is_custom_thumb"`
Photos int64 `json:"photos"`
IsAlbum int64 `json:"is_album"`
ReadOnly int64 `json:"read_only"`
Rules Rules `json:"rules"`
IsShared int64 `json:"is_shared"`
}
func fileToObj(f File) *model.ObjThumb {
return &model.ObjThumb{
Object: model.Object{
ID: strconv.FormatInt(f.Fid, 10),
Name: f.FileName,
Size: f.FileSize,
Ctime: time.Unix(f.FileCreateTime, 0),
Modified: time.Unix(f.FileUpdateTime, 0),
IsFolder: f.IsDir == 1,
HashInfo: utils.NewHashInfo(hash_extend.GCID, f.Hash),
},
Thumbnail: model.Thumbnail{
Thumbnail: f.Thumb,
},
}
}
type FileDownloadResp struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data []struct {
Error int `json:"error"`
DownloadURL string `json:"download_url"`
Hash string `json:"hash"`
HashType string `json:"hash_type"`
Fid int `json:"fid"`
FileName string `json:"file_name"`
ParentID int `json:"parent_id"`
FileSize int `json:"file_size"`
Ext string `json:"ext"`
Thumb string `json:"thumb"`
VipLink int `json:"vip_link"`
} `json:"data"`
}

224
drivers/febbox/util.go Normal file
View File

@ -0,0 +1,224 @@
package febbox
import (
"encoding/json"
"errors"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/op"
"github.com/go-resty/resty/v2"
"net/http"
"strconv"
)
func (d *FebBox) refreshTokenByOAuth2() error {
token, err := d.oauth2Token.Token()
if err != nil {
return err
}
d.Status = "work"
d.accessToken = token.AccessToken
d.Addition.RefreshToken = token.RefreshToken
op.MustSaveDriverStorage(d)
return nil
}
func (d *FebBox) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
// obtain the access_token via oauth2
token, err := d.oauth2Token.Token()
if err != nil {
return nil, err
}
req.SetAuthScheme(token.TokenType).SetAuthToken(token.AccessToken)
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
var e ErrResp
req.SetError(&e)
res, err := req.Execute(method, url)
if err != nil {
return nil, err
}
switch e.ErrorCode {
case 0:
return res.Body(), nil
case 1:
return res.Body(), nil
case -10001:
if e.ServerName != "" {
// the access_token has expired
if err = d.refreshTokenByOAuth2(); err != nil {
return nil, err
}
return d.request(url, method, callback, resp)
} else {
return nil, errors.New(e.Error())
}
default:
return nil, errors.New(e.Error())
}
}
func (d *FebBox) getFilesList(id string) ([]File, error) {
if d.PageSize <= 0 {
d.PageSize = 100
}
res, err := d.listWithLimit(id, d.PageSize)
if err != nil {
return nil, err
}
return *res, nil
}
func (d *FebBox) listWithLimit(dirID string, pageLimit int64) (*[]File, error) {
var files []File
page := int64(1)
for {
result, err := d.getFiles(dirID, page, pageLimit)
if err != nil {
return nil, err
}
files = append(files, *result...)
if int64(len(*result)) < pageLimit {
break
} else {
page++
}
}
return &files, nil
}
func (d *FebBox) getFiles(dirID string, page, pageLimit int64) (*[]File, error) {
var fileList FileListResp
queryParams := map[string]string{
"module": "file_list",
"parent_id": dirID,
"page": strconv.FormatInt(page, 10),
"pagelimit": strconv.FormatInt(pageLimit, 10),
"order": d.Addition.SortRule,
}
res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, &fileList)
if err != nil {
return nil, err
}
if err = json.Unmarshal(res, &fileList); err != nil {
return nil, err
}
return &fileList.Data.FileList, nil
}
func (d *FebBox) getDownloadLink(id string, ip string) (string, error) {
var fileDownloadResp FileDownloadResp
queryParams := map[string]string{
"module": "file_get_download_url",
"fids[]": id,
"ip": ip,
}
res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, &fileDownloadResp)
if err != nil {
return "", err
}
if err = json.Unmarshal(res, &fileDownloadResp); err != nil {
return "", err
}
return fileDownloadResp.Data[0].DownloadURL, nil
}
func (d *FebBox) makeDir(id string, name string) error {
queryParams := map[string]string{
"module": "create_dir",
"parent_id": id,
"name": name,
}
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, nil)
if err != nil {
return err
}
return nil
}
func (d *FebBox) move(id string, id2 string) error {
queryParams := map[string]string{
"module": "file_move",
"fids[]": id,
"to": id2,
}
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, nil)
if err != nil {
return err
}
return nil
}
func (d *FebBox) rename(id string, name string) error {
queryParams := map[string]string{
"module": "file_rename",
"fid": id,
"name": name,
}
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, nil)
if err != nil {
return err
}
return nil
}
func (d *FebBox) copy(id string, id2 string) error {
queryParams := map[string]string{
"module": "file_copy",
"fids[]": id,
"to": id2,
}
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, nil)
if err != nil {
return err
}
return nil
}
func (d *FebBox) remove(id string) error {
queryParams := map[string]string{
"module": "file_delete",
"fids[]": id,
}
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
req.SetMultipartFormData(queryParams)
}, nil)
if err != nil {
return err
}
return nil
}

View File

@ -66,12 +66,13 @@ func (d *ILanZou) Drop(ctx context.Context) error {
}
func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
offset := 1
var res []ListItem
for {
var resp ListResp
_, err := d.proved("/record/file/list", http.MethodGet, func(req *resty.Request) {
params := []string{
"offset=1",
"offset=" + strconv.Itoa(offset),
"limit=60",
"folderId=" + dir.GetID(),
"type=0",
@ -83,7 +84,9 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return nil, err
}
res = append(res, resp.List...)
if resp.TotalPage <= resp.Offset {
if resp.Offset < resp.TotalPage {
offset++
} else {
break
}
}
@ -286,7 +289,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
req.SetBody(base.Json{
"fileId": "",
"fileName": stream.GetName(),
"fileSize": stream.GetSize() / 1024,
"fileSize": stream.GetSize()/1024 + 1,
"folderId": dstDir.GetID(),
"md5": etag,
"type": 1,

View File

@ -22,6 +22,7 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/alist-org/times"
cp "github.com/otiai10/copy"
log "github.com/sirupsen/logrus"
_ "golang.org/x/image/webp"
)
@ -76,7 +77,7 @@ func (d *Local) Init(ctx context.Context) error {
if d.thumbConcurrency == 0 {
d.thumbTokenBucket = NewNopTokenBucket()
} else {
d.thumbTokenBucket = NewStaticTokenBucket(d.thumbConcurrency)
d.thumbTokenBucket = NewStaticTokenBucketWithMigration(d.thumbTokenBucket, d.thumbConcurrency)
}
return nil
}
@ -241,11 +242,22 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if utils.IsSubPath(srcPath, dstPath) {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
err := os.Rename(srcPath, dstPath)
if err != nil {
if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
// Handle cross-device file move in local driver
if err = d.Copy(ctx, srcObj, dstDir); err != nil {
return err
} else {
// Remove the file directly, without checking the recycle bin, if the copy succeeded
if srcObj.IsDir() {
err = os.RemoveAll(srcObj.GetPath())
} else {
err = os.Remove(srcObj.GetPath())
}
return err
}
} else {
return err
}
return nil
}
func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@ -258,22 +270,18 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er
return nil
}
func (d *Local) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
srcPath := srcObj.GetPath()
dstPath := filepath.Join(dstDir.GetPath(), srcObj.GetName())
if utils.IsSubPath(srcPath, dstPath) {
return fmt.Errorf("the destination folder is a subfolder of the source folder")
}
var err error
if srcObj.IsDir() {
err = utils.CopyDir(srcPath, dstPath)
} else {
err = utils.CopyFile(srcPath, dstPath)
}
if err != nil {
return err
}
return nil
// Copy using otiai10/copy to perform more secure & efficient copy
return cp.Copy(srcPath, dstPath, cp.Options{
Sync: true, // Sync file to disk after copy, may have performance penalty in filesystem such as ZFS
PreserveTimes: true,
NumOfWorkers: 0, // Serialized copy without using goroutine
})
}
func (d *Local) Remove(ctx context.Context, obj model.Obj) error {

View File

@ -23,6 +23,38 @@ func NewStaticTokenBucket(size int) StaticTokenBucket {
return StaticTokenBucket{bucket: bucket}
}
func NewStaticTokenBucketWithMigration(oldBucket TokenBucket, size int) StaticTokenBucket {
if oldBucket != nil {
oldStaticBucket, ok := oldBucket.(StaticTokenBucket)
if ok {
oldSize := cap(oldStaticBucket.bucket)
migrateSize := oldSize
if size < migrateSize {
migrateSize = size
}
bucket := make(chan struct{}, size)
for range size - migrateSize {
bucket <- struct{}{}
}
if migrateSize != 0 {
go func() {
for range migrateSize {
<-oldStaticBucket.bucket
bucket <- struct{}{}
}
close(oldStaticBucket.bucket)
}()
}
return StaticTokenBucket{bucket: bucket}
}
}
return NewStaticTokenBucket(size)
}
// The Take channel may be closed when the local driver is modified.
// Don't call the Put method after the channel is closed.
func (b StaticTokenBucket) Take() <-chan struct{} {
return b.bucket
}
@ -35,8 +67,10 @@ func (b StaticTokenBucket) Do(ctx context.Context, f func() error) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-b.bucket:
defer b.Put()
case _, ok := <-b.Take():
if ok {
defer b.Put()
}
}
return f()
}
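For context on how the migrating bucket is consumed: the local driver swaps its bucket in place on re-initialization, so tokens held by in-flight thumbnail jobs drain from the old bucket into the new one rather than being lost. The wrapper below is an illustrative sketch, not code from the diff; the package name and helper are assumptions:

package local

import "context"

// limitThumbnail runs work under the thumbnail concurrency limit. Do blocks
// until a token is free or ctx is cancelled; if the old bucket's channel was
// closed by a migration, the token is not returned, matching the
// "don't Put after close" rule noted in the comments above.
func limitThumbnail(ctx context.Context, bucket StaticTokenBucket, work func() error) error {
	return bucket.Do(ctx, work)
}

// On (re)initialization the driver swaps buckets in place, as in the local
// driver diff shown earlier:
//
//	d.thumbTokenBucket = NewStaticTokenBucketWithMigration(d.thumbTokenBucket, d.thumbConcurrency)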

View File

@ -91,8 +91,8 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
ClientID: d.ClientID,
ClientSecret: d.ClientSecret,
Endpoint: oauth2.Endpoint{
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthURL: "https://user.mypikpak.net/v1/auth/signin",
TokenURL: "https://user.mypikpak.net/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
},
}
@ -124,7 +124,7 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
}
// obtain the CaptchaToken
err = d.RefreshCaptchaTokenAtLogin(GetAction(http.MethodGet, "https://api-drive.mypikpak.com/drive/v1/files"), d.Common.GetUserID())
err = d.RefreshCaptchaTokenAtLogin(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/files"), d.Common.GetUserID())
if err != nil {
return err
}
@ -174,7 +174,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
if !d.DisableMediaLink {
queryParams["usage"] = "CACHE"
}
_, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.com/drive/v1/files/%s", file.GetID()),
_, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.net/drive/v1/files/%s", file.GetID()),
http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(queryParams)
}, &resp)
@ -200,7 +200,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"kind": "drive#folder",
"parent_id": parentDir.GetID(),
@ -211,7 +211,7 @@ func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
}
func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"ids": []string{srcObj.GetID()},
"to": base.Json{
@ -223,7 +223,7 @@ func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) {
req.SetBody(base.Json{
"name": newName,
})
@ -232,7 +232,7 @@ func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) e
}
func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"ids": []string{srcObj.GetID()},
"to": base.Json{
@ -244,7 +244,7 @@ func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"ids": []string{obj.GetID()},
})
@ -268,7 +268,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
var resp UploadTaskData
res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
res, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"kind": "drive#file",
"name": stream.GetName(),
@ -292,9 +292,9 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
params := resp.Resumable.Params
//endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
// for web uploads the returned endpoint is `mypikpak.com` | for android uploads it is `vip-lixian-07.mypikpak.com
// for web uploads the returned endpoint is `mypikpak.net` | for android uploads it is `vip-lixian-07.mypikpak.net
if d.Addition.Platform == "android" {
params.Endpoint = "mypikpak.com"
params.Endpoint = "mypikpak.net"
}
if stream.GetSize() <= 10*utils.MB { // files smaller than 10MB are uploaded in normal mode
@ -318,7 +318,7 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir
}
var resp OfflineDownloadResp
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
req.SetBody(requestBody)
}, &resp)
@ -336,7 +336,7 @@ PHASE_TYPE_RUNNING, PHASE_TYPE_ERROR, PHASE_TYPE_COMPLETE, PHASE_TYPE_PENDING
*/
func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []string) ([]OfflineTask, error) {
res := make([]OfflineTask, 0)
url := "https://api-drive.mypikpak.com/drive/v1/tasks"
url := "https://api-drive.mypikpak.net/drive/v1/tasks"
if len(phase) == 0 {
phase = []string{"PHASE_TYPE_RUNNING", "PHASE_TYPE_ERROR", "PHASE_TYPE_COMPLETE", "PHASE_TYPE_PENDING"}
@ -377,7 +377,7 @@ func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []
}
func (d *PikPak) DeleteOfflineTasks(ctx context.Context, taskIDs []string, deleteFiles bool) error {
url := "https://api-drive.mypikpak.com/drive/v1/tasks"
url := "https://api-drive.mypikpak.net/drive/v1/tasks"
params := map[string]string{
"task_ids": strings.Join(taskIDs, ","),
"delete_files": strconv.FormatBool(deleteFiles),

View File

@ -86,51 +86,51 @@ const (
WebClientID = "YUMx5nI8ZU8Ap8pm"
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
WebClientVersion = "2.0.0"
WebPackageName = "mypikpak.com"
WebPackageName = "mypikpak.net"
WebSdkVersion = "8.0.3"
PCClientID = "YvtoWO6GNHiuCl7x"
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
PCClientVersion = "undefined" // 2.5.6.4831
PCPackageName = "mypikpak.com"
PCPackageName = "mypikpak.net"
PCSdkVersion = "8.0.3"
)
var DlAddr = []string{
"dl-a10b-0621.mypikpak.com",
"dl-a10b-0622.mypikpak.com",
"dl-a10b-0623.mypikpak.com",
"dl-a10b-0624.mypikpak.com",
"dl-a10b-0625.mypikpak.com",
"dl-a10b-0858.mypikpak.com",
"dl-a10b-0859.mypikpak.com",
"dl-a10b-0860.mypikpak.com",
"dl-a10b-0861.mypikpak.com",
"dl-a10b-0862.mypikpak.com",
"dl-a10b-0863.mypikpak.com",
"dl-a10b-0864.mypikpak.com",
"dl-a10b-0865.mypikpak.com",
"dl-a10b-0866.mypikpak.com",
"dl-a10b-0867.mypikpak.com",
"dl-a10b-0868.mypikpak.com",
"dl-a10b-0869.mypikpak.com",
"dl-a10b-0870.mypikpak.com",
"dl-a10b-0871.mypikpak.com",
"dl-a10b-0872.mypikpak.com",
"dl-a10b-0873.mypikpak.com",
"dl-a10b-0874.mypikpak.com",
"dl-a10b-0875.mypikpak.com",
"dl-a10b-0876.mypikpak.com",
"dl-a10b-0877.mypikpak.com",
"dl-a10b-0878.mypikpak.com",
"dl-a10b-0879.mypikpak.com",
"dl-a10b-0880.mypikpak.com",
"dl-a10b-0881.mypikpak.com",
"dl-a10b-0882.mypikpak.com",
"dl-a10b-0883.mypikpak.com",
"dl-a10b-0884.mypikpak.com",
"dl-a10b-0885.mypikpak.com",
"dl-a10b-0886.mypikpak.com",
"dl-a10b-0887.mypikpak.com",
"dl-a10b-0621.mypikpak.net",
"dl-a10b-0622.mypikpak.net",
"dl-a10b-0623.mypikpak.net",
"dl-a10b-0624.mypikpak.net",
"dl-a10b-0625.mypikpak.net",
"dl-a10b-0858.mypikpak.net",
"dl-a10b-0859.mypikpak.net",
"dl-a10b-0860.mypikpak.net",
"dl-a10b-0861.mypikpak.net",
"dl-a10b-0862.mypikpak.net",
"dl-a10b-0863.mypikpak.net",
"dl-a10b-0864.mypikpak.net",
"dl-a10b-0865.mypikpak.net",
"dl-a10b-0866.mypikpak.net",
"dl-a10b-0867.mypikpak.net",
"dl-a10b-0868.mypikpak.net",
"dl-a10b-0869.mypikpak.net",
"dl-a10b-0870.mypikpak.net",
"dl-a10b-0871.mypikpak.net",
"dl-a10b-0872.mypikpak.net",
"dl-a10b-0873.mypikpak.net",
"dl-a10b-0874.mypikpak.net",
"dl-a10b-0875.mypikpak.net",
"dl-a10b-0876.mypikpak.net",
"dl-a10b-0877.mypikpak.net",
"dl-a10b-0878.mypikpak.net",
"dl-a10b-0879.mypikpak.net",
"dl-a10b-0880.mypikpak.net",
"dl-a10b-0881.mypikpak.net",
"dl-a10b-0882.mypikpak.net",
"dl-a10b-0883.mypikpak.net",
"dl-a10b-0884.mypikpak.net",
"dl-a10b-0885.mypikpak.net",
"dl-a10b-0886.mypikpak.net",
"dl-a10b-0887.mypikpak.net",
}
func (d *PikPak) login() error {
@ -139,7 +139,7 @@ func (d *PikPak) login() error {
return errors.New("username or password is empty")
}
url := "https://user.mypikpak.com/v1/auth/signin"
url := "https://user.mypikpak.net/v1/auth/signin"
// use the user-provided CaptchaToken (the verified captcha_token)
if d.GetCaptchaToken() == "" {
if err := d.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), d.Username); err != nil {
@ -169,7 +169,7 @@ func (d *PikPak) login() error {
}
func (d *PikPak) refreshToken(refreshToken string) error {
url := "https://user.mypikpak.com/v1/auth/token"
url := "https://user.mypikpak.net/v1/auth/token"
var e ErrResp
res, err := base.RestyClient.SetRetryCount(1).R().SetError(&e).
SetHeader("user-agent", "").SetBody(base.Json{
@ -307,7 +307,7 @@ func (d *PikPak) getFiles(id string) ([]File, error) {
"page_token": pageToken,
}
var resp Files
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodGet, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
@ -473,7 +473,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err
}
var e ErrResp
var resp CaptchaTokenResponse
_, err := d.request("https://user.mypikpak.com/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://user.mypikpak.net/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
req.SetError(&e).SetBody(param).SetQueryParam("client_id", d.ClientID)
}, &resp)

View File

@ -80,7 +80,7 @@ func (d *PikPakShare) Init(ctx context.Context) error {
}
// fetch the CaptchaToken
err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.com/drive/v1/share:batch_file_info"), "")
err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/share:batch_file_info"), "")
if err != nil {
return err
}
@ -113,7 +113,7 @@ func (d *PikPakShare) Link(ctx context.Context, file model.Obj, args model.LinkA
"file_id": file.GetID(),
"pass_code_token": d.PassCodeToken,
}
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/share/file_info", http.MethodGet, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/share/file_info", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {

View File

@ -68,51 +68,51 @@ const (
WebClientID = "YUMx5nI8ZU8Ap8pm"
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
WebClientVersion = "2.0.0"
WebPackageName = "mypikpak.com"
WebPackageName = "mypikpak.net"
WebSdkVersion = "8.0.3"
PCClientID = "YvtoWO6GNHiuCl7x"
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
PCClientVersion = "undefined" // 2.5.6.4831
PCPackageName = "mypikpak.com"
PCPackageName = "mypikpak.net"
PCSdkVersion = "8.0.3"
)
var DlAddr = []string{
"dl-a10b-0621.mypikpak.com",
"dl-a10b-0622.mypikpak.com",
"dl-a10b-0623.mypikpak.com",
"dl-a10b-0624.mypikpak.com",
"dl-a10b-0625.mypikpak.com",
"dl-a10b-0858.mypikpak.com",
"dl-a10b-0859.mypikpak.com",
"dl-a10b-0860.mypikpak.com",
"dl-a10b-0861.mypikpak.com",
"dl-a10b-0862.mypikpak.com",
"dl-a10b-0863.mypikpak.com",
"dl-a10b-0864.mypikpak.com",
"dl-a10b-0865.mypikpak.com",
"dl-a10b-0866.mypikpak.com",
"dl-a10b-0867.mypikpak.com",
"dl-a10b-0868.mypikpak.com",
"dl-a10b-0869.mypikpak.com",
"dl-a10b-0870.mypikpak.com",
"dl-a10b-0871.mypikpak.com",
"dl-a10b-0872.mypikpak.com",
"dl-a10b-0873.mypikpak.com",
"dl-a10b-0874.mypikpak.com",
"dl-a10b-0875.mypikpak.com",
"dl-a10b-0876.mypikpak.com",
"dl-a10b-0877.mypikpak.com",
"dl-a10b-0878.mypikpak.com",
"dl-a10b-0879.mypikpak.com",
"dl-a10b-0880.mypikpak.com",
"dl-a10b-0881.mypikpak.com",
"dl-a10b-0882.mypikpak.com",
"dl-a10b-0883.mypikpak.com",
"dl-a10b-0884.mypikpak.com",
"dl-a10b-0885.mypikpak.com",
"dl-a10b-0886.mypikpak.com",
"dl-a10b-0887.mypikpak.com",
"dl-a10b-0621.mypikpak.net",
"dl-a10b-0622.mypikpak.net",
"dl-a10b-0623.mypikpak.net",
"dl-a10b-0624.mypikpak.net",
"dl-a10b-0625.mypikpak.net",
"dl-a10b-0858.mypikpak.net",
"dl-a10b-0859.mypikpak.net",
"dl-a10b-0860.mypikpak.net",
"dl-a10b-0861.mypikpak.net",
"dl-a10b-0862.mypikpak.net",
"dl-a10b-0863.mypikpak.net",
"dl-a10b-0864.mypikpak.net",
"dl-a10b-0865.mypikpak.net",
"dl-a10b-0866.mypikpak.net",
"dl-a10b-0867.mypikpak.net",
"dl-a10b-0868.mypikpak.net",
"dl-a10b-0869.mypikpak.net",
"dl-a10b-0870.mypikpak.net",
"dl-a10b-0871.mypikpak.net",
"dl-a10b-0872.mypikpak.net",
"dl-a10b-0873.mypikpak.net",
"dl-a10b-0874.mypikpak.net",
"dl-a10b-0875.mypikpak.net",
"dl-a10b-0876.mypikpak.net",
"dl-a10b-0877.mypikpak.net",
"dl-a10b-0878.mypikpak.net",
"dl-a10b-0879.mypikpak.net",
"dl-a10b-0880.mypikpak.net",
"dl-a10b-0881.mypikpak.net",
"dl-a10b-0882.mypikpak.net",
"dl-a10b-0883.mypikpak.net",
"dl-a10b-0884.mypikpak.net",
"dl-a10b-0885.mypikpak.net",
"dl-a10b-0886.mypikpak.net",
"dl-a10b-0887.mypikpak.net",
}
func (d *PikPakShare) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@ -159,7 +159,7 @@ func (d *PikPakShare) getSharePassToken() error {
"limit": "100",
}
var resp ShareResp
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/share", http.MethodGet, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/share", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
@ -187,7 +187,7 @@ func (d *PikPakShare) getFiles(id string) ([]File, error) {
"pass_code_token": d.PassCodeToken,
}
var resp ShareResp
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/share/detail", http.MethodGet, func(req *resty.Request) {
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/share/detail", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
@ -345,7 +345,7 @@ func (d *PikPakShare) refreshCaptchaToken(action string, metas map[string]string
}
var e ErrResp
var resp CaptchaTokenResponse
_, err := d.request("https://user.mypikpak.com/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
_, err := d.request("https://user.mypikpak.net/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
req.SetError(&e).SetBody(param)
}, &resp)

View File

@ -10,8 +10,6 @@ import (
"math"
stdpath "path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
@ -24,9 +22,9 @@ import (
type Terabox struct {
model.Storage
Addition
JsToken string
JsToken string
url_domain_prefix string
base_url string
base_url string
}
func (d *Terabox) Config() driver.Config {
@ -145,52 +143,24 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
}
log.Debugln(locateupload_resp)
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
var Default int64 = 4 * 1024 * 1024
defaultByteData := make([]byte, Default)
count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
// calculate md5
h1 := md5.New()
h2 := md5.New()
block_list := make([]string, 0)
left := stream.GetSize()
for i := 0; i < count; i++ {
byteSize := Default
var byteData []byte
if left < Default {
byteSize = left
byteData = make([]byte, byteSize)
} else {
byteData = defaultByteData
}
left -= byteSize
_, err = io.ReadFull(tempFile, byteData)
if err != nil {
return err
}
h1.Write(byteData)
h2.Write(byteData)
block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
h2.Reset()
}
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
// precreate file
rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
path := encodeURIComponent(rawPath)
block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ","))
var precreateBlockListStr string
if stream.GetSize() > initialChunkSize {
precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761","a5fc157d78e6ad1c7e114b056c92821e"]`
} else {
precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761"]`
}
data := map[string]string{
"path": rawPath,
"autoinit": "1",
"target_path": dstDir.GetPath(),
"block_list": block_list_str,
"local_mtime": strconv.FormatInt(time.Now().Unix(), 10),
"path": rawPath,
"autoinit": "1",
"target_path": dstDir.GetPath(),
"block_list": precreateBlockListStr,
"local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10),
"file_limit_switch_v34": "true",
}
var precreateResp PrecreateResp
log.Debugln(data)
@ -206,6 +176,13 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
if precreateResp.ReturnType == 2 {
return nil
}
// upload chunks
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
params := map[string]string{
"method": "upload",
"path": path,
@ -215,24 +192,37 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
"channel": "dubox",
"clienttype": "0",
}
left = stream.GetSize()
for i, partseq := range precreateResp.BlockList {
streamSize := stream.GetSize()
chunkSize := calculateChunkSize(streamSize)
chunkByteData := make([]byte, chunkSize)
count := int(math.Ceil(float64(streamSize) / float64(chunkSize)))
left := streamSize
uploadBlockList := make([]string, 0, count)
h := md5.New()
for partseq := 0; partseq < count; partseq++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
byteSize := Default
byteSize := chunkSize
var byteData []byte
if left < Default {
if left >= chunkSize {
byteData = chunkByteData
} else {
byteSize = left
byteData = make([]byte, byteSize)
} else {
byteData = defaultByteData
}
left -= byteSize
_, err = io.ReadFull(tempFile, byteData)
if err != nil {
return err
}
// calculate md5
h.Write(byteData)
uploadBlockList = append(uploadBlockList, hex.EncodeToString(h.Sum(nil)))
h.Reset()
u := "https://" + locateupload_resp.Host + "/rest/2.0/pcs/superfile2"
params["partseq"] = strconv.Itoa(partseq)
res, err := base.RestyClient.R().
@ -245,25 +235,39 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
return err
}
log.Debugln(res.String())
if len(precreateResp.BlockList) > 0 {
up(float64(i) * 100 / float64(len(precreateResp.BlockList)))
if count > 0 {
up(float64(partseq) * 100 / float64(count))
}
}
// create file
params = map[string]string{
"isdir": "0",
"rtype": "1",
}
uploadBlockListStr, err := utils.Json.MarshalToString(uploadBlockList)
if err != nil {
return err
}
data = map[string]string{
"path": rawPath,
"size": strconv.FormatInt(stream.GetSize(), 10),
"uploadid": precreateResp.Uploadid,
"target_path": dstDir.GetPath(),
"block_list": block_list_str,
"local_mtime": strconv.FormatInt(time.Now().Unix(), 10),
"block_list": uploadBlockListStr,
"local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10),
}
res, err = d.post_form("/api/create", params, data, nil)
var createResp CreateResp
res, err = d.post_form("/api/create", params, data, &createResp)
log.Debugln(string(res))
return err
if err != nil {
return err
}
if createResp.Errno != 0 {
return fmt.Errorf("[terabox] failed to create file, errno: %d", createResp.Errno)
}
return nil
}
var _ driver.Driver = (*Terabox)(nil)
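
For reference, the per-chunk hashing pattern used by the rewritten Put (hash each fixed-size chunk while reading, collect the hex digests) can be exercised in isolation. A minimal sketch with a placeholder file name and the 4 MB chunk size assumed from the constants in util.go:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func main() {
	const chunkSize = 4 << 20 // 4 MB, mirroring initialChunkSize
	f, err := os.Open("example.bin") // placeholder file name
	if err != nil {
		panic(err)
	}
	defer f.Close()

	h := md5.New()
	buf := make([]byte, chunkSize)
	var blockList []string
	for {
		n, err := io.ReadFull(f, buf)
		if n > 0 {
			// hash only the bytes actually read, then reset for the next chunk
			h.Write(buf[:n])
			blockList = append(blockList, hex.EncodeToString(h.Sum(nil)))
			h.Reset()
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	fmt.Println(blockList)
}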

View File

@ -99,3 +99,7 @@ type CheckLoginResp struct {
type LocateUploadResp struct {
Host string `json:"host"`
}
type CreateResp struct {
Errno int `json:"errno"`
}
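
The create call in Put only surfaces a numeric errno; a minimal sketch of decoding such a response and turning a non-zero errno into an error (the payload below is made up, not a real Terabox response):

package main

import (
	"encoding/json"
	"fmt"
)

type createResp struct {
	Errno int `json:"errno"`
}

func main() {
	body := []byte(`{"errno": -6}`) // illustrative only
	var r createResp
	if err := json.Unmarshal(body, &r); err != nil {
		panic(err)
	}
	if r.Errno != 0 {
		fmt.Println(fmt.Errorf("[terabox] failed to create file, errno: %d", r.Errno))
	}
}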

View File

@ -17,6 +17,11 @@ import (
log "github.com/sirupsen/logrus"
)
const (
initialChunkSize int64 = 4 << 20 // 4MB
initialSizeThreshold int64 = 4 << 30 // 4GB
)
func getStrBetween(raw, start, end string) string {
regexPattern := fmt.Sprintf(`%s(.*?)%s`, regexp.QuoteMeta(start), regexp.QuoteMeta(end))
regex := regexp.MustCompile(regexPattern)
@ -86,11 +91,15 @@ func (d *Terabox) request(rurl string, method string, callback base.ReqCallback,
return d.request(rurl, method, callback, resp, true)
}
} else if errno == -6 {
log.Debugln(res.Header())
d.url_domain_prefix = res.Header()["Url-Domain-Prefix"][0]
d.base_url = "https://" + d.url_domain_prefix + ".terabox.com"
log.Debugln("Redirect base_url to", d.base_url)
return d.request(rurl, method, callback, resp, noRetry...)
header := res.Header()
log.Debugln(header)
urlDomainPrefix := header.Get("Url-Domain-Prefix")
if len(urlDomainPrefix) > 0 {
d.url_domain_prefix = urlDomainPrefix
d.base_url = "https://" + d.url_domain_prefix + ".terabox.com"
log.Debugln("Redirect base_url to", d.base_url)
return d.request(rurl, method, callback, resp, noRetry...)
}
}
return res.Body(), nil
}
@ -258,3 +267,19 @@ func encodeURIComponent(str string) string {
r = strings.ReplaceAll(r, "+", "%20")
return r
}
func calculateChunkSize(streamSize int64) int64 {
chunkSize := initialChunkSize
sizeThreshold := initialSizeThreshold
if streamSize < chunkSize {
return streamSize
}
for streamSize > sizeThreshold {
chunkSize <<= 1
sizeThreshold <<= 1
}
return chunkSize
}
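
A quick worked check of the doubling rule: files below 4 MB upload as a single chunk of their own size, anything up to 4 GB uses 4 MB chunks, and beyond that the chunk size doubles together with the threshold. A standalone copy of the logic with a few sample values, assuming the 4 MB / 4 GB constants above:

// mirrors calculateChunkSize above, shown standalone so the numbers can be verified
func chunkFor(size int64) int64 {
	chunk, threshold := int64(4<<20), int64(4<<30)
	if size < chunk {
		return size
	}
	for size > threshold {
		chunk <<= 1
		threshold <<= 1
	}
	return chunk
}

// chunkFor(1 << 30)  == 4 << 20  // 1 GB file  -> 4 MB chunks
// chunkFor(5 << 30)  == 8 << 20  // 5 GB file  -> 8 MB chunks
// chunkFor(20 << 30) == 32 << 20 // 20 GB file -> 32 MB chunks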

View File

@ -55,7 +55,9 @@ func (d *Vtencent) Init(ctx context.Context) error {
}
func (d *Vtencent) Drop(ctx context.Context) error {
d.cron.Stop()
if d.cron != nil {
d.cron.Stop()
}
return nil
}

go.mod
View File

@ -56,7 +56,7 @@ require (
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
github.com/xhofe/tache v0.1.2
github.com/xhofe/tache v0.1.3
github.com/xhofe/wopan-sdk-go v0.1.3
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
golang.org/x/crypto v0.27.0
@ -189,6 +189,7 @@ require (
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/otiai10/copy v1.14.0
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect

go.sum
View File

@ -391,6 +391,8 @@ github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ=
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
@ -512,6 +514,8 @@ github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3K
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
github.com/xhofe/tache v0.1.2 h1:pHrXlrWcbTb4G7hVUDW7Rc+YTUnLJvnLBrdktVE1Fqg=
github.com/xhofe/tache v0.1.2/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE=
github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A=
github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

View File

@ -131,22 +131,22 @@ func DefaultConfig() *Config {
TlsInsecureSkipVerify: true,
Tasks: TasksConfig{
Download: TaskConfig{
Workers: 5,
MaxRetry: 1,
TaskPersistant: true,
Workers: 5,
MaxRetry: 1,
// TaskPersistant: true,
},
Transfer: TaskConfig{
Workers: 5,
MaxRetry: 2,
TaskPersistant: true,
Workers: 5,
MaxRetry: 2,
// TaskPersistant: true,
},
Upload: TaskConfig{
Workers: 5,
},
Copy: TaskConfig{
Workers: 5,
MaxRetry: 2,
TaskPersistant: true,
Workers: 5,
MaxRetry: 2,
// TaskPersistant: true,
},
},
Cors: Cors{

View File

@ -11,13 +11,14 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/internal/task"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
"github.com/xhofe/tache"
)
type CopyTask struct {
tache.Base
task.TaskWithCreator
Status string `json:"-"` //don't save status to save space
SrcObjPath string `json:"src_path"`
DstDirPath string `json:"dst_path"`
@ -53,7 +54,7 @@ var CopyTaskManager *tache.Manager[*CopyTask]
// Copy if in the same storage, call move method
// if not, add copy task
func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) {
func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) {
srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath)
if err != nil {
return nil, errors.WithMessage(err, "failed get src storage")
@ -92,7 +93,11 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool
}
}
// not in the same storage
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed
t := &CopyTask{
TaskWithCreator: task.TaskWithCreator{
Creator: taskCreator,
},
srcStorage: srcStorage,
dstStorage: dstStorage,
SrcObjPath: srcObjActualPath,
@ -123,6 +128,9 @@ func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, src
srcObjPath := stdpath.Join(srcObjPath, obj.GetName())
dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName())
CopyTaskManager.Add(&CopyTask{
TaskWithCreator: task.TaskWithCreator{
Creator: t.Creator,
},
srcStorage: srcStorage,
dstStorage: dstStorage,
SrcObjPath: srcObjPath,

View File

@ -5,8 +5,8 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/task"
log "github.com/sirupsen/logrus"
"github.com/xhofe/tache"
)
// the param named path of functions in this package is a mount path
@ -69,7 +69,7 @@ func Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) er
return err
}
func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) {
func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) {
res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...)
if err != nil {
log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err)
@ -101,8 +101,8 @@ func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
return err
}
func PutAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) {
t, err := putAsTask(dstDirPath, file)
func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) {
t, err := putAsTask(ctx, dstDirPath, file)
if err != nil {
log.Errorf("failed put %s: %+v", dstDirPath, err)
}

View File

@ -7,12 +7,13 @@ import (
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/task"
"github.com/pkg/errors"
"github.com/xhofe/tache"
)
type UploadTask struct {
tache.Base
task.TaskWithCreator
storage driver.Driver
dstDirActualPath string
file model.FileStreamer
@ -33,7 +34,7 @@ func (t *UploadTask) Run() error {
var UploadTaskManager *tache.Manager[*UploadTask]
// putAsTask add as a put task and return immediately
func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) {
func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
if err != nil {
return nil, errors.WithMessage(err, "failed get storage")
@ -49,7 +50,11 @@ func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo,
//file.SetReader(tempFile)
//file.SetTmpFile(tempFile)
}
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed
t := &UploadTask{
TaskWithCreator: task.TaskWithCreator{
Creator: taskCreator,
},
storage: storage,
dstDirActualPath: dstDirActualPath,
file: file,

View File

@ -2,6 +2,8 @@ package tool
import (
"context"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/task"
"path/filepath"
"github.com/alist-org/alist/v3/internal/conf"
@ -9,7 +11,6 @@ import (
"github.com/alist-org/alist/v3/internal/op"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/xhofe/tache"
)
type DeletePolicy string
@ -28,7 +29,7 @@ type AddURLArgs struct {
DeletePolicy DeletePolicy
}
func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskInfoWithCreator, error) {
// get tool
tool, err := Tools.Get(args.Tool)
if err != nil {
@ -77,8 +78,12 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
// prevent the already-downloaded files from being deleted
deletePolicy = DeleteNever
}
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil when convert failed
t := &DownloadTask{
TaskWithCreator: task.TaskWithCreator{
Creator: taskCreator,
},
Url: args.URL,
DstDirPath: args.DstDirPath,
TempDir: tempDir,

View File

@ -7,13 +7,14 @@ import (
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/internal/task"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/xhofe/tache"
)
type DownloadTask struct {
tache.Base
task.TaskWithCreator
Url string `json:"url"`
DstDirPath string `json:"dst_dir_path"`
TempDir string `json:"temp_dir"`
@ -171,6 +172,9 @@ func (t *DownloadTask) Complete() error {
for i := range files {
file := files[i]
TransferTaskManager.Add(&TransferTask{
TaskWithCreator: task.TaskWithCreator{
Creator: t.Creator,
},
file: file,
DstDirPath: t.DstDirPath,
TempDir: t.TempDir,

View File

@ -8,6 +8,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/internal/task"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
@ -15,7 +16,7 @@ import (
)
type TransferTask struct {
tache.Base
task.TaskWithCreator
FileDir string `json:"file_dir"`
DstDirPath string `json:"dst_dir_path"`
TempDir string `json:"temp_dir"`

View File

@ -101,7 +101,7 @@ func initStorage(ctx context.Context, storage model.Storage, storageDriver drive
log.Errorf("panic init storage: %s", errInfo)
driverStorage.SetStatus(errInfo)
MustSaveDriverStorage(storageDriver)
storagesMap.Delete(driverStorage.MountPath)
storagesMap.Store(driverStorage.MountPath, storageDriver)
}
}()
// Unmarshal Addition

internal/task/base.go (new file)
View File

@ -0,0 +1,26 @@
package task
import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/xhofe/tache"
)
type TaskWithCreator struct {
tache.Base
Creator *model.User
}
func (t *TaskWithCreator) SetCreator(creator *model.User) {
t.Creator = creator
t.Persist()
}
func (t *TaskWithCreator) GetCreator() *model.User {
return t.Creator
}
type TaskInfoWithCreator interface {
tache.TaskWithInfo
SetCreator(creator *model.User)
GetCreator() *model.User
}
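
The `ctx.Value("user")` assertion that feeds Creator is repeated in the copy, upload and offline-download constructors earlier in this changeset; a tiny helper could centralize it. A sketch only, assuming the same context and model imports; the name is hypothetical:

// hypothetical helper, not part of the changeset
func creatorFromContext(ctx context.Context) *model.User {
	u, _ := ctx.Value("user").(*model.User) // nil when the context carries no authenticated user
	return u
}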

View File

@ -2,7 +2,7 @@ package handles
import (
"fmt"
"github.com/xhofe/tache"
"github.com/alist-org/alist/v3/internal/task"
"io"
stdpath "path"
@ -121,7 +121,7 @@ func FsCopy(c *gin.Context) {
common.ErrorResp(c, err, 403)
return
}
var addedTasks []tache.TaskWithInfo
var addedTasks []task.TaskInfoWithCreator
for i, name := range req.Names {
t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
if t != nil {

View File

@ -1,17 +1,16 @@
package handles
import (
"github.com/xhofe/tache"
"github.com/alist-org/alist/v3/internal/task"
"io"
"net/url"
stdpath "path"
"strconv"
"time"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
)
@ -58,9 +57,9 @@ func FsStream(c *gin.Context) {
Mimetype: c.GetHeader("Content-Type"),
WebPutAsTask: asTask,
}
var t tache.TaskWithInfo
var t task.TaskInfoWithCreator
if asTask {
t, err = fs.PutAsTask(dir, s)
t, err = fs.PutAsTask(c, dir, s)
} else {
err = fs.PutDirectly(c, dir, s, true)
}
@ -123,12 +122,12 @@ func FsForm(c *gin.Context) {
Mimetype: file.Header.Get("Content-Type"),
WebPutAsTask: asTask,
}
var t tache.TaskWithInfo
var t task.TaskInfoWithCreator
if asTask {
s.Reader = struct {
io.Reader
}{f}
t, err = fs.PutAsTask(dir, &s)
t, err = fs.PutAsTask(c, dir, &s)
} else {
ss, err := stream.NewSeekableStream(s, nil)
if err != nil {

View File

@ -5,9 +5,9 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/offline_download/tool"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/task"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
"github.com/xhofe/tache"
)
type SetAria2Req struct {
@ -133,7 +133,7 @@ func AddOfflineDownload(c *gin.Context) {
common.ErrorResp(c, err, 403)
return
}
var tasks []tache.TaskWithInfo
var tasks []task.TaskInfoWithCreator
for _, url := range req.Urls {
t, err := tool.AddURL(c, &tool.AddURLArgs{
URL: url,

View File

@ -1,6 +1,8 @@
package handles
import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/task"
"math"
"github.com/alist-org/alist/v3/internal/fs"
@ -12,15 +14,17 @@ import (
)
type TaskInfo struct {
ID string `json:"id"`
Name string `json:"name"`
State tache.State `json:"state"`
Status string `json:"status"`
Progress float64 `json:"progress"`
Error string `json:"error"`
ID string `json:"id"`
Name string `json:"name"`
Creator string `json:"creator"`
CreatorRole int `json:"creator_role"`
State tache.State `json:"state"`
Status string `json:"status"`
Progress float64 `json:"progress"`
Error string `json:"error"`
}
func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
func getTaskInfo[T task.TaskInfoWithCreator](task T) TaskInfo {
errMsg := ""
if task.GetErr() != nil {
errMsg = task.GetErr().Error()
@ -30,62 +34,142 @@ func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
if math.IsNaN(progress) {
progress = 100
}
creatorName := ""
creatorRole := -1
if task.GetCreator() != nil {
creatorName = task.GetCreator().Username
creatorRole = task.GetCreator().Role
}
return TaskInfo{
ID: task.GetID(),
Name: task.GetName(),
State: task.GetState(),
Status: task.GetStatus(),
Progress: progress,
Error: errMsg,
ID: task.GetID(),
Name: task.GetName(),
Creator: creatorName,
CreatorRole: creatorRole,
State: task.GetState(),
Status: task.GetStatus(),
Progress: progress,
Error: errMsg,
}
}
func getTaskInfos[T tache.TaskWithInfo](tasks []T) []TaskInfo {
func getTaskInfos[T task.TaskInfoWithCreator](tasks []T) []TaskInfo {
return utils.MustSliceConvert(tasks, getTaskInfo[T])
}
func taskRoute[T tache.TaskWithInfo](g *gin.RouterGroup, manager *tache.Manager[T]) {
g.GET("/undone", func(c *gin.Context) {
common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StatePending, tache.StateRunning,
tache.StateCanceling, tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)))
})
g.GET("/done", func(c *gin.Context) {
common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)))
})
g.POST("/info", func(c *gin.Context) {
tid := c.Query("tid")
task, ok := manager.GetByID(tid)
func argsContains[T comparable](v T, slice ...T) bool {
return utils.SliceContains(slice, v)
}
func getUserInfo(c *gin.Context) (bool, uint, bool) {
if user, ok := c.Value("user").(*model.User); ok {
return user.IsAdmin(), user.ID, true
} else {
return false, 0, false
}
}
func getTargetedHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc {
return func(c *gin.Context) {
isAdmin, uid, ok := getUserInfo(c)
if !ok {
// unreachable unless there is a bug elsewhere
common.ErrorStrResp(c, "user invalid", 401)
return
}
t, ok := manager.GetByID(c.Query("tid"))
if !ok {
common.ErrorStrResp(c, "task not found", 404)
return
}
if !isAdmin && uid != t.GetCreator().ID {
// to avoid an attacker using error messages to guess valid TID, return a 404 rather than a 403
common.ErrorStrResp(c, "task not found", 404)
return
}
callback(c, t)
}
}
func taskRoute[T task.TaskInfoWithCreator](g *gin.RouterGroup, manager *tache.Manager[T]) {
g.GET("/undone", func(c *gin.Context) {
isAdmin, uid, ok := getUserInfo(c)
if !ok {
// unreachable unless there is a bug elsewhere
common.ErrorStrResp(c, "user invalid", 401)
return
}
common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
// avoid directly passing the user object into the function to reduce closure size
return (isAdmin || uid == task.GetCreator().ID) &&
argsContains(task.GetState(), tache.StatePending, tache.StateRunning, tache.StateCanceling,
tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)
})))
})
g.GET("/done", func(c *gin.Context) {
isAdmin, uid, ok := getUserInfo(c)
if !ok {
// unreachable unless there is a bug elsewhere
common.ErrorStrResp(c, "user invalid", 401)
return
}
common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
return (isAdmin || uid == task.GetCreator().ID) &&
argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
})))
})
g.POST("/info", getTargetedHandler(manager, func(c *gin.Context, task T) {
common.SuccessResp(c, getTaskInfo(task))
})
g.POST("/cancel", func(c *gin.Context) {
tid := c.Query("tid")
manager.Cancel(tid)
}))
g.POST("/cancel", getTargetedHandler(manager, func(c *gin.Context, task T) {
manager.Cancel(task.GetID())
common.SuccessResp(c)
})
g.POST("/delete", func(c *gin.Context) {
tid := c.Query("tid")
manager.Remove(tid)
}))
g.POST("/delete", getTargetedHandler(manager, func(c *gin.Context, task T) {
manager.Remove(task.GetID())
common.SuccessResp(c)
})
g.POST("/retry", func(c *gin.Context) {
tid := c.Query("tid")
manager.Retry(tid)
}))
g.POST("/retry", getTargetedHandler(manager, func(c *gin.Context, task T) {
manager.Retry(task.GetID())
common.SuccessResp(c)
})
}))
g.POST("/clear_done", func(c *gin.Context) {
manager.RemoveByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
isAdmin, uid, ok := getUserInfo(c)
if !ok {
// unreachable unless there is a bug elsewhere
common.ErrorStrResp(c, "user invalid", 401)
return
}
manager.RemoveByCondition(func(task T) bool {
return (isAdmin || uid == task.GetCreator().ID) &&
argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
})
common.SuccessResp(c)
})
g.POST("/clear_succeeded", func(c *gin.Context) {
manager.RemoveByState(tache.StateSucceeded)
isAdmin, uid, ok := getUserInfo(c)
if !ok {
// unreachable unless there is a bug elsewhere
common.ErrorStrResp(c, "user invalid", 401)
return
}
manager.RemoveByCondition(func(task T) bool {
return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateSucceeded
})
common.SuccessResp(c)
})
g.POST("/retry_failed", func(c *gin.Context) {
manager.RetryAllFailed()
isAdmin, uid, ok := getUserInfo(c)
if !ok {
// unreachable unless there is a bug elsewhere
common.ErrorStrResp(c, "user invalid", 401)
return
}
tasks := manager.GetByCondition(func(task T) bool {
return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateFailed
})
for _, t := range tasks {
manager.Retry(t.GetID())
}
common.SuccessResp(c)
})
}
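
The admin-or-owner check is inlined in every handler above and dereferences GetCreator() without a nil check, even though the task constructors note the creator can be nil when the context assertion fails; a small generic predicate could factor it out and guard that case. A sketch, not part of the codebase:

// hypothetical helper; the handlers above inline this check
func ownedBy[T task.TaskInfoWithCreator](t T, isAdmin bool, uid uint) bool {
	c := t.GetCreator()
	return isAdmin || (c != nil && c.ID == uid)
}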

View File

@ -127,6 +127,16 @@ func Authn(c *gin.Context) {
c.Next()
}
func AuthNotGuest(c *gin.Context) {
user := c.MustGet("user").(*model.User)
if user.IsGuest() {
common.ErrorStrResp(c, "You are a guest", 403)
c.Abort()
} else {
c.Next()
}
}
func AuthAdmin(c *gin.Context) {
user := c.MustGet("user").(*model.User)
if !user.IsAdmin() {

View File

@ -76,6 +76,7 @@ func Init(e *gin.Engine) {
public.Any("/offline_download_tools", handles.OfflineDownloadTools)
_fs(auth.Group("/fs"))
_task(auth.Group("/task", middlewares.AuthNotGuest))
admin(auth.Group("/admin", middlewares.AuthAdmin))
if flags.Debug || flags.Dev {
debug(g.Group("/debug"))
@ -127,8 +128,8 @@ func admin(g *gin.RouterGroup) {
setting.POST("/set_qbit", handles.SetQbittorrent)
setting.POST("/set_transmission", handles.SetTransmission)
task := g.Group("/task")
handles.SetupTaskRoute(task)
// retain /admin/task API to ensure compatibility with legacy automation scripts
_task(g.Group("/task"))
ms := g.Group("/message")
ms.POST("/get", message.HttpInstance.GetHandle)
@ -166,6 +167,10 @@ func _fs(g *gin.RouterGroup) {
g.POST("/add_offline_download", handles.AddOfflineDownload)
}
func _task(g *gin.RouterGroup) {
handles.SetupTaskRoute(g)
}
func Cors(r *gin.Engine) {
config := cors.DefaultConfig()
// config.AllowAllOrigins = true