Compare commits

50 commits (SHA1):

cf58ab3a78, 33ba7f1521, 201e25c17f, ecefa5e0eb, 650b03aeb1, 7341846499, a3908fd9a6, 2a035302b2, 016e169c41, 088120df82, aa45a82914, 5084d98398, fa15c576f0, 2d3605c684, 492b49d77a, 94915b2148, 2dec756f23, 4c0cffd29b, 25c5e075a9, 398c04386a, 12b429584e, 150dcc2147, 0ba754fd40, 28d2367a87, a4ad98ee3e, 1c01dc6839, c3c5843dce, 6c38c5972d, 0a46979c51, 67c93eed2b, f58de9923a, 2671c876f1, e707fa38f1, b803b0070e, 64ceb5afb6, 10c7ebb1c0, d0cda62703, ce0b99a510, 34a148c83d, 4955d8cec8, 216e3909f3, a701432b8b, a2dc45a80b, 48ac23c8de, 2830575490, e8538bd215, c3e43ff605, 5f19d73fcc, bdf4b52885, 6106a2d4cc
.github/workflows/beta_release.yml (vendored, 35 changed lines)

@@ -8,6 +8,9 @@ concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
permissions:
contents: write
jobs:
changelog:
strategy:
@@ -54,7 +57,7 @@ jobs:
strategy:
matrix:
include:
- target: '!(*musl*|*windows-arm64*|*android*)' # xgo
- target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo
hash: "md5"
- target: 'linux-!(arm*)-musl*' #musl-not-arm
hash: "md5-linux-musl"
@@ -64,6 +67,9 @@ jobs:
hash: "md5-windows-arm64"
- target: 'android-*' #android
hash: "md5-android"
- target: 'freebsd-*' #freebsd
hash: "md5-freebsd"
name: Beta Release
runs-on: ubuntu-latest
steps:
@@ -105,14 +111,23 @@ jobs:
name: Beta Release Desktop
runs-on: ubuntu-latest
steps:
- uses: peter-evans/create-or-update-comment@v4
- name: Checkout repo
uses: actions/checkout@v4
with:
issue-number: 69
body: |
/release-beta
- triggered by @${{ github.actor }}
- commit sha: ${{ github.sha }}
- view files: https://github.com/alist-org/alist/tree/${{ github.sha }}
reactions: 'rocket'
token: ${{ secrets.MY_TOKEN }}
repository: alist-org/desktop-release
ref: main
persist-credentials: false
fetch-depth: 0
- name: Commit
run: |
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
git commit --allow-empty -m "Trigger build for ${{ github.sha }}"
- name: Push commit
uses: ad-m/github-push-action@master
with:
github_token: ${{ secrets.MY_TOKEN }}
branch: main
repository: alist-org/desktop-release
.github/workflows/build_docker.yml (vendored, 6 changed lines)

@@ -53,7 +53,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
key: docker-musl-libs-v2
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
@@ -84,7 +84,7 @@ jobs:
push: ${{ github.event_name == 'push' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
- name: Build and push with ffmpeg
id: docker_build_ffmpeg
@@ -96,7 +96,7 @@ jobs:
tags: ${{ steps.meta-ffmpeg.outputs.tags }}
labels: ${{ steps.meta-ffmpeg.outputs.labels }}
build-args: INSTALL_FFMPEG=true
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
build_docker_with_aria2:
needs: build_docker
.github/workflows/release.yml (vendored, 17 changed lines)

@@ -13,6 +13,23 @@ jobs:
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Prerelease
uses: irongut/EditRelease@v1.2.0
with:
.github/workflows/release_docker.yml (vendored, 6 changed lines)

@@ -22,7 +22,7 @@ jobs:
uses: actions/cache@v4
with:
path: build/musl-libs
key: docker-musl-libs
key: docker-musl-libs-v2
- name: Download Musl Library
if: steps.cache-musl.outputs.cache-hit != 'true'
@@ -58,7 +58,7 @@ jobs:
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
- name: Docker meta with ffmpeg
id: meta-ffmpeg
@@ -79,7 +79,7 @@ jobs:
tags: ${{ steps.meta-ffmpeg.outputs.tags }}
labels: ${{ steps.meta-ffmpeg.outputs.labels }}
build-args: INSTALL_FFMPEG=true
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64
release_docker_with_aria2:
needs: release_docker
.github/workflows/release_freebsd.yml (vendored, new file, 34 lines)

@@ -0,0 +1,34 @@
name: release_freebsd
on:
release:
types: [ published ]
jobs:
release_freebsd:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.21' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release freebsd
- name: Upload assets
uses: softprops/action-gh-release@v2
with:
files: build/compress/*
@@ -58,7 +58,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
- [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [139yun](https://yun.139.com/) (Personal, Family)
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/)
- [x] [Terabox](https://www.terabox.com/main)
@@ -98,7 +98,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
## Document
<https://alist.nn.ci/>
<https://alistgo.com/>
## Demo
@@ -138,4 +138,4 @@ The `AList` is open-source software licensed under the AGPL-3.0 license.
---
> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
> [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
@@ -58,7 +58,7 @@
- [x] WebDav(支持无API的OneDrive/SharePoint)
- [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
- [x] [分秒帧](https://www.mediatrack.cn/)
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云)
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
- [x] [Yandex.Disk](https://disk.yandex.com/)
- [x] [百度网盘](http://pan.baidu.com/)
- [x] [UC网盘](https://drive.uc.cn)
@@ -58,7 +58,7 @@
- [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [139yun](https://yun.139.com/) (Personal, Family)
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
- [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/)
- [x] [Terabox](https://www.terabox.com/main)
build.sh (37 changed lines)

@@ -93,7 +93,7 @@ BuildDocker() {
PrepareBuildDockerMusl() {
mkdir -p build/musl-libs
BASE="https://musl.cc/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross)
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
lib_tgz="build/${i}.tgz"
@@ -112,8 +112,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc)
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@@ -233,6 +233,29 @@ BuildReleaseAndroid() {
done
}
BuildReleaseFreeBSD() {
rm -rf .git/
mkdir -p "build/freebsd"
OS_ARCHES=(amd64 arm64 i386)
GO_ARCHES=(amd64 arm64 386)
CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
echo building for freebsd-${os_arch}
sudo mkdir -p "/opt/freebsd/${os_arch}"
wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
rm base.txz
export GOOS=freebsd
export GOARCH=${GO_ARCHES[$i]}
export CC=${cgo_cc}
export CGO_ENABLED=1
export CGO_LDFLAGS="-fuse-ld=lld"
go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter .
done
}
MakeRelease() {
cd build
mkdir compress
@@ -251,6 +274,11 @@ MakeRelease() {
tar -czvf compress/"$i".tar.gz alist
rm -f alist
done
for i in $(find . -type f -name "$appName-freebsd-*"); do
cp "$i" alist
tar -czvf compress/"$i".tar.gz alist
rm -f alist
done
for i in $(find . -type f -name "$appName-windows-*"); do
cp "$i" alist.exe
zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe
@@ -288,6 +316,9 @@ elif [ "$1" = "release" ]; then
elif [ "$2" = "android" ]; then
BuildReleaseAndroid
MakeRelease "md5-android.txt"
elif [ "$2" = "freebsd" ]; then
BuildReleaseFreeBSD
MakeRelease "md5-freebsd.txt"
elif [ "$2" = "web" ]; then
echo "web only"
else
@@ -15,6 +15,7 @@ import (
func Init() {
bootstrap.InitConfig()
bootstrap.Log()
bootstrap.InitHostKey()
bootstrap.InitDB()
data.InitData()
bootstrap.InitIndex()
@@ -4,6 +4,8 @@ import (
"context"
"errors"
"fmt"
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
"github.com/KirCute/sftpd-alist"
"net"
"net/http"
"os"
@@ -112,6 +114,42 @@ the address is defined in config file`,
}
}()
}
var ftpDriver *server.FtpMainDriver
var ftpServer *ftpserver.FtpServer
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
var err error
ftpDriver, err = server.NewMainDriver()
if err != nil {
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
} else {
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
go func() {
ftpServer = ftpserver.NewFtpServer(ftpDriver)
err = ftpServer.ListenAndServe()
if err != nil {
utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
}
}()
}
}
var sftpDriver *server.SftpDriver
var sftpServer *sftpd.SftpServer
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
var err error
sftpDriver, err = server.NewSftpDriver()
if err != nil {
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
} else {
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
go func() {
sftpServer = sftpd.NewSftpServer(sftpDriver)
err = sftpServer.RunServer()
if err != nil {
utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
}
}()
}
}
// Wait for interrupt signal to gracefully shutdown the server with
// a timeout of 1 second.
quit := make(chan os.Signal, 1)
@@ -152,6 +190,25 @@ the address is defined in config file`,
}
}()
}
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
wg.Add(1)
go func() {
defer wg.Done()
ftpDriver.Stop()
if err := ftpServer.Stop(); err != nil {
utils.Log.Fatal("FTP server shutdown err: ", err)
}
}()
}
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
wg.Add(1)
go func() {
defer wg.Done()
if err := sftpServer.Close(); err != nil {
utils.Log.Fatal("SFTP server shutdown err: ", err)
}
}()
}
wg.Wait()
utils.Log.Println("Server exit")
},
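The FTP/SFTP additions above reuse the pattern already used for the HTTP listeners: start each optional listener in its own goroutine only when it is enabled and configured, then stop it from the shutdown path behind a shared WaitGroup. The following is a minimal, self-contained sketch of that pattern with generic names (it is not the alist server package; the `listener` interface is a placeholder for an FTP or SFTP server type):

```go
package example

import (
	"log"
	"sync"
)

// listener is a hypothetical abstraction over anything that can be
// started and stopped, e.g. an FTP or SFTP server.
type listener interface {
	ListenAndServe() error
	Stop() error
}

// startIfEnabled launches l in a goroutine when enabled, and returns a
// shutdown func that stops it under the caller's WaitGroup.
func startIfEnabled(enabled bool, name string, l listener, wg *sync.WaitGroup) func() {
	if !enabled || l == nil {
		return func() {}
	}
	go func() {
		if err := l.ListenAndServe(); err != nil {
			log.Printf("%s server stopped: %v", name, err)
		}
	}()
	return func() {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := l.Stop(); err != nil {
				log.Printf("%s shutdown err: %v", name, err)
			}
		}()
	}
}
```

A caller would collect the returned shutdown funcs, invoke them when the interrupt signal arrives, and then `wg.Wait()` before exiting, which is the shape of the shutdown block in the diff.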
drivers/115/appver.go (new file, 43 lines)

@@ -0,0 +1,43 @@
package _115
import (
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/drivers/base"
log "github.com/sirupsen/logrus"
)
var (
md5Salt = "Qclm8MGWUv59TnrR0XPg"
appVer = "27.0.5.7"
)
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
result := driver115.VersionResp{}
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
err = driver115.CheckErr(err, &result, resp)
if err != nil {
return nil, err
}
return result.Data.GetAppVersions(), nil
}
func (d *Pan115) getAppVer() string {
// todo add some cache?
vers, err := d.getAppVersion()
if err != nil {
log.Warnf("[115] get app version failed: %v", err)
return appVer
}
for _, ver := range vers {
if ver.AppName == "win" {
return ver.Version
}
}
return appVer
}
func (d *Pan115) initAppVer() {
appVer = d.getAppVer()
}
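As the driver diff below shows, `initAppVer` is wired into `Init` through a `sync.Once` (`d.appVerOnce.Do(d.initAppVer)`), so the remote version lookup runs at most once per driver instance. A minimal sketch of that once-guarded initialization, with placeholder names:

```go
package example

import "sync"

type client struct {
	appVerOnce sync.Once
	appVer     string
}

// fetchAppVer stands in for the real version lookup over the network.
func (c *client) fetchAppVer() string { return "27.0.5.7" }

// Init may be called from several code paths; sync.Once guarantees the
// (potentially slow) lookup only happens on the first call.
func (c *client) Init() {
	c.appVerOnce.Do(func() {
		c.appVer = c.fetchAppVer()
	})
}
```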
@@ -3,6 +3,7 @@ package _115
import (
"context"
"strings"
"sync"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/driver"
@@ -18,6 +19,7 @@ type Pan115 struct {
Addition
client *driver115.Pan115Client
limiter *rate.Limiter
appVerOnce sync.Once
}
func (d *Pan115) Config() driver.Config {
@@ -29,6 +31,7 @@ func (d *Pan115) GetAddition() driver.Additional {
}
func (d *Pan115) Init(ctx context.Context) error {
d.appVerOnce.Do(d.initAppVer)
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
@@ -76,28 +79,60 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
return link, nil
}
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
return err
result := driver115.MkdirResp{}
form := map[string]string{
"pid": parentDir.GetID(),
"cname": dirName,
}
return nil
req := d.client.NewRequest().
SetFormData(form).
SetResult(&result).
ForceContentType("application/json;charset=UTF-8")
resp, err := req.Post(driver115.ApiDirAdd)
err = driver115.CheckErr(err, &result, resp)
if err != nil {
return nil, err
}
f, err := d.getNewFile(result.FileID)
if err != nil {
return nil, nil
}
return f, nil
}
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
return d.client.Move(dstDir.GetID(), srcObj.GetID())
if err := d.client.Move(dstDir.GetID(), srcObj.GetID()); err != nil {
return nil, err
}
f, err := d.getNewFile(srcObj.GetID())
if err != nil {
return nil, nil
}
return f, nil
}
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
return d.client.Rename(srcObj.GetID(), newName)
if err := d.client.Rename(srcObj.GetID(), newName); err != nil {
return nil, err
}
f, err := d.getNewFile((srcObj.GetID()))
if err != nil {
return nil, nil
}
return f, nil
}
func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -114,9 +149,9 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
return d.client.Delete(obj.GetID())
}
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return err
return nil, err
}
var (
@@ -125,10 +160,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
)
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
return err
return nil, err
}
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
return driver115.ErrUploadTooLarge
return nil, driver115.ErrUploadTooLarge
}
//if digest, err = d.client.GetDigestResult(stream); err != nil {
//	return err
@@ -141,22 +176,22 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
if err != nil {
return err
return nil, err
}
preHash, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return err
return nil, err
}
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) <= 0 {
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
return nil, err
}
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return err
return nil, err
}
}
fullHash = strings.ToUpper(fullHash)
@@ -165,20 +200,36 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// note that 115 add timeout for rapid-upload,
// and "sig invalid" err is thrown even when the hash is correct after timeout.
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
return err
return nil, err
}
if matched, err := fastInfo.Ok(); err != nil {
return err
return nil, err
} else if matched {
return nil
f, err := d.getNewFileByPickCode(fastInfo.PickCode)
if err != nil {
return nil, nil
}
return f, nil
}
var uploadResult *UploadResult
// 闪传失败,上传
if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传
return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
if stream.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传
if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil {
return nil, err
}
} else {
// 分片上传
return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil {
return nil, err
}
}
file, err := d.getNewFile(uploadResult.Data.FileID)
if err != nil {
return nil, nil
}
return file, nil
}
func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) {
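The signature changes above move MakeDir, Move, Rename and Put from returning a bare `error` to returning `(model.Obj, error)`, apparently so the driver can hand back the created or updated object, typically by re-fetching it (`getNewFile` / `getNewFileByPickCode`) once the API call succeeds. A hedged sketch of that call-then-refetch shape, using placeholder types rather than the real alist driver interface:

```go
package example

import "context"

// Obj is a stand-in for model.Obj.
type Obj interface{ GetID() string }

// api is a hypothetical client with the two calls the sketch needs.
type api interface {
	Mkdir(parentID, name string) (newID string, err error)
	GetFile(id string) (Obj, error)
}

type drv struct{ client api }

// MakeDir creates the directory and then re-fetches it so the caller
// receives a fully populated object instead of just a nil error.
func (d *drv) MakeDir(ctx context.Context, parentID, name string) (Obj, error) {
	id, err := d.client.Mkdir(parentID, name)
	if err != nil {
		return nil, err
	}
	return d.client.GetFile(id) // refetch to obtain size, timestamps, etc.
}
```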
@@ -9,8 +9,8 @@ type Addition struct {
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
PageSize int64 `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate ([limit]r/1s)"`
driver.RootID
}
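The meta change above raises the default page_size to 1000 and re-documents limit_rate as requests per second ([limit]r/1s). In the driver this option feeds a golang.org/x/time/rate limiter (see the Init change earlier: `rate.NewLimiter(rate.Limit(d.LimitRate), 1)`). A small illustrative sketch of how such a limiter gates API calls, with hypothetical names:

```go
package example

import (
	"context"

	"golang.org/x/time/rate"
)

type pan struct {
	limiter *rate.Limiter
}

func newPan(limitRate float64) *pan {
	// limitRate requests per second with a burst of 1, mirroring the driver's setup.
	return &pan{limiter: rate.NewLimiter(rate.Limit(limitRate), 1)}
}

// waitLimit blocks until the limiter grants a slot or the context is done.
func (p *pan) waitLimit(ctx context.Context) error {
	if p.limiter != nil {
		return p.limiter.Wait(ctx)
	}
	return nil
}
```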
@@ -1,10 +1,11 @@
package _115
import (
"time"
"github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
var _ model.Obj = (*FileObj)(nil)
@@ -20,3 +21,18 @@ func (f *FileObj) CreateTime() time.Time {
func (f *FileObj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.Sha1)
}
type UploadResult struct {
driver.BasicResp
Data struct {
PickCode string `json:"pick_code"`
FileSize int `json:"file_size"`
FileID string `json:"file_id"`
ThumbURL string `json:"thumb_url"`
Sha1 string `json:"sha1"`
Aid int `json:"aid"`
FileName string `json:"file_name"`
Cid string `json:"cid"`
IsVideo int `json:"is_video"`
} `json:"data"`
}
@@ -2,13 +2,14 @@ package _115
import (
"bytes"
"crypto/md5"
"crypto/tls"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
@@ -26,12 +27,11 @@ import (
"github.com/pkg/errors"
)
var UserAgent = driver115.UA115Browser
// var UserAgent = driver115.UA115Browser
func (d *Pan115) login() error {
var err error
opts := []driver115.Option{
driver115.UA(UserAgent),
driver115.UA(d.getUA()),
func(c *driver115.Pan115Client) {
c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
},
@@ -45,7 +45,7 @@ func (d *Pan115) login() error {
if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
return errors.Wrap(err, "failed to login by qrcode")
}
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
d.QRCodeToken = ""
} else if d.Cookie != "" {
if err = cr.FromCookie(d.Cookie); err != nil {
@@ -73,11 +73,39 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
return res, nil
}
const (
appVer = "2.0.3.6"
)
func (d *Pan115) getNewFile(fileId string) (*FileObj, error) {
file, err := d.client.GetFile(fileId)
if err != nil {
return nil, err
}
return &FileObj{*file}, nil
}
func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) {
result := driver115.GetFileInfoResponse{}
req := d.client.NewRequest().
SetQueryParam("pick_code", pickCode).
ForceContentType("application/json;charset=UTF-8").
SetResult(&result)
resp, err := req.Get(driver115.ApiFileInfo)
if err := driver115.CheckErr(err, &result, resp); err != nil {
return nil, err
}
if len(result.Files) == 0 {
return nil, errors.New("not get file info")
}
fileInfo := result.Files[0]
f := &FileObj{}
f.From(fileInfo)
return f, nil
}
func (d *Pan115) getUA() string {
return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer)
}
func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
key := crypto.GenerateKey()
result := driver115.DownloadResp{}
params, err := utils.Json.Marshal(map[string]string{"pickcode": pickCode})
@@ -91,10 +119,10 @@ func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
reqUrl := fmt.Sprintf("%s?t=%s", driver115.ApiDownloadGetUrl, driver115.Now().String())
req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("Cookie", c.Cookie)
req.Header.Set("Cookie", d.Cookie)
req.Header.Set("User-Agent", ua)
resp, err := c.client.Client.GetClient().Do(req)
resp, err := d.client.Client.GetClient().Do(req)
if err != nil {
return nil, err
}
@@ -132,6 +160,13 @@ func (c *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, e
return nil, driver115.ErrUnexpected
}
func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string {
userID := strconv.FormatInt(c.client.UserID, 10)
userIDMd5 := md5.Sum([]byte(userID))
tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer))
return hex.EncodeToString(tokenMd5[:])
}
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
var (
ecdhCipher *cipher.EcdhCipher
@@ -161,7 +196,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri
signKey, signVal := "", ""
for retry := true; retry; {
t := driver115.Now()
t := driver115.NowMilli()
if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
return nil, err
@@ -172,7 +207,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri
}
form.Set("t", t.String())
form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
form.Set("token", d.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
if signKey != "" && signVal != "" {
form.Set("sign_key", signKey)
form.Set("sign_val", signVal)
@@ -225,6 +260,9 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
length := end - start + 1
reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
if err != nil {
return "", err
}
hashStr, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return "", err
@@ -233,8 +271,38 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
return
}
// UploadByOSS use aliyun sdk to upload
func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) {
ossToken, err := c.client.GetOSSToken()
if err != nil {
return nil, err
}
ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret)
if err != nil {
return nil, err
}
bucket, err := ossClient.Bucket(params.Bucket)
if err != nil {
return nil, err
}
var bodyBytes []byte
if err = bucket.PutObject(params.Object, r, append(
driver115.OssOption(params, ossToken),
oss.CallbackResult(&bodyBytes),
)...); err != nil {
return nil, err
}
var uploadResult UploadResult
if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
return nil, err
}
return &uploadResult, uploadResult.Err(string(bodyBytes))
}
// UploadByMultipart upload by mutipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
@@ -242,12 +310,13 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
ossClient *oss.Client
bucket *oss.Bucket
ossToken *driver115.UploadOSSTokenResp
bodyBytes []byte
err error
)
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
return nil, err
}
options := driver115.DefalutUploadMultipartOptions()
@@ -256,17 +325,19 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
f(options)
}
}
// oss 启用Sequential必须按顺序上传
options.ThreadsNum = 1
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
return nil, err
}
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
return err
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil {
return nil, err
}
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
return err
return nil, err
}
// ossToken一小时后就会失效,所以每50分钟重新获取一次
@@ -276,14 +347,15 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
timeout := time.NewTimer(options.Timeout)
if chunks, err = SplitFile(fileSize); err != nil {
return err
return nil, err
}
if imur, err = bucket.InitiateMultipartUpload(params.Object,
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
oss.UserAgentHeader(driver115.OSSUserAgent),
oss.EnableSha1(), oss.Sequential(),
); err != nil {
return err
return nil, err
}
wg := sync.WaitGroup{}
@@ -325,8 +397,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
continue
}
b := bytes.NewBuffer(buf)
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}
}
@@ -350,25 +421,31 @@ LOOP:
case <-ticker.C:
// 到时重新获取ossToken
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
return nil, err
}
case <-quit:
break LOOP
case <-errCh:
return err
return nil, err
case <-timeout.C:
return fmt.Errorf("time out")
return nil, fmt.Errorf("time out")
}
}
// EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的
if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
// 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
return err
// 不知道啥原因,oss那边分片上传不计算sha1,导致115服务器校验错误
// params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1)
if _, err := bucket.CompleteMultipartUpload(imur, parts, append(
driver115.OssOption(params, ossToken),
oss.CallbackResult(&bodyBytes),
)...); err != nil {
return nil, err
}
var uploadResult UploadResult
if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
return nil, err
}
return d.checkUploadStatus(dirID, params.SHA1)
return &uploadResult, uploadResult.Err(string(bodyBytes))
}
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
@@ -377,27 +454,6 @@ func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
}
}
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
// 验证上传是否成功
req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
opts := []driver115.GetFileOptions{
driver115.WithOrder(driver115.FileOrderByTime),
driver115.WithShowDirEnable(false),
driver115.WithAsc(false),
driver115.WithLimit(500),
}
fResp, err := driver115.GetFiles(req, dirID, opts...)
if err != nil {
return err
}
for _, fileInfo := range fResp.Files {
if fileInfo.Sha1 == sha1 {
return nil
}
}
return driver115.ErrUploadFailed
}
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
for i := int64(1); i < 10; i++ {
if fileSize < i*utils.GB { // 文件大小小于iGB时分为i*1000片
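The GenerateToken method shown above concatenates the MD5 salt, file metadata, the user ID, an MD5 of the user ID and the app version, then MD5-hashes the whole string. A self-contained version of that computation (the parameter values a caller would pass are made up for illustration):

```go
package example

import (
	"crypto/md5"
	"encoding/hex"
)

// generateToken mirrors the concatenation order used in the diff:
// salt + fileID + fileSize + signKey + signVal + userID + timestamp + md5(userID) + appVer.
func generateToken(salt, fileID, fileSize, signKey, signVal, userID, timestamp, appVer string) string {
	userIDMd5 := md5.Sum([]byte(userID))
	sum := md5.Sum([]byte(salt + fileID + fileSize + signKey + signVal +
		userID + timestamp + hex.EncodeToString(userIDMd5[:]) + appVer))
	return hex.EncodeToString(sum[:])
}
```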
@@ -9,7 +9,7 @@ type Addition struct {
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
PageSize int64 `json:"page_size" type:"number" default:"20" help:"list api per page size of 115 driver"`
PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"`
ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"`
@@ -96,7 +96,7 @@ func (d *Pan115Share) login() error {
if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
return errors.Wrap(err, "failed to login by qrcode")
}
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
d.QRCodeToken = ""
} else if d.Cookie != "" {
if err = cr.FromCookie(d.Cookie); err != nil {
@@ -82,6 +82,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
"type": f.Type,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
}, nil)
if err != nil {
@@ -26,8 +26,9 @@ const (
Api = "https://www.123pan.com/api"
AApi = "https://www.123pan.com/a/api"
BApi = "https://www.123pan.com/b/api"
LoginApi = "https://login.123pan.com/api"
MainApi = BApi
SignIn = MainApi + "/user/sign_in"
SignIn = LoginApi + "/user/sign_in"
Logout = MainApi + "/user/logout"
UserInfo = MainApi + "/user/info"
FileList = MainApi + "/file/list/new"
@@ -6,6 +6,7 @@ import (
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
@@ -14,8 +15,9 @@ import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/utils/random"
log "github.com/sirupsen/logrus"
)
@@ -56,6 +58,11 @@ func (d *Yun139) Init(ctx context.Context) error {
d.RootFolderID = "root"
}
fallthrough
case MetaGroup:
if len(d.Addition.RootFolderID) == 0 {
d.RootFolderID = d.CloudID
}
fallthrough
case MetaFamily:
decode, err := base64.StdEncoding.DecodeString(d.Authorization)
if err != nil {
@@ -96,6 +103,8 @@ func (d *Yun139) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
return d.getFiles(dir.GetID())
case MetaFamily:
return d.familyGetFiles(dir.GetID())
case MetaGroup:
return d.groupGetFiles(dir.GetID())
default:
return nil, errs.NotImplement
}
@@ -108,9 +117,11 @@ func (d *Yun139) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
case MetaPersonalNew:
url, err = d.personalGetLink(file.GetID())
case MetaPersonal:
fallthrough
case MetaFamily:
url, err = d.getLink(file.GetID())
case MetaFamily:
url, err = d.familyGetLink(file.GetID(), file.GetPath())
case MetaGroup:
url, err = d.groupGetLink(file.GetID(), file.GetPath())
default:
return nil, errs.NotImplement
}
@@ -154,8 +165,22 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
"accountType": 1,
},
"docLibName": dirName,
"path": path.Join(parentDir.GetPath(), parentDir.GetID()),
}
pathname := "/orchestration/familyCloud/cloudCatalog/v1.0/createCloudDoc"
pathname := "/orchestration/familyCloud-rebuild/cloudCatalog/v1.0/createCloudDoc"
_, err = d.post(pathname, data, nil)
case MetaGroup:
data := base.Json{
"catalogName": dirName,
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
"groupID": d.CloudID,
"parentFileId": parentDir.GetID(),
"path": path.Join(parentDir.GetPath(), parentDir.GetID()),
}
pathname := "/orchestration/group-rebuild/catalog/v1.0/createGroupCatalog"
_, err = d.post(pathname, data, nil)
default:
err = errs.NotImplement
@@ -176,6 +201,34 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
return nil, err
}
return srcObj, nil
case MetaGroup:
var contentList []string
var catalogList []string
if srcObj.IsDir() {
catalogList = append(catalogList, srcObj.GetID())
} else {
contentList = append(contentList, srcObj.GetID())
}
data := base.Json{
"taskType": 3,
"srcType": 2,
"srcGroupID": d.CloudID,
"destType": 2,
"destGroupID": d.CloudID,
"destPath": dstDir.GetPath(),
"contentList": contentList,
"catalogList": catalogList,
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
}
pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask"
_, err := d.post(pathname, data, nil)
if err != nil {
return nil, err
}
return srcObj, nil
case MetaPersonal:
var contentInfoList []string
var catalogInfoList []string
@@ -246,6 +299,65 @@ func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) e
pathname = "/orchestration/personalCloud/content/v1.0/updateContentInfo"
}
_, err = d.post(pathname, data, nil)
case MetaGroup:
var data base.Json
var pathname string
if srcObj.IsDir() {
data = base.Json{
"groupID": d.CloudID,
"modifyCatalogID": srcObj.GetID(),
"modifyCatalogName": newName,
"path": srcObj.GetPath(),
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
}
pathname = "/orchestration/group-rebuild/catalog/v1.0/modifyGroupCatalog"
} else {
data = base.Json{
"groupID": d.CloudID,
"contentID": srcObj.GetID(),
"contentName": newName,
"path": srcObj.GetPath(),
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
}
pathname = "/orchestration/group-rebuild/content/v1.0/modifyGroupContent"
}
_, err = d.post(pathname, data, nil)
case MetaFamily:
var data base.Json
var pathname string
if srcObj.IsDir() {
// 网页接口不支持重命名家庭云文件夹
// data = base.Json{
//	"catalogType": 3,
//	"catalogID": srcObj.GetID(),
//	"catalogName": newName,
//	"commonAccountInfo": base.Json{
//		"account": d.Account,
//		"accountType": 1,
//	},
//	"path": srcObj.GetPath(),
// }
// pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyCatalogInfo"
return errs.NotImplement
} else {
data = base.Json{
"contentID": srcObj.GetID(),
"contentName": newName,
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
"path": srcObj.GetPath(),
}
pathname = "/orchestration/familyCloud-rebuild/photoContent/v1.0/modifyContentInfo"
}
_, err = d.post(pathname, data, nil)
default:
err = errs.NotImplement
}
@@ -303,6 +415,28 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
pathname := "/hcy/recyclebin/batchTrash"
_, err := d.personalPost(pathname, data, nil)
return err
case MetaGroup:
var contentList []string
var catalogList []string
// 必须使用完整路径删除
if obj.IsDir() {
catalogList = append(catalogList, obj.GetPath())
} else {
contentList = append(contentList, path.Join(obj.GetPath(), obj.GetID()))
}
data := base.Json{
"taskType": 2,
"srcGroupID": d.CloudID,
"contentList": contentList,
"catalogList": catalogList,
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
}
pathname := "/orchestration/group-rebuild/task/v1.0/createBatchOprTask"
_, err := d.post(pathname, data, nil)
return err
case MetaPersonal:
fallthrough
case MetaFamily:
@@ -337,10 +471,12 @@ func (d *Yun139) Remove(ctx context.Context, obj model.Obj) error {
"account": d.Account,
"accountType": 1,
},
"sourceCloudID": d.CloudID,
"sourceCatalogType": 1002,
"taskType": 2,
"path": obj.GetPath(),
}
pathname = "/orchestration/familyCloud/batchOprTask/v1.0/createBatchOprTask"
pathname = "/orchestration/familyCloud-rebuild/batchOprTask/v1.0/createBatchOprTask"
}
_, err := d.post(pathname, data, nil)
return err
@@ -357,7 +493,10 @@ const (
TB
)
func getPartSize(size int64) int64 {
func (d *Yun139) getPartSize(size int64) int64 {
if d.CustomUploadPartSize != 0 {
return d.CustomUploadPartSize
}
// 网盘对于分片数量存在上限
if size/GB > 30 {
return 512 * MB
@@ -380,19 +519,46 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return err
}
}
// return errs.NotImplement
partInfos := []PartInfo{}
var partSize = d.getPartSize(stream.GetSize())
part := (stream.GetSize() + partSize - 1) / partSize
if part == 0 {
part = 1
}
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
start := i * partSize
byteSize := stream.GetSize() - start
if byteSize > partSize {
byteSize = partSize
}
partNumber := i + 1
partInfo := PartInfo{
PartNumber: partNumber,
PartSize: byteSize,
ParallelHashCtx: ParallelHashCtx{
PartOffset: start,
},
}
partInfos = append(partInfos, partInfo)
}
// 筛选出前 100 个 partInfos
firstPartInfos := partInfos
if len(firstPartInfos) > 100 {
firstPartInfos = firstPartInfos[:100]
}
// 获取上传信息和前100个分片的上传地址
data := base.Json{
"contentHash": fullHash,
"contentHashAlgorithm": "SHA256",
"contentType": "application/octet-stream",
"parallelUpload": false,
"partInfos": []base.Json{{
"parallelHashCtx": base.Json{
"partOffset": 0,
},
"partNumber": 1,
"partSize": stream.GetSize(),
}},
"partInfos": firstPartInfos,
"size": stream.GetSize(),
"parentFileId": dstDir.GetID(),
"name": stream.GetName(),
@@ -410,33 +576,68 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
return nil
}
uploadPartInfos := resp.Data.PartInfos
// 获取后续分片的上传地址
for i := 101; i < len(partInfos); i += 100 {
end := i + 100
if end > len(partInfos) {
end = len(partInfos)
}
batchPartInfos := partInfos[i:end]
moredata := base.Json{
"fileId": resp.Data.FileId,
"uploadId": resp.Data.UploadId,
"partInfos": batchPartInfos,
"commonAccountInfo": base.Json{
"account": d.Account,
"accountType": 1,
},
}
pathname := "/hcy/file/getUploadUrl"
var moreresp PersonalUploadUrlResp
_, err = d.personalPost(pathname, moredata, &moreresp)
if err != nil {
return err
}
uploadPartInfos = append(uploadPartInfos, moreresp.Data.PartInfos...)
}
// Progress
p := driver.NewProgress(stream.GetSize(), up)
// Update Progress
r := io.TeeReader(stream, p)
// 上传所有分片
for _, uploadPartInfo := range uploadPartInfos {
index := uploadPartInfo.PartNumber - 1
partSize := partInfos[index].PartSize
log.Debugf("[139] uploading part %+v/%+v", index, len(uploadPartInfos))
limitReader := io.LimitReader(stream, partSize)
req, err := http.NewRequest("PUT", resp.Data.PartInfos[0].UploadUrl, r)
// Update Progress
r := io.TeeReader(limitReader, p)
req, err := http.NewRequest("PUT", uploadPartInfo.UploadUrl, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/octet-stream")
req.Header.Set("Content-Length", fmt.Sprint(stream.GetSize()))
req.Header.Set("Content-Length", fmt.Sprint(partSize))
req.Header.Set("Origin", "https://yun.139.com")
req.Header.Set("Referer", "https://yun.139.com/")
req.ContentLength = stream.GetSize()
req.ContentLength = partSize
res, err := base.HttpClient.Do(req)
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("%+v", res)
log.Debugf("[139] uploaded: %+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
}
data = base.Json{
"contentHash": fullHash,
@@ -471,21 +672,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
pathname := "/orchestration/personalCloud/uploadAndDownload/v1.0/pcUploadFileRequest"
if d.isFamily() {
// data = d.newJson(base.Json{
//	"fileCount": 1,
//	"manualRename": 2,
//	"operation": 0,
//	"path": "",
//	"seqNo": "",
//	"totalSize": 0,
//	"uploadContentList": []base.Json{{
//		"contentName": stream.GetName(),
//		"contentSize": 0,
//		// "digest": "5a3231986ce7a6b46e408612d385bafa"
//	}},
// })
// pathname = "/orchestration/familyCloud/content/v1.0/getFileUploadURL"
return errs.NotImplement
data = d.newJson(base.Json{
"fileCount": 1,
"manualRename": 2,
"operation": 0,
"path": path.Join(dstDir.GetPath(), dstDir.GetID()),
"seqNo": random.String(32), //序列号不能为空
"totalSize": 0,
"uploadContentList": []base.Json{{
"contentName": stream.GetName(),
"contentSize": 0,
// "digest": "5a3231986ce7a6b46e408612d385bafa"
}},
})
pathname = "/orchestration/familyCloud-rebuild/content/v1.0/getFileUploadURL"
}
var resp UploadResp
_, err := d.post(pathname, data, &resp)
@@ -496,7 +696,7 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// Progress
p := driver.NewProgress(stream.GetSize(), up)
var partSize = getPartSize(stream.GetSize())
var partSize = d.getPartSize(stream.GetSize())
part := (stream.GetSize() + partSize - 1) / partSize
if part == 0 {
part = 1
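The new upload path above pre-computes one PartInfo per chunk (using getPartSize, with the last part truncated to the remaining bytes) and then requests upload URLs in batches of 100. A compact sketch of just the part-splitting step, with simplified stand-in types:

```go
package example

// partInfo is a simplified stand-in for the driver's PartInfo/ParallelHashCtx pair.
type partInfo struct {
	PartNumber int64
	PartSize   int64
	PartOffset int64
}

// splitParts mirrors the loop in the diff: ceil(size/partSize) parts,
// at least one part, with the final part carrying only the remainder.
func splitParts(size, partSize int64) []partInfo {
	n := (size + partSize - 1) / partSize
	if n == 0 {
		n = 1
	}
	parts := make([]partInfo, 0, n)
	for i := int64(0); i < n; i++ {
		start := i * partSize
		length := size - start
		if length > partSize {
			length = partSize
		}
		parts = append(parts, partInfo{PartNumber: i + 1, PartSize: length, PartOffset: start})
	}
	return parts
}
```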
@@ -11,6 +11,7 @@ type Addition struct {
driver.RootID
Type string `json:"type" type:"select" options:"personal,family,personal_new" default:"personal"`
CloudID string `json:"cloud_id"`
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
}
var config = driver.Config{
@@ -7,6 +7,7 @@ import (
const (
MetaPersonal string = "personal"
MetaFamily string = "family"
MetaGroup string = "group"
MetaPersonalNew string = "personal_new"
)
@@ -54,6 +55,7 @@ type Content struct {
//ContentDesc string `json:"contentDesc"`
//ContentType int `json:"contentType"`
//ContentOrigin int `json:"contentOrigin"`
CreateTime string `json:"createTime"`
UpdateTime string `json:"updateTime"`
//CommentCount int `json:"commentCount"`
ThumbnailURL string `json:"thumbnailURL"`
@@ -196,6 +198,37 @@ type QueryContentListResp struct {
} `json:"data"`
}
type QueryGroupContentListResp struct {
BaseResp
Data struct {
Result struct {
ResultCode string `json:"resultCode"`
ResultDesc string `json:"resultDesc"`
} `json:"result"`
GetGroupContentResult struct {
ParentCatalogID string `json:"parentCatalogID"` // 根目录是"0"
CatalogList []struct {
Catalog
Path string `json:"path"`
} `json:"catalogList"`
ContentList []Content `json:"contentList"`
NodeCount int `json:"nodeCount"` // 文件+文件夹数量
CtlgCnt int `json:"ctlgCnt"` // 文件夹数量
ContCnt int `json:"contCnt"` // 文件数量
} `json:"getGroupContentResult"`
} `json:"data"`
}
type ParallelHashCtx struct {
PartOffset int64 `json:"partOffset"`
}
type PartInfo struct {
PartNumber int64 `json:"partNumber"`
PartSize int64 `json:"partSize"`
ParallelHashCtx ParallelHashCtx `json:"parallelHashCtx"`
}
type PersonalThumbnail struct {
Style string `json:"style"`
Url string `json:"url"`
@@ -235,6 +268,15 @@ type PersonalUploadResp struct {
}
}
type PersonalUploadUrlResp struct {
BaseResp
Data struct {
FileId string `json:"fileId"`
UploadId string `json:"uploadId"`
PartInfos []PersonalPartInfo `json:"partInfos"`
}
}
type RefreshTokenResp struct {
XMLName xml.Name `xml:"root"`
Return string `xml:"return"`
@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/alist-org/alist/v3/pkg/utils/random"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/go-resty/resty/v2"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@ -220,10 +220,11 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
||||
"sortDirection": 1,
|
||||
})
|
||||
var resp QueryContentListResp
|
||||
_, err := d.post("/orchestration/familyCloud/content/v1.0/queryContentList", data, &resp)
|
||||
_, err := d.post("/orchestration/familyCloud-rebuild/content/v1.2/queryContentList", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
path := resp.Data.Path
|
||||
for _, catalog := range resp.Data.CloudCatalogList {
|
||||
f := model.Object{
|
||||
ID: catalog.CatalogID,
|
||||
@ -232,6 +233,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
||||
IsFolder: true,
|
||||
Modified: getTime(catalog.LastUpdateTime),
|
||||
Ctime: getTime(catalog.CreateTime),
|
||||
Path: path, // Path of the folder's parent directory
|
||||
}
|
||||
files = append(files, &f)
|
||||
}
|
||||
@ -243,6 +245,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
||||
Size: content.ContentSize,
|
||||
Modified: getTime(content.LastUpdateTime),
|
||||
Ctime: getTime(content.CreateTime),
|
||||
Path: path, // Path of the directory containing the file
|
||||
},
|
||||
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
|
||||
//Thumbnail: content.BigthumbnailURL,
|
||||
@ -257,6 +260,61 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) {
|
||||
pageNum := 1
|
||||
files := make([]model.Obj, 0)
|
||||
for {
|
||||
data := d.newJson(base.Json{
|
||||
"groupID": d.CloudID,
|
||||
"catalogID": catalogID,
|
||||
"contentSortType": 0,
|
||||
"sortDirection": 1,
|
||||
"startNumber": pageNum,
|
||||
"endNumber": pageNum + 99,
|
||||
"path": catalogID,
|
||||
})
|
||||
|
||||
var resp QueryGroupContentListResp
|
||||
_, err := d.post("/orchestration/group-rebuild/content/v1.0/queryGroupContentList", data, &resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
path := resp.Data.GetGroupContentResult.ParentCatalogID
|
||||
for _, catalog := range resp.Data.GetGroupContentResult.CatalogList {
|
||||
f := model.Object{
|
||||
ID: catalog.CatalogID,
|
||||
Name: catalog.CatalogName,
|
||||
Size: 0,
|
||||
IsFolder: true,
|
||||
Modified: getTime(catalog.UpdateTime),
|
||||
Ctime: getTime(catalog.CreateTime),
|
||||
Path: catalog.Path, // the folder's real Path, starting with root:/
|
||||
}
|
||||
files = append(files, &f)
|
||||
}
|
||||
for _, content := range resp.Data.GetGroupContentResult.ContentList {
|
||||
f := model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: content.ContentID,
|
||||
Name: content.ContentName,
|
||||
Size: content.ContentSize,
|
||||
Modified: getTime(content.UpdateTime),
|
||||
Ctime: getTime(content.CreateTime),
|
||||
Path: path, // Path of the directory containing the file
|
||||
},
|
||||
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
|
||||
//Thumbnail: content.BigthumbnailURL,
|
||||
}
|
||||
files = append(files, &f)
|
||||
}
|
||||
if pageNum > resp.Data.GetGroupContentResult.NodeCount {
|
||||
break
|
||||
}
|
||||
pageNum = pageNum + 100
|
||||
}
|
||||
return files, nil
|
||||
}
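For reference, a standalone sketch of the paging window used by groupGetFiles above (not driver code; nodeCount is a hypothetical stand-in for resp.Data.GetGroupContentResult.NodeCount): the window advances in steps of 100 via startNumber/endNumber, and the loop ends only after the window start exceeds the reported node count, so the final window may come back empty.

package main

import "fmt"

func main() {
    nodeCount := 230 // hypothetical total of files + folders reported by the API
    for pageNum := 1; ; pageNum += 100 {
        fmt.Printf("request window: startNumber=%d endNumber=%d\n", pageNum, pageNum+99)
        if pageNum > nodeCount {
            break // same stop condition as the driver loop above
        }
    }
    // Windows 1-100, 101-200 and 201-300 return data; 301-400 comes back empty
    // and the pageNum > NodeCount check then ends the loop.
}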
|
||||
|
||||
func (d *Yun139) getLink(contentId string) (string, error) {
|
||||
data := base.Json{
|
||||
"appName": "",
|
||||
@ -273,6 +331,32 @@ func (d *Yun139) getLink(contentId string) (string, error) {
|
||||
}
|
||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
||||
}
|
||||
func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
|
||||
data := d.newJson(base.Json{
|
||||
"contentID": contentId,
|
||||
"path": path,
|
||||
})
|
||||
res, err := d.post("/orchestration/familyCloud-rebuild/content/v1.0/getFileDownLoadURL",
|
||||
data, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
||||
}
|
||||
|
||||
func (d *Yun139) groupGetLink(contentId string, path string) (string, error) {
|
||||
data := d.newJson(base.Json{
|
||||
"contentID": contentId,
|
||||
"groupID": d.CloudID,
|
||||
"path": path,
|
||||
})
|
||||
res, err := d.post("/orchestration/group-rebuild/groupManage/v1.0/getGroupFileDownLoadURL",
|
||||
data, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
||||
}
|
||||
|
||||
func unicode(str string) string {
|
||||
textQuoted := strconv.QuoteToASCII(str)
|
||||
|
@ -6,7 +6,7 @@ import (
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
|
||||
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"resource"`
|
||||
driver.RootID
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
|
||||
_ "github.com/alist-org/alist/v3/drivers/crypt"
|
||||
_ "github.com/alist-org/alist/v3/drivers/dropbox"
|
||||
_ "github.com/alist-org/alist/v3/drivers/febbox"
|
||||
_ "github.com/alist-org/alist/v3/drivers/ftp"
|
||||
_ "github.com/alist-org/alist/v3/drivers/google_drive"
|
||||
_ "github.com/alist-org/alist/v3/drivers/google_photo"
|
||||
|
@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
)
|
||||
|
||||
type TokenErrResp struct {
|
||||
@ -55,11 +56,11 @@ func fileToObj(f File) *model.ObjThumb {
|
||||
if f.ServerFilename == "" {
|
||||
f.ServerFilename = path.Base(f.Path)
|
||||
}
|
||||
if f.LocalCtime == 0 {
|
||||
f.LocalCtime = f.Ctime
|
||||
if f.ServerCtime == 0 {
|
||||
f.ServerCtime = f.Ctime
|
||||
}
|
||||
if f.LocalMtime == 0 {
|
||||
f.LocalMtime = f.Mtime
|
||||
if f.ServerMtime == 0 {
|
||||
f.ServerMtime = f.Mtime
|
||||
}
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
@ -67,12 +68,12 @@ func fileToObj(f File) *model.ObjThumb {
|
||||
Path: f.Path,
|
||||
Name: f.ServerFilename,
|
||||
Size: f.Size,
|
||||
Modified: time.Unix(f.LocalMtime, 0),
|
||||
Ctime: time.Unix(f.LocalCtime, 0),
|
||||
Modified: time.Unix(f.ServerMtime, 0),
|
||||
Ctime: time.Unix(f.ServerCtime, 0),
|
||||
IsFolder: f.Isdir == 1,
|
||||
|
||||
// The MD5 obtained directly from the API is wrong
|
||||
// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
|
||||
HashInfo: utils.NewHashInfo(utils.MD5, DecryptMd5(f.Md5)),
|
||||
},
|
||||
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
|
||||
}
|
||||
|
@ -1,11 +1,14 @@
|
||||
package baidu_netdisk
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
@ -153,8 +156,6 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
|
||||
u = res.Header().Get("location")
|
||||
//}
|
||||
|
||||
updateObjMd5(file, "pan.baidu.com", u)
|
||||
|
||||
return &model.Link{
|
||||
URL: u,
|
||||
Header: http.Header{
|
||||
@ -178,8 +179,6 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
|
||||
|
||||
return &model.Link{
|
||||
URL: resp.Info[0].Dlink,
|
||||
Header: http.Header{
|
||||
@ -229,19 +228,6 @@ func joinTime(form map[string]string, ctime, mtime int64) {
|
||||
form["local_ctime"] = strconv.FormatInt(ctime, 10)
|
||||
}
|
||||
|
||||
func updateObjMd5(obj model.Obj, userAgent, u string) {
|
||||
object := model.GetRawObject(obj)
|
||||
if object != nil {
|
||||
req, _ := http.NewRequest(http.MethodHead, u, nil)
|
||||
req.Header.Add("User-Agent", userAgent)
|
||||
resp, _ := base.HttpClient.Do(req)
|
||||
if resp != nil {
|
||||
contentMd5 := resp.Header.Get("Content-Md5")
|
||||
object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
DefaultSliceSize int64 = 4 * utils.MB
|
||||
VipSliceSize = 16 * utils.MB
|
||||
@ -267,3 +253,40 @@ func (d *BaiduNetdisk) getSliceSize() int64 {
|
||||
// r = strings.ReplaceAll(r, "+", "%20")
|
||||
// return r
|
||||
// }
|
||||
|
||||
func DecryptMd5(encryptMd5 string) string {
|
||||
if _, err := hex.DecodeString(encryptMd5); err == nil {
|
||||
return encryptMd5
|
||||
}
|
||||
|
||||
var out strings.Builder
|
||||
out.Grow(len(encryptMd5))
|
||||
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
|
||||
if i == 9 {
|
||||
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
|
||||
} else {
|
||||
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
|
||||
}
|
||||
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
|
||||
}
|
||||
|
||||
encryptMd5 = out.String()
|
||||
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
|
||||
}
|
||||
|
||||
func EncryptMd5(originalMd5 string) string {
|
||||
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
|
||||
|
||||
var out strings.Builder
|
||||
out.Grow(len(reversed))
|
||||
for i, n := 0, int64(0); i < len(reversed); i++ {
|
||||
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
|
||||
n ^= int64(15 & i)
|
||||
if i == 9 {
|
||||
out.WriteRune(rune(n) + 'g')
|
||||
} else {
|
||||
out.WriteString(strconv.FormatInt(n, 16))
|
||||
}
|
||||
}
|
||||
return out.String()
|
||||
}
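A small standalone illustration of what DecryptMd5 and EncryptMd5 above do (hypothetical input values, and assuming the package's usual import path github.com/alist-org/alist/v3/drivers/baidu_netdisk): a hash is only transformed when it is not valid hex, and for obfuscated values the two helpers invert each other.

package main

import (
    "fmt"

    baidu "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
)

func main() {
    md5sum := "0123456789abcdef0123456789abcdef" // hypothetical plain MD5
    enc := baidu.EncryptMd5(md5sum)              // block-swapped, nibble-XORed, non-hex marker at index 9
    fmt.Println(baidu.DecryptMd5(enc) == md5sum) // true: DecryptMd5 inverts EncryptMd5
    fmt.Println(baidu.DecryptMd5(md5sum))        // already valid hex, so it is returned unchanged
}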
|
||||
|
@ -27,7 +27,7 @@ type BaiduPhoto struct {
|
||||
model.Storage
|
||||
Addition
|
||||
|
||||
AccessToken string
|
||||
// AccessToken string
|
||||
Uk int64
|
||||
root model.Obj
|
||||
|
||||
@ -48,9 +48,9 @@ func (d *BaiduPhoto) Init(ctx context.Context) error {
|
||||
d.uploadThread, d.UploadThread = 3, "3"
|
||||
}
|
||||
|
||||
if err := d.refreshToken(); err != nil {
|
||||
return err
|
||||
}
|
||||
// if err := d.refreshToken(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// root
|
||||
if d.AlbumID != "" {
|
||||
@ -82,7 +82,7 @@ func (d *BaiduPhoto) GetRoot(ctx context.Context) (model.Obj, error) {
|
||||
}
|
||||
|
||||
func (d *BaiduPhoto) Drop(ctx context.Context) error {
|
||||
d.AccessToken = ""
|
||||
// d.AccessToken = ""
|
||||
d.Uk = 0
|
||||
d.root = nil
|
||||
return nil
|
||||
@ -140,14 +140,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
|
||||
// handle shared albums
|
||||
if d.Uk != file.Uk {
|
||||
// there is a chance the link cannot be obtained
|
||||
return d.linkAlbum(ctx, file, args)
|
||||
// return d.linkAlbum(ctx, file, args)
|
||||
|
||||
// the API is restricted; only the cookie can be used
|
||||
// f, err := d.CopyAlbumFile(ctx, file)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// return d.linkFile(ctx, f, args)
|
||||
f, err := d.CopyAlbumFile(ctx, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.linkFile(ctx, f, args)
|
||||
}
|
||||
return d.linkFile(ctx, &file.File, args)
|
||||
}
|
||||
@ -292,7 +291,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
}
|
||||
|
||||
// try to fetch previously saved upload progress
|
||||
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
|
||||
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, strconv.FormatInt(d.Uk, 10), contentMd5)
|
||||
if !ok {
|
||||
_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
@ -343,7 +342,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
||||
if err = threadG.Wait(); err != nil {
|
||||
if errors.Is(err, context.Canceled) {
|
||||
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
|
||||
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
|
||||
base.SaveUploadProgress(d, strconv.FormatInt(d.Uk, 10), contentMd5)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
@ -6,13 +6,14 @@ import (
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
RefreshToken string `json:"refresh_token" required:"true"`
|
||||
// RefreshToken string `json:"refresh_token" required:"true"`
|
||||
Cookie string `json:"cookie" required:"true"`
|
||||
ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
|
||||
AlbumID string `json:"album_id"`
|
||||
//AlbumPassword string `json:"album_password"`
|
||||
DeleteOrigin bool `json:"delete_origin"`
|
||||
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
||||
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
||||
// ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
||||
// ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
||||
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
||||
}
|
||||
|
||||
|
@ -72,7 +72,7 @@ func (c *File) Thumb() string {
|
||||
}
|
||||
|
||||
func (c *File) GetHash() utils.HashInfo {
|
||||
return utils.NewHashInfo(utils.MD5, c.Md5)
|
||||
return utils.NewHashInfo(utils.MD5, DecryptMd5(c.Md5))
|
||||
}
|
||||
|
||||
/* album section */
|
||||
|
@ -2,13 +2,15 @@ package baiduphoto
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
@ -23,7 +25,8 @@ const (
|
||||
|
||||
func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
||||
req := client.R().
|
||||
SetQueryParam("access_token", d.AccessToken)
|
||||
// SetQueryParam("access_token", d.AccessToken)
|
||||
SetHeader("Cookie", d.Cookie)
|
||||
if callback != nil {
|
||||
callback(req)
|
||||
}
|
||||
@ -45,10 +48,10 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c
|
||||
return nil, fmt.Errorf("no shared albums found")
|
||||
case 50100:
|
||||
return nil, fmt.Errorf("illegal title, only supports 50 characters")
|
||||
case -6:
|
||||
if err = d.refreshToken(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// case -6:
|
||||
// if err = d.refreshToken(); err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
default:
|
||||
return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
|
||||
}
|
||||
@ -63,29 +66,29 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c
|
||||
// return res.Body(), nil
|
||||
//}
|
||||
|
||||
func (d *BaiduPhoto) refreshToken() error {
|
||||
u := "https://openapi.baidu.com/oauth/2.0/token"
|
||||
var resp base.TokenResp
|
||||
var e TokenErrResp
|
||||
_, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{
|
||||
"grant_type": "refresh_token",
|
||||
"refresh_token": d.RefreshToken,
|
||||
"client_id": d.ClientID,
|
||||
"client_secret": d.ClientSecret,
|
||||
}).Get(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.ErrorMsg != "" {
|
||||
return &e
|
||||
}
|
||||
if resp.RefreshToken == "" {
|
||||
return errs.EmptyToken
|
||||
}
|
||||
d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
// func (d *BaiduPhoto) refreshToken() error {
|
||||
// u := "https://openapi.baidu.com/oauth/2.0/token"
|
||||
// var resp base.TokenResp
|
||||
// var e TokenErrResp
|
||||
// _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{
|
||||
// "grant_type": "refresh_token",
|
||||
// "refresh_token": d.RefreshToken,
|
||||
// "client_id": d.ClientID,
|
||||
// "client_secret": d.ClientSecret,
|
||||
// }).Get(u)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if e.ErrorMsg != "" {
|
||||
// return &e
|
||||
// }
|
||||
// if resp.RefreshToken == "" {
|
||||
// return errs.EmptyToken
|
||||
// }
|
||||
// d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken
|
||||
// op.MustSaveDriverStorage(d)
|
||||
// return nil
|
||||
// }
|
||||
|
||||
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
||||
return d.Request(base.RestyClient, furl, http.MethodGet, callback, resp)
|
||||
@ -359,10 +362,6 @@ func (d *BaiduPhoto) linkAlbum(ctx context.Context, file *AlbumFile, args model.
|
||||
|
||||
location := resp.Header().Get("Location")
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
link := &model.Link{
|
||||
URL: location,
|
||||
Header: http.Header{
|
||||
@ -384,36 +383,36 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
|
||||
headers["X-Forwarded-For"] = args.IP
|
||||
}
|
||||
|
||||
// var downloadUrl struct {
|
||||
// Dlink string `json:"dlink"`
|
||||
// }
|
||||
// _, err := d.Get(FILE_API_URL_V1+"/download", func(r *resty.Request) {
|
||||
// r.SetContext(ctx)
|
||||
// r.SetHeaders(headers)
|
||||
// r.SetQueryParams(map[string]string{
|
||||
// "fsid": fmt.Sprint(file.Fsid),
|
||||
// })
|
||||
// }, &downloadUrl)
|
||||
|
||||
resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) {
|
||||
var downloadUrl struct {
|
||||
Dlink string `json:"dlink"`
|
||||
}
|
||||
_, err := d.Get(FILE_API_URL_V2+"/download", func(r *resty.Request) {
|
||||
r.SetContext(ctx)
|
||||
r.SetHeaders(headers)
|
||||
r.SetQueryParams(map[string]string{
|
||||
"fsid": fmt.Sprint(file.Fsid),
|
||||
})
|
||||
}, nil)
|
||||
}, &downloadUrl)
|
||||
|
||||
// resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) {
|
||||
// r.SetContext(ctx)
|
||||
// r.SetHeaders(headers)
|
||||
// r.SetQueryParams(map[string]string{
|
||||
// "fsid": fmt.Sprint(file.Fsid),
|
||||
// })
|
||||
// }, nil)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode() != 302 {
|
||||
return nil, fmt.Errorf("not found 302 redirect")
|
||||
}
|
||||
// if resp.StatusCode() != 302 {
|
||||
// return nil, fmt.Errorf("not found 302 redirect")
|
||||
// }
|
||||
|
||||
location := resp.Header().Get("Location")
|
||||
// location := resp.Header().Get("Location")
|
||||
link := &model.Link{
|
||||
URL: location,
|
||||
URL: downloadUrl.Dlink,
|
||||
Header: http.Header{
|
||||
"User-Agent": []string{headers["User-Agent"]},
|
||||
"Referer": []string{"https://photo.baidu.com/"},
|
||||
@ -476,3 +475,40 @@ func (d *BaiduPhoto) uInfo() (*UInfo, error) {
|
||||
}
|
||||
return &info, nil
|
||||
}
|
||||
|
||||
func DecryptMd5(encryptMd5 string) string {
|
||||
if _, err := hex.DecodeString(encryptMd5); err == nil {
|
||||
return encryptMd5
|
||||
}
|
||||
|
||||
var out strings.Builder
|
||||
out.Grow(len(encryptMd5))
|
||||
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
|
||||
if i == 9 {
|
||||
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
|
||||
} else {
|
||||
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
|
||||
}
|
||||
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
|
||||
}
|
||||
|
||||
encryptMd5 = out.String()
|
||||
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
|
||||
}
|
||||
|
||||
func EncryptMd5(originalMd5 string) string {
|
||||
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
|
||||
|
||||
var out strings.Builder
|
||||
out.Grow(len(reversed))
|
||||
for i, n := 0, int64(0); i < len(reversed); i++ {
|
||||
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
|
||||
n ^= int64(15 & i)
|
||||
if i == 9 {
|
||||
out.WriteRune(rune(n) + 'g')
|
||||
} else {
|
||||
out.WriteString(strconv.FormatInt(n, 16))
|
||||
}
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
@ -67,7 +67,9 @@ func (d *ChaoXing) Init(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (d *ChaoXing) Drop(ctx context.Context) error {
|
||||
if d.cron != nil {
|
||||
d.cron.Stop()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -4,11 +4,13 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
@ -90,7 +92,7 @@ func (d *Cloudreve) MakeDir(ctx context.Context, parentDir model.Obj, dirName st
|
||||
func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
body := base.Json{
|
||||
"action": "move",
|
||||
"src_dir": srcObj.GetPath(),
|
||||
"src_dir": path.Dir(srcObj.GetPath()),
|
||||
"dst": dstDir.GetPath(),
|
||||
"src": convertSrc(srcObj),
|
||||
}
|
||||
@ -112,7 +114,7 @@ func (d *Cloudreve) Rename(ctx context.Context, srcObj model.Obj, newName string
|
||||
|
||||
func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
body := base.Json{
|
||||
"src_dir": srcObj.GetPath(),
|
||||
"src_dir": path.Dir(srcObj.GetPath()),
|
||||
"dst": dstDir.GetPath(),
|
||||
"src": convertSrc(srcObj),
|
||||
}
|
||||
@ -133,6 +135,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
if io.ReadCloser(stream) == http.NoBody {
|
||||
return d.create(ctx, dstDir, stream)
|
||||
}
|
||||
|
||||
// fetch the storage policy
|
||||
var r DirectoryResp
|
||||
err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r)
|
||||
if err != nil {
|
||||
@ -145,6 +149,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
"policy_id": r.Policy.Id,
|
||||
"last_modified": stream.ModTime().Unix(),
|
||||
}
|
||||
|
||||
// fetch the upload session info
|
||||
var u UploadInfo
|
||||
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
|
||||
req.SetBody(uploadBody)
|
||||
@ -152,6 +158,14 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// choose the chunked upload method based on the storage backend
|
||||
switch r.Policy.Type {
|
||||
case "onedrive":
|
||||
err = d.upOneDrive(ctx, stream, u, up)
|
||||
case "remote": // 从机存储
|
||||
err = d.upRemote(ctx, stream, u, up)
|
||||
case "local": // 本机存储
|
||||
var chunkSize = u.ChunkSize
|
||||
var buf []byte
|
||||
var chunk int
|
||||
@ -165,7 +179,6 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
break
|
||||
}
|
||||
@ -179,9 +192,16 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
||||
break
|
||||
}
|
||||
chunk++
|
||||
|
||||
}
|
||||
default:
|
||||
err = errs.NotImplement
|
||||
}
|
||||
if err != nil {
|
||||
// delete the failed upload session
|
||||
err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error {
|
||||
|
@ -24,6 +24,8 @@ type UploadInfo struct {
|
||||
SessionID string `json:"sessionID"`
|
||||
ChunkSize int `json:"chunkSize"`
|
||||
Expires int `json:"expires"`
|
||||
UploadURLs []string `json:"uploadURLs"`
|
||||
Credential string `json:"credential,omitempty"`
|
||||
}
|
||||
|
||||
type DirectoryResp struct {
|
||||
|
@ -1,16 +1,23 @@
|
||||
package cloudreve
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/alist-org/alist/v3/pkg/cookie"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
json "github.com/json-iterator/go"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
@ -172,3 +179,95 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
|
||||
Thumbnail: resp.Header().Get("Location"),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
credential := u.Credential
|
||||
var finish int64 = 0
|
||||
var chunk int = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk), bytes.NewBuffer(byteData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res.Body.Close()
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
chunk++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||
uploadUrl := u.UploadURLs[0]
|
||||
var finish int64 = 0
|
||||
DEFAULT := int64(u.ChunkSize)
|
||||
for finish < stream.GetSize() {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
|
||||
var byteSize = DEFAULT
|
||||
left := stream.GetSize() - finish
|
||||
if left < DEFAULT {
|
||||
byteSize = left
|
||||
}
|
||||
byteData := make([]byte, byteSize)
|
||||
n, err := io.ReadFull(stream, byteData)
|
||||
utils.Log.Debug(err, n)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req, err := http.NewRequest("PUT", uploadUrl, bytes.NewBuffer(byteData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
||||
finish += byteSize
|
||||
res, err := base.HttpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
||||
data, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return errors.New(string(data))
|
||||
}
|
||||
res.Body.Close()
|
||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||
}
|
||||
// send the callback request after a successful upload
|
||||
err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
||||
req.SetBody("{}")
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
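The chunk arithmetic shared by upRemote and upOneDrive above is easy to check in isolation. A standalone sketch with hypothetical sizes, printing the Content-Range headers the OneDrive branch would send for a 10 MiB file and a 4 MiB chunk size:

package main

import "fmt"

func main() {
    size := int64(10 * 1024 * 1024) // hypothetical file size
    chunk := int64(4 * 1024 * 1024) // hypothetical policy chunk size (u.ChunkSize)
    for finish := int64(0); finish < size; {
        byteSize := chunk
        if left := size - finish; left < chunk {
            byteSize = left
        }
        fmt.Printf("Content-Range: bytes %d-%d/%d\n", finish, finish+byteSize-1, size)
        finish += byteSize
    }
    // Output:
    // Content-Range: bytes 0-4194303/10485760
    // Content-Range: bytes 4194304-8388607/10485760
    // Content-Range: bytes 8388608-10485759/10485760
}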
|
||||
|
132
drivers/febbox/driver.go
Normal file
@ -0,0 +1,132 @@
|
||||
package febbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
)
|
||||
|
||||
type FebBox struct {
|
||||
model.Storage
|
||||
Addition
|
||||
accessToken string
|
||||
oauth2Token oauth2.TokenSource
|
||||
}
|
||||
|
||||
func (d *FebBox) Config() driver.Config {
|
||||
return config
|
||||
}
|
||||
|
||||
func (d *FebBox) GetAddition() driver.Additional {
|
||||
return &d.Addition
|
||||
}
|
||||
|
||||
func (d *FebBox) Init(ctx context.Context) error {
|
||||
// initialize the oauth2Config
|
||||
oauth2Config := &clientcredentials.Config{
|
||||
ClientID: d.ClientID,
|
||||
ClientSecret: d.ClientSecret,
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
TokenURL: "https://api.febbox.com/oauth/token",
|
||||
}
|
||||
|
||||
d.initializeOAuth2Token(ctx, oauth2Config, d.Addition.RefreshToken)
|
||||
|
||||
token, err := d.oauth2Token.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.accessToken = token.AccessToken
|
||||
d.Addition.RefreshToken = token.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) Drop(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
files, err := d.getFilesList(dir.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
||||
return fileToObj(src), nil
|
||||
})
|
||||
}
|
||||
|
||||
func (d *FebBox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||
var ip string
|
||||
if d.Addition.UserIP != "" {
|
||||
ip = d.Addition.UserIP
|
||||
} else {
|
||||
ip = args.IP
|
||||
}
|
||||
|
||||
url, err := d.getDownloadLink(file.GetID(), ip)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.Link{
|
||||
URL: url,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||
err := d.makeDir(parentDir.GetID(), dirName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
err := d.move(srcObj.GetID(), dstDir.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||
err := d.rename(srcObj.GetID(), newName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||
err := d.copy(srcObj.GetID(), dstDir.GetID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) Remove(ctx context.Context, obj model.Obj) error {
|
||||
err := d.remove(obj.GetID())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
var _ driver.Driver = (*FebBox)(nil)
|
36
drivers/febbox/meta.go
Normal file
@ -0,0 +1,36 @@
|
||||
package febbox
|
||||
|
||||
import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
)
|
||||
|
||||
type Addition struct {
|
||||
driver.RootID
|
||||
ClientID string `json:"client_id" required:"true" default:""`
|
||||
ClientSecret string `json:"client_secret" required:"true" default:""`
|
||||
RefreshToken string
|
||||
SortRule string `json:"sort_rule" required:"true" type:"select" options:"size_asc,size_desc,name_asc,name_desc,update_asc,update_desc,ext_asc,ext_desc" default:"name_asc"`
|
||||
PageSize int64 `json:"page_size" required:"true" type:"number" default:"100" help:"list api per page size of FebBox driver"`
|
||||
UserIP string `json:"user_ip" default:"" help:"user ip address for download link which can speed up the download"`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
Name: "FebBox",
|
||||
LocalSort: false,
|
||||
OnlyLocal: false,
|
||||
OnlyProxy: false,
|
||||
NoCache: false,
|
||||
NoUpload: true,
|
||||
NeedMs: false,
|
||||
DefaultRoot: "0",
|
||||
CheckStatus: false,
|
||||
Alert: "",
|
||||
NoOverwriteUpload: false,
|
||||
}
|
||||
|
||||
func init() {
|
||||
op.RegisterDriver(func() driver.Driver {
|
||||
return &FebBox{}
|
||||
})
|
||||
}
|
88
drivers/febbox/oauth2.go
Normal file
@ -0,0 +1,88 @@
|
||||
package febbox
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
)
|
||||
|
||||
type customTokenSource struct {
|
||||
config *clientcredentials.Config
|
||||
ctx context.Context
|
||||
refreshToken string
|
||||
}
|
||||
|
||||
func (c *customTokenSource) Token() (*oauth2.Token, error) {
|
||||
v := url.Values{}
|
||||
if c.refreshToken != "" {
|
||||
v.Set("grant_type", "refresh_token")
|
||||
v.Set("refresh_token", c.refreshToken)
|
||||
} else {
|
||||
v.Set("grant_type", "client_credentials")
|
||||
}
|
||||
|
||||
v.Set("client_id", c.config.ClientID)
|
||||
v.Set("client_secret", c.config.ClientSecret)
|
||||
|
||||
req, err := http.NewRequest("POST", c.config.TokenURL, strings.NewReader(v.Encode()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||
|
||||
resp, err := http.DefaultClient.Do(req.WithContext(c.ctx))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, errors.New("oauth2: cannot fetch token")
|
||||
}
|
||||
|
||||
var tokenResp struct {
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Data struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresIn int64 `json:"expires_in"`
|
||||
TokenType string `json:"token_type"`
|
||||
Scope string `json:"scope"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&tokenResp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tokenResp.Code != 1 {
|
||||
return nil, errors.New("oauth2: server response error")
|
||||
}
|
||||
|
||||
c.refreshToken = tokenResp.Data.RefreshToken
|
||||
|
||||
token := &oauth2.Token{
|
||||
AccessToken: tokenResp.Data.AccessToken,
|
||||
TokenType: tokenResp.Data.TokenType,
|
||||
RefreshToken: tokenResp.Data.RefreshToken,
|
||||
Expiry: time.Now().Add(time.Duration(tokenResp.Data.ExpiresIn) * time.Second),
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) initializeOAuth2Token(ctx context.Context, oauth2Config *clientcredentials.Config, refreshToken string) {
|
||||
d.oauth2Token = oauth2.ReuseTokenSource(nil, &customTokenSource{
|
||||
config: oauth2Config,
|
||||
ctx: ctx,
|
||||
refreshToken: refreshToken,
|
||||
})
|
||||
}
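A usage note on the token source above, as a sketch that assumes it sits in the same febbox package (customTokenSource is unexported) and that cfg and ctx are built as in Init: because the source is wrapped in oauth2.ReuseTokenSource, repeated Token() calls reuse the cached token until Expiry, and only an expired token triggers another request to the token URL.

// Sketch only; assumed to live next to the code above in package febbox.
func exampleTokenReuse(ctx context.Context, cfg *clientcredentials.Config) error {
    ts := oauth2.ReuseTokenSource(nil, &customTokenSource{config: cfg, ctx: ctx})
    first, err := ts.Token() // performs a request to cfg.TokenURL
    if err != nil {
        return err
    }
    second, err := ts.Token() // served from cache while the first token is still valid
    if err != nil {
        return err
    }
    _ = first.AccessToken == second.AccessToken // true until Expiry passes
    return nil
}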
|
123
drivers/febbox/types.go
Normal file
@ -0,0 +1,123 @@
|
||||
package febbox
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ErrResp struct {
|
||||
ErrorCode int64 `json:"code"`
|
||||
ErrorMsg string `json:"msg"`
|
||||
ServerRunTime float64 `json:"server_runtime"`
|
||||
ServerName string `json:"server_name"`
|
||||
}
|
||||
|
||||
func (e *ErrResp) IsError() bool {
|
||||
return e.ErrorCode != 0 || e.ErrorMsg != "" || e.ServerRunTime != 0 || e.ServerName != ""
|
||||
}
|
||||
|
||||
func (e *ErrResp) Error() string {
|
||||
return fmt.Sprintf("ErrorCode: %d ,Error: %s ,ServerRunTime: %f ,ServerName: %s", e.ErrorCode, e.ErrorMsg, e.ServerRunTime, e.ServerName)
|
||||
}
|
||||
|
||||
type FileListResp struct {
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Data struct {
|
||||
FileList []File `json:"file_list"`
|
||||
ShowType string `json:"show_type"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type Rules struct {
|
||||
AllowCopy int64 `json:"allow_copy"`
|
||||
AllowDelete int64 `json:"allow_delete"`
|
||||
AllowDownload int64 `json:"allow_download"`
|
||||
AllowComment int64 `json:"allow_comment"`
|
||||
HideLocation int64 `json:"hide_location"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Fid int64 `json:"fid"`
|
||||
UID int64 `json:"uid"`
|
||||
FileSize int64 `json:"file_size"`
|
||||
Path string `json:"path"`
|
||||
FileName string `json:"file_name"`
|
||||
Ext string `json:"ext"`
|
||||
AddTime int64 `json:"add_time"`
|
||||
FileCreateTime int64 `json:"file_create_time"`
|
||||
FileUpdateTime int64 `json:"file_update_time"`
|
||||
ParentID int64 `json:"parent_id"`
|
||||
UpdateTime int64 `json:"update_time"`
|
||||
LastOpenTime int64 `json:"last_open_time"`
|
||||
IsDir int64 `json:"is_dir"`
|
||||
Epub int64 `json:"epub"`
|
||||
IsMusicList int64 `json:"is_music_list"`
|
||||
OssFid int64 `json:"oss_fid"`
|
||||
Faststart int64 `json:"faststart"`
|
||||
HasVideoQuality int64 `json:"has_video_quality"`
|
||||
TotalDownload int64 `json:"total_download"`
|
||||
Status int64 `json:"status"`
|
||||
Remark string `json:"remark"`
|
||||
OldHash string `json:"old_hash"`
|
||||
Hash string `json:"hash"`
|
||||
HashType string `json:"hash_type"`
|
||||
FromUID int64 `json:"from_uid"`
|
||||
FidOrg int64 `json:"fid_org"`
|
||||
ShareID int64 `json:"share_id"`
|
||||
InvitePermission int64 `json:"invite_permission"`
|
||||
ThumbSmall string `json:"thumb_small"`
|
||||
ThumbSmallWidth int64 `json:"thumb_small_width"`
|
||||
ThumbSmallHeight int64 `json:"thumb_small_height"`
|
||||
Thumb string `json:"thumb"`
|
||||
ThumbWidth int64 `json:"thumb_width"`
|
||||
ThumbHeight int64 `json:"thumb_height"`
|
||||
ThumbBig string `json:"thumb_big"`
|
||||
ThumbBigWidth int64 `json:"thumb_big_width"`
|
||||
ThumbBigHeight int64 `json:"thumb_big_height"`
|
||||
IsCustomThumb int64 `json:"is_custom_thumb"`
|
||||
Photos int64 `json:"photos"`
|
||||
IsAlbum int64 `json:"is_album"`
|
||||
ReadOnly int64 `json:"read_only"`
|
||||
Rules Rules `json:"rules"`
|
||||
IsShared int64 `json:"is_shared"`
|
||||
}
|
||||
|
||||
func fileToObj(f File) *model.ObjThumb {
|
||||
return &model.ObjThumb{
|
||||
Object: model.Object{
|
||||
ID: strconv.FormatInt(f.Fid, 10),
|
||||
Name: f.FileName,
|
||||
Size: f.FileSize,
|
||||
Ctime: time.Unix(f.FileCreateTime, 0),
|
||||
Modified: time.Unix(f.FileUpdateTime, 0),
|
||||
IsFolder: f.IsDir == 1,
|
||||
HashInfo: utils.NewHashInfo(hash_extend.GCID, f.Hash),
|
||||
},
|
||||
Thumbnail: model.Thumbnail{
|
||||
Thumbnail: f.Thumb,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type FileDownloadResp struct {
|
||||
Code int `json:"code"`
|
||||
Msg string `json:"msg"`
|
||||
Data []struct {
|
||||
Error int `json:"error"`
|
||||
DownloadURL string `json:"download_url"`
|
||||
Hash string `json:"hash"`
|
||||
HashType string `json:"hash_type"`
|
||||
Fid int `json:"fid"`
|
||||
FileName string `json:"file_name"`
|
||||
ParentID int `json:"parent_id"`
|
||||
FileSize int `json:"file_size"`
|
||||
Ext string `json:"ext"`
|
||||
Thumb string `json:"thumb"`
|
||||
VipLink int `json:"vip_link"`
|
||||
} `json:"data"`
|
||||
}
|
224
drivers/febbox/util.go
Normal file
@ -0,0 +1,224 @@
|
||||
package febbox
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/go-resty/resty/v2"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func (d *FebBox) refreshTokenByOAuth2() error {
|
||||
token, err := d.oauth2Token.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Status = "work"
|
||||
d.accessToken = token.AccessToken
|
||||
d.Addition.RefreshToken = token.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
// obtain the access_token via oauth2
|
||||
token, err := d.oauth2Token.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SetAuthScheme(token.TokenType).SetAuthToken(token.AccessToken)
|
||||
|
||||
if callback != nil {
|
||||
callback(req)
|
||||
}
|
||||
if resp != nil {
|
||||
req.SetResult(resp)
|
||||
}
|
||||
var e ErrResp
|
||||
req.SetError(&e)
|
||||
res, err := req.Execute(method, url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch e.ErrorCode {
|
||||
case 0:
|
||||
return res.Body(), nil
|
||||
case 1:
|
||||
return res.Body(), nil
|
||||
case -10001:
|
||||
if e.ServerName != "" {
|
||||
// the access_token has expired
|
||||
if err = d.refreshTokenByOAuth2(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.request(url, method, callback, resp)
|
||||
} else {
|
||||
return nil, errors.New(e.Error())
|
||||
}
|
||||
default:
|
||||
return nil, errors.New(e.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (d *FebBox) getFilesList(id string) ([]File, error) {
|
||||
if d.PageSize <= 0 {
|
||||
d.PageSize = 100
|
||||
}
|
||||
res, err := d.listWithLimit(id, d.PageSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return *res, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) listWithLimit(dirID string, pageLimit int64) (*[]File, error) {
|
||||
var files []File
|
||||
page := int64(1)
|
||||
for {
|
||||
result, err := d.getFiles(dirID, page, pageLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
files = append(files, *result...)
|
||||
if int64(len(*result)) < pageLimit {
|
||||
break
|
||||
} else {
|
||||
page++
|
||||
}
|
||||
}
|
||||
return &files, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) getFiles(dirID string, page, pageLimit int64) (*[]File, error) {
|
||||
var fileList FileListResp
|
||||
queryParams := map[string]string{
|
||||
"module": "file_list",
|
||||
"parent_id": dirID,
|
||||
"page": strconv.FormatInt(page, 10),
|
||||
"pagelimit": strconv.FormatInt(pageLimit, 10),
|
||||
"order": d.Addition.SortRule,
|
||||
}
|
||||
|
||||
res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, &fileList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(res, &fileList); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fileList.Data.FileList, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) getDownloadLink(id string, ip string) (string, error) {
|
||||
var fileDownloadResp FileDownloadResp
|
||||
queryParams := map[string]string{
|
||||
"module": "file_get_download_url",
|
||||
"fids[]": id,
|
||||
"ip": ip,
|
||||
}
|
||||
|
||||
res, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, &fileDownloadResp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(res, &fileDownloadResp); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return fileDownloadResp.Data[0].DownloadURL, nil
|
||||
}
|
||||
|
||||
func (d *FebBox) makeDir(id string, name string) error {
|
||||
queryParams := map[string]string{
|
||||
"module": "create_dir",
|
||||
"parent_id": id,
|
||||
"name": name,
|
||||
}
|
||||
|
||||
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) move(id string, id2 string) error {
|
||||
queryParams := map[string]string{
|
||||
"module": "file_move",
|
||||
"fids[]": id,
|
||||
"to": id2,
|
||||
}
|
||||
|
||||
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) rename(id string, name string) error {
|
||||
queryParams := map[string]string{
|
||||
"module": "file_rename",
|
||||
"fid": id,
|
||||
"name": name,
|
||||
}
|
||||
|
||||
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) copy(id string, id2 string) error {
|
||||
queryParams := map[string]string{
|
||||
"module": "file_copy",
|
||||
"fids[]": id,
|
||||
"to": id2,
|
||||
}
|
||||
|
||||
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *FebBox) remove(id string) error {
|
||||
queryParams := map[string]string{
|
||||
"module": "file_delete",
|
||||
"fids[]": id,
|
||||
}
|
||||
|
||||
_, err := d.request("https://api.febbox.com/oauth", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetMultipartFormData(queryParams)
|
||||
}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -66,12 +66,13 @@ func (d *ILanZou) Drop(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||
offset := 1
|
||||
var res []ListItem
|
||||
for {
|
||||
var resp ListResp
|
||||
_, err := d.proved("/record/file/list", http.MethodGet, func(req *resty.Request) {
|
||||
params := []string{
|
||||
"offset=1",
|
||||
"offset=" + strconv.Itoa(offset),
|
||||
"limit=60",
|
||||
"folderId=" + dir.GetID(),
|
||||
"type=0",
|
||||
@ -83,7 +84,9 @@ func (d *ILanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs)
|
||||
return nil, err
|
||||
}
|
||||
res = append(res, resp.List...)
|
||||
if resp.TotalPage <= resp.Offset {
|
||||
if resp.Offset < resp.TotalPage {
|
||||
offset++
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
@ -286,7 +289,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
|
||||
req.SetBody(base.Json{
|
||||
"fileId": "",
|
||||
"fileName": stream.GetName(),
|
||||
"fileSize": stream.GetSize() / 1024,
|
||||
"fileSize": stream.GetSize()/1024 + 1,
|
||||
"folderId": dstDir.GetID(),
|
||||
"md5": etag,
|
||||
"type": 1,
|
||||
|
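One detail of the fileSize change above is worth spelling out: the value is sent in KiB, and GetSize()/1024 + 1 rounds the integer division up by always adding one, which differs from a strict ceiling for exact multiples. A standalone sketch of the arithmetic with hypothetical sizes:

package main

import "fmt"

func main() {
    for _, size := range []int64{500, 1024, 1536, 4096} { // hypothetical byte sizes
        sent := size/1024 + 1        // what the request body now contains
        ceil := (size + 1023) / 1024 // strict ceiling, for comparison
        fmt.Println(size, sent, ceil) // 500 1 1, 1024 2 1, 1536 2 2, 4096 5 4
    }
}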
@ -22,6 +22,7 @@ import (
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"github.com/alist-org/times"
|
||||
cp "github.com/otiai10/copy"
|
||||
log "github.com/sirupsen/logrus"
|
||||
_ "golang.org/x/image/webp"
|
||||
)
|
||||
@ -76,7 +77,7 @@ func (d *Local) Init(ctx context.Context) error {
|
||||
if d.thumbConcurrency == 0 {
|
||||
d.thumbTokenBucket = NewNopTokenBucket()
|
||||
} else {
|
||||
d.thumbTokenBucket = NewStaticTokenBucket(d.thumbConcurrency)
|
||||
d.thumbTokenBucket = NewStaticTokenBucketWithMigration(d.thumbTokenBucket, d.thumbConcurrency)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -241,11 +242,22 @@ func (d *Local) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
if utils.IsSubPath(srcPath, dstPath) {
|
||||
return fmt.Errorf("the destination folder is a subfolder of the source folder")
|
||||
}
|
||||
err := os.Rename(srcPath, dstPath)
|
||||
if err != nil {
|
||||
if err := os.Rename(srcPath, dstPath); err != nil && strings.Contains(err.Error(), "invalid cross-device link") {
|
||||
// Handle cross-device file move in local driver
|
||||
if err = d.Copy(ctx, srcObj, dstDir); err != nil {
|
||||
return err
|
||||
} else {
|
||||
// Directly remove the file without checking the recycle bin if the copy succeeded
|
||||
if srcObj.IsDir() {
|
||||
err = os.RemoveAll(srcObj.GetPath())
|
||||
} else {
|
||||
err = os.Remove(srcObj.GetPath())
|
||||
}
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
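The cross-device fallback above matches on the error string. As an alternative sketch only (not what the driver does), the same condition can be detected without string comparison on Unix-like systems, assuming the helper lives in the same package and imports errors and syscall:

// Alternative sketch; os.Rename wraps the errno in an *os.LinkError, which
// errors.As unwraps. Windows reports a different error code, so this is Unix-only.
func isCrossDevice(err error) bool {
    var errno syscall.Errno
    return errors.As(err, &errno) && errno == syscall.EXDEV
}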
|
||||
|
||||
func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
@ -258,22 +270,18 @@ func (d *Local) Rename(ctx context.Context, srcObj model.Obj, newName string) er
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Local) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
func (d *Local) Copy(_ context.Context, srcObj, dstDir model.Obj) error {
|
||||
srcPath := srcObj.GetPath()
|
||||
dstPath := filepath.Join(dstDir.GetPath(), srcObj.GetName())
|
||||
if utils.IsSubPath(srcPath, dstPath) {
|
||||
return fmt.Errorf("the destination folder is a subfolder of the source folder")
|
||||
}
|
||||
var err error
|
||||
if srcObj.IsDir() {
|
||||
err = utils.CopyDir(srcPath, dstPath)
|
||||
} else {
|
||||
err = utils.CopyFile(srcPath, dstPath)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
// Copy using otiai10/copy to perform more secure & efficient copy
|
||||
return cp.Copy(srcPath, dstPath, cp.Options{
|
||||
Sync: true, // Sync file to disk after copy; may incur a performance penalty on filesystems such as ZFS
|
||||
PreserveTimes: true,
|
||||
PreserveOwner: true,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Local) Remove(ctx context.Context, obj model.Obj) error {
|
||||
|
@ -23,6 +23,38 @@ func NewStaticTokenBucket(size int) StaticTokenBucket {
|
||||
return StaticTokenBucket{bucket: bucket}
|
||||
}
|
||||
|
||||
func NewStaticTokenBucketWithMigration(oldBucket TokenBucket, size int) StaticTokenBucket {
|
||||
if oldBucket != nil {
|
||||
oldStaticBucket, ok := oldBucket.(StaticTokenBucket)
|
||||
if ok {
|
||||
oldSize := cap(oldStaticBucket.bucket)
|
||||
migrateSize := oldSize
|
||||
if size < migrateSize {
|
||||
migrateSize = size
|
||||
}
|
||||
|
||||
bucket := make(chan struct{}, size)
|
||||
for range size - migrateSize {
|
||||
bucket <- struct{}{}
|
||||
}
|
||||
|
||||
if migrateSize != 0 {
|
||||
go func() {
|
||||
for range migrateSize {
|
||||
<-oldStaticBucket.bucket
|
||||
bucket <- struct{}{}
|
||||
}
|
||||
close(oldStaticBucket.bucket)
|
||||
}()
|
||||
}
|
||||
return StaticTokenBucket{bucket: bucket}
|
||||
}
|
||||
}
|
||||
return NewStaticTokenBucket(size)
|
||||
}
|
||||
|
||||
// The Take channel may be closed when the local driver is modified.
// Don't call the Put method after the channel is closed.
|
||||
func (b StaticTokenBucket) Take() <-chan struct{} {
|
||||
return b.bucket
|
||||
}
|
||||
@ -35,9 +67,11 @@ func (b StaticTokenBucket) Do(ctx context.Context, f func() error) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-b.bucket:
|
||||
case _, ok := <-b.Take():
|
||||
if ok {
|
||||
defer b.Put()
|
||||
}
|
||||
}
|
||||
return f()
|
||||
}
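A short usage sketch of the bucket above, assumed to sit in the same package as the token-bucket code (generateThumb is a hypothetical worker): Do blocks until a token is free or ctx is cancelled, and NewStaticTokenBucketWithMigration lets the concurrency limit change without dropping tokens that in-flight jobs still hold.

// Sketch only; TokenBucket, NewStaticTokenBucketWithMigration and Do are the ones defined above.
func resizeAndRun(ctx context.Context, old TokenBucket, size int, generateThumb func() error) error {
    b := NewStaticTokenBucketWithMigration(old, size) // outstanding tokens migrate instead of being dropped
    return b.Do(ctx, generateThumb)                   // waits for a token, releases it when the worker returns
}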
|
||||
|
||||
|
@ -127,7 +127,7 @@ func (d *Onedrive) Request(url string, method string, callback base.ReqCallback,
|
||||
|
||||
func (d *Onedrive) getFiles(path string) ([]File, error) {
|
||||
var res []File
|
||||
nextLink := d.GetMetaUrl(false, path) + "/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference"
|
||||
nextLink := d.GetMetaUrl(false, path) + "/children?$top=1000&$expand=thumbnails($select=medium)&$select=id,name,size,fileSystemInfo,content.downloadUrl,file,parentReference"
|
||||
for nextLink != "" {
|
||||
var files Files
|
||||
_, err := d.Request(nextLink, http.MethodGet, nil, &files)
|
||||
|
@ -118,7 +118,7 @@ func (d *OnedriveAPP) Request(url string, method string, callback base.ReqCallba
|
||||
|
||||
func (d *OnedriveAPP) getFiles(path string) ([]File, error) {
|
||||
var res []File
|
||||
nextLink := d.GetMetaUrl(false, path) + "/children?$top=5000&$expand=thumbnails($select=medium)&$select=id,name,size,lastModifiedDateTime,content.downloadUrl,file,parentReference"
|
||||
nextLink := d.GetMetaUrl(false, path) + "/children?$top=1000&$expand=thumbnails($select=medium)&$select=id,name,size,lastModifiedDateTime,content.downloadUrl,file,parentReference"
|
||||
for nextLink != "" {
|
||||
var files Files
|
||||
_, err := d.Request(nextLink, http.MethodGet, nil, &files)
|
||||
|
@ -12,9 +12,7 @@ import (
|
||||
hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/oauth2"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@ -25,7 +23,6 @@ type PikPak struct {
|
||||
*Common
|
||||
RefreshToken string
|
||||
AccessToken string
|
||||
oauth2Token oauth2.TokenSource
|
||||
}
|
||||
|
||||
func (d *PikPak) Config() driver.Config {
|
||||
@ -49,7 +46,6 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
|
||||
d.Common.CaptchaToken = token
|
||||
op.MustSaveDriverStorage(d)
|
||||
},
|
||||
LowLatencyAddr: "",
|
||||
}
|
||||
}
|
||||
|
||||
@ -86,45 +82,20 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
|
||||
d.Addition.DeviceID = d.Common.DeviceID
|
||||
op.MustSaveDriverStorage(d)
|
||||
}
|
||||
// initialize the oauth2Config
|
||||
oauth2Config := &oauth2.Config{
|
||||
ClientID: d.ClientID,
|
||||
ClientSecret: d.ClientSecret,
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
|
||||
TokenURL: "https://user.mypikpak.com/v1/auth/token",
|
||||
AuthStyle: oauth2.AuthStyleInParams,
|
||||
},
|
||||
}
|
||||
|
||||
// if a RefreshToken already exists, fetch the AccessToken directly
|
||||
if d.Addition.RefreshToken != "" {
|
||||
if d.RefreshTokenMethod == "oauth2" {
|
||||
// refresh the token via oauth2
// initialize the oauth2Token
|
||||
d.initializeOAuth2Token(ctx, oauth2Config, d.Addition.RefreshToken)
|
||||
if err := d.refreshTokenByOAuth2(); err != nil {
|
||||
if err = d.refreshToken(d.Addition.RefreshToken); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := d.refreshToken(d.Addition.RefreshToken); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// if no RefreshToken was provided, try logging in to obtain one
|
||||
if err := d.login(); err != nil {
|
||||
if err = d.login(); err != nil {
|
||||
return err
|
||||
}
|
||||
if d.RefreshTokenMethod == "oauth2" {
|
||||
d.initializeOAuth2Token(ctx, oauth2Config, d.RefreshToken)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// fetch the CaptchaToken
|
||||
err = d.RefreshCaptchaTokenAtLogin(GetAction(http.MethodGet, "https://api-drive.mypikpak.com/drive/v1/files"), d.Common.GetUserID())
|
||||
err = d.RefreshCaptchaTokenAtLogin(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/files"), d.Common.GetUserID())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -138,14 +109,6 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
|
||||
d.Addition.RefreshToken = d.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
if d.UseLowLatencyAddress && d.Addition.CustomLowLatencyAddress != "" {
|
||||
d.Common.LowLatencyAddr = d.Addition.CustomLowLatencyAddress
|
||||
} else if d.UseLowLatencyAddress {
|
||||
d.Common.LowLatencyAddr = findLowestLatencyAddress(DlAddr)
|
||||
d.Addition.CustomLowLatencyAddress = d.Common.LowLatencyAddr
|
||||
op.MustSaveDriverStorage(d)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -174,7 +137,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
||||
if !d.DisableMediaLink {
|
||||
queryParams["usage"] = "CACHE"
|
||||
}
|
||||
_, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.com/drive/v1/files/%s", file.GetID()),
|
||||
_, err := d.request(fmt.Sprintf("https://api-drive.mypikpak.net/drive/v1/files/%s", file.GetID()),
|
||||
http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(queryParams)
|
||||
}, &resp)
|
||||
@ -188,19 +151,13 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
||||
url = resp.Medias[0].Link.Url
|
||||
}
|
||||
|
||||
if d.UseLowLatencyAddress && d.Common.LowLatencyAddr != "" {
|
||||
// replace with the low-latency (accelerated) download address
|
||||
re := regexp.MustCompile(`https://[^/]+/download/`)
|
||||
url = re.ReplaceAllString(url, "https://"+d.Common.LowLatencyAddr+"/download/")
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: url,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"kind": "drive#folder",
|
||||
"parent_id": parentDir.GetID(),
|
||||
@ -211,7 +168,7 @@ func (d *PikPak) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
|
||||
}
|
||||
|
||||
func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchMove", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"ids": []string{srcObj.GetID()},
|
||||
"to": base.Json{
|
||||
@ -223,7 +180,7 @@ func (d *PikPak) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files/"+srcObj.GetID(), http.MethodPatch, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"name": newName,
|
||||
})
|
||||
@ -232,7 +189,7 @@ func (d *PikPak) Rename(ctx context.Context, srcObj model.Obj, newName string) e
|
||||
}
|
||||
|
||||
func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchCopy", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"ids": []string{srcObj.GetID()},
|
||||
"to": base.Json{
|
||||
@ -244,7 +201,7 @@ func (d *PikPak) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files:batchTrash", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"ids": []string{obj.GetID()},
|
||||
})
|
||||
@ -268,7 +225,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
||||
}
|
||||
|
||||
var resp UploadTaskData
|
||||
res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
|
||||
res, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(base.Json{
|
||||
"kind": "drive#file",
|
||||
"name": stream.GetName(),
|
||||
@ -292,9 +249,9 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
||||
|
||||
params := resp.Resumable.Params
|
||||
//endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
|
||||
// web uploads return the endpoint `mypikpak.com`; android uploads return an endpoint like `vip-lixian-07.mypikpak.com`
|
||||
// web uploads return the endpoint `mypikpak.net`; android uploads return an endpoint like `vip-lixian-07.mypikpak.net`
|
||||
if d.Addition.Platform == "android" {
|
||||
params.Endpoint = "mypikpak.com"
|
||||
params.Endpoint = "mypikpak.net"
|
||||
}
|
||||
|
||||
if stream.GetSize() <= 10*utils.MB { // files no larger than 10MB are uploaded in plain (non-resumable) mode
|
||||
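The Put hunk above makes two decisions before uploading: android clients report a per-node endpoint that is collapsed to the shared root domain, and files of at most 10MB skip the resumable protocol. A hedged, self-contained sketch of that branching, with the threshold and domain values taken from the diff and everything else illustrative:

package main

import "fmt"

const smallFileLimit int64 = 10 << 20 // 10MB

// chooseUpload mirrors the endpoint normalization and the size check shown above;
// the "plain"/"resumable" labels stand in for the driver's two upload paths.
func chooseUpload(size int64, platform, endpoint string) (mode, host string) {
	if platform == "android" {
		endpoint = "mypikpak.net" // e.g. vip-lixian-07.mypikpak.net -> mypikpak.net
	}
	if size <= smallFileLimit {
		return "plain", endpoint
	}
	return "resumable", endpoint
}

func main() {
	fmt.Println(chooseUpload(8<<20, "android", "vip-lixian-07.mypikpak.net")) // plain mypikpak.net
	fmt.Println(chooseUpload(1<<30, "web", "mypikpak.net"))                   // resumable mypikpak.net
}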
@ -318,7 +275,7 @@ func (d *PikPak) OfflineDownload(ctx context.Context, fileUrl string, parentDir
|
||||
}
|
||||
|
||||
var resp OfflineDownloadResp
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetBody(requestBody)
|
||||
}, &resp)
|
||||
|
||||
@ -336,7 +293,7 @@ PHASE_TYPE_RUNNING, PHASE_TYPE_ERROR, PHASE_TYPE_COMPLETE, PHASE_TYPE_PENDING
|
||||
*/
|
||||
func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []string) ([]OfflineTask, error) {
|
||||
res := make([]OfflineTask, 0)
|
||||
url := "https://api-drive.mypikpak.com/drive/v1/tasks"
|
||||
url := "https://api-drive.mypikpak.net/drive/v1/tasks"
|
||||
|
||||
if len(phase) == 0 {
|
||||
phase = []string{"PHASE_TYPE_RUNNING", "PHASE_TYPE_ERROR", "PHASE_TYPE_COMPLETE", "PHASE_TYPE_PENDING"}
|
||||
@ -377,7 +334,7 @@ func (d *PikPak) OfflineList(ctx context.Context, nextPageToken string, phase []
|
||||
}
|
||||
|
||||
func (d *PikPak) DeleteOfflineTasks(ctx context.Context, taskIDs []string, deleteFiles bool) error {
|
||||
url := "https://api-drive.mypikpak.com/drive/v1/tasks"
|
||||
url := "https://api-drive.mypikpak.net/drive/v1/tasks"
|
||||
params := map[string]string{
|
||||
"task_ids": strings.Join(taskIDs, ","),
|
||||
"delete_files": strconv.FormatBool(deleteFiles),
|
||||
|
@ -9,14 +9,11 @@ type Addition struct {
|
||||
driver.RootID
|
||||
Username string `json:"username" required:"true"`
|
||||
Password string `json:"password" required:"true"`
|
||||
Platform string `json:"platform" required:"true" type:"select" options:"android,web,pc"`
|
||||
Platform string `json:"platform" required:"true" default:"web" type:"select" options:"android,web,pc"`
|
||||
RefreshToken string `json:"refresh_token" required:"true" default:""`
|
||||
RefreshTokenMethod string `json:"refresh_token_method" required:"true" type:"select" options:"oauth2,http"`
|
||||
CaptchaToken string `json:"captcha_token" default:""`
|
||||
DeviceID string `json:"device_id" required:"false" default:""`
|
||||
DisableMediaLink bool `json:"disable_media_link" default:"true"`
|
||||
UseLowLatencyAddress bool `json:"use_low_latency_address" default:"false"`
|
||||
CustomLowLatencyAddress string `json:"custom_low_latency_address" default:""`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
@ -2,7 +2,6 @@ package pikpak
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
@ -14,7 +13,6 @@ import (
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
"io"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
@ -27,35 +25,35 @@ import (
|
||||
"github.com/go-resty/resty/v2"
|
||||
)
|
||||
|
||||
// helpers that are not part of the Driver interface
|
||||
|
||||
var AndroidAlgorithms = []string{
|
||||
"aDhgaSE3MsjROCmpmsWqP1sJdFJ",
|
||||
"+oaVkqdd8MJuKT+uMr2AYKcd9tdWge3XPEPR2hcePUknd",
|
||||
"u/sd2GgT2fTytRcKzGicHodhvIltMntA3xKw2SRv7S48OdnaQIS5mn",
|
||||
"2WZiae2QuqTOxBKaaqCNHCW3olu2UImelkDzBn",
|
||||
"/vJ3upic39lgmrkX855Qx",
|
||||
"yNc9ruCVMV7pGV7XvFeuLMOcy1",
|
||||
"4FPq8mT3JQ1jzcVxMVfwFftLQm33M7i",
|
||||
"xozoy5e3Ea",
|
||||
"7xOq4Z8s",
|
||||
"QE9/9+IQco",
|
||||
"WdX5J9CPLZp",
|
||||
"NmQ5qFAXqH3w984cYhMeC5TJR8j",
|
||||
"cc44M+l7GDhav",
|
||||
"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
|
||||
"wla81BUVSmDkctHDpUT",
|
||||
"c6wMr1sm1WxiR3i8LDAm3W",
|
||||
"hRLrEQCFNYi0PFPV",
|
||||
"o1J41zIraDtJPNuhBu7Ifb/q3",
|
||||
"U",
|
||||
"RrbZvV0CTu3gaZJ56PVKki4IeP",
|
||||
"NNuRbLckJqUp1Do0YlrKCUP",
|
||||
"UUwnBbipMTvInA0U0E9",
|
||||
"VzGc",
|
||||
}
|
||||
|
||||
var WebAlgorithms = []string{
|
||||
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
|
||||
"+r6CQVxjzJV6LCV",
|
||||
"F",
|
||||
"pFJRC",
|
||||
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
|
||||
"/750aCr4lm/Sly/c",
|
||||
"RB+DT/gZCrbV",
|
||||
"",
|
||||
"CyLsf7hdkIRxRm215hl",
|
||||
"7xHvLi2tOYP0Y92b",
|
||||
"ZGTXXxu8E/MIWaEDB+Sm/",
|
||||
"1UI3",
|
||||
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
|
||||
"ihtqpG6FMt65+Xk+tWUH2",
|
||||
"NhXXU9rg4XXdzo7u5o",
|
||||
"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
|
||||
"uSUX02HYJ1IkyLdhINEFcCf7l2",
|
||||
"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
|
||||
"3binT1s/5a1pu3fGsN",
|
||||
"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
|
||||
"DYS3StqnAEKdGddRP8CJrxUSFh",
|
||||
"crquW+4",
|
||||
"ryKqvW9B9hly+JAymXCIfag5Z",
|
||||
"Hr08T/NDTX1oSJfHk90c",
|
||||
"i",
|
||||
}
|
||||
|
||||
var PCAlgorithms = []string{
|
||||
@ -80,13 +78,13 @@ const (
|
||||
const (
|
||||
AndroidClientID = "YNxT9w7GMdWvEOKa"
|
||||
AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
|
||||
AndroidClientVersion = "1.48.3"
|
||||
AndroidClientVersion = "1.49.3"
|
||||
AndroidPackageName = "com.pikcloud.pikpak"
|
||||
AndroidSdkVersion = "2.0.4.204101"
|
||||
WebClientID = "YUMx5nI8ZU8Ap8pm"
|
||||
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
|
||||
WebClientVersion = "2.0.0"
|
||||
WebPackageName = "mypikpak.com"
|
||||
WebClientVersion = "undefined"
|
||||
WebPackageName = "drive.mypikpak.com"
|
||||
WebSdkVersion = "8.0.3"
|
||||
PCClientID = "YvtoWO6GNHiuCl7x"
|
||||
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
|
||||
@ -95,51 +93,13 @@ const (
|
||||
PCSdkVersion = "8.0.3"
|
||||
)
|
||||
|
||||
var DlAddr = []string{
|
||||
"dl-a10b-0621.mypikpak.com",
|
||||
"dl-a10b-0622.mypikpak.com",
|
||||
"dl-a10b-0623.mypikpak.com",
|
||||
"dl-a10b-0624.mypikpak.com",
|
||||
"dl-a10b-0625.mypikpak.com",
|
||||
"dl-a10b-0858.mypikpak.com",
|
||||
"dl-a10b-0859.mypikpak.com",
|
||||
"dl-a10b-0860.mypikpak.com",
|
||||
"dl-a10b-0861.mypikpak.com",
|
||||
"dl-a10b-0862.mypikpak.com",
|
||||
"dl-a10b-0863.mypikpak.com",
|
||||
"dl-a10b-0864.mypikpak.com",
|
||||
"dl-a10b-0865.mypikpak.com",
|
||||
"dl-a10b-0866.mypikpak.com",
|
||||
"dl-a10b-0867.mypikpak.com",
|
||||
"dl-a10b-0868.mypikpak.com",
|
||||
"dl-a10b-0869.mypikpak.com",
|
||||
"dl-a10b-0870.mypikpak.com",
|
||||
"dl-a10b-0871.mypikpak.com",
|
||||
"dl-a10b-0872.mypikpak.com",
|
||||
"dl-a10b-0873.mypikpak.com",
|
||||
"dl-a10b-0874.mypikpak.com",
|
||||
"dl-a10b-0875.mypikpak.com",
|
||||
"dl-a10b-0876.mypikpak.com",
|
||||
"dl-a10b-0877.mypikpak.com",
|
||||
"dl-a10b-0878.mypikpak.com",
|
||||
"dl-a10b-0879.mypikpak.com",
|
||||
"dl-a10b-0880.mypikpak.com",
|
||||
"dl-a10b-0881.mypikpak.com",
|
||||
"dl-a10b-0882.mypikpak.com",
|
||||
"dl-a10b-0883.mypikpak.com",
|
||||
"dl-a10b-0884.mypikpak.com",
|
||||
"dl-a10b-0885.mypikpak.com",
|
||||
"dl-a10b-0886.mypikpak.com",
|
||||
"dl-a10b-0887.mypikpak.com",
|
||||
}
|
||||
|
||||
func (d *PikPak) login() error {
|
||||
// make sure username and password are not empty
|
||||
if d.Addition.Username == "" || d.Addition.Password == "" {
|
||||
return errors.New("username or password is empty")
|
||||
}
|
||||
|
||||
url := "https://user.mypikpak.com/v1/auth/signin"
|
||||
url := "https://user.mypikpak.net/v1/auth/signin"
|
||||
// use the user-provided CaptchaToken (a captcha_token that has already passed verification)
|
||||
if d.GetCaptchaToken() == "" {
|
||||
if err := d.RefreshCaptchaTokenInLogin(GetAction(http.MethodPost, url), d.Username); err != nil {
|
||||
@ -169,7 +129,7 @@ func (d *PikPak) login() error {
|
||||
}
|
||||
|
||||
func (d *PikPak) refreshToken(refreshToken string) error {
|
||||
url := "https://user.mypikpak.com/v1/auth/token"
|
||||
url := "https://user.mypikpak.net/v1/auth/token"
|
||||
var e ErrResp
|
||||
res, err := base.RestyClient.SetRetryCount(1).R().SetError(&e).
|
||||
SetHeader("user-agent", "").SetBody(base.Json{
|
||||
@ -207,30 +167,6 @@ func (d *PikPak) refreshToken(refreshToken string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *PikPak) initializeOAuth2Token(ctx context.Context, oauth2Config *oauth2.Config, refreshToken string) {
|
||||
d.oauth2Token = oauth2.ReuseTokenSource(nil, utils.TokenSource(func() (*oauth2.Token, error) {
|
||||
return oauth2Config.TokenSource(ctx, &oauth2.Token{
|
||||
RefreshToken: refreshToken,
|
||||
}).Token()
|
||||
}))
|
||||
}
|
||||
|
||||
func (d *PikPak) refreshTokenByOAuth2() error {
|
||||
token, err := d.oauth2Token.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.Status = "work"
|
||||
d.RefreshToken = token.RefreshToken
|
||||
d.AccessToken = token.AccessToken
|
||||
// extract the user ID
|
||||
userID := token.Extra("sub").(string)
|
||||
d.Common.SetUserID(userID)
|
||||
d.Addition.RefreshToken = d.RefreshToken
|
||||
op.MustSaveDriverStorage(d)
|
||||
return nil
|
||||
}
|
||||
|
||||
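The refresh path above now goes straight to https://user.mypikpak.net/v1/auth/token with the plain HTTP client, since the oauth2 helpers are removed in this change. A minimal sketch of that call with resty; the exact body fields are truncated in the hunk, so the refresh_token-grant payload below is an assumption, and the response struct only lists the fields the sketch needs:

package main

import (
	"fmt"

	"github.com/go-resty/resty/v2"
)

type tokenResp struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	Sub          string `json:"sub"`
}

// refreshPikPakToken posts an assumed refresh_token grant to the new .net host.
func refreshPikPakToken(clientID, clientSecret, refreshToken string) (*tokenResp, error) {
	var out tokenResp
	resp, err := resty.New().R().
		SetHeader("user-agent", "").
		SetBody(map[string]string{
			"client_id":     clientID,
			"client_secret": clientSecret,
			"grant_type":    "refresh_token",
			"refresh_token": refreshToken,
		}).
		SetResult(&out).
		Post("https://user.mypikpak.net/v1/auth/token")
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		return nil, fmt.Errorf("refresh failed: %s", resp.Status())
	}
	return &out, nil
}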
func (d *PikPak) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
@ -239,14 +175,7 @@ func (d *PikPak) request(url string, method string, callback base.ReqCallback, r
|
||||
"X-Device-ID": d.GetDeviceID(),
|
||||
"X-Captcha-Token": d.GetCaptchaToken(),
|
||||
})
|
||||
if d.RefreshTokenMethod == "oauth2" && d.oauth2Token != nil {
|
||||
// fetch the access_token via oauth2
|
||||
token, err := d.oauth2Token.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req.SetAuthScheme(token.TokenType).SetAuthToken(token.AccessToken)
|
||||
} else if d.AccessToken != "" {
|
||||
if d.AccessToken != "" {
|
||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
||||
}
|
||||
|
||||
@ -268,16 +197,9 @@ func (d *PikPak) request(url string, method string, callback base.ReqCallback, r
|
||||
return res.Body(), nil
|
||||
case 4122, 4121, 16:
|
||||
// access_token expired
|
||||
if d.RefreshTokenMethod == "oauth2" {
|
||||
if err1 := d.refreshTokenByOAuth2(); err1 != nil {
|
||||
return nil, err1
|
||||
}
|
||||
} else {
|
||||
if err1 := d.refreshToken(d.RefreshToken); err1 != nil {
|
||||
return nil, err1
|
||||
}
|
||||
}
|
||||
|
||||
return d.request(url, method, callback, resp)
|
||||
case 9: // captcha token expired
|
||||
if err = d.RefreshCaptchaTokenAtLogin(GetAction(method, url), d.GetUserID()); err != nil {
|
||||
@ -307,7 +229,7 @@ func (d *PikPak) getFiles(id string) ([]File, error) {
|
||||
"page_token": pageToken,
|
||||
}
|
||||
var resp Files
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodGet, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/files", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(query)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
@ -338,7 +260,6 @@ type Common struct {
|
||||
UserAgent string
|
||||
// callback invoked after the captcha token is refreshed successfully
|
||||
RefreshCTokenCk func(token string)
|
||||
LowLatencyAddr string
|
||||
}
|
||||
|
||||
func generateDeviceSign(deviceID, packageName string) string {
|
||||
@ -473,7 +394,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err
|
||||
}
|
||||
var e ErrResp
|
||||
var resp CaptchaTokenResponse
|
||||
_, err := d.request("https://user.mypikpak.com/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://user.mypikpak.net/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetError(&e).SetBody(param).SetQueryParam("client_id", d.ClientID)
|
||||
}, &resp)
|
||||
|
||||
@ -729,46 +650,3 @@ func OssOption(params *S3Params) []oss.Option {
|
||||
}
|
||||
return options
|
||||
}
|
||||
|
||||
type AddressLatency struct {
|
||||
Address string
|
||||
Latency time.Duration
|
||||
}
|
||||
|
||||
func checkLatency(address string, wg *sync.WaitGroup, ch chan<- AddressLatency) {
|
||||
defer wg.Done()
|
||||
start := time.Now()
|
||||
resp, err := http.Get("https://" + address + "/generate_204")
|
||||
if err != nil {
|
||||
ch <- AddressLatency{Address: address, Latency: time.Hour} // Set high latency on error
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
latency := time.Since(start)
|
||||
ch <- AddressLatency{Address: address, Latency: latency}
|
||||
}
|
||||
|
||||
func findLowestLatencyAddress(addresses []string) string {
|
||||
var wg sync.WaitGroup
|
||||
ch := make(chan AddressLatency, len(addresses))
|
||||
|
||||
for _, address := range addresses {
|
||||
wg.Add(1)
|
||||
go checkLatency(address, &wg, ch)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
var lowestLatencyAddress string
|
||||
lowestLatency := time.Hour
|
||||
|
||||
for result := range ch {
|
||||
if result.Latency < lowestLatency {
|
||||
lowestLatency = result.Latency
|
||||
lowestLatencyAddress = result.Address
|
||||
}
|
||||
}
|
||||
|
||||
return lowestLatencyAddress
|
||||
}
|
||||
|
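findLowestLatencyAddress above probes every candidate host concurrently with http.Get against /generate_204 and keeps the fastest responder, using time.Hour as an "unreachable" sentinel. The stock http.Get has no timeout, so one way to bound the scan, purely as a sketch and not part of the diff, is a dedicated client with a deadline; the probe timeout below is an assumption:

package main

import (
	"fmt"
	"net/http"
	"sync"
	"time"
)

// probe measures a GET to https://<addr>/generate_204 with a bounded client,
// so a dead host cannot stall the whole scan.
func probe(addr string, timeout time.Duration) time.Duration {
	client := &http.Client{Timeout: timeout}
	start := time.Now()
	resp, err := client.Get("https://" + addr + "/generate_204")
	if err != nil {
		return time.Hour // same "unreachable" sentinel the driver uses
	}
	resp.Body.Close()
	return time.Since(start)
}

// fastest returns the address with the lowest measured latency.
func fastest(addrs []string, timeout time.Duration) string {
	var (
		mu   sync.Mutex
		wg   sync.WaitGroup
		best string
	)
	min := time.Hour
	for _, a := range addrs {
		wg.Add(1)
		go func(a string) {
			defer wg.Done()
			d := probe(a, timeout)
			mu.Lock()
			if d < min {
				min, best = d, a
			}
			mu.Unlock()
		}(a)
	}
	wg.Wait()
	return best
}

func main() {
	// candidate hosts taken from the DlAddr list above; the timeout is an assumption
	fmt.Println(fastest([]string{"dl-a10b-0621.mypikpak.com", "dl-a10b-0622.mypikpak.com"}, 3*time.Second))
}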
@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
@ -37,7 +36,6 @@ func (d *PikPakShare) Init(ctx context.Context) error {
|
||||
d.Common.CaptchaToken = token
|
||||
op.MustSaveDriverStorage(d)
|
||||
},
|
||||
LowLatencyAddr: "",
|
||||
}
|
||||
}
|
||||
|
||||
@ -71,16 +69,8 @@ func (d *PikPakShare) Init(ctx context.Context) error {
|
||||
d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
|
||||
}
|
||||
|
||||
if d.UseLowLatencyAddress && d.Addition.CustomLowLatencyAddress != "" {
|
||||
d.Common.LowLatencyAddr = d.Addition.CustomLowLatencyAddress
|
||||
} else if d.UseLowLatencyAddress {
|
||||
d.Common.LowLatencyAddr = findLowestLatencyAddress(DlAddr)
|
||||
d.Addition.CustomLowLatencyAddress = d.Common.LowLatencyAddr
|
||||
op.MustSaveDriverStorage(d)
|
||||
}
|
||||
|
||||
// fetch the CaptchaToken
|
||||
err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.com/drive/v1/share:batch_file_info"), "")
|
||||
err := d.RefreshCaptchaToken(GetAction(http.MethodGet, "https://api-drive.mypikpak.net/drive/v1/share:batch_file_info"), "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -113,7 +103,7 @@ func (d *PikPakShare) Link(ctx context.Context, file model.Obj, args model.LinkA
|
||||
"file_id": file.GetID(),
|
||||
"pass_code_token": d.PassCodeToken,
|
||||
}
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/share/file_info", http.MethodGet, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/share/file_info", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(query)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
@ -131,12 +121,6 @@ func (d *PikPakShare) Link(ctx context.Context, file model.Obj, args model.LinkA
|
||||
|
||||
}
|
||||
|
||||
if d.UseLowLatencyAddress && d.Common.LowLatencyAddr != "" {
|
||||
// rewrite to the low-latency (accelerated) download host
|
||||
re := regexp.MustCompile(`https://[^/]+/download/`)
|
||||
downloadUrl = re.ReplaceAllString(downloadUrl, "https://"+d.Common.LowLatencyAddr+"/download/")
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: downloadUrl,
|
||||
}, nil
|
||||
|
@ -9,11 +9,9 @@ type Addition struct {
|
||||
driver.RootID
|
||||
ShareId string `json:"share_id" required:"true"`
|
||||
SharePwd string `json:"share_pwd"`
|
||||
Platform string `json:"platform" required:"true" type:"select" options:"android,web,pc"`
|
||||
Platform string `json:"platform" default:"web" required:"true" type:"select" options:"android,web,pc"`
|
||||
DeviceID string `json:"device_id" required:"false" default:""`
|
||||
UseTransCodingAddress bool `json:"use_transcoding_address" required:"true" default:"false"`
|
||||
UseLowLatencyAddress bool `json:"use_low_latency_address" default:"false"`
|
||||
CustomLowLatencyAddress string `json:"custom_low_latency_address" default:""`
|
||||
}
|
||||
|
||||
var config = driver.Config{
|
||||
|
@ -10,7 +10,6 @@ import (
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
@ -18,32 +17,34 @@ import (
|
||||
)
|
||||
|
||||
var AndroidAlgorithms = []string{
|
||||
"aDhgaSE3MsjROCmpmsWqP1sJdFJ",
|
||||
"+oaVkqdd8MJuKT+uMr2AYKcd9tdWge3XPEPR2hcePUknd",
|
||||
"u/sd2GgT2fTytRcKzGicHodhvIltMntA3xKw2SRv7S48OdnaQIS5mn",
|
||||
"2WZiae2QuqTOxBKaaqCNHCW3olu2UImelkDzBn",
|
||||
"/vJ3upic39lgmrkX855Qx",
|
||||
"yNc9ruCVMV7pGV7XvFeuLMOcy1",
|
||||
"4FPq8mT3JQ1jzcVxMVfwFftLQm33M7i",
|
||||
"xozoy5e3Ea",
|
||||
"7xOq4Z8s",
|
||||
"QE9/9+IQco",
|
||||
"WdX5J9CPLZp",
|
||||
"NmQ5qFAXqH3w984cYhMeC5TJR8j",
|
||||
"cc44M+l7GDhav",
|
||||
"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
|
||||
"wla81BUVSmDkctHDpUT",
|
||||
"c6wMr1sm1WxiR3i8LDAm3W",
|
||||
"hRLrEQCFNYi0PFPV",
|
||||
"o1J41zIraDtJPNuhBu7Ifb/q3",
|
||||
"U",
|
||||
"RrbZvV0CTu3gaZJ56PVKki4IeP",
|
||||
"NNuRbLckJqUp1Do0YlrKCUP",
|
||||
"UUwnBbipMTvInA0U0E9",
|
||||
"VzGc",
|
||||
}
|
||||
|
||||
var WebAlgorithms = []string{
|
||||
"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
|
||||
"+r6CQVxjzJV6LCV",
|
||||
"F",
|
||||
"pFJRC",
|
||||
"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
|
||||
"/750aCr4lm/Sly/c",
|
||||
"RB+DT/gZCrbV",
|
||||
"",
|
||||
"CyLsf7hdkIRxRm215hl",
|
||||
"7xHvLi2tOYP0Y92b",
|
||||
"ZGTXXxu8E/MIWaEDB+Sm/",
|
||||
"1UI3",
|
||||
"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
|
||||
"ihtqpG6FMt65+Xk+tWUH2",
|
||||
"NhXXU9rg4XXdzo7u5o",
|
||||
"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
|
||||
"uSUX02HYJ1IkyLdhINEFcCf7l2",
|
||||
"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
|
||||
"3binT1s/5a1pu3fGsN",
|
||||
"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
|
||||
"DYS3StqnAEKdGddRP8CJrxUSFh",
|
||||
"crquW+4",
|
||||
"ryKqvW9B9hly+JAymXCIfag5Z",
|
||||
"Hr08T/NDTX1oSJfHk90c",
|
||||
"i",
|
||||
}
|
||||
|
||||
var PCAlgorithms = []string{
|
||||
@ -62,13 +63,13 @@ var PCAlgorithms = []string{
|
||||
const (
|
||||
AndroidClientID = "YNxT9w7GMdWvEOKa"
|
||||
AndroidClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
|
||||
AndroidClientVersion = "1.48.3"
|
||||
AndroidClientVersion = "1.49.3"
|
||||
AndroidPackageName = "com.pikcloud.pikpak"
|
||||
AndroidSdkVersion = "2.0.4.204101"
|
||||
WebClientID = "YUMx5nI8ZU8Ap8pm"
|
||||
WebClientSecret = "dbw2OtmVEeuUvIptb1Coyg"
|
||||
WebClientVersion = "2.0.0"
|
||||
WebPackageName = "mypikpak.com"
|
||||
WebClientVersion = "undefined"
|
||||
WebPackageName = "drive.mypikpak.com"
|
||||
WebSdkVersion = "8.0.3"
|
||||
PCClientID = "YvtoWO6GNHiuCl7x"
|
||||
PCClientSecret = "1NIH5R1IEe2pAxZE3hv3uA"
|
||||
@ -77,44 +78,6 @@ const (
|
||||
PCSdkVersion = "8.0.3"
|
||||
)
|
||||
|
||||
var DlAddr = []string{
|
||||
"dl-a10b-0621.mypikpak.com",
|
||||
"dl-a10b-0622.mypikpak.com",
|
||||
"dl-a10b-0623.mypikpak.com",
|
||||
"dl-a10b-0624.mypikpak.com",
|
||||
"dl-a10b-0625.mypikpak.com",
|
||||
"dl-a10b-0858.mypikpak.com",
|
||||
"dl-a10b-0859.mypikpak.com",
|
||||
"dl-a10b-0860.mypikpak.com",
|
||||
"dl-a10b-0861.mypikpak.com",
|
||||
"dl-a10b-0862.mypikpak.com",
|
||||
"dl-a10b-0863.mypikpak.com",
|
||||
"dl-a10b-0864.mypikpak.com",
|
||||
"dl-a10b-0865.mypikpak.com",
|
||||
"dl-a10b-0866.mypikpak.com",
|
||||
"dl-a10b-0867.mypikpak.com",
|
||||
"dl-a10b-0868.mypikpak.com",
|
||||
"dl-a10b-0869.mypikpak.com",
|
||||
"dl-a10b-0870.mypikpak.com",
|
||||
"dl-a10b-0871.mypikpak.com",
|
||||
"dl-a10b-0872.mypikpak.com",
|
||||
"dl-a10b-0873.mypikpak.com",
|
||||
"dl-a10b-0874.mypikpak.com",
|
||||
"dl-a10b-0875.mypikpak.com",
|
||||
"dl-a10b-0876.mypikpak.com",
|
||||
"dl-a10b-0877.mypikpak.com",
|
||||
"dl-a10b-0878.mypikpak.com",
|
||||
"dl-a10b-0879.mypikpak.com",
|
||||
"dl-a10b-0880.mypikpak.com",
|
||||
"dl-a10b-0881.mypikpak.com",
|
||||
"dl-a10b-0882.mypikpak.com",
|
||||
"dl-a10b-0883.mypikpak.com",
|
||||
"dl-a10b-0884.mypikpak.com",
|
||||
"dl-a10b-0885.mypikpak.com",
|
||||
"dl-a10b-0886.mypikpak.com",
|
||||
"dl-a10b-0887.mypikpak.com",
|
||||
}
|
||||
|
||||
func (d *PikPakShare) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
@ -159,7 +122,7 @@ func (d *PikPakShare) getSharePassToken() error {
|
||||
"limit": "100",
|
||||
}
|
||||
var resp ShareResp
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/share", http.MethodGet, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/share", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(query)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
@ -187,7 +150,7 @@ func (d *PikPakShare) getFiles(id string) ([]File, error) {
|
||||
"pass_code_token": d.PassCodeToken,
|
||||
}
|
||||
var resp ShareResp
|
||||
_, err := d.request("https://api-drive.mypikpak.com/drive/v1/share/detail", http.MethodGet, func(req *resty.Request) {
|
||||
_, err := d.request("https://api-drive.mypikpak.net/drive/v1/share/detail", http.MethodGet, func(req *resty.Request) {
|
||||
req.SetQueryParams(query)
|
||||
}, &resp)
|
||||
if err != nil {
|
||||
@ -227,7 +190,6 @@ type Common struct {
|
||||
UserAgent string
|
||||
// callback invoked after the captcha token is refreshed successfully
|
||||
RefreshCTokenCk func(token string)
|
||||
LowLatencyAddr string
|
||||
}
|
||||
|
||||
func (c *Common) SetUserAgent(userAgent string) {
|
||||
@ -345,7 +307,7 @@ func (d *PikPakShare) refreshCaptchaToken(action string, metas map[string]string
|
||||
}
|
||||
var e ErrResp
|
||||
var resp CaptchaTokenResponse
|
||||
_, err := d.request("https://user.mypikpak.com/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
|
||||
_, err := d.request("https://user.mypikpak.net/v1/shield/captcha/init", http.MethodPost, func(req *resty.Request) {
|
||||
req.SetError(&e).SetBody(param)
|
||||
}, &resp)
|
||||
|
||||
@ -367,46 +329,3 @@ func (d *PikPakShare) refreshCaptchaToken(action string, metas map[string]string
|
||||
d.Common.SetCaptchaToken(resp.CaptchaToken)
|
||||
return nil
|
||||
}
|
||||
|
||||
type AddressLatency struct {
|
||||
Address string
|
||||
Latency time.Duration
|
||||
}
|
||||
|
||||
func checkLatency(address string, wg *sync.WaitGroup, ch chan<- AddressLatency) {
|
||||
defer wg.Done()
|
||||
start := time.Now()
|
||||
resp, err := http.Get("https://" + address + "/generate_204")
|
||||
if err != nil {
|
||||
ch <- AddressLatency{Address: address, Latency: time.Hour} // Set high latency on error
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
latency := time.Since(start)
|
||||
ch <- AddressLatency{Address: address, Latency: latency}
|
||||
}
|
||||
|
||||
func findLowestLatencyAddress(addresses []string) string {
|
||||
var wg sync.WaitGroup
|
||||
ch := make(chan AddressLatency, len(addresses))
|
||||
|
||||
for _, address := range addresses {
|
||||
wg.Add(1)
|
||||
go checkLatency(address, &wg, ch)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
var lowestLatencyAddress string
|
||||
lowestLatency := time.Hour
|
||||
|
||||
for result := range ch {
|
||||
if result.Latency < lowestLatency {
|
||||
lowestLatency = result.Latency
|
||||
lowestLatencyAddress = result.Address
|
||||
}
|
||||
}
|
||||
|
||||
return lowestLatencyAddress
|
||||
}
|
||||
|
@ -10,7 +10,6 @@ import (
|
||||
"math"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
@ -24,6 +23,8 @@ type Terabox struct {
|
||||
model.Storage
|
||||
Addition
|
||||
JsToken string
|
||||
url_domain_prefix string
|
||||
base_url string
|
||||
}
|
||||
|
||||
func (d *Terabox) Config() driver.Config {
|
||||
@ -36,6 +37,8 @@ func (d *Terabox) GetAddition() driver.Additional {
|
||||
|
||||
func (d *Terabox) Init(ctx context.Context) error {
|
||||
var resp CheckLoginResp
|
||||
d.base_url = "https://www.terabox.com"
|
||||
d.url_domain_prefix = "jp"
|
||||
_, err := d.get("/api/check/login", nil, &resp)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -71,7 +74,16 @@ func (d *Terabox) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
||||
}
|
||||
|
||||
func (d *Terabox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||
_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
|
||||
params := map[string]string{
|
||||
"a": "commit",
|
||||
}
|
||||
data := map[string]string{
|
||||
"path": stdpath.Join(parentDir.GetPath(), dirName),
|
||||
"isdir": "1",
|
||||
"block_list": "[]",
|
||||
}
|
||||
res, err := d.post_form("/api/create", params, data, nil)
|
||||
log.Debugln(string(res))
|
||||
return err
|
||||
}
|
||||
|
||||
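The rewritten MakeDir above posts the folder as form fields through the new post_form helper instead of the old query-string create helper. Purely as an illustration of what goes over the wire for POST /api/create?a=commit (field names from the hunk, path values hypothetical):

package main

import (
	"fmt"
	"net/url"
	stdpath "path"
)

func main() {
	data := url.Values{}
	data.Set("path", stdpath.Join("/videos", "new-folder")) // hypothetical parent dir and name
	data.Set("isdir", "1")
	data.Set("block_list", "[]")
	// encoded form body for POST /api/create?a=commit
	fmt.Println(data.Encode()) // block_list=%5B%5D&isdir=1&path=%2Fvideos%2Fnew-folder
}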
@ -117,63 +129,61 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
|
||||
}
|
||||
|
||||
func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||
tempFile, err := stream.CacheFullInTempFile()
|
||||
resp, err := base.RestyClient.R().
|
||||
SetContext(ctx).
|
||||
Get("https://" + d.url_domain_prefix + "-data.terabox.com/rest/2.0/pcs/file?method=locateupload")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var Default int64 = 4 * 1024 * 1024
|
||||
defaultByteData := make([]byte, Default)
|
||||
count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
|
||||
// cal md5
|
||||
h1 := md5.New()
|
||||
h2 := md5.New()
|
||||
block_list := make([]string, 0)
|
||||
left := stream.GetSize()
|
||||
for i := 0; i < count; i++ {
|
||||
byteSize := Default
|
||||
var byteData []byte
|
||||
if left < Default {
|
||||
byteSize = left
|
||||
byteData = make([]byte, byteSize)
|
||||
} else {
|
||||
byteData = defaultByteData
|
||||
}
|
||||
left -= byteSize
|
||||
_, err = io.ReadFull(tempFile, byteData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h1.Write(byteData)
|
||||
h2.Write(byteData)
|
||||
block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
|
||||
h2.Reset()
|
||||
}
|
||||
|
||||
_, err = tempFile.Seek(0, io.SeekStart)
|
||||
var locateupload_resp LocateUploadResp
|
||||
err = utils.Json.Unmarshal(resp.Body(), &locateupload_resp)
|
||||
if err != nil {
|
||||
log.Debugln(resp)
|
||||
return err
|
||||
}
|
||||
log.Debugln(locateupload_resp)
|
||||
|
||||
// precreate file
|
||||
rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
|
||||
path := encodeURIComponent(rawPath)
|
||||
block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ","))
|
||||
data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s",
|
||||
path, stream.GetSize(),
|
||||
block_list_str)
|
||||
params := map[string]string{}
|
||||
|
||||
var precreateBlockListStr string
|
||||
if stream.GetSize() > initialChunkSize {
|
||||
precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761","a5fc157d78e6ad1c7e114b056c92821e"]`
|
||||
} else {
|
||||
precreateBlockListStr = `["5910a591dd8fc18c32a8f3df4fdc1761"]`
|
||||
}
|
||||
|
||||
data := map[string]string{
|
||||
"path": rawPath,
|
||||
"autoinit": "1",
|
||||
"target_path": dstDir.GetPath(),
|
||||
"block_list": precreateBlockListStr,
|
||||
"local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10),
|
||||
"file_limit_switch_v34": "true",
|
||||
}
|
||||
var precreateResp PrecreateResp
|
||||
_, err = d.post("/api/precreate", params, data, &precreateResp)
|
||||
log.Debugln(data)
|
||||
res, err := d.post_form("/api/precreate", nil, data, &precreateResp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debugf("%+v", precreateResp)
|
||||
if precreateResp.Errno != 0 {
|
||||
log.Debugln(string(res))
|
||||
return fmt.Errorf("[terabox] failed to precreate file, errno: %d", precreateResp.Errno)
|
||||
}
|
||||
if precreateResp.ReturnType == 2 {
|
||||
return nil
|
||||
}
|
||||
params = map[string]string{
|
||||
|
||||
// upload chunks
|
||||
tempFile, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
params := map[string]string{
|
||||
"method": "upload",
|
||||
"path": path,
|
||||
"uploadid": precreateResp.Uploadid,
|
||||
@ -182,25 +192,38 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
|
||||
"channel": "dubox",
|
||||
"clienttype": "0",
|
||||
}
|
||||
left = stream.GetSize()
|
||||
for i, partseq := range precreateResp.BlockList {
|
||||
|
||||
streamSize := stream.GetSize()
|
||||
chunkSize := calculateChunkSize(streamSize)
|
||||
chunkByteData := make([]byte, chunkSize)
|
||||
count := int(math.Ceil(float64(streamSize) / float64(chunkSize)))
|
||||
left := streamSize
|
||||
uploadBlockList := make([]string, 0, count)
|
||||
h := md5.New()
|
||||
for partseq := 0; partseq < count; partseq++ {
|
||||
if utils.IsCanceled(ctx) {
|
||||
return ctx.Err()
|
||||
}
|
||||
byteSize := Default
|
||||
byteSize := chunkSize
|
||||
var byteData []byte
|
||||
if left < Default {
|
||||
if left >= chunkSize {
|
||||
byteData = chunkByteData
|
||||
} else {
|
||||
byteSize = left
|
||||
byteData = make([]byte, byteSize)
|
||||
} else {
|
||||
byteData = defaultByteData
|
||||
}
|
||||
left -= byteSize
|
||||
_, err = io.ReadFull(tempFile, byteData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u := "https://c-jp.terabox.com/rest/2.0/pcs/superfile2"
|
||||
|
||||
// calculate md5
|
||||
h.Write(byteData)
|
||||
uploadBlockList = append(uploadBlockList, hex.EncodeToString(h.Sum(nil)))
|
||||
h.Reset()
|
||||
|
||||
u := "https://" + locateupload_resp.Host + "/rest/2.0/pcs/superfile2"
|
||||
params["partseq"] = strconv.Itoa(partseq)
|
||||
res, err := base.RestyClient.R().
|
||||
SetContext(ctx).
|
||||
@ -212,12 +235,39 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
|
||||
return err
|
||||
}
|
||||
log.Debugln(res.String())
|
||||
if len(precreateResp.BlockList) > 0 {
|
||||
up(float64(i) * 100 / float64(len(precreateResp.BlockList)))
|
||||
if count > 0 {
|
||||
up(float64(partseq) * 100 / float64(count))
|
||||
}
|
||||
}
|
||||
_, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)
|
||||
|
||||
// create file
|
||||
params = map[string]string{
|
||||
"isdir": "0",
|
||||
"rtype": "1",
|
||||
}
|
||||
|
||||
uploadBlockListStr, err := utils.Json.MarshalToString(uploadBlockList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data = map[string]string{
|
||||
"path": rawPath,
|
||||
"size": strconv.FormatInt(stream.GetSize(), 10),
|
||||
"uploadid": precreateResp.Uploadid,
|
||||
"target_path": dstDir.GetPath(),
|
||||
"block_list": uploadBlockListStr,
|
||||
"local_mtime": strconv.FormatInt(stream.ModTime().Unix(), 10),
|
||||
}
|
||||
var createResp CreateResp
|
||||
res, err = d.post_form("/api/create", params, data, &createResp)
|
||||
log.Debugln(string(res))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if createResp.Errno != 0 {
|
||||
return fmt.Errorf("[terabox] failed to create file, errno: %d", createResp.Errno)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
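The new upload path above hashes each chunk as it is read and collects the hex digests into the block_list that /api/create expects. A self-contained sketch of that per-chunk hashing; the reader and chunk size are placeholders, not real upload data:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// blockList reads r in chunkSize pieces and returns one md5 hex digest per chunk,
// mirroring how the Put hunk builds uploadBlockList.
func blockList(r io.Reader, chunkSize int64) ([]string, error) {
	var out []string
	h := md5.New()
	buf := make([]byte, chunkSize)
	for {
		n, err := io.ReadFull(r, buf)
		if n > 0 {
			h.Write(buf[:n])
			out = append(out, hex.EncodeToString(h.Sum(nil)))
			h.Reset()
		}
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return out, nil
		}
		if err != nil {
			return nil, err
		}
	}
}

func main() {
	list, _ := blockList(strings.NewReader("hello terabox"), 4) // toy 4-byte chunks
	fmt.Println(list)
}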
var _ driver.Driver = (*Terabox)(nil)
|
||||
|
@ -95,3 +95,11 @@ type PrecreateResp struct {
|
||||
type CheckLoginResp struct {
|
||||
Errno int `json:"errno"`
|
||||
}
|
||||
|
||||
type LocateUploadResp struct {
|
||||
Host string `json:"host"`
|
||||
}
|
||||
|
||||
type CreateResp struct {
|
||||
Errno int `json:"errno"`
|
||||
}
|
||||
|
@ -14,6 +14,12 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/go-resty/resty/v2"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
initialChunkSize int64 = 4 << 20 // 4MB
|
||||
initialSizeThreshold int64 = 4 << 30 // 4GB
|
||||
)
|
||||
|
||||
func getStrBetween(raw, start, end string) string {
|
||||
@ -28,11 +34,11 @@ func getStrBetween(raw, start, end string) string {
|
||||
}
|
||||
|
||||
func (d *Terabox) resetJsToken() error {
|
||||
u := "https://www.terabox.com/main"
|
||||
u := d.base_url
|
||||
res, err := base.RestyClient.R().SetHeaders(map[string]string{
|
||||
"Cookie": d.Cookie,
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"Referer": "https://www.terabox.com/",
|
||||
"Referer": d.base_url,
|
||||
"User-Agent": base.UserAgent,
|
||||
"X-Requested-With": "XMLHttpRequest",
|
||||
}).Get(u)
|
||||
@ -48,12 +54,12 @@ func (d *Terabox) resetJsToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Terabox) request(furl string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
|
||||
func (d *Terabox) request(rurl string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
|
||||
req := base.RestyClient.R()
|
||||
req.SetHeaders(map[string]string{
|
||||
"Cookie": d.Cookie,
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"Referer": "https://www.terabox.com/",
|
||||
"Referer": d.base_url,
|
||||
"User-Agent": base.UserAgent,
|
||||
"X-Requested-With": "XMLHttpRequest",
|
||||
})
|
||||
@ -70,7 +76,7 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback,
|
||||
if resp != nil {
|
||||
req.SetResult(resp)
|
||||
}
|
||||
res, err := req.Execute(method, furl)
|
||||
res, err := req.Execute(method, d.base_url+rurl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -82,14 +88,24 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback,
|
||||
return nil, err
|
||||
}
|
||||
if !utils.IsBool(noRetry...) {
|
||||
return d.request(furl, method, callback, resp, true)
|
||||
return d.request(rurl, method, callback, resp, true)
|
||||
}
|
||||
} else if errno == -6 {
|
||||
header := res.Header()
|
||||
log.Debugln(header)
|
||||
urlDomainPrefix := header.Get("Url-Domain-Prefix")
|
||||
if len(urlDomainPrefix) > 0 {
|
||||
d.url_domain_prefix = urlDomainPrefix
|
||||
d.base_url = "https://" + d.url_domain_prefix + ".terabox.com"
|
||||
log.Debugln("Redirect base_url to", d.base_url)
|
||||
return d.request(rurl, method, callback, resp, noRetry...)
|
||||
}
|
||||
}
|
||||
return res.Body(), nil
|
||||
}
|
||||
|
||||
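When the API answers errno -6, the request helper above now reads the Url-Domain-Prefix response header and retries against that regional domain instead of the hard-coded www.terabox.com. The rewrite itself is simple string assembly; as a standalone illustration (the "jp" prefix value is hypothetical):

package main

import "fmt"

// newBaseURL mirrors the redirect handling in the request helper: given the
// Url-Domain-Prefix header value, build the regional Terabox base URL.
func newBaseURL(prefix, fallback string) string {
	if prefix == "" {
		return fallback
	}
	return "https://" + prefix + ".terabox.com"
}

func main() {
	fmt.Println(newBaseURL("jp", "https://www.terabox.com")) // https://jp.terabox.com
	fmt.Println(newBaseURL("", "https://www.terabox.com"))   // unchanged fallback
}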
func (d *Terabox) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) {
|
||||
return d.request("https://www.terabox.com"+pathname, http.MethodGet, func(req *resty.Request) {
|
||||
return d.request(pathname, http.MethodGet, func(req *resty.Request) {
|
||||
if params != nil {
|
||||
req.SetQueryParams(params)
|
||||
}
|
||||
@ -97,7 +113,7 @@ func (d *Terabox) get(pathname string, params map[string]string, resp interface{
|
||||
}
|
||||
|
||||
func (d *Terabox) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) {
|
||||
return d.request("https://www.terabox.com"+pathname, http.MethodPost, func(req *resty.Request) {
|
||||
return d.request(pathname, http.MethodPost, func(req *resty.Request) {
|
||||
if params != nil {
|
||||
req.SetQueryParams(params)
|
||||
}
|
||||
@ -105,6 +121,15 @@ func (d *Terabox) post(pathname string, params map[string]string, data interface
|
||||
}, resp)
|
||||
}
|
||||
|
||||
func (d *Terabox) post_form(pathname string, params map[string]string, data map[string]string, resp interface{}) ([]byte, error) {
|
||||
return d.request(pathname, http.MethodPost, func(req *resty.Request) {
|
||||
if params != nil {
|
||||
req.SetQueryParams(params)
|
||||
}
|
||||
req.SetFormData(data)
|
||||
}, resp)
|
||||
}
|
||||
|
||||
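post_form above just layers SetFormData on top of the shared request helper. A hedged usage sketch, reusing the precreate call from the Put hunk; it assumes the drivers/terabox package context (d is a configured *Terabox, PrecreateResp is the type from types.go, log is the package's logger), and the field values are placeholders rather than real upload data:

// precreateExample is a usage sketch of the post_form helper above.
func precreateExample(d *Terabox) error {
	var precreateResp PrecreateResp
	data := map[string]string{
		"path":       "/videos/movie.mkv",
		"autoinit":   "1",
		"block_list": `["5910a591dd8fc18c32a8f3df4fdc1761"]`,
	}
	res, err := d.post_form("/api/precreate", nil, data, &precreateResp)
	if err != nil {
		return err
	}
	log.Debugf("precreate errno=%d body=%s", precreateResp.Errno, string(res))
	return nil
}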
func (d *Terabox) getFiles(dir string) ([]File, error) {
|
||||
page := 1
|
||||
num := 100
|
||||
@ -237,17 +262,24 @@ func (d *Terabox) manage(opera string, filelist interface{}) ([]byte, error) {
|
||||
return d.post("/api/filemanager", params, data, nil)
|
||||
}
|
||||
|
||||
func (d *Terabox) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
|
||||
params := map[string]string{}
|
||||
data := fmt.Sprintf("path=%s&size=%d&isdir=%d", encodeURIComponent(path), size, isdir)
|
||||
if uploadid != "" {
|
||||
data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
|
||||
}
|
||||
return d.post("/api/create", params, data, nil)
|
||||
}
|
||||
|
||||
func encodeURIComponent(str string) string {
|
||||
r := url.QueryEscape(str)
|
||||
r = strings.ReplaceAll(r, "+", "%20")
|
||||
return r
|
||||
}
|
||||
|
||||
func calculateChunkSize(streamSize int64) int64 {
|
||||
chunkSize := initialChunkSize
|
||||
sizeThreshold := initialSizeThreshold
|
||||
|
||||
if streamSize < chunkSize {
|
||||
return streamSize
|
||||
}
|
||||
|
||||
for streamSize > sizeThreshold {
|
||||
chunkSize <<= 1
|
||||
sizeThreshold <<= 1
|
||||
}
|
||||
|
||||
return chunkSize
|
||||
}
|
||||
|
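calculateChunkSize above starts at 4MB and doubles the chunk every time the file size crosses another doubling of the 4GB threshold, which keeps the number of blocks bounded for large uploads. A quick runnable check of the values it produces; the function body is restated verbatim from the diff so the example is self-contained:

package main

import "fmt"

func calculateChunkSize(streamSize int64) int64 {
	chunkSize := int64(4 << 20)     // 4MB, as in the driver
	sizeThreshold := int64(4 << 30) // 4GB
	if streamSize < chunkSize {
		return streamSize
	}
	for streamSize > sizeThreshold {
		chunkSize <<= 1
		sizeThreshold <<= 1
	}
	return chunkSize
}

func main() {
	for _, size := range []int64{1 << 20, 3 << 30, 5 << 30, 20 << 30} {
		fmt.Printf("%6d MiB file -> %2d MiB chunks\n", size>>20, calculateChunkSize(size)>>20)
	}
	// output: 1, 4, 8 and 32 MiB chunks respectively
}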
@ -55,7 +55,9 @@ func (d *Vtencent) Init(ctx context.Context) error {
|
||||
}
|
||||
|
||||
func (d *Vtencent) Drop(ctx context.Context) error {
|
||||
if d.cron != nil {
|
||||
d.cron.Stop()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
27
go.mod
@ -3,7 +3,9 @@ module github.com/alist-org/alist/v3
|
||||
go 1.22.4
|
||||
|
||||
require (
|
||||
github.com/SheltonZhu/115driver v1.0.27
|
||||
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
|
||||
github.com/KirCute/sftpd-alist v0.0.11
|
||||
github.com/SheltonZhu/115driver v1.0.32
|
||||
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21
|
||||
github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
|
||||
github.com/alist-org/gofakes3 v0.0.7
|
||||
@ -33,6 +35,7 @@ require (
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0
|
||||
github.com/hirochachacha/go-smb2 v1.1.0
|
||||
github.com/ipfs/go-ipfs-api v0.7.0
|
||||
github.com/jlaffaye/ftp v0.2.0
|
||||
@ -49,21 +52,22 @@ require (
|
||||
github.com/pquerna/otp v1.4.0
|
||||
github.com/rclone/rclone v1.67.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.11.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7
|
||||
github.com/u2takey/ffmpeg-go v0.5.0
|
||||
github.com/upyun/go-sdk/v3 v3.0.4
|
||||
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
|
||||
github.com/xhofe/tache v0.1.2
|
||||
github.com/xhofe/tache v0.1.3
|
||||
github.com/xhofe/wopan-sdk-go v0.1.3
|
||||
github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
|
||||
golang.org/x/crypto v0.27.0
|
||||
golang.org/x/crypto v0.30.0
|
||||
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
|
||||
golang.org/x/image v0.19.0
|
||||
golang.org/x/net v0.28.0
|
||||
golang.org/x/oauth2 v0.22.0
|
||||
golang.org/x/time v0.6.0
|
||||
golang.org/x/time v0.8.0
|
||||
google.golang.org/appengine v1.6.8
|
||||
gopkg.in/ldap.v3 v3.1.0
|
||||
gorm.io/driver/mysql v1.5.7
|
||||
@ -82,8 +86,12 @@ require (
|
||||
github.com/cloudwego/base64x v0.1.4 // indirect
|
||||
github.com/cloudwego/iasm v0.2.0 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/fclairamb/go-log v0.5.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hekmon/cunits/v2 v2.1.0 // indirect
|
||||
github.com/ipfs/boxo v0.12.0 // indirect
|
||||
github.com/jackc/puddle/v2 v2.2.1 // indirect
|
||||
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
@ -186,6 +194,7 @@ require (
|
||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||
github.com/multiformats/go-multistream v0.4.1 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/otiai10/copy v1.14.0
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.18 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
@ -216,10 +225,10 @@ require (
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.etcd.io/bbolt v1.3.8 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/sync v0.8.0 // indirect
|
||||
golang.org/x/sys v0.25.0 // indirect
|
||||
golang.org/x/term v0.24.0 // indirect
|
||||
golang.org/x/text v0.18.0 // indirect
|
||||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/sys v0.28.0 // indirect
|
||||
golang.org/x/term v0.27.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/tools v0.24.0 // indirect
|
||||
google.golang.org/api v0.169.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
|
||||
|
71
go.sum
@ -1,14 +1,19 @@
|
||||
cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
|
||||
cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
|
||||
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
|
||||
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0 h1:ikwCzeqoqN6wvBHOB9OI6dde/jbV7EoTMpUcxtYl5Po=
|
||||
github.com/KirCute/ftpserverlib-pasvportmap v1.25.0/go.mod h1:v0NgMtKDDi/6CM6r4P+daCljCW3eO9yS+Z+pZDTKo1E=
|
||||
github.com/KirCute/sftpd-alist v0.0.11 h1:BGInXmmLBI+v6S9WZCwvY0DRK1vDprGNcTv/57p2GSo=
|
||||
github.com/KirCute/sftpd-alist v0.0.11/go.mod h1:pPFzr6GrKqXvFXLr46ZpoqmtSpwH8DKTYloSp/ybzKQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
|
||||
github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
|
||||
github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
|
||||
github.com/SheltonZhu/115driver v1.0.27 h1:Ya1HYHYXFmi7JnqQ/+Vy6xZvq3leto+E+PxTm6UChj8=
|
||||
github.com/SheltonZhu/115driver v1.0.27/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4=
|
||||
github.com/SheltonZhu/115driver v1.0.32 h1:Taw1bnfcPJZW0xTdhDvEbBS1tccif7J7DslRp2NkDyQ=
|
||||
github.com/SheltonZhu/115driver v1.0.32/go.mod h1:XXFi23pyhAgzUE8dUEKdGvIdUQKi3wv6zR7C1Do40D8=
|
||||
github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=
|
||||
github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
|
||||
github.com/Xhofe/go-cache v0.0.0-20240804043513-b1a71927bc21 h1:h6q5E9aMBhhdqouW81LozVPI1I+Pu6IxL2EKpfm5OjY=
|
||||
@ -96,8 +101,6 @@ github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc
|
||||
github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/charmbracelet/bubbles v0.19.0 h1:gKZkKXPP6GlDk6EcfujDK19PCQqRjaJZQ7QRERx1UF0=
|
||||
github.com/charmbracelet/bubbles v0.19.0/go.mod h1:WILteEqZ+krG5c3ntGEMeG99nCupcuIk7V0/zOP0tOA=
|
||||
github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
|
||||
github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
|
||||
github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c=
|
||||
@ -146,6 +149,8 @@ github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJL
|
||||
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/fclairamb/go-log v0.5.0 h1:Gz9wSamEaA6lta4IU2cjJc2xSq5sV5VYSB5w/SUHhVc=
|
||||
github.com/fclairamb/go-log v0.5.0/go.mod h1:XoRO1dYezpsGmLLkZE9I+sHqpqY65p8JA+Vqblb7k40=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/foxxorcat/mopan-sdk-go v0.1.6 h1:6J37oI4wMZLj8EPgSCcSTTIbnI5D6RCNW/srX8vQd1Y=
|
||||
@ -170,6 +175,10 @@ github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s=
|
||||
github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@ -242,11 +251,17 @@ github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
|
||||
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hekmon/cunits/v2 v2.1.0 h1:k6wIjc4PlacNOHwKEMBgWV2/c8jyD4eRMs5mR1BBhI0=
|
||||
github.com/hekmon/cunits/v2 v2.1.0/go.mod h1:9r1TycXYXaTmEWlAIfFV8JT+Xo59U96yUJAYHxzii2M=
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0 h1:0Fb11qE0IBh4V4GlOwHNYpqpjcYDp5GouolwrpmcUDQ=
|
||||
github.com/hekmon/transmissionrpc/v3 v3.0.0/go.mod h1:38SlNhFzinVUuY87wGj3acOmRxeYZAZfrj6Re7UgCDg=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
|
||||
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@ -387,6 +402,10 @@ github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg=
|
||||
github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk=
|
||||
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ=
|
||||
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng=
|
||||
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
|
||||
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
|
||||
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
|
||||
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
|
||||
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
|
||||
@ -433,6 +452,8 @@ github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
|
||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4 h1:PT+ElG/UUFMfqy5HrxJxNzj3QBOf7dZwupeVC+mG1Lo=
|
||||
github.com/secsy/goftp v0.0.0-20200609142545-aa2de14babf4/go.mod h1:MnkX001NG75g3p8bhFycnyIjeQoOjGL6CEIsdE/nKSY=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc=
|
||||
github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
|
||||
@ -451,6 +472,8 @@ github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:s
|
||||
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
|
||||
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
|
||||
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
@ -471,10 +494,13 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 h1:Jtcrb09q0AVWe3BGe8qtuuGxNSHWGkTWr43kHTJ+CpA=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
|
||||
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 h1:6Y51mutOvRGRx6KqyMNo//xk8B8o6zW9/RVmy1VamOs=
|
||||
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543/go.mod h1:jpwqYA8KUVEvSUJHkCXsnBRJCSKP1BMa81QZ6kvRpow=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
|
||||
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
|
||||
@ -506,8 +532,8 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
|
||||
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
|
||||
github.com/xhofe/tache v0.1.2 h1:pHrXlrWcbTb4G7hVUDW7Rc+YTUnLJvnLBrdktVE1Fqg=
|
||||
github.com/xhofe/tache v0.1.2/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
|
||||
github.com/xhofe/tache v0.1.3 h1:MipxzlljYX29E1YI/SLC7hVomVF+51iP1OUzlsuq1wE=
|
||||
github.com/xhofe/tache v0.1.3/go.mod h1:iKumPFvywf30FRpAHHCt64G0JHLMzT0K+wyGedHsmTQ=
|
||||
github.com/xhofe/wopan-sdk-go v0.1.3 h1:J58X6v+n25ewBZjb05pKOr7AWGohb+Rdll4CThGh6+A=
|
||||
github.com/xhofe/wopan-sdk-go v0.1.3/go.mod h1:dcY9yA28fnaoZPnXZiVTFSkcd7GnIPTpTIIlfSI5z5Q=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@ -548,10 +574,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
|
||||
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A=
|
||||
golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70=
|
||||
golang.org/x/crypto v0.30.0 h1:RwoQn3GkWiMkzlX562cLB7OxWvjH1L8xutO2WoJcRoY=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk=
|
||||
golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@ -593,8 +617,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@ -626,10 +650,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
|
||||
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@ -640,10 +662,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
|
||||
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
|
||||
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
|
||||
golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM=
|
||||
golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@ -657,14 +677,13 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224=
|
||||
golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
|
||||
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
|
@ -164,6 +164,7 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: conf.SSOApplicationName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
|
||||
{Key: conf.SSOEndpointName, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
|
||||
{Key: conf.SSOJwtPublicKey, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
|
||||
{Key: conf.SSOExtraScopes, Value: "", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
|
||||
{Key: conf.SSOAutoRegister, Value: "false", Type: conf.TypeBool, Group: model.SSO, Flag: model.PRIVATE},
|
||||
{Key: conf.SSODefaultDir, Value: "/", Type: conf.TypeString, Group: model.SSO, Flag: model.PRIVATE},
|
||||
{Key: conf.SSODefaultPermission, Value: "0", Type: conf.TypeNumber, Group: model.SSO, Flag: model.PRIVATE},
|
||||
@ -184,6 +185,16 @@ func InitialSettings() []model.SettingItem {
|
||||
{Key: conf.S3AccessKeyId, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
|
||||
{Key: conf.S3SecretAccessKey, Value: "", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
|
||||
{Key: conf.S3Buckets, Value: "[]", Type: conf.TypeString, Group: model.S3, Flag: model.PRIVATE},
|
||||
|
||||
//ftp settings
|
||||
{Key: conf.FTPPublicHost, Value: "127.0.0.1", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
|
||||
{Key: conf.FTPPasvPortMap, Value: "", Type: conf.TypeText, Group: model.FTP, Flag: model.PRIVATE},
|
||||
{Key: conf.FTPProxyUserAgent, Value: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " +
|
||||
"Chrome/87.0.4280.88 Safari/537.36", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
|
||||
{Key: conf.FTPMandatoryTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
|
||||
{Key: conf.FTPImplicitTLS, Value: "false", Type: conf.TypeBool, Group: model.FTP, Flag: model.PRIVATE},
|
||||
{Key: conf.FTPTLSPrivateKeyPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
|
||||
{Key: conf.FTPTLSPublicCertPath, Value: "", Type: conf.TypeString, Group: model.FTP, Flag: model.PRIVATE},
|
||||
}
|
||||
initialSettingItems = append(initialSettingItems, tool.Tools.Items()...)
|
||||
if flags.Dev {
|
||||
|
internal/bootstrap/ssh.go (new file, 101 lines)
@ -0,0 +1,101 @@
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/cmd/flags"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func InitHostKey() {
|
||||
sshPath := filepath.Join(flags.DataDir, "ssh")
|
||||
if !utils.Exists(sshPath) {
|
||||
err := utils.CreateNestedDirectory(sshPath)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to create ssh directory: %+v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
conf.SSHSigners = make([]ssh.Signer, 0, 4)
|
||||
if rsaKey, ok := LoadOrGenerateRSAHostKey(sshPath); ok {
|
||||
conf.SSHSigners = append(conf.SSHSigners, rsaKey)
|
||||
}
|
||||
// TODO Add keys for other encryption algorithms
|
||||
}
|
||||
|
||||
func LoadOrGenerateRSAHostKey(parentDir string) (ssh.Signer, bool) {
|
||||
privateKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key")
|
||||
publicKeyPath := filepath.Join(parentDir, "ssh_host_rsa_key.pub")
|
||||
privateKeyBytes, err := os.ReadFile(privateKeyPath)
|
||||
if err == nil {
|
||||
var privateKey *rsa.PrivateKey
|
||||
privateKey, err = rsaDecodePrivateKey(privateKeyBytes)
|
||||
if err == nil {
|
||||
var ret ssh.Signer
|
||||
ret, err = ssh.NewSignerFromKey(privateKey)
|
||||
if err == nil {
|
||||
return ret, true
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = os.Remove(privateKeyPath)
|
||||
_ = os.Remove(publicKeyPath)
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to generate RSA private key: %+v", err)
|
||||
return nil, false
|
||||
}
|
||||
publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to generate RSA public key: %+v", err)
|
||||
return nil, false
|
||||
}
|
||||
ret, err := ssh.NewSignerFromKey(privateKey)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to generate RSA signer: %+v", err)
|
||||
return nil, false
|
||||
}
|
||||
privateBytes := rsaEncodePrivateKey(privateKey)
|
||||
publicBytes := ssh.MarshalAuthorizedKey(publicKey)
|
||||
err = os.WriteFile(privateKeyPath, privateBytes, 0600)
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to write RSA private key to file: %+v", err)
|
||||
return nil, false
|
||||
}
|
||||
err = os.WriteFile(publicKeyPath, publicBytes, 0644)
|
||||
if err != nil {
|
||||
_ = os.Remove(privateKeyPath)
|
||||
utils.Log.Fatalf("failed to write RSA public key to file: %+v", err)
|
||||
return nil, false
|
||||
}
|
||||
return ret, true
|
||||
}
|
||||
|
||||
func rsaEncodePrivateKey(privateKey *rsa.PrivateKey) []byte {
|
||||
privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
|
||||
privateBlock := &pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Headers: nil,
|
||||
Bytes: privateKeyBytes,
|
||||
}
|
||||
return pem.EncodeToMemory(privateBlock)
|
||||
}
|
||||
|
||||
func rsaDecodePrivateKey(bytes []byte) (*rsa.PrivateKey, error) {
|
||||
block, _ := pem.Decode(bytes)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("failed to parse PEM block containing the key")
|
||||
}
|
||||
privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return privateKey, nil
|
||||
}
|
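Note on the new file above: InitHostKey only prepares host keys, appending the generated RSA signer to conf.SSHSigners; the SFTP listener that consumes them is wired up elsewhere in this change set. As a hedged illustration of how such signers are typically consumed (this sketch is not code from this PR, and the permissive NoClientAuth setting is purely for demonstration), a golang.org/x/crypto/ssh server registers them like this:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

// buildServerConfig shows the usual pattern for attaching host keys:
// one ssh.Signer per key algorithm, registered via AddHostKey.
func buildServerConfig(signers []ssh.Signer) *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		NoClientAuth: true, // demonstration only; a real server authenticates users
	}
	for _, s := range signers {
		cfg.AddHostKey(s)
	}
	return cfg
}

func main() {
	log.Println("sketch only; key generation itself lives in internal/bootstrap/ssh.go")
}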
@ -71,6 +71,24 @@ type S3 struct {
|
||||
SSL bool `json:"ssl" env:"SSL"`
|
||||
}
|
||||
|
||||
type FTP struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Listen string `json:"listen" env:"LISTEN"`
|
||||
FindPasvPortAttempts int `json:"find_pasv_port_attempts" env:"FIND_PASV_PORT_ATTEMPTS"`
|
||||
ActiveTransferPortNon20 bool `json:"active_transfer_port_non_20" env:"ACTIVE_TRANSFER_PORT_NON_20"`
|
||||
IdleTimeout int `json:"idle_timeout" env:"IDLE_TIMEOUT"`
|
||||
ConnectionTimeout int `json:"connection_timeout" env:"CONNECTION_TIMEOUT"`
|
||||
DisableActiveMode bool `json:"disable_active_mode" env:"DISABLE_ACTIVE_MODE"`
|
||||
DefaultTransferBinary bool `json:"default_transfer_binary" env:"DEFAULT_TRANSFER_BINARY"`
|
||||
EnableActiveConnIPCheck bool `json:"enable_active_conn_ip_check" env:"ENABLE_ACTIVE_CONN_IP_CHECK"`
|
||||
EnablePasvConnIPCheck bool `json:"enable_pasv_conn_ip_check" env:"ENABLE_PASV_CONN_IP_CHECK"`
|
||||
}
|
||||
|
||||
type SFTP struct {
|
||||
Enable bool `json:"enable" env:"ENABLE"`
|
||||
Listen string `json:"listen" env:"LISTEN"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
Force bool `json:"force" env:"FORCE"`
|
||||
SiteURL string `json:"site_url" env:"SITE_URL"`
|
||||
@ -90,6 +108,8 @@ type Config struct {
|
||||
Tasks TasksConfig `json:"tasks" envPrefix:"TASKS_"`
|
||||
Cors Cors `json:"cors" envPrefix:"CORS_"`
|
||||
S3 S3 `json:"s3" envPrefix:"S3_"`
|
||||
FTP FTP `json:"ftp" envPrefix:"FTP_"`
|
||||
SFTP SFTP `json:"sftp" envPrefix:"SFTP_"`
|
||||
}
|
||||
|
||||
func DefaultConfig() *Config {
|
||||
@ -133,12 +153,12 @@ func DefaultConfig() *Config {
|
||||
Download: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 1,
|
||||
TaskPersistant: true,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
Transfer: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
TaskPersistant: true,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
Upload: TaskConfig{
|
||||
Workers: 5,
|
||||
@ -146,7 +166,7 @@ func DefaultConfig() *Config {
|
||||
Copy: TaskConfig{
|
||||
Workers: 5,
|
||||
MaxRetry: 2,
|
||||
TaskPersistant: true,
|
||||
// TaskPersistant: true,
|
||||
},
|
||||
},
|
||||
Cors: Cors{
|
||||
@ -159,5 +179,21 @@ func DefaultConfig() *Config {
|
||||
Port: 5246,
|
||||
SSL: false,
|
||||
},
|
||||
FTP: FTP{
|
||||
Enable: false,
|
||||
Listen: ":5221",
|
||||
FindPasvPortAttempts: 50,
|
||||
ActiveTransferPortNon20: false,
|
||||
IdleTimeout: 900,
|
||||
ConnectionTimeout: 30,
|
||||
DisableActiveMode: false,
|
||||
DefaultTransferBinary: false,
|
||||
EnableActiveConnIPCheck: true,
|
||||
EnablePasvConnIPCheck: true,
|
||||
},
|
||||
SFTP: SFTP{
|
||||
Enable: false,
|
||||
Listen: ":5222",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -54,11 +54,15 @@ const (
|
||||
Aria2Uri = "aria2_uri"
|
||||
Aria2Secret = "aria2_secret"
|
||||
|
||||
// transmission
|
||||
TransmissionUri = "transmission_uri"
|
||||
TransmissionSeedtime = "transmission_seedtime"
|
||||
|
||||
// single
|
||||
Token = "token"
|
||||
IndexProgress = "index_progress"
|
||||
|
||||
//SSO
|
||||
// SSO
|
||||
SSOClientId = "sso_client_id"
|
||||
SSOClientSecret = "sso_client_secret"
|
||||
SSOLoginEnabled = "sso_login_enabled"
|
||||
@ -68,12 +72,13 @@ const (
|
||||
SSOApplicationName = "sso_application_name"
|
||||
SSOEndpointName = "sso_endpoint_name"
|
||||
SSOJwtPublicKey = "sso_jwt_public_key"
|
||||
SSOExtraScopes = "sso_extra_scopes"
|
||||
SSOAutoRegister = "sso_auto_register"
|
||||
SSODefaultDir = "sso_default_dir"
|
||||
SSODefaultPermission = "sso_default_permission"
|
||||
SSOCompatibilityMode = "sso_compatibility_mode"
|
||||
|
||||
//ldap
|
||||
// ldap
|
||||
LdapLoginEnabled = "ldap_login_enabled"
|
||||
LdapServer = "ldap_server"
|
||||
LdapManagerDN = "ldap_manager_dn"
|
||||
@ -84,7 +89,7 @@ const (
|
||||
LdapDefaultDir = "ldap_default_dir"
|
||||
LdapLoginTips = "ldap_login_tips"
|
||||
|
||||
//s3
|
||||
// s3
|
||||
S3Buckets = "s3_buckets"
|
||||
S3AccessKeyId = "s3_access_key_id"
|
||||
S3SecretAccessKey = "s3_secret_access_key"
|
||||
@ -92,12 +97,21 @@ const (
|
||||
// qbittorrent
|
||||
QbittorrentUrl = "qbittorrent_url"
|
||||
QbittorrentSeedtime = "qbittorrent_seedtime"
|
||||
|
||||
// ftp
|
||||
FTPPublicHost = "ftp_public_host"
|
||||
FTPPasvPortMap = "ftp_pasv_port_map"
|
||||
FTPProxyUserAgent = "ftp_proxy_user_agent"
|
||||
FTPMandatoryTLS = "ftp_mandatory_tls"
|
||||
FTPImplicitTLS = "ftp_implicit_tls"
|
||||
FTPTLSPrivateKeyPath = "ftp_tls_private_key_path"
|
||||
FTPTLSPublicCertPath = "ftp_tls_public_cert_path"
|
||||
)
|
||||
|
||||
const (
|
||||
UNKNOWN = iota
|
||||
FOLDER
|
||||
//OFFICE
|
||||
// OFFICE
|
||||
VIDEO
|
||||
AUDIO
|
||||
TEXT
|
||||
|
@ -1,6 +1,7 @@
|
||||
package conf
|
||||
|
||||
import (
|
||||
"golang.org/x/crypto/ssh"
|
||||
"net/url"
|
||||
"regexp"
|
||||
)
|
||||
@ -32,3 +33,5 @@ var (
|
||||
ManageHtml string
|
||||
IndexHtml string
|
||||
)
|
||||
|
||||
var SSHSigners []ssh.Signer
|
||||
|
@ -11,13 +11,14 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/xhofe/tache"
|
||||
)
|
||||
|
||||
type CopyTask struct {
|
||||
tache.Base
|
||||
task.TaskWithCreator
|
||||
Status string `json:"-"` //don't save status to save space
|
||||
SrcObjPath string `json:"src_path"`
|
||||
DstDirPath string `json:"dst_path"`
|
||||
@ -53,7 +54,7 @@ var CopyTaskManager *tache.Manager[*CopyTask]
|
||||
|
||||
// Copy if in the same storage, call move method
|
||||
// if not, add copy task
|
||||
func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) {
|
||||
func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) {
|
||||
srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "failed get src storage")
|
||||
@ -92,7 +93,11 @@ func _copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool
|
||||
}
|
||||
}
|
||||
// not in the same storage
|
||||
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil if the type assertion fails
|
||||
t := &CopyTask{
|
||||
TaskWithCreator: task.TaskWithCreator{
|
||||
Creator: taskCreator,
|
||||
},
|
||||
srcStorage: srcStorage,
|
||||
dstStorage: dstStorage,
|
||||
SrcObjPath: srcObjActualPath,
|
||||
@ -123,6 +128,9 @@ func copyBetween2Storages(t *CopyTask, srcStorage, dstStorage driver.Driver, src
|
||||
srcObjPath := stdpath.Join(srcObjPath, obj.GetName())
|
||||
dstObjPath := stdpath.Join(dstDirPath, srcObj.GetName())
|
||||
CopyTaskManager.Add(&CopyTask{
|
||||
TaskWithCreator: task.TaskWithCreator{
|
||||
Creator: t.Creator,
|
||||
},
|
||||
srcStorage: srcStorage,
|
||||
dstStorage: dstStorage,
|
||||
SrcObjPath: srcObjPath,
|
||||
|
@ -5,8 +5,8 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/xhofe/tache"
|
||||
)
|
||||
|
||||
// the param named path of functions in this package is a mount path
|
||||
@ -69,7 +69,7 @@ func Move(ctx context.Context, srcPath, dstDirPath string, lazyCache ...bool) er
|
||||
return err
|
||||
}
|
||||
|
||||
func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (tache.TaskWithInfo, error) {
|
||||
func Copy(ctx context.Context, srcObjPath, dstDirPath string, lazyCache ...bool) (task.TaskInfoWithCreator, error) {
|
||||
res, err := _copy(ctx, srcObjPath, dstDirPath, lazyCache...)
|
||||
if err != nil {
|
||||
log.Errorf("failed copy %s to %s: %+v", srcObjPath, dstDirPath, err)
|
||||
@ -101,8 +101,8 @@ func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer
|
||||
return err
|
||||
}
|
||||
|
||||
func PutAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) {
|
||||
t, err := putAsTask(dstDirPath, file)
|
||||
func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) {
|
||||
t, err := putAsTask(ctx, dstDirPath, file)
|
||||
if err != nil {
|
||||
log.Errorf("failed put %s: %+v", dstDirPath, err)
|
||||
}
|
||||
|
@ -7,12 +7,13 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/xhofe/tache"
|
||||
)
|
||||
|
||||
type UploadTask struct {
|
||||
tache.Base
|
||||
task.TaskWithCreator
|
||||
storage driver.Driver
|
||||
dstDirActualPath string
|
||||
file model.FileStreamer
|
||||
@ -33,7 +34,7 @@ func (t *UploadTask) Run() error {
|
||||
var UploadTaskManager *tache.Manager[*UploadTask]
|
||||
|
||||
// putAsTask add as a put task and return immediately
|
||||
func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo, error) {
|
||||
func putAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer) (task.TaskInfoWithCreator, error) {
|
||||
storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
|
||||
if err != nil {
|
||||
return nil, errors.WithMessage(err, "failed get storage")
|
||||
@ -49,7 +50,11 @@ func putAsTask(dstDirPath string, file model.FileStreamer) (tache.TaskWithInfo,
|
||||
//file.SetReader(tempFile)
|
||||
//file.SetTmpFile(tempFile)
|
||||
}
|
||||
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil if the type assertion fails
|
||||
t := &UploadTask{
|
||||
TaskWithCreator: task.TaskWithCreator{
|
||||
Creator: taskCreator,
|
||||
},
|
||||
storage: storage,
|
||||
dstDirActualPath: dstDirActualPath,
|
||||
file: file,
|
||||
|
@ -11,6 +11,7 @@ const (
|
||||
SSO
|
||||
LDAP
|
||||
S3
|
||||
FTP
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -117,6 +117,14 @@ func (u *User) CanWebdavManage() bool {
|
||||
return u.IsAdmin() || (u.Permission>>9)&1 == 1
|
||||
}
|
||||
|
||||
func (u *User) CanFTPAccess() bool {
|
||||
return (u.Permission>>10)&1 == 1
|
||||
}
|
||||
|
||||
func (u *User) CanFTPManage() bool {
|
||||
return (u.Permission>>11)&1 == 1
|
||||
}
|
||||
|
||||
func (u *User) JoinPath(reqPath string) (string, error) {
|
||||
return utils.JoinBasePath(u.BasePath, reqPath)
|
||||
}
|
||||
|
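The two new predicates extend the existing permission bitmask: bit 10 appears to gate FTP/SFTP access and bit 11 the write-style FTP/SFTP operations (mkdir, upload, rename, remove). A minimal sketch of how those bits compose; the constants are derived from the shifts above and the snippet is illustrative rather than code from this PR:

package main

import "fmt"

const (
	permFTPAccess = 1 << 10 // mirrors (Permission>>10)&1 in CanFTPAccess
	permFTPManage = 1 << 11 // mirrors (Permission>>11)&1 in CanFTPManage
)

func main() {
	var permission int32
	permission |= permFTPAccess // allow FTP/SFTP login
	permission |= permFTPManage // additionally allow write operations over FTP/SFTP
	fmt.Printf("bitmask: %012b\n", permission)
	fmt.Println("can access:", permission&permFTPAccess != 0)
	fmt.Println("can manage:", permission&permFTPManage != 0)
}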
@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
@ -13,6 +12,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/aws/aws-sdk-go/aws/awsutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@ -168,6 +169,9 @@ func (d *downloader) sendChunkTask() *chunk {
|
||||
|
||||
// when the final reader is closed, we interrupt
|
||||
func (d *downloader) interrupt() error {
|
||||
if d.chunkChannel == nil {
|
||||
return nil
|
||||
}
|
||||
d.cancel()
|
||||
if d.written != d.params.Range.Length {
|
||||
log.Debugf("Downloader interrupt before finish")
|
||||
@ -177,6 +181,7 @@ func (d *downloader) interrupt() error {
|
||||
}
|
||||
defer func() {
|
||||
close(d.chunkChannel)
|
||||
d.chunkChannel = nil
|
||||
for _, buf := range d.bufs {
|
||||
buf.Close()
|
||||
}
|
||||
|
@ -6,4 +6,5 @@ import (
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/http"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/pikpak"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/qbit"
|
||||
_ "github.com/alist-org/alist/v3/internal/offline_download/transmission"
|
||||
)
|
||||
|
@ -2,6 +2,8 @@ package tool
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
@ -9,7 +11,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/xhofe/tache"
|
||||
)
|
||||
|
||||
type DeletePolicy string
|
||||
@ -28,7 +29,7 @@ type AddURLArgs struct {
|
||||
DeletePolicy DeletePolicy
|
||||
}
|
||||
|
||||
func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
|
||||
func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskInfoWithCreator, error) {
|
||||
// get tool
|
||||
tool, err := Tools.Get(args.Tool)
|
||||
if err != nil {
|
||||
@ -78,7 +79,11 @@ func AddURL(ctx context.Context, args *AddURLArgs) (tache.TaskWithInfo, error) {
|
||||
deletePolicy = DeleteNever
|
||||
}
|
||||
|
||||
taskCreator, _ := ctx.Value("user").(*model.User) // taskCreator is nil if the type assertion fails
|
||||
t := &DownloadTask{
|
||||
TaskWithCreator: task.TaskWithCreator{
|
||||
Creator: taskCreator,
|
||||
},
|
||||
Url: args.URL,
|
||||
DstDirPath: args.DstDirPath,
|
||||
TempDir: tempDir,
|
||||
|
@ -7,13 +7,14 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/xhofe/tache"
|
||||
)
|
||||
|
||||
type DownloadTask struct {
|
||||
tache.Base
|
||||
task.TaskWithCreator
|
||||
Url string `json:"url"`
|
||||
DstDirPath string `json:"dst_dir_path"`
|
||||
TempDir string `json:"temp_dir"`
|
||||
@ -101,6 +102,19 @@ outer:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if t.tool.Name() == "transmission" {
|
||||
// hack for transmission
|
||||
seedTime := setting.GetInt(conf.TransmissionSeedtime, 0)
|
||||
if seedTime >= 0 {
|
||||
t.Status = "offline download completed, waiting for seeding"
|
||||
<-time.After(time.Minute * time.Duration(seedTime))
|
||||
err := t.tool.Remove(t)
|
||||
if err != nil {
|
||||
log.Errorln(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -158,6 +172,9 @@ func (t *DownloadTask) Complete() error {
|
||||
for i := range files {
|
||||
file := files[i]
|
||||
TransferTaskManager.Add(&TransferTask{
|
||||
TaskWithCreator: task.TaskWithCreator{
|
||||
Creator: t.Creator,
|
||||
},
|
||||
file: file,
|
||||
DstDirPath: t.DstDirPath,
|
||||
TempDir: t.TempDir,
|
||||
|
@ -8,6 +8,7 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"github.com/alist-org/alist/v3/internal/task"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
@ -15,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
type TransferTask struct {
|
||||
tache.Base
|
||||
task.TaskWithCreator
|
||||
FileDir string `json:"file_dir"`
|
||||
DstDirPath string `json:"dst_dir_path"`
|
||||
TempDir string `json:"temp_dir"`
|
||||
|
internal/offline_download/transmission/client.go (new file, 176 lines)
@ -0,0 +1,176 @@
|
||||
package transmission
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/offline_download/tool"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/hekmon/transmissionrpc/v3"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Transmission struct {
|
||||
client *transmissionrpc.Client
|
||||
}
|
||||
|
||||
func (t *Transmission) Run(task *tool.DownloadTask) error {
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (t *Transmission) Name() string {
|
||||
return "transmission"
|
||||
}
|
||||
|
||||
func (t *Transmission) Items() []model.SettingItem {
|
||||
// transmission settings
|
||||
return []model.SettingItem{
|
||||
{Key: conf.TransmissionUri, Value: "http://localhost:9091/transmission/rpc", Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
|
||||
{Key: conf.TransmissionSeedtime, Value: "0", Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Transmission) Init() (string, error) {
|
||||
t.client = nil
|
||||
uri := setting.GetStr(conf.TransmissionUri)
|
||||
endpoint, err := url.Parse(uri)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to init transmission client")
|
||||
}
|
||||
c, err := transmissionrpc.New(endpoint, nil)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to init transmission client")
|
||||
}
|
||||
|
||||
ok, serverVersion, serverMinimumVersion, err := c.RPCVersion(context.Background())
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "failed get transmission version")
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return "", fmt.Errorf("remote transmission RPC version (v%d) is incompatible with the transmission library (v%d): remote needs at least v%d",
|
||||
serverVersion, transmissionrpc.RPCVersion, serverMinimumVersion)
|
||||
}
|
||||
|
||||
t.client = c
|
||||
log.Infof("remote transmission RPC version (v%d) is compatible with our transmissionrpc library (v%d)\n",
|
||||
serverVersion, transmissionrpc.RPCVersion)
|
||||
log.Infof("using transmission version: %d", serverVersion)
|
||||
return fmt.Sprintf("transmission version: %d", serverVersion), nil
|
||||
}
|
||||
|
||||
func (t *Transmission) IsReady() bool {
|
||||
return t.client != nil
|
||||
}
|
||||
|
||||
func (t *Transmission) AddURL(args *tool.AddUrlArgs) (string, error) {
|
||||
endpoint, err := url.Parse(args.Url)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to parse transmission uri")
|
||||
}
|
||||
|
||||
rpcPayload := transmissionrpc.TorrentAddPayload{
|
||||
DownloadDir: &args.TempDir,
|
||||
}
|
||||
// http url for .torrent file
|
||||
if endpoint.Scheme == "http" || endpoint.Scheme == "https" {
|
||||
resp, err := http.Get(args.Url)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get .torrent file")
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
buffer := new(bytes.Buffer)
|
||||
encoder := base64.NewEncoder(base64.StdEncoding, buffer)
|
||||
// Stream file to the encoder
|
||||
if _, err = io.Copy(encoder, resp.Body); err != nil {
|
||||
return "", errors.Wrap(err, "can't copy file content into the base64 encoder")
|
||||
}
|
||||
// Flush last bytes
|
||||
if err = encoder.Close(); err != nil {
|
||||
return "", errors.Wrap(err, "can't flush last bytes of the base64 encoder")
|
||||
}
|
||||
// Get the string form
|
||||
b64 := buffer.String()
|
||||
rpcPayload.MetaInfo = &b64
|
||||
} else { // magnet uri
|
||||
rpcPayload.Filename = &args.Url
|
||||
}
|
||||
|
||||
torrent, err := t.client.TorrentAdd(context.TODO(), rpcPayload)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if torrent.ID == nil {
|
||||
return "", fmt.Errorf("failed get torrent ID")
|
||||
}
|
||||
gid := strconv.FormatInt(*torrent.ID, 10)
|
||||
return gid, nil
|
||||
}
|
||||
|
||||
func (t *Transmission) Remove(task *tool.DownloadTask) error {
|
||||
gid, err := strconv.ParseInt(task.GID, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = t.client.TorrentRemove(context.TODO(), transmissionrpc.TorrentRemovePayload{
|
||||
IDs: []int64{gid},
|
||||
DeleteLocalData: false,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *Transmission) Status(task *tool.DownloadTask) (*tool.Status, error) {
|
||||
gid, err := strconv.ParseInt(task.GID, 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
infos, err := t.client.TorrentGetAllFor(context.TODO(), []int64{gid})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(infos) < 1 {
|
||||
return nil, fmt.Errorf("failed get status, wrong gid: %s", task.GID)
|
||||
}
|
||||
info := infos[0]
|
||||
|
||||
s := &tool.Status{
|
||||
Completed: *info.IsFinished,
|
||||
Err: err,
|
||||
}
|
||||
s.Progress = *info.PercentDone * 100
|
||||
|
||||
switch *info.Status {
|
||||
case transmissionrpc.TorrentStatusCheckWait,
|
||||
transmissionrpc.TorrentStatusDownloadWait,
|
||||
transmissionrpc.TorrentStatusCheck,
|
||||
transmissionrpc.TorrentStatusDownload,
|
||||
transmissionrpc.TorrentStatusIsolated:
|
||||
s.Status = "[transmission] " + info.Status.String()
|
||||
case transmissionrpc.TorrentStatusSeedWait,
|
||||
transmissionrpc.TorrentStatusSeed:
|
||||
s.Completed = true
|
||||
case transmissionrpc.TorrentStatusStopped:
|
||||
s.Err = errors.Errorf("[transmission] failed to download %s, status: %s, error: %s", task.GID, info.Status.String(), *info.ErrorString)
|
||||
default:
|
||||
s.Err = errors.Errorf("[transmission] unknown status occurred downloading %s, err: %s", task.GID, *info.ErrorString)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
var _ tool.Tool = (*Transmission)(nil)
|
||||
|
||||
func init() {
|
||||
tool.Tools.Add(&Transmission{})
|
||||
}
|
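A configuration note that may help when reading Init above: transmissionrpc/v3 receives the whole RPC endpoint as a *url.URL, so any credentials would be embedded in the transmission_uri value itself rather than set through separate options (the address and credentials below are placeholders, not values from this PR). Also worth noting from the Run hook earlier in this diff: transmission_seedtime is interpreted in minutes, and a negative value skips the automatic torrent removal after the download completes.

package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/hekmon/transmissionrpc/v3"
)

func main() {
	// Placeholder endpoint; user:pass is carried inside the URL.
	endpoint, err := url.Parse("http://user:pass@127.0.0.1:9091/transmission/rpc")
	if err != nil {
		panic(err)
	}
	client, err := transmissionrpc.New(endpoint, nil)
	if err != nil {
		panic(err)
	}
	ok, serverVersion, serverMinimumVersion, err := client.RPCVersion(context.Background())
	fmt.Println(ok, serverVersion, serverMinimumVersion, err)
}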
@ -101,7 +101,7 @@ func initStorage(ctx context.Context, storage model.Storage, storageDriver drive
|
||||
log.Errorf("panic init storage: %s", errInfo)
|
||||
driverStorage.SetStatus(errInfo)
|
||||
MustSaveDriverStorage(storageDriver)
|
||||
storagesMap.Delete(driverStorage.MountPath)
|
||||
storagesMap.Store(driverStorage.MountPath, storageDriver)
|
||||
}
|
||||
}()
|
||||
// Unmarshal Addition
|
||||
|
internal/task/base.go (new file, 26 lines)
@ -0,0 +1,26 @@
package task

import (
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/xhofe/tache"
)

type TaskWithCreator struct {
	tache.Base
	Creator *model.User
}

func (t *TaskWithCreator) SetCreator(creator *model.User) {
	t.Creator = creator
	t.Persist()
}

func (t *TaskWithCreator) GetCreator() *model.User {
	return t.Creator
}

type TaskInfoWithCreator interface {
	tache.TaskWithInfo
	SetCreator(creator *model.User)
	GetCreator() *model.User
}
@ -1,20 +1,27 @@
|
||||
package random
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
mathRand "math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
var Rand *rand.Rand
|
||||
var Rand *mathRand.Rand
|
||||
|
||||
const letterBytes = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
|
||||
|
||||
func String(n int) string {
|
||||
b := make([]byte, n)
|
||||
letterLen := big.NewInt(int64(len(letterBytes)))
|
||||
for i := range b {
|
||||
b[i] = letterBytes[Rand.Intn(len(letterBytes))]
|
||||
idx, err := rand.Int(rand.Reader, letterLen)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
b[i] = letterBytes[idx.Int64()]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
@ -24,10 +31,10 @@ func Token() string {
|
||||
}
|
||||
|
||||
func RangeInt64(left, right int64) int64 {
|
||||
return rand.Int63n(left+right) - left
|
||||
return mathRand.Int63n(left+right) - left
|
||||
}
|
||||
|
||||
func init() {
|
||||
s := rand.NewSource(time.Now().UnixNano())
|
||||
Rand = rand.New(s)
|
||||
s := mathRand.NewSource(time.Now().UnixNano())
|
||||
Rand = mathRand.New(s)
|
||||
}
|
||||
|
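The switch to crypto/rand above also avoids modulo bias: rand.Int(rand.Reader, n) draws uniformly from [0, n), whereas reducing a raw random byte with % would slightly favour the first characters of the alphabet. A small standalone sketch of the same pattern (alphabet and length are arbitrary here):

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
)

const alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

// randomString mirrors the approach used in the random package above:
// one uniform draw per character, so no modulo bias is introduced.
func randomString(n int) (string, error) {
	b := make([]byte, n)
	letterCount := big.NewInt(int64(len(alphabet)))
	for i := range b {
		idx, err := rand.Int(rand.Reader, letterCount)
		if err != nil {
			return "", err
		}
		b[i] = alphabet[idx.Int64()]
	}
	return string(b), nil
}

func main() {
	s, err := randomString(16)
	fmt.Println(s, err)
}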
server/ftp.go (new file, 288 lines)
@ -0,0 +1,288 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/internal/setting"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/alist-org/alist/v3/server/ftp"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type FtpMainDriver struct {
|
||||
settings *ftpserver.Settings
|
||||
proxyHeader *http.Header
|
||||
clients map[uint32]ftpserver.ClientContext
|
||||
shutdownLock sync.RWMutex
|
||||
isShutdown bool
|
||||
tlsConfig *tls.Config
|
||||
}
|
||||
|
||||
func NewMainDriver() (*FtpMainDriver, error) {
|
||||
header := &http.Header{}
|
||||
header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent))
|
||||
transferType := ftpserver.TransferTypeASCII
|
||||
if conf.Conf.FTP.DefaultTransferBinary {
|
||||
transferType = ftpserver.TransferTypeBinary
|
||||
}
|
||||
activeConnCheck := ftpserver.IPMatchDisabled
|
||||
if conf.Conf.FTP.EnableActiveConnIPCheck {
|
||||
activeConnCheck = ftpserver.IPMatchRequired
|
||||
}
|
||||
pasvConnCheck := ftpserver.IPMatchDisabled
|
||||
if conf.Conf.FTP.EnablePasvConnIPCheck {
|
||||
pasvConnCheck = ftpserver.IPMatchRequired
|
||||
}
|
||||
tlsRequired := ftpserver.ClearOrEncrypted
|
||||
if setting.GetBool(conf.FTPImplicitTLS) {
|
||||
tlsRequired = ftpserver.ImplicitEncryption
|
||||
} else if setting.GetBool(conf.FTPMandatoryTLS) {
|
||||
tlsRequired = ftpserver.MandatoryEncryption
|
||||
}
|
||||
tlsConf, err := getTlsConf(setting.GetStr(conf.FTPTLSPrivateKeyPath), setting.GetStr(conf.FTPTLSPublicCertPath))
|
||||
if err != nil && tlsRequired != ftpserver.ClearOrEncrypted {
|
||||
return nil, fmt.Errorf("FTP mandatory TLS has been enabled, but the certificate failed to load: %w", err)
|
||||
}
|
||||
return &FtpMainDriver{
|
||||
settings: &ftpserver.Settings{
|
||||
ListenAddr: conf.Conf.FTP.Listen,
|
||||
PublicHost: lookupIP(setting.GetStr(conf.FTPPublicHost)),
|
||||
PassiveTransferPortGetter: newPortMapper(setting.GetStr(conf.FTPPasvPortMap)),
|
||||
FindPasvPortAttempts: conf.Conf.FTP.FindPasvPortAttempts,
|
||||
ActiveTransferPortNon20: conf.Conf.FTP.ActiveTransferPortNon20,
|
||||
IdleTimeout: conf.Conf.FTP.IdleTimeout,
|
||||
ConnectionTimeout: conf.Conf.FTP.ConnectionTimeout,
|
||||
DisableMLSD: false,
|
||||
DisableMLST: false,
|
||||
DisableMFMT: true,
|
||||
Banner: setting.GetStr(conf.Announcement),
|
||||
TLSRequired: tlsRequired,
|
||||
DisableLISTArgs: false,
|
||||
DisableSite: false,
|
||||
DisableActiveMode: conf.Conf.FTP.DisableActiveMode,
|
||||
EnableHASH: false,
|
||||
DisableSTAT: false,
|
||||
DisableSYST: false,
|
||||
EnableCOMB: false,
|
||||
DefaultTransferType: transferType,
|
||||
ActiveConnectionsCheck: activeConnCheck,
|
||||
PasvConnectionsCheck: pasvConnCheck,
|
||||
SiteHandlers: map[string]ftpserver.SiteHandler{
|
||||
"SIZE": ftp.HandleSIZE,
|
||||
},
|
||||
},
|
||||
proxyHeader: header,
|
||||
clients: make(map[uint32]ftpserver.ClientContext),
|
||||
shutdownLock: sync.RWMutex{},
|
||||
isShutdown: false,
|
||||
tlsConfig: tlsConf,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *FtpMainDriver) GetSettings() (*ftpserver.Settings, error) {
|
||||
return d.settings, nil
|
||||
}
|
||||
|
||||
func (d *FtpMainDriver) ClientConnected(cc ftpserver.ClientContext) (string, error) {
|
||||
if d.isShutdown || !d.shutdownLock.TryRLock() {
|
||||
return "", errors.New("server has shutdown")
|
||||
}
|
||||
defer d.shutdownLock.RUnlock()
|
||||
d.clients[cc.ID()] = cc
|
||||
return "AList FTP Endpoint", nil
|
||||
}
|
||||
|
||||
func (d *FtpMainDriver) ClientDisconnected(cc ftpserver.ClientContext) {
|
||||
err := cc.Close()
|
||||
if err != nil {
|
||||
utils.Log.Errorf("failed to close client: %v", err)
|
||||
}
|
||||
delete(d.clients, cc.ID())
|
||||
}
|
||||
|
||||
func (d *FtpMainDriver) AuthUser(cc ftpserver.ClientContext, user, pass string) (ftpserver.ClientDriver, error) {
|
||||
var userObj *model.User
|
||||
var err error
|
||||
if user == "anonymous" || user == "guest" {
|
||||
userObj, err = op.GetGuest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
userObj, err = op.GetUserByName(user)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
passHash := model.StaticHash(pass)
|
||||
if err = userObj.ValidatePwdStaticHash(passHash); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if userObj.Disabled || !userObj.CanFTPAccess() {
|
||||
return nil, errors.New("user is not allowed to access via FTP")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
ctx = context.WithValue(ctx, "user", userObj)
|
||||
if user == "anonymous" || user == "guest" {
|
||||
ctx = context.WithValue(ctx, "meta_pass", pass)
|
||||
} else {
|
||||
ctx = context.WithValue(ctx, "meta_pass", "")
|
||||
}
|
||||
ctx = context.WithValue(ctx, "client_ip", cc.RemoteAddr().String())
|
||||
ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader)
|
||||
return ftp.NewAferoAdapter(ctx), nil
|
||||
}
|
||||
|
||||
func (d *FtpMainDriver) GetTLSConfig() (*tls.Config, error) {
|
||||
if d.tlsConfig == nil {
|
||||
return nil, errors.New("TLS config not provided")
|
||||
}
|
||||
return d.tlsConfig, nil
|
||||
}
|
||||
|
||||
func (d *FtpMainDriver) Stop() {
|
||||
d.isShutdown = true
|
||||
d.shutdownLock.Lock()
|
||||
defer d.shutdownLock.Unlock()
|
||||
for _, value := range d.clients {
|
||||
_ = value.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func lookupIP(host string) string {
|
||||
if host == "" || net.ParseIP(host) != nil {
|
||||
return host
|
||||
}
|
||||
ips, err := net.LookupIP(host)
|
||||
if err != nil || len(ips) == 0 {
|
||||
utils.Log.Fatalf("given FTP public host is invalid, and the default value will be used: %v", err)
|
||||
return ""
|
||||
}
|
||||
for _, ip := range ips {
|
||||
if ip.To4() != nil {
|
||||
return ip.String()
|
||||
}
|
||||
}
|
||||
v6 := ips[0].String()
|
||||
utils.Log.Warnf("no IPv4 record looked up, %s will be used as public host, and it might do not work.", v6)
|
||||
return v6
|
||||
}
|
||||
|
||||
func newPortMapper(str string) ftpserver.PasvPortGetter {
|
||||
if str == "" {
|
||||
return nil
|
||||
}
|
||||
pasvPortMappers := strings.Split(strings.Replace(str, "\n", ",", -1), ",")
|
||||
type group struct {
|
||||
ExposedStart int
|
||||
ListenedStart int
|
||||
Length int
|
||||
}
|
||||
groups := make([]group, len(pasvPortMappers))
|
||||
totalLength := 0
|
||||
convertToPorts := func(str string) (int, int, error) {
|
||||
start, end, multi := strings.Cut(str, "-")
|
||||
if multi {
|
||||
si, err := strconv.Atoi(start)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
ei, err := strconv.Atoi(end)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if ei < si || ei < 1024 || si < 1024 || ei > 65535 || si > 65535 {
|
||||
return 0, 0, errors.New("invalid port")
|
||||
}
|
||||
return si, ei - si + 1, nil
|
||||
} else {
|
||||
ret, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
} else {
|
||||
return ret, 1, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
for i, mapper := range pasvPortMappers {
|
||||
var err error
|
||||
exposed, listened, mapped := strings.Cut(mapper, ":")
|
||||
for {
|
||||
if mapped {
|
||||
var es, ls, el, ll int
|
||||
es, el, err = convertToPorts(exposed)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
ls, ll, err = convertToPorts(listened)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if el != ll {
|
||||
err = errors.New("the number of exposed ports and listened ports does not match")
|
||||
break
|
||||
}
|
||||
groups[i].ExposedStart = es
|
||||
groups[i].ListenedStart = ls
|
||||
groups[i].Length = el
|
||||
totalLength += el
|
||||
} else {
|
||||
var start, length int
|
||||
start, length, err = convertToPorts(mapper)
|
||||
groups[i].ExposedStart = start
|
||||
groups[i].ListenedStart = start
|
||||
groups[i].Length = length
|
||||
totalLength += length
|
||||
}
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
utils.Log.Fatalf("failed to convert FTP PASV port mapper %s: %v, the port mapper will be ignored.", mapper, err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return func() (int, int, bool) {
|
||||
idxPort := rand.Intn(totalLength)
|
||||
for _, g := range groups {
|
||||
if idxPort >= g.Length {
|
||||
idxPort -= g.Length
|
||||
} else {
|
||||
return g.ExposedStart + idxPort, g.ListenedStart + idxPort, true
|
||||
}
|
||||
}
|
||||
// unreachable
|
||||
return 0, 0, false
|
||||
}
|
||||
}
|
||||
|
||||
func getTlsConf(keyPath, certPath string) (*tls.Config, error) {
|
||||
if keyPath == "" || certPath == "" {
|
||||
return nil, errors.New("private key or certificate is not provided")
|
||||
}
|
||||
cert, err := os.ReadFile(certPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key, err := os.ReadFile(keyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsCert, err := tls.X509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &tls.Config{Certificates: []tls.Certificate{tlsCert}}, nil
|
||||
}
|
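Following from newPortMapper above: the ftp_pasv_port_map setting accepts comma- or newline-separated entries, each either a single port, a start-end range, or an exposed:listened pair of equal-length ranges (useful when the ports clients must dial differ from the ports the server binds, e.g. behind NAT or Docker port remapping). All ports must lie in 1024-65535. A hedged example value (the concrete ports are placeholders):

package main

import "fmt"

// examplePasvPortMap is an illustrative value for the ftp_pasv_port_map setting:
//   - "20000-20099:30000-30099" advertises 20000-20099 to clients while the
//     server actually listens on 30000-30099 (both ranges must be the same size);
//   - "21000" maps a single port to itself.
const examplePasvPortMap = "20000-20099:30000-30099,21000"

func main() {
	fmt.Println(examplePasvPortMap)
}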
server/ftp/afero.go (new file, 115 lines)
@ -0,0 +1,115 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/fs"
|
||||
"github.com/spf13/afero"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AferoAdapter struct {
|
||||
ctx context.Context
|
||||
nextFileSize int64
|
||||
}
|
||||
|
||||
func NewAferoAdapter(ctx context.Context) *AferoAdapter {
|
||||
return &AferoAdapter{ctx: ctx}
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Create(_ string) (afero.File, error) {
|
||||
// See also GetHandle
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Mkdir(name string, _ os.FileMode) error {
|
||||
return Mkdir(a.ctx, name)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) MkdirAll(path string, perm os.FileMode) error {
|
||||
return a.Mkdir(path, perm)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Open(_ string) (afero.File, error) {
|
||||
// See also GetHandle and ReadDir
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) OpenFile(_ string, _ int, _ os.FileMode) (afero.File, error) {
|
||||
// See also GetHandle
|
||||
return nil, errs.NotImplement
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Remove(name string) error {
|
||||
return Remove(a.ctx, name)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) RemoveAll(path string) error {
|
||||
return a.Remove(path)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Rename(oldName, newName string) error {
|
||||
return Rename(a.ctx, oldName, newName)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Stat(name string) (os.FileInfo, error) {
|
||||
return Stat(a.ctx, name)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Name() string {
|
||||
return "AList FTP Endpoint"
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Chmod(_ string, _ os.FileMode) error {
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Chown(_ string, _, _ int) error {
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) Chtimes(_ string, _ time.Time, _ time.Time) error {
|
||||
return errs.NotSupport
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) ReadDir(name string) ([]os.FileInfo, error) {
|
||||
return List(a.ctx, name)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) GetHandle(name string, flags int, offset int64) (ftpserver.FileTransfer, error) {
|
||||
fileSize := a.nextFileSize
|
||||
a.nextFileSize = 0
|
||||
if offset != 0 {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
if (flags & os.O_SYNC) != 0 {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
if (flags & os.O_APPEND) != 0 {
|
||||
return nil, errs.NotSupport
|
||||
}
|
||||
_, err := fs.Get(a.ctx, name, &fs.GetArgs{})
|
||||
exists := err == nil
|
||||
if (flags&os.O_CREATE) == 0 && !exists {
|
||||
return nil, errs.ObjectNotFound
|
||||
}
|
||||
if (flags&os.O_EXCL) != 0 && exists {
|
||||
return nil, errors.New("file already exists")
|
||||
}
|
||||
if (flags & os.O_WRONLY) != 0 {
|
||||
trunc := (flags & os.O_TRUNC) != 0
|
||||
if fileSize > 0 {
|
||||
return OpenUploadWithLength(a.ctx, name, trunc, fileSize)
|
||||
} else {
|
||||
return OpenUpload(a.ctx, name, trunc)
|
||||
}
|
||||
}
|
||||
return OpenDownload(a.ctx, name)
|
||||
}
|
||||
|
||||
func (a *AferoAdapter) SetNextFileSize(size int64) {
|
||||
a.nextFileSize = size
|
||||
}
|
server/ftp/const.go (new file, 11 lines)
@ -0,0 +1,11 @@
package ftp

// From leffss/sftpd
const (
	SSH_FXF_READ   = 0x00000001
	SSH_FXF_WRITE  = 0x00000002
	SSH_FXF_APPEND = 0x00000004
	SSH_FXF_CREAT  = 0x00000008
	SSH_FXF_TRUNC  = 0x00000010
	SSH_FXF_EXCL   = 0x00000020
)
server/ftp/fsmanage.go (new file, 75 lines)
@ -0,0 +1,75 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/fs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"github.com/pkg/errors"
|
||||
stdpath "path"
|
||||
)
|
||||
|
||||
func Mkdir(ctx context.Context, path string) error {
|
||||
user := ctx.Value("user").(*model.User)
|
||||
reqPath, err := user.JoinPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !user.CanWrite() || !user.CanFTPManage() {
|
||||
meta, err := op.GetNearestMeta(stdpath.Dir(reqPath))
|
||||
if err != nil {
|
||||
if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !common.CanWrite(meta, reqPath) {
|
||||
return errs.PermissionDenied
|
||||
}
|
||||
}
|
||||
return fs.MakeDir(ctx, reqPath)
|
||||
}
|
||||
|
||||
func Remove(ctx context.Context, path string) error {
|
||||
user := ctx.Value("user").(*model.User)
|
||||
if !user.CanRemove() || !user.CanFTPManage() {
|
||||
return errs.PermissionDenied
|
||||
}
|
||||
reqPath, err := user.JoinPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fs.Remove(ctx, reqPath)
|
||||
}
|
||||
|
||||
func Rename(ctx context.Context, oldPath, newPath string) error {
|
||||
user := ctx.Value("user").(*model.User)
|
||||
srcPath, err := user.JoinPath(oldPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dstPath, err := user.JoinPath(newPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcDir, srcBase := stdpath.Split(srcPath)
|
||||
dstDir, dstBase := stdpath.Split(dstPath)
|
||||
if srcDir == dstDir {
|
||||
if !user.CanRename() || !user.CanFTPManage() {
|
||||
return errs.PermissionDenied
|
||||
}
|
||||
return fs.Rename(ctx, srcPath, dstBase)
|
||||
} else {
|
||||
if !user.CanFTPManage() || !user.CanMove() || (srcBase != dstBase && !user.CanRename()) {
|
||||
return errs.PermissionDenied
|
||||
}
|
||||
if err := fs.Move(ctx, srcPath, dstDir); err != nil {
|
||||
return err
|
||||
}
|
||||
if srcBase != dstBase {
|
||||
return fs.Rename(ctx, stdpath.Join(dstDir, srcBase), dstBase)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
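Rename above splits into two cases: if source and destination share the same parent directory it is a plain fs.Rename, otherwise the object is first moved with fs.Move and then renamed only when the base name also differs. A small worked sketch of how the paths are split (the paths are made up for illustration):

package main

import (
	"fmt"
	stdpath "path"
)

func main() {
	// Same parent: only fs.Rename would be needed.
	srcDir, srcBase := stdpath.Split("/docs/a.txt")
	dstDir, dstBase := stdpath.Split("/docs/b.txt")
	fmt.Println(srcDir == dstDir, srcBase, dstBase) // true a.txt b.txt

	// Different parent and different base name: fs.Move to /backup/,
	// then fs.Rename of /backup/a.txt to c.txt.
	srcDir, srcBase = stdpath.Split("/docs/a.txt")
	dstDir, dstBase = stdpath.Split("/backup/c.txt")
	fmt.Println(srcDir == dstDir, srcBase != dstBase) // false true
}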
server/ftp/fsread.go (new file, 188 lines)
@ -0,0 +1,188 @@
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/fs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/net"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/alist-org/alist/v3/server/common"
|
||||
"github.com/pkg/errors"
|
||||
"io"
|
||||
fs2 "io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
type FileDownloadProxy struct {
|
||||
ftpserver.FileTransfer
|
||||
reader io.ReadCloser
|
||||
closers *utils.Closers
|
||||
}
|
||||
|
||||
func OpenDownload(ctx context.Context, path string) (*FileDownloadProxy, error) {
|
||||
user := ctx.Value("user").(*model.User)
|
||||
reqPath, err := user.JoinPath(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
meta, err := op.GetNearestMeta(reqPath)
|
||||
if err != nil {
|
||||
if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
ctx = context.WithValue(ctx, "meta", meta)
|
||||
if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) {
|
||||
return nil, errs.PermissionDenied
|
||||
}
|
||||
|
||||
// directly use proxy
|
||||
	header := *(ctx.Value("proxy_header").(*http.Header))
	link, obj, err := fs.Link(ctx, reqPath, model.LinkArgs{
		IP:     ctx.Value("client_ip").(string),
		Header: header,
	})
	if err != nil {
		return nil, err
	}
	storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
	if err != nil {
		return nil, err
	}
	if storage.GetStorage().ProxyRange {
		common.ProxyRange(link, obj.GetSize())
	}
	reader, closers, err := proxy(link)
	if err != nil {
		return nil, err
	}
	return &FileDownloadProxy{reader: reader, closers: closers}, nil
}

func proxy(link *model.Link) (io.ReadCloser, *utils.Closers, error) {
	if link.MFile != nil {
		return link.MFile, nil, nil
	} else if link.RangeReadCloser != nil {
		rc, err := link.RangeReadCloser.RangeRead(context.Background(), http_range.Range{Length: -1})
		if err != nil {
			return nil, nil, err
		}
		closers := link.RangeReadCloser.GetClosers()
		return rc, &closers, nil
	} else {
		res, err := net.RequestHttp(context.Background(), http.MethodGet, link.Header, link.URL)
		if err != nil {
			return nil, nil, err
		}
		return res.Body, nil, nil
	}
}

func (f *FileDownloadProxy) Read(p []byte) (n int, err error) {
	return f.reader.Read(p)
}

func (f *FileDownloadProxy) Write(p []byte) (n int, err error) {
	return 0, errs.NotSupport
}

func (f *FileDownloadProxy) Seek(offset int64, whence int) (int64, error) {
	return 0, errs.NotSupport
}

func (f *FileDownloadProxy) Close() error {
	defer func() {
		if f.closers != nil {
			_ = f.closers.Close()
		}
	}()
	return f.reader.Close()
}

type OsFileInfoAdapter struct {
	obj model.Obj
}

func (o *OsFileInfoAdapter) Name() string {
	return o.obj.GetName()
}

func (o *OsFileInfoAdapter) Size() int64 {
	return o.obj.GetSize()
}

func (o *OsFileInfoAdapter) Mode() fs2.FileMode {
	var mode fs2.FileMode = 0755
	if o.IsDir() {
		mode |= fs2.ModeDir
	}
	return mode
}

func (o *OsFileInfoAdapter) ModTime() time.Time {
	return o.obj.ModTime()
}

func (o *OsFileInfoAdapter) IsDir() bool {
	return o.obj.IsDir()
}

func (o *OsFileInfoAdapter) Sys() any {
	return o.obj
}

func Stat(ctx context.Context, path string) (os.FileInfo, error) {
	user := ctx.Value("user").(*model.User)
	reqPath, err := user.JoinPath(path)
	if err != nil {
		return nil, err
	}
	meta, err := op.GetNearestMeta(reqPath)
	if err != nil {
		if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
			return nil, err
		}
	}
	ctx = context.WithValue(ctx, "meta", meta)
	if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) {
		return nil, errs.PermissionDenied
	}
	obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{})
	if err != nil {
		return nil, err
	}
	return &OsFileInfoAdapter{obj: obj}, nil
}

func List(ctx context.Context, path string) ([]os.FileInfo, error) {
	user := ctx.Value("user").(*model.User)
	reqPath, err := user.JoinPath(path)
	if err != nil {
		return nil, err
	}
	meta, err := op.GetNearestMeta(reqPath)
	if err != nil {
		if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
			return nil, err
		}
	}
	ctx = context.WithValue(ctx, "meta", meta)
	if !common.CanAccess(user, meta, reqPath, ctx.Value("meta_pass").(string)) {
		return nil, errs.PermissionDenied
	}
	objs, err := fs.List(ctx, reqPath, &fs.ListArgs{})
	if err != nil {
		return nil, err
	}
	ret := make([]os.FileInfo, len(objs))
	for i, obj := range objs {
		ret[i] = &OsFileInfoAdapter{obj: obj}
	}
	return ret, nil
}
209 server/ftp/fsup.go Normal file
@@ -0,0 +1,209 @@
package ftp

import (
	"bytes"
	"context"
	ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/fs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/pkg/errors"
	"io"
	"net/http"
	"os"
	stdpath "path"
	"time"
)

type FileUploadProxy struct {
	ftpserver.FileTransfer
	buffer *os.File
	path   string
	ctx    context.Context
	trunc  bool
}

func uploadAuth(ctx context.Context, path string) error {
	user := ctx.Value("user").(*model.User)
	path, err := user.JoinPath(path)
	if err != nil {
		return err
	}
	meta, err := op.GetNearestMeta(stdpath.Dir(path))
	if err != nil {
		if !errors.Is(errors.Cause(err), errs.MetaNotFound) {
			return err
		}
	}
	if !(common.CanAccess(user, meta, path, ctx.Value("meta_pass").(string)) &&
		((user.CanFTPManage() && user.CanWrite()) || common.CanWrite(meta, stdpath.Dir(path)))) {
		return errs.PermissionDenied
	}
	return nil
}

func OpenUpload(ctx context.Context, path string, trunc bool) (*FileUploadProxy, error) {
	err := uploadAuth(ctx, path)
	if err != nil {
		return nil, err
	}
	tmpFile, err := os.CreateTemp(conf.Conf.TempDir, "file-*")
	if err != nil {
		return nil, err
	}
	return &FileUploadProxy{buffer: tmpFile, path: path, ctx: ctx, trunc: trunc}, nil
}

func (f *FileUploadProxy) Read(p []byte) (n int, err error) {
	return 0, errs.NotSupport
}

func (f *FileUploadProxy) Write(p []byte) (n int, err error) {
	return f.buffer.Write(p)
}

func (f *FileUploadProxy) Seek(offset int64, whence int) (int64, error) {
	return 0, errs.NotSupport
}

func (f *FileUploadProxy) Close() error {
	dir, name := stdpath.Split(f.path)
	size, err := f.buffer.Seek(0, io.SeekCurrent)
	if err != nil {
		return err
	}
	if _, err := f.buffer.Seek(0, io.SeekStart); err != nil {
		return err
	}
	arr := make([]byte, 512)
	if _, err := f.buffer.Read(arr); err != nil {
		return err
	}
	contentType := http.DetectContentType(arr)
	if _, err := f.buffer.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if f.trunc {
		_ = fs.Remove(f.ctx, f.path)
	}
	s := &stream.FileStream{
		Obj: &model.Object{
			Name:     name,
			Size:     size,
			Modified: time.Now(),
		},
		Mimetype:     contentType,
		WebPutAsTask: true,
	}
	s.SetTmpFile(f.buffer)
	s.Closers.Add(f.buffer)
	_, err = fs.PutAsTask(f.ctx, dir, s)
	return err
}

type FileUploadWithLengthProxy struct {
	ftpserver.FileTransfer
	ctx           context.Context
	path          string
	length        int64
	first512Bytes [512]byte
	pFirst        int
	pipeWriter    io.WriteCloser
	errChan       chan error
}

func OpenUploadWithLength(ctx context.Context, path string, trunc bool, length int64) (*FileUploadWithLengthProxy, error) {
	err := uploadAuth(ctx, path)
	if err != nil {
		return nil, err
	}
	if trunc {
		_ = fs.Remove(ctx, path)
	}
	return &FileUploadWithLengthProxy{ctx: ctx, path: path, length: length}, nil
}

func (f *FileUploadWithLengthProxy) Read(p []byte) (n int, err error) {
	return 0, errs.NotSupport
}

func (f *FileUploadWithLengthProxy) Write(p []byte) (n int, err error) {
	if f.pipeWriter != nil {
		select {
		case e := <-f.errChan:
			return 0, e
		default:
			return f.pipeWriter.Write(p)
		}
	} else if len(p) < 512-f.pFirst {
		copy(f.first512Bytes[f.pFirst:], p)
		f.pFirst += len(p)
		return len(p), nil
	} else {
		copy(f.first512Bytes[f.pFirst:], p[:512-f.pFirst])
		contentType := http.DetectContentType(f.first512Bytes[:])
		dir, name := stdpath.Split(f.path)
		reader, writer := io.Pipe()
		f.errChan = make(chan error, 1)
		s := &stream.FileStream{
			Obj: &model.Object{
				Name:     name,
				Size:     f.length,
				Modified: time.Now(),
			},
			Mimetype:     contentType,
			WebPutAsTask: false,
			Reader:       reader,
		}
		go func() {
			e := fs.PutDirectly(f.ctx, dir, s, true)
			f.errChan <- e
			close(f.errChan)
		}()
		f.pipeWriter = writer
		n, err = writer.Write(f.first512Bytes[:])
		if err != nil {
			return n, err
		}
		n1, err := writer.Write(p[512-f.pFirst:])
		if err != nil {
			return n1 + 512 - f.pFirst, err
		}
		f.pFirst = 512
		return len(p), nil
	}
}

func (f *FileUploadWithLengthProxy) Seek(offset int64, whence int) (int64, error) {
	return 0, errs.NotSupport
}

func (f *FileUploadWithLengthProxy) Close() error {
	if f.pipeWriter != nil {
		err := f.pipeWriter.Close()
		if err != nil {
			return err
		}
		err = <-f.errChan
		return err
	} else {
		data := f.first512Bytes[:f.pFirst]
		contentType := http.DetectContentType(data)
		dir, name := stdpath.Split(f.path)
		s := &stream.FileStream{
			Obj: &model.Object{
				Name:     name,
				Size:     int64(f.pFirst),
				Modified: time.Now(),
			},
			Mimetype:     contentType,
			WebPutAsTask: false,
			Reader:       bytes.NewReader(data),
		}
		return fs.PutDirectly(f.ctx, dir, s, true)
	}
}
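A note on the upload proxies above: both paths rely on the Go standard library's MIME sniffing, which only ever inspects the first 512 bytes of data; that is why FileUploadWithLengthProxy buffers exactly 512 bytes before switching to the pipe. A minimal standalone sketch of that step follows (not part of the diff; the sample bytes are made up):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.DetectContentType considers at most the first 512 bytes it is given.
	firstBytes := []byte("%PDF-1.7") // pretend these are the first bytes of an upload
	fmt.Println(http.DetectContentType(firstBytes)) // prints "application/pdf"
}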
122 server/ftp/sftp.go Normal file
@@ -0,0 +1,122 @@
package ftp

import (
	"github.com/KirCute/sftpd-alist"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"os"
)

type SftpDriverAdapter struct {
	FtpDriver *AferoAdapter
}

func (s *SftpDriverAdapter) OpenFile(_ string, _ uint32, _ *sftpd.Attr) (sftpd.File, error) {
	// See also GetHandle
	return nil, errs.NotImplement
}

func (s *SftpDriverAdapter) OpenDir(_ string) (sftpd.Dir, error) {
	// See also GetHandle
	return nil, errs.NotImplement
}

func (s *SftpDriverAdapter) Remove(name string) error {
	return s.FtpDriver.Remove(name)
}

func (s *SftpDriverAdapter) Rename(old, new string, _ uint32) error {
	return s.FtpDriver.Rename(old, new)
}

func (s *SftpDriverAdapter) Mkdir(name string, attr *sftpd.Attr) error {
	return s.FtpDriver.Mkdir(name, attr.Mode)
}

func (s *SftpDriverAdapter) Rmdir(name string) error {
	return s.Remove(name)
}

func (s *SftpDriverAdapter) Stat(name string, _ bool) (*sftpd.Attr, error) {
	stat, err := s.FtpDriver.Stat(name)
	if err != nil {
		return nil, err
	}
	return fileInfoToSftpAttr(stat), nil
}

func (s *SftpDriverAdapter) SetStat(_ string, _ *sftpd.Attr) error {
	return errs.NotSupport
}

func (s *SftpDriverAdapter) ReadLink(_ string) (string, error) {
	return "", errs.NotSupport
}

func (s *SftpDriverAdapter) CreateLink(_, _ string, _ uint32) error {
	return errs.NotSupport
}

func (s *SftpDriverAdapter) RealPath(path string) (string, error) {
	return utils.FixAndCleanPath(path), nil
}

func (s *SftpDriverAdapter) GetHandle(name string, flags uint32, _ *sftpd.Attr, offset uint64) (sftpd.FileTransfer, error) {
	return s.FtpDriver.GetHandle(name, sftpFlagToOpenMode(flags), int64(offset))
}

func (s *SftpDriverAdapter) ReadDir(name string) ([]sftpd.NamedAttr, error) {
	dir, err := s.FtpDriver.ReadDir(name)
	if err != nil {
		return nil, err
	}
	ret := make([]sftpd.NamedAttr, len(dir))
	for i, d := range dir {
		ret[i] = *fileInfoToSftpNamedAttr(d)
	}
	return ret, nil
}

// From leffss/sftpd
func sftpFlagToOpenMode(flags uint32) int {
	mode := 0
	if (flags & SSH_FXF_READ) != 0 {
		mode |= os.O_RDONLY
	}
	if (flags & SSH_FXF_WRITE) != 0 {
		mode |= os.O_WRONLY
	}
	if (flags & SSH_FXF_APPEND) != 0 {
		mode |= os.O_APPEND
	}
	if (flags & SSH_FXF_CREAT) != 0 {
		mode |= os.O_CREATE
	}
	if (flags & SSH_FXF_TRUNC) != 0 {
		mode |= os.O_TRUNC
	}
	if (flags & SSH_FXF_EXCL) != 0 {
		mode |= os.O_EXCL
	}
	return mode
}

func fileInfoToSftpAttr(stat os.FileInfo) *sftpd.Attr {
	ret := &sftpd.Attr{}
	ret.Flags |= sftpd.ATTR_SIZE
	ret.Size = uint64(stat.Size())
	ret.Flags |= sftpd.ATTR_MODE
	ret.Mode = stat.Mode()
	ret.Flags |= sftpd.ATTR_TIME
	ret.ATime = stat.Sys().(model.Obj).CreateTime()
	ret.MTime = stat.ModTime()
	return ret
}

func fileInfoToSftpNamedAttr(stat os.FileInfo) *sftpd.NamedAttr {
	return &sftpd.NamedAttr{
		Name: stat.Name(),
		Attr: *fileInfoToSftpAttr(stat),
	}
}
21 server/ftp/site.go Normal file
@@ -0,0 +1,21 @@
package ftp

import (
	"fmt"
	ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
	"strconv"
)

func HandleSIZE(param string, client ftpserver.ClientDriver) (int, string) {
	fs, ok := client.(*AferoAdapter)
	if !ok {
		return ftpserver.StatusNotLoggedIn, "Unexpected exception (driver is nil)"
	}
	size, err := strconv.ParseInt(param, 10, 64)
	if err != nil {
		return ftpserver.StatusSyntaxErrorParameters, fmt.Sprintf(
			"Couldn't parse file size, given: %s, err: %v", param, err)
	}
	fs.SetNextFileSize(size)
	return ftpserver.StatusOK, "Accepted next file size"
}
@@ -2,7 +2,7 @@ package handles

import (
	"fmt"
	"github.com/xhofe/tache"
	"github.com/alist-org/alist/v3/internal/task"
	"io"
	stdpath "path"

@@ -121,7 +121,7 @@ func FsCopy(c *gin.Context) {
		common.ErrorResp(c, err, 403)
		return
	}
	var addedTasks []tache.TaskWithInfo
	var addedTasks []task.TaskInfoWithCreator
	for i, name := range req.Names {
		t, err := fs.Copy(c, stdpath.Join(srcDir, name), dstDir, len(req.Names) > i+1)
		if t != nil {
@@ -1,17 +1,16 @@
package handles

import (
	"github.com/xhofe/tache"
	"github.com/alist-org/alist/v3/internal/task"
	"io"
	"net/url"
	stdpath "path"
	"strconv"
	"time"

	"github.com/alist-org/alist/v3/internal/stream"

	"github.com/alist-org/alist/v3/internal/fs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/stream"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/gin-gonic/gin"
)
@@ -58,9 +57,9 @@ func FsStream(c *gin.Context) {
		Mimetype:     c.GetHeader("Content-Type"),
		WebPutAsTask: asTask,
	}
	var t tache.TaskWithInfo
	var t task.TaskInfoWithCreator
	if asTask {
		t, err = fs.PutAsTask(dir, s)
		t, err = fs.PutAsTask(c, dir, s)
	} else {
		err = fs.PutDirectly(c, dir, s, true)
	}
@@ -123,12 +122,12 @@ func FsForm(c *gin.Context) {
		Mimetype:     file.Header.Get("Content-Type"),
		WebPutAsTask: asTask,
	}
	var t tache.TaskWithInfo
	var t task.TaskInfoWithCreator
	if asTask {
		s.Reader = struct {
			io.Reader
		}{f}
		t, err = fs.PutAsTask(dir, &s)
		t, err = fs.PutAsTask(c, dir, &s)
	} else {
		ss, err := stream.NewSeekableStream(s, nil)
		if err != nil {
@@ -5,9 +5,9 @@ import (
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/offline_download/tool"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/task"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/gin-gonic/gin"
	"github.com/xhofe/tache"
)

type SetAria2Req struct {
@@ -30,6 +30,10 @@ func SetAria2(c *gin.Context) {
		return
	}
	_tool, err := tool.Tools.Get("aria2")
	if err != nil {
		common.ErrorResp(c, err, 500)
		return
	}
	version, err := _tool.Init()
	if err != nil {
		common.ErrorResp(c, err, 500)
@@ -74,6 +78,37 @@ func OfflineDownloadTools(c *gin.Context) {
	common.SuccessResp(c, tools)
}

type SetTransmissionReq struct {
	Uri      string `json:"uri" form:"uri"`
	Seedtime string `json:"seedtime" form:"seedtime"`
}

func SetTransmission(c *gin.Context) {
	var req SetTransmissionReq
	if err := c.ShouldBind(&req); err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	items := []model.SettingItem{
		{Key: conf.TransmissionUri, Value: req.Uri, Type: conf.TypeString, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
		{Key: conf.TransmissionSeedtime, Value: req.Seedtime, Type: conf.TypeNumber, Group: model.OFFLINE_DOWNLOAD, Flag: model.PRIVATE},
	}
	if err := op.SaveSettingItems(items); err != nil {
		common.ErrorResp(c, err, 500)
		return
	}
	_tool, err := tool.Tools.Get("transmission")
	if err != nil {
		common.ErrorResp(c, err, 500)
		return
	}
	if _, err := _tool.Init(); err != nil {
		common.ErrorResp(c, err, 500)
		return
	}
	common.SuccessResp(c, "ok")
}

type AddOfflineDownloadReq struct {
	Urls []string `json:"urls"`
	Path string   `json:"path"`
@@ -98,7 +133,7 @@ func AddOfflineDownload(c *gin.Context) {
		common.ErrorResp(c, err, 403)
		return
	}
	var tasks []tache.TaskWithInfo
	var tasks []task.TaskInfoWithCreator
	for _, url := range req.Urls {
		t, err := tool.AddURL(c, &tool.AddURLArgs{
			URL: url,
@@ -1,7 +1,6 @@
package handles

import (
	"encoding/base32"
	"encoding/base64"
	"errors"
	"fmt"
@@ -11,6 +10,8 @@ import (
	"strings"
	"time"

	"github.com/Xhofe/go-cache"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/db"
	"github.com/alist-org/alist/v3/internal/model"
@@ -21,29 +22,45 @@ import (
	"github.com/coreos/go-oidc"
	"github.com/gin-gonic/gin"
	"github.com/go-resty/resty/v2"
	"github.com/pquerna/otp"
	"github.com/pquerna/otp/totp"
	"golang.org/x/oauth2"
	"gorm.io/gorm"
)

var opts = totp.ValidateOpts{
	// state verify won't expire in 30 secs, which is quite enough for the callback
	Period: 30,
	Skew:   1,
	// in some OIDC providers (such as Authelia), the state parameter must be at least 8 characters
	Digits:    otp.DigitsEight,
	Algorithm: otp.AlgorithmSHA1,
const stateLength = 16
const stateExpire = time.Minute * 5

var stateCache = cache.NewMemCache[string](cache.WithShards[string](stateLength))

func _keyState(clientID, state string) string {
	return fmt.Sprintf("%s_%s", clientID, state)
}

func generateState(clientID, ip string) string {
	state := random.String(stateLength)
	stateCache.Set(_keyState(clientID, state), ip, cache.WithEx[string](stateExpire))
	return state
}

func verifyState(clientID, ip, state string) bool {
	value, ok := stateCache.Get(_keyState(clientID, state))
	return ok && value == ip
}

func ssoRedirectUri(c *gin.Context, useCompatibility bool, method string) string {
	if useCompatibility {
		return common.GetApiUrl(c.Request) + "/api/auth/" + method
	} else {
		return common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method
	}
}

func SSOLoginRedirect(c *gin.Context) {
	method := c.Query("method")
	usecompatibility := setting.GetBool(conf.SSOCompatibilityMode)
	useCompatibility := setting.GetBool(conf.SSOCompatibilityMode)
	enabled := setting.GetBool(conf.SSOLoginEnabled)
	clientId := setting.GetStr(conf.SSOClientId)
	platform := setting.GetStr(conf.SSOLoginPlatform)
	var r_url string
	var redirect_uri string
	var rUrl string
	if !enabled {
		common.ErrorStrResp(c, "Single sign-on is not enabled", 403)
		return
@@ -53,69 +70,52 @@ func SSOLoginRedirect(c *gin.Context) {
		common.ErrorStrResp(c, "no method provided", 400)
		return
	}
	if usecompatibility {
		redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + method
	} else {
		redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + method
	}
	redirectUri := ssoRedirectUri(c, useCompatibility, method)
	urlValues.Add("response_type", "code")
	urlValues.Add("redirect_uri", redirect_uri)
	urlValues.Add("redirect_uri", redirectUri)
	urlValues.Add("client_id", clientId)
	switch platform {
	case "Github":
		r_url = "https://github.com/login/oauth/authorize?"
		rUrl = "https://github.com/login/oauth/authorize?"
		urlValues.Add("scope", "read:user")
	case "Microsoft":
		r_url = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?"
		rUrl = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize?"
		urlValues.Add("scope", "user.read")
		urlValues.Add("response_mode", "query")
	case "Google":
		r_url = "https://accounts.google.com/o/oauth2/v2/auth?"
		rUrl = "https://accounts.google.com/o/oauth2/v2/auth?"
		urlValues.Add("scope", "https://www.googleapis.com/auth/userinfo.profile")
	case "Dingtalk":
		r_url = "https://login.dingtalk.com/oauth2/auth?"
		rUrl = "https://login.dingtalk.com/oauth2/auth?"
		urlValues.Add("scope", "openid")
		urlValues.Add("prompt", "consent")
		urlValues.Add("response_type", "code")
	case "Casdoor":
		endpoint := strings.TrimSuffix(setting.GetStr(conf.SSOEndpointName), "/")
		r_url = endpoint + "/login/oauth/authorize?"
		rUrl = endpoint + "/login/oauth/authorize?"
		urlValues.Add("scope", "profile")
		urlValues.Add("state", endpoint)
	case "OIDC":
		oauth2Config, err := GetOIDCClient(c)
		if err != nil {
			common.ErrorStrResp(c, err.Error(), 400)
			return
		}
		// generate state parameter
		state, err := totp.GenerateCodeCustom(base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts)
		oauth2Config, err := GetOIDCClient(c, useCompatibility, redirectUri, method)
		if err != nil {
			common.ErrorStrResp(c, err.Error(), 400)
			return
		}
		state := generateState(clientId, c.ClientIP())
		c.Redirect(http.StatusFound, oauth2Config.AuthCodeURL(state))
		return
	default:
		common.ErrorStrResp(c, "invalid platform", 400)
		return
	}
	c.Redirect(302, r_url+urlValues.Encode())
	c.Redirect(302, rUrl+urlValues.Encode())
}

var ssoClient = resty.New().SetRetryCount(3)

func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) {
	var redirect_uri string
	usecompatibility := setting.GetBool(conf.SSOCompatibilityMode)
	argument := c.Query("method")
	if usecompatibility {
		argument = path.Base(c.Request.URL.Path)
	}
	if usecompatibility {
		redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/" + argument
	} else {
		redirect_uri = common.GetApiUrl(c.Request) + "/api/auth/sso_callback" + "?method=" + argument
func GetOIDCClient(c *gin.Context, useCompatibility bool, redirectUri, method string) (*oauth2.Config, error) {
	if redirectUri == "" {
		redirectUri = ssoRedirectUri(c, useCompatibility, method)
	}
	endpoint := setting.GetStr(conf.SSOEndpointName)
	provider, err := oidc.NewProvider(c, endpoint)
@@ -124,16 +124,20 @@ func GetOIDCClient(c *gin.Context) (*oauth2.Config, error) {
	}
	clientId := setting.GetStr(conf.SSOClientId)
	clientSecret := setting.GetStr(conf.SSOClientSecret)
	extraScopes := []string{}
	if setting.GetStr(conf.SSOExtraScopes) != "" {
		extraScopes = strings.Split(setting.GetStr(conf.SSOExtraScopes), " ")
	}
	return &oauth2.Config{
		ClientID:     clientId,
		ClientSecret: clientSecret,
		RedirectURL:  redirect_uri,
		RedirectURL:  redirectUri,

		// Discovery returns the OAuth2 endpoints.
		Endpoint: provider.Endpoint(),

		// "openid" is a required scope for OpenID Connect flows.
		Scopes: []string{oidc.ScopeOpenID, "profile"},
		Scopes: append([]string{oidc.ScopeOpenID, "profile"}, extraScopes...),
	}, nil
}

@@ -181,9 +185,9 @@ func parseJWT(p string) ([]byte, error) {

func OIDCLoginCallback(c *gin.Context) {
	useCompatibility := setting.GetBool(conf.SSOCompatibilityMode)
	argument := c.Query("method")
	method := c.Query("method")
	if useCompatibility {
		argument = path.Base(c.Request.URL.Path)
		method = path.Base(c.Request.URL.Path)
	}
	clientId := setting.GetStr(conf.SSOClientId)
	endpoint := setting.GetStr(conf.SSOEndpointName)
@@ -192,18 +196,12 @@ func OIDCLoginCallback(c *gin.Context) {
		common.ErrorResp(c, err, 400)
		return
	}
	oauth2Config, err := GetOIDCClient(c)
	oauth2Config, err := GetOIDCClient(c, useCompatibility, "", method)
	if err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	// add state verify process
	stateVerification, err := totp.ValidateCustom(c.Query("state"), base32.StdEncoding.EncodeToString([]byte(oauth2Config.ClientSecret)), time.Now(), opts)
	if err != nil {
		common.ErrorResp(c, err, 400)
		return
	}
	if !stateVerification {
	if !verifyState(clientId, c.ClientIP(), c.Query("state")) {
		common.ErrorStrResp(c, "incorrect or expired state parameter", 400)
		return
	}
@@ -236,7 +234,7 @@ func OIDCLoginCallback(c *gin.Context) {
		common.ErrorStrResp(c, "cannot get username from OIDC provider", 400)
		return
	}
	if argument == "get_sso_id" {
	if method == "get_sso_id" {
		if useCompatibility {
			c.Redirect(302, common.GetApiUrl(c.Request)+"/@manage?sso_id="+userID)
			return
@@ -252,7 +250,7 @@ func OIDCLoginCallback(c *gin.Context) {
		c.Data(200, "text/html; charset=utf-8", []byte(html))
		return
	}
	if argument == "sso_get_token" {
	if method == "sso_get_token" {
		user, err := db.GetUserBySSOID(userID)
		if err != nil {
			user, err = autoRegister(userID, userID, err)
@@ -1,6 +1,8 @@
package handles

import (
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/task"
	"math"

	"github.com/alist-org/alist/v3/internal/fs"
@@ -14,13 +16,15 @@ import (
type TaskInfo struct {
	ID          string      `json:"id"`
	Name        string      `json:"name"`
	Creator     string      `json:"creator"`
	CreatorRole int         `json:"creator_role"`
	State       tache.State `json:"state"`
	Status      string      `json:"status"`
	Progress    float64     `json:"progress"`
	Error       string      `json:"error"`
}

func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
func getTaskInfo[T task.TaskInfoWithCreator](task T) TaskInfo {
	errMsg := ""
	if task.GetErr() != nil {
		errMsg = task.GetErr().Error()
@@ -30,9 +34,17 @@ func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
	if math.IsNaN(progress) {
		progress = 100
	}
	creatorName := ""
	creatorRole := -1
	if task.GetCreator() != nil {
		creatorName = task.GetCreator().Username
		creatorRole = task.GetCreator().Role
	}
	return TaskInfo{
		ID:          task.GetID(),
		Name:        task.GetName(),
		Creator:     creatorName,
		CreatorRole: creatorRole,
		State:       task.GetState(),
		Status:      task.GetStatus(),
		Progress:    progress,
@@ -40,52 +52,158 @@ func getTaskInfo[T tache.TaskWithInfo](task T) TaskInfo {
	}
}

func getTaskInfos[T tache.TaskWithInfo](tasks []T) []TaskInfo {
func getTaskInfos[T task.TaskInfoWithCreator](tasks []T) []TaskInfo {
	return utils.MustSliceConvert(tasks, getTaskInfo[T])
}

func taskRoute[T tache.TaskWithInfo](g *gin.RouterGroup, manager *tache.Manager[T]) {
	g.GET("/undone", func(c *gin.Context) {
		common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StatePending, tache.StateRunning,
			tache.StateCanceling, tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)))
	})
	g.GET("/done", func(c *gin.Context) {
		common.SuccessResp(c, getTaskInfos(manager.GetByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)))
	})
	g.POST("/info", func(c *gin.Context) {
		tid := c.Query("tid")
		task, ok := manager.GetByID(tid)
func argsContains[T comparable](v T, slice ...T) bool {
	return utils.SliceContains(slice, v)
}

func getUserInfo(c *gin.Context) (bool, uint, bool) {
	if user, ok := c.Value("user").(*model.User); ok {
		return user.IsAdmin(), user.ID, true
	} else {
		return false, 0, false
	}
}

func getTargetedHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], callback func(c *gin.Context, task T)) gin.HandlerFunc {
	return func(c *gin.Context) {
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			// if there is no bug, here is unreachable
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		t, ok := manager.GetByID(c.Query("tid"))
		if !ok {
			common.ErrorStrResp(c, "task not found", 404)
			return
		}
		if !isAdmin && uid != t.GetCreator().ID {
			// to avoid an attacker using error messages to guess valid TID, return a 404 rather than a 403
			common.ErrorStrResp(c, "task not found", 404)
			return
		}
		callback(c, t)
	}
}

func getBatchHandler[T task.TaskInfoWithCreator](manager *tache.Manager[T], callback func(task T)) gin.HandlerFunc {
	return func(c *gin.Context) {
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		var tids []string
		if err := c.ShouldBind(&tids); err != nil {
			common.ErrorStrResp(c, "invalid request format", 400)
			return
		}
		retErrs := make(map[string]string)
		for _, tid := range tids {
			t, ok := manager.GetByID(tid)
			if !ok || (!isAdmin && uid != t.GetCreator().ID) {
				retErrs[tid] = "task not found"
				continue
			}
			callback(t)
		}
		common.SuccessResp(c, retErrs)
	}
}

func taskRoute[T task.TaskInfoWithCreator](g *gin.RouterGroup, manager *tache.Manager[T]) {
	g.GET("/undone", func(c *gin.Context) {
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			// if there is no bug, here is unreachable
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
			// avoid directly passing the user object into the function to reduce closure size
			return (isAdmin || uid == task.GetCreator().ID) &&
				argsContains(task.GetState(), tache.StatePending, tache.StateRunning, tache.StateCanceling,
					tache.StateErrored, tache.StateFailing, tache.StateWaitingRetry, tache.StateBeforeRetry)
		})))
	})
	g.GET("/done", func(c *gin.Context) {
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			// if there is no bug, here is unreachable
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		common.SuccessResp(c, getTaskInfos(manager.GetByCondition(func(task T) bool {
			return (isAdmin || uid == task.GetCreator().ID) &&
				argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
		})))
	})
	g.POST("/info", getTargetedHandler(manager, func(c *gin.Context, task T) {
		common.SuccessResp(c, getTaskInfo(task))
	})
	g.POST("/cancel", func(c *gin.Context) {
		tid := c.Query("tid")
		manager.Cancel(tid)
	}))
	g.POST("/cancel", getTargetedHandler(manager, func(c *gin.Context, task T) {
		manager.Cancel(task.GetID())
		common.SuccessResp(c)
	})
	g.POST("/delete", func(c *gin.Context) {
		tid := c.Query("tid")
		manager.Remove(tid)
	}))
	g.POST("/delete", getTargetedHandler(manager, func(c *gin.Context, task T) {
		manager.Remove(task.GetID())
		common.SuccessResp(c)
	})
	g.POST("/retry", func(c *gin.Context) {
		tid := c.Query("tid")
		manager.Retry(tid)
	}))
	g.POST("/retry", getTargetedHandler(manager, func(c *gin.Context, task T) {
		manager.Retry(task.GetID())
		common.SuccessResp(c)
	})
	}))
	g.POST("/cancel_some", getBatchHandler(manager, func(task T) {
		manager.Cancel(task.GetID())
	}))
	g.POST("/delete_some", getBatchHandler(manager, func(task T) {
		manager.Remove(task.GetID())
	}))
	g.POST("/retry_some", getBatchHandler(manager, func(task T) {
		manager.Retry(task.GetID())
	}))
	g.POST("/clear_done", func(c *gin.Context) {
		manager.RemoveByState(tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			// if there is no bug, here is unreachable
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		manager.RemoveByCondition(func(task T) bool {
			return (isAdmin || uid == task.GetCreator().ID) &&
				argsContains(task.GetState(), tache.StateCanceled, tache.StateFailed, tache.StateSucceeded)
		})
		common.SuccessResp(c)
	})
	g.POST("/clear_succeeded", func(c *gin.Context) {
		manager.RemoveByState(tache.StateSucceeded)
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			// if there is no bug, here is unreachable
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		manager.RemoveByCondition(func(task T) bool {
			return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateSucceeded
		})
		common.SuccessResp(c)
	})
	g.POST("/retry_failed", func(c *gin.Context) {
		manager.RetryAllFailed()
		isAdmin, uid, ok := getUserInfo(c)
		if !ok {
			// if there is no bug, here is unreachable
			common.ErrorStrResp(c, "user invalid", 401)
			return
		}
		tasks := manager.GetByCondition(func(task T) bool {
			return (isAdmin || uid == task.GetCreator().ID) && task.GetState() == tache.StateFailed
		})
		for _, t := range tasks {
			manager.Retry(t.GetID())
		}
		common.SuccessResp(c)
	})
}
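The new *_some endpoints registered above bind the request body straight into a []string of task IDs via c.ShouldBind, so a batch call is simply a JSON array. A client-side sketch follows (not from the repository; the base URL and the Authorization header name are assumptions, only the /cancel_some suffix and the array-shaped body come from the handler above):

package main

import (
	"bytes"
	"encoding/json"
	"net/http"
)

// cancelSome posts a JSON array of task IDs to a hypothetical
// <base>/cancel_some route served by getBatchHandler.
func cancelSome(base, token string, tids []string) error {
	body, _ := json.Marshal(tids) // matches c.ShouldBind(&tids) on a []string
	req, err := http.NewRequest(http.MethodPost, base+"/cancel_some", bytes.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", token) // assumed header name for the alist token
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}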
@@ -127,6 +127,16 @@ func Authn(c *gin.Context) {
	c.Next()
}

func AuthNotGuest(c *gin.Context) {
	user := c.MustGet("user").(*model.User)
	if user.IsGuest() {
		common.ErrorStrResp(c, "You are a guest", 403)
		c.Abort()
	} else {
		c.Next()
	}
}

func AuthAdmin(c *gin.Context) {
	user := c.MustGet("user").(*model.User)
	if !user.IsAdmin() {
@@ -62,7 +62,7 @@ func Init(e *gin.Engine) {
	api.GET("/auth/get_sso_id", handles.SSOLoginCallback)
	api.GET("/auth/sso_get_token", handles.SSOLoginCallback)

	//webauthn
	// webauthn
	webauthn.GET("/webauthn_begin_registration", handles.BeginAuthnRegistration)
	webauthn.POST("/webauthn_finish_registration", handles.FinishAuthnRegistration)
	webauthn.GET("/webauthn_begin_login", handles.BeginAuthnLogin)
@@ -76,6 +76,7 @@ func Init(e *gin.Engine) {
	public.Any("/offline_download_tools", handles.OfflineDownloadTools)

	_fs(auth.Group("/fs"))
	_task(auth.Group("/task", middlewares.AuthNotGuest))
	admin(auth.Group("/admin", middlewares.AuthAdmin))
	if flags.Debug || flags.Dev {
		debug(g.Group("/debug"))
@@ -125,9 +126,10 @@ func admin(g *gin.RouterGroup) {
	setting.POST("/reset_token", handles.ResetToken)
	setting.POST("/set_aria2", handles.SetAria2)
	setting.POST("/set_qbit", handles.SetQbittorrent)
	setting.POST("/set_transmission", handles.SetTransmission)

	task := g.Group("/task")
	handles.SetupTaskRoute(task)
	// retain /admin/task API to ensure compatibility with legacy automation scripts
	_task(g.Group("/task"))

	ms := g.Group("/message")
	ms.POST("/get", message.HttpInstance.GetHandle)
@@ -159,14 +161,19 @@ func _fs(g *gin.RouterGroup) {
	g.PUT("/put", middlewares.FsUp, handles.FsStream)
	g.PUT("/form", middlewares.FsUp, handles.FsForm)
	g.POST("/link", middlewares.AuthAdmin, handles.Link)
	//g.POST("/add_aria2", handles.AddOfflineDownload)
	//g.POST("/add_qbit", handles.AddQbittorrent)
	// g.POST("/add_aria2", handles.AddOfflineDownload)
	// g.POST("/add_qbit", handles.AddQbittorrent)
	// g.POST("/add_transmission", handles.SetTransmission)
	g.POST("/add_offline_download", handles.AddOfflineDownload)
}

func _task(g *gin.RouterGroup) {
	handles.SetupTaskRoute(g)
}

func Cors(r *gin.Engine) {
	config := cors.DefaultConfig()
	//config.AllowAllOrigins = true
	// config.AllowAllOrigins = true
	config.AllowOrigins = conf.Conf.Cors.AllowOrigins
	config.AllowHeaders = conf.Conf.Cors.AllowHeaders
	config.AllowMethods = conf.Conf.Cors.AllowMethods
109 server/sftp.go Normal file
@@ -0,0 +1,109 @@
package server

import (
	"context"
	"github.com/KirCute/sftpd-alist"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/setting"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/alist-org/alist/v3/server/ftp"
	"github.com/pkg/errors"
	"golang.org/x/crypto/ssh"
	"net/http"
)

type SftpDriver struct {
	proxyHeader *http.Header
	config      *sftpd.Config
}

func NewSftpDriver() (*SftpDriver, error) {
	header := &http.Header{}
	header.Add("User-Agent", setting.GetStr(conf.FTPProxyUserAgent))
	return &SftpDriver{
		proxyHeader: header,
	}, nil
}

func (d *SftpDriver) GetConfig() *sftpd.Config {
	if d.config != nil {
		return d.config
	}
	serverConfig := ssh.ServerConfig{
		NoClientAuth:         true,
		NoClientAuthCallback: d.NoClientAuth,
		PasswordCallback:     d.PasswordAuth,
		AuthLogCallback:      d.AuthLogCallback,
		BannerCallback:       d.GetBanner,
	}
	for _, k := range conf.SSHSigners {
		serverConfig.AddHostKey(k)
	}
	d.config = &sftpd.Config{
		ServerConfig: serverConfig,
		HostPort:     conf.Conf.SFTP.Listen,
		ErrorLogFunc: utils.Log.Error,
		//DebugLogFunc: utils.Log.Debugf,
	}
	return d.config
}

func (d *SftpDriver) GetFileSystem(sc *ssh.ServerConn) (sftpd.FileSystem, error) {
	userObj, err := op.GetUserByName(sc.User())
	if err != nil {
		return nil, err
	}
	ctx := context.Background()
	ctx = context.WithValue(ctx, "user", userObj)
	ctx = context.WithValue(ctx, "meta_pass", "")
	ctx = context.WithValue(ctx, "client_ip", sc.RemoteAddr().String())
	ctx = context.WithValue(ctx, "proxy_header", d.proxyHeader)
	return &ftp.SftpDriverAdapter{FtpDriver: ftp.NewAferoAdapter(ctx)}, nil
}

func (d *SftpDriver) Close() {
}

func (d *SftpDriver) NoClientAuth(conn ssh.ConnMetadata) (*ssh.Permissions, error) {
	if conn.User() != "guest" {
		return nil, errors.New("only guest is allowed to login without authorization")
	}
	guest, err := op.GetGuest()
	if err != nil {
		return nil, err
	}
	if guest.Disabled || !guest.CanFTPAccess() {
		return nil, errors.New("user is not allowed to access via SFTP")
	}
	return nil, nil
}

func (d *SftpDriver) PasswordAuth(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
	userObj, err := op.GetUserByName(conn.User())
	if err != nil {
		return nil, err
	}
	passHash := model.StaticHash(string(password))
	if err = userObj.ValidatePwdStaticHash(passHash); err != nil {
		return nil, err
	}
	if userObj.Disabled || !userObj.CanFTPAccess() {
		return nil, errors.New("user is not allowed to access via SFTP")
	}
	return nil, nil
}

func (d *SftpDriver) AuthLogCallback(conn ssh.ConnMetadata, method string, err error) {
	ip := conn.RemoteAddr().String()
	if err == nil {
		utils.Log.Infof("[SFTP] %s(%s) logged in via %s", conn.User(), ip, method)
	} else if method != "none" {
		utils.Log.Infof("[SFTP] %s(%s) tries logging in via %s but with error: %s", conn.User(), ip, method, err)
	}
}

func (d *SftpDriver) GetBanner(_ ssh.ConnMetadata) string {
	return setting.GetStr(conf.Announcement)
}
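GetConfig above installs every signer from conf.SSHSigners as an SFTP host key; how those signers are produced is outside this diff. A hedged sketch of one way to load a PEM-encoded host key with x/crypto/ssh follows (the file path and surrounding program are illustrative only, not taken from the repository):

package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

// loadHostKey parses a PEM-encoded private key into an ssh.Signer,
// which ssh.ServerConfig.AddHostKey accepts directly.
func loadHostKey(path string) (ssh.Signer, error) {
	pem, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return ssh.ParsePrivateKey(pem)
}

func main() {
	signer, err := loadHostKey("/etc/alist/ssh_host_ed25519_key") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	cfg := &ssh.ServerConfig{NoClientAuth: true}
	cfg.AddHostKey(signer)
}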