Compare commits
11 Commits
main
...
refactor/o
Author | SHA1 | Date | |
---|---|---|---|
|
3b90f591b5 | ||
|
5657b12b20 | ||
|
aba8bc0ec2 | ||
|
ce6e486666 | ||
|
9fb9efb704 | ||
|
1490da8b53 | ||
|
12dfb60a66 | ||
|
0380d7fff9 | ||
|
0acb2d6073 | ||
|
ea9a3432ab | ||
|
7db3975b18 |
44
.air.toml
44
.air.toml
@ -1,44 +0,0 @@
|
|||||||
root = "."
|
|
||||||
testdata_dir = "testdata"
|
|
||||||
tmp_dir = "tmp"
|
|
||||||
|
|
||||||
[build]
|
|
||||||
args_bin = ["server"]
|
|
||||||
bin = "./tmp/main"
|
|
||||||
cmd = "go build -o ./tmp/main ."
|
|
||||||
delay = 0
|
|
||||||
exclude_dir = ["assets", "tmp", "vendor", "testdata"]
|
|
||||||
exclude_file = []
|
|
||||||
exclude_regex = ["_test.go"]
|
|
||||||
exclude_unchanged = false
|
|
||||||
follow_symlink = false
|
|
||||||
full_bin = ""
|
|
||||||
include_dir = []
|
|
||||||
include_ext = ["go", "tpl", "tmpl", "html"]
|
|
||||||
include_file = []
|
|
||||||
kill_delay = "0s"
|
|
||||||
log = "build-errors.log"
|
|
||||||
poll = false
|
|
||||||
poll_interval = 0
|
|
||||||
rerun = false
|
|
||||||
rerun_delay = 500
|
|
||||||
send_interrupt = false
|
|
||||||
stop_on_error = false
|
|
||||||
|
|
||||||
[color]
|
|
||||||
app = ""
|
|
||||||
build = "yellow"
|
|
||||||
main = "magenta"
|
|
||||||
runner = "green"
|
|
||||||
watcher = "cyan"
|
|
||||||
|
|
||||||
[log]
|
|
||||||
main_only = false
|
|
||||||
time = false
|
|
||||||
|
|
||||||
[misc]
|
|
||||||
clean_on_exit = false
|
|
||||||
|
|
||||||
[screen]
|
|
||||||
clear_on_rebuild = false
|
|
||||||
keep_scroll = true
|
|
2
.github/ISSUE_TEMPLATE/config.yml
vendored
2
.github/ISSUE_TEMPLATE/config.yml
vendored
@ -1,5 +1,5 @@
|
|||||||
blank_issues_enabled: false
|
blank_issues_enabled: false
|
||||||
contact_links:
|
contact_links:
|
||||||
- name: Questions & Discussions
|
- name: Questions & Discussions
|
||||||
url: https://github.com/alist-org/alist/discussions
|
url: https://github.com/Xhofe/alist/discussions
|
||||||
about: Use GitHub discussions for message-board style questions and discussions.
|
about: Use GitHub discussions for message-board style questions and discussions.
|
8
.github/workflows/auto_lang.yml
vendored
8
.github/workflows/auto_lang.yml
vendored
@ -20,22 +20,22 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [ ubuntu-latest ]
|
platform: [ ubuntu-latest ]
|
||||||
go-version: [ '1.21' ]
|
go-version: [ '1.20' ]
|
||||||
name: auto generate lang.json
|
name: auto generate lang.json
|
||||||
runs-on: ${{ matrix.platform }}
|
runs-on: ${{ matrix.platform }}
|
||||||
steps:
|
steps:
|
||||||
- name: Setup go
|
- name: Setup go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go-version }}
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
- name: Checkout alist
|
- name: Checkout alist
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
path: alist
|
path: alist
|
||||||
|
|
||||||
- name: Checkout alist-web
|
- name: Checkout alist-web
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
repository: 'alist-org/alist-web'
|
repository: 'alist-org/alist-web'
|
||||||
ref: main
|
ref: main
|
||||||
|
138
.github/workflows/beta_release.yml
vendored
138
.github/workflows/beta_release.yml
vendored
@ -1,138 +0,0 @@
|
|||||||
name: beta release
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [ 'main' ]
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
changelog:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
platform: [ ubuntu-latest ]
|
|
||||||
go-version: [ '1.21' ]
|
|
||||||
name: Beta Release Changelog
|
|
||||||
runs-on: ${{ matrix.platform }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Create or update ref
|
|
||||||
id: create-or-update-ref
|
|
||||||
uses: ovsds/create-or-update-ref-action@v1
|
|
||||||
with:
|
|
||||||
ref: tags/beta
|
|
||||||
sha: ${{ github.sha }}
|
|
||||||
|
|
||||||
- name: Delete beta tag
|
|
||||||
run: git tag -d beta
|
|
||||||
continue-on-error: true
|
|
||||||
|
|
||||||
- name: changelog # or changelogithub@0.12 if ensure the stable result
|
|
||||||
id: changelog
|
|
||||||
run: |
|
|
||||||
git tag -l
|
|
||||||
npx changelogithub --output CHANGELOG.md
|
|
||||||
# npx changelogen@latest --output CHANGELOG.md
|
|
||||||
|
|
||||||
- name: Upload assets
|
|
||||||
uses: softprops/action-gh-release@v2
|
|
||||||
with:
|
|
||||||
body_path: CHANGELOG.md
|
|
||||||
files: CHANGELOG.md
|
|
||||||
prerelease: true
|
|
||||||
tag_name: beta
|
|
||||||
|
|
||||||
release:
|
|
||||||
needs:
|
|
||||||
- changelog
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- target: '!(*musl*|*windows-arm64*|*android*|*freebsd*)' # xgo
|
|
||||||
hash: "md5"
|
|
||||||
- target: 'linux-!(arm*)-musl*' #musl-not-arm
|
|
||||||
hash: "md5-linux-musl"
|
|
||||||
- target: 'linux-arm*-musl*' #musl-arm
|
|
||||||
hash: "md5-linux-musl-arm"
|
|
||||||
- target: 'windows-arm64' #win-arm64
|
|
||||||
hash: "md5-windows-arm64"
|
|
||||||
- target: 'android-*' #android
|
|
||||||
hash: "md5-android"
|
|
||||||
- target: 'freebsd-*' #freebsd
|
|
||||||
hash: "md5-freebsd"
|
|
||||||
|
|
||||||
name: Beta Release
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: '1.22'
|
|
||||||
|
|
||||||
- name: Setup web
|
|
||||||
run: bash build.sh dev web
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
uses: go-cross/cgo-actions@v1
|
|
||||||
with:
|
|
||||||
targets: ${{ matrix.target }}
|
|
||||||
musl-target-format: $os-$musl-$arch
|
|
||||||
out-dir: build
|
|
||||||
x-flags: |
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.Version=$tag
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.WebVersion=dev
|
|
||||||
|
|
||||||
- name: Compress
|
|
||||||
run: |
|
|
||||||
bash build.sh zip ${{ matrix.hash }}
|
|
||||||
|
|
||||||
- name: Upload assets
|
|
||||||
uses: softprops/action-gh-release@v2
|
|
||||||
with:
|
|
||||||
files: build/compress/*
|
|
||||||
prerelease: true
|
|
||||||
tag_name: beta
|
|
||||||
|
|
||||||
desktop:
|
|
||||||
needs:
|
|
||||||
- release
|
|
||||||
name: Beta Release Desktop
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repo
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
repository: alist-org/desktop-release
|
|
||||||
ref: main
|
|
||||||
persist-credentials: false
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Commit
|
|
||||||
run: |
|
|
||||||
git config --local user.email "bot@nn.ci"
|
|
||||||
git config --local user.name "IlaBot"
|
|
||||||
git commit --allow-empty -m "Trigger build for ${{ github.sha }}"
|
|
||||||
|
|
||||||
- name: Push commit
|
|
||||||
uses: ad-m/github-push-action@master
|
|
||||||
with:
|
|
||||||
github_token: ${{ secrets.MY_TOKEN }}
|
|
||||||
branch: main
|
|
||||||
repository: alist-org/desktop-release
|
|
50
.github/workflows/build.yml
vendored
50
.github/workflows/build.yml
vendored
@ -15,47 +15,31 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [ubuntu-latest]
|
platform: [ubuntu-latest]
|
||||||
target:
|
go-version: [ '1.20' ]
|
||||||
- darwin-amd64
|
|
||||||
- darwin-arm64
|
|
||||||
- windows-amd64
|
|
||||||
- linux-arm64-musl
|
|
||||||
- linux-amd64-musl
|
|
||||||
- windows-arm64
|
|
||||||
- android-arm64
|
|
||||||
name: Build
|
name: Build
|
||||||
runs-on: ${{ matrix.platform }}
|
runs-on: ${{ matrix.platform }}
|
||||||
steps:
|
steps:
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
- uses: benjlevesque/short-sha@v3.0
|
- name: Install dependencies
|
||||||
id: short-sha
|
run: |
|
||||||
|
sudo snap install zig --classic --beta
|
||||||
- name: Setup Go
|
docker pull crazymax/xgo:latest
|
||||||
uses: actions/setup-go@v5
|
go install github.com/crazy-max/xgo@latest
|
||||||
with:
|
sudo apt install upx
|
||||||
go-version: '1.22'
|
|
||||||
|
|
||||||
- name: Setup web
|
|
||||||
run: bash build.sh dev web
|
|
||||||
|
|
||||||
- name: Build
|
- name: Build
|
||||||
uses: go-cross/cgo-actions@v1
|
run: |
|
||||||
with:
|
bash build.sh dev
|
||||||
targets: ${{ matrix.target }}
|
|
||||||
musl-target-format: $os-$musl-$arch
|
|
||||||
out-dir: build
|
|
||||||
x-flags: |
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.BuiltAt=$built_at
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.GitAuthor=Xhofe
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.GitCommit=$git_commit
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.Version=$tag
|
|
||||||
github.com/alist-org/alist/v3/internal/conf.WebVersion=dev
|
|
||||||
|
|
||||||
- name: Upload artifact
|
- name: Upload artifact
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: alist_${{ env.SHA }}_${{ matrix.target }}
|
name: alist
|
||||||
path: build/*
|
path: dist
|
69
.github/workflows/build_docker.yml
vendored
Normal file
69
.github/workflows/build_docker.yml
vendored
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
name: build_docker
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ main ]
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build_docker:
|
||||||
|
name: Build docker
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
- name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v4
|
||||||
|
with:
|
||||||
|
images: xhofe/alist
|
||||||
|
- name: Replace release with dev
|
||||||
|
run: |
|
||||||
|
sed -i 's/release/dev/g' Dockerfile
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v2
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v2
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
username: xhofe
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
- name: Build and push
|
||||||
|
id: docker_build
|
||||||
|
uses: docker/build-push-action@v4
|
||||||
|
with:
|
||||||
|
context: .
|
||||||
|
push: true
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
|
platforms: linux/amd64,linux/arm64
|
||||||
|
|
||||||
|
build_docker_with_aria2:
|
||||||
|
needs: build_docker
|
||||||
|
name: Build docker with aria2
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout repo
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
repository: alist-org/with_aria2
|
||||||
|
ref: main
|
||||||
|
persist-credentials: false
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Commit
|
||||||
|
run: |
|
||||||
|
git config --local user.email "bot@nn.ci"
|
||||||
|
git config --local user.name "IlaBot"
|
||||||
|
git commit --allow-empty -m "Trigger build for ${{ github.sha }}"
|
||||||
|
|
||||||
|
- name: Push commit
|
||||||
|
uses: ad-m/github-push-action@master
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.MY_TOKEN }}
|
||||||
|
branch: main
|
||||||
|
repository: alist-org/with_aria2
|
9
.github/workflows/changelog.yml
vendored
9
.github/workflows/changelog.yml
vendored
@ -3,7 +3,7 @@ name: auto changelog
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- '*'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
changelog:
|
changelog:
|
||||||
@ -11,14 +11,9 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Delete beta tag
|
|
||||||
run: git tag -d beta
|
|
||||||
continue-on-error: true
|
|
||||||
|
|
||||||
- run: npx changelogithub # or changelogithub@0.12 if ensure the stable result
|
- run: npx changelogithub # or changelogithub@0.12 if ensure the stable result
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{secrets.MY_TOKEN}}
|
GITHUB_TOKEN: ${{secrets.MY_TOKEN}}
|
||||||
|
2
.github/workflows/issue_question.yml
vendored
2
.github/workflows/issue_question.yml
vendored
@ -10,7 +10,7 @@ jobs:
|
|||||||
if: github.event.label.name == 'question'
|
if: github.event.label.name == 'question'
|
||||||
steps:
|
steps:
|
||||||
- name: Create comment
|
- name: Create comment
|
||||||
uses: actions-cool/issues-helper@v3.6.0
|
uses: actions-cool/issues-helper@v3.5.2
|
||||||
with:
|
with:
|
||||||
actions: 'create-comment'
|
actions: 'create-comment'
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
27
.github/workflows/release.yml
vendored
27
.github/workflows/release.yml
vendored
@ -9,27 +9,10 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [ ubuntu-latest ]
|
platform: [ ubuntu-latest ]
|
||||||
go-version: [ '1.21' ]
|
go-version: [ '1.20' ]
|
||||||
name: Release
|
name: Release
|
||||||
runs-on: ${{ matrix.platform }}
|
runs-on: ${{ matrix.platform }}
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Free Disk Space (Ubuntu)
|
|
||||||
uses: jlumbroso/free-disk-space@main
|
|
||||||
with:
|
|
||||||
# this might remove tools that are actually needed,
|
|
||||||
# if set to "true" but frees about 6 GB
|
|
||||||
tool-cache: false
|
|
||||||
|
|
||||||
# all of these default to true, but feel free to set to
|
|
||||||
# "false" if necessary for your workflow
|
|
||||||
android: true
|
|
||||||
dotnet: true
|
|
||||||
haskell: true
|
|
||||||
large-packages: true
|
|
||||||
docker-images: true
|
|
||||||
swap-storage: true
|
|
||||||
|
|
||||||
- name: Prerelease
|
- name: Prerelease
|
||||||
uses: irongut/EditRelease@v1.2.0
|
uses: irongut/EditRelease@v1.2.0
|
||||||
with:
|
with:
|
||||||
@ -38,12 +21,12 @@ jobs:
|
|||||||
prerelease: true
|
prerelease: true
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go-version }}
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@ -59,7 +42,7 @@ jobs:
|
|||||||
bash build.sh release
|
bash build.sh release
|
||||||
|
|
||||||
- name: Upload assets
|
- name: Upload assets
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v1
|
||||||
with:
|
with:
|
||||||
files: build/compress/*
|
files: build/compress/*
|
||||||
prerelease: false
|
prerelease: false
|
||||||
@ -70,7 +53,7 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repo
|
- name: Checkout repo
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
repository: alist-org/desktop-release
|
repository: alist-org/desktop-release
|
||||||
ref: main
|
ref: main
|
||||||
|
34
.github/workflows/release_android.yml
vendored
34
.github/workflows/release_android.yml
vendored
@ -1,34 +0,0 @@
|
|||||||
name: release_android
|
|
||||||
|
|
||||||
on:
|
|
||||||
release:
|
|
||||||
types: [ published ]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release_android:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
platform: [ ubuntu-latest ]
|
|
||||||
go-version: [ '1.21' ]
|
|
||||||
name: Release
|
|
||||||
runs-on: ${{ matrix.platform }}
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ matrix.go-version }}
|
|
||||||
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
bash build.sh release android
|
|
||||||
|
|
||||||
- name: Upload assets
|
|
||||||
uses: softprops/action-gh-release@v2
|
|
||||||
with:
|
|
||||||
files: build/compress/*
|
|
161
.github/workflows/release_docker.yml
vendored
161
.github/workflows/release_docker.yml
vendored
@ -3,135 +3,66 @@ name: release_docker
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- '*'
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
env:
|
|
||||||
REGISTRY: 'xhofe/alist'
|
|
||||||
REGISTRY_USERNAME: 'xhofe'
|
|
||||||
REGISTRY_PASSWORD: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
ARTIFACT_NAME: 'binaries_docker_release'
|
|
||||||
RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x,linux/ppc64le,linux/riscv64'
|
|
||||||
IMAGE_PUSH: ${{ github.event_name == 'push' }}
|
|
||||||
IMAGE_IS_PROD: ${{ github.ref_type == 'tag' }}
|
|
||||||
IMAGE_TAGS_BETA: |
|
|
||||||
type=schedule
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=tag
|
|
||||||
type=ref,event=pr
|
|
||||||
type=raw,value=beta,enable={{is_default_branch}}
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build_binary:
|
|
||||||
name: Build Binaries for Docker Release
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
|
|
||||||
- uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: 'stable'
|
|
||||||
|
|
||||||
- name: Cache Musl
|
|
||||||
id: cache-musl
|
|
||||||
uses: actions/cache@v4
|
|
||||||
with:
|
|
||||||
path: build/musl-libs
|
|
||||||
key: docker-musl-libs-v2
|
|
||||||
|
|
||||||
- name: Download Musl Library
|
|
||||||
if: steps.cache-musl.outputs.cache-hit != 'true'
|
|
||||||
run: bash build.sh prepare docker-multiplatform
|
|
||||||
|
|
||||||
- name: Build go binary (beta)
|
|
||||||
if: env.IMAGE_IS_PROD != 'true'
|
|
||||||
run: bash build.sh beta docker-multiplatform
|
|
||||||
|
|
||||||
- name: Build go binary (release)
|
|
||||||
if: env.IMAGE_IS_PROD == 'true'
|
|
||||||
run: bash build.sh release docker-multiplatform
|
|
||||||
|
|
||||||
- name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v4
|
|
||||||
with:
|
|
||||||
name: ${{ env.ARTIFACT_NAME }}
|
|
||||||
overwrite: true
|
|
||||||
path: |
|
|
||||||
build/
|
|
||||||
!build/*.tgz
|
|
||||||
!build/musl-libs/**
|
|
||||||
|
|
||||||
release_docker:
|
release_docker:
|
||||||
needs: build_binary
|
name: Release Docker
|
||||||
name: Release Docker image
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
image: ["latest", "ffmpeg", "aria2", "aio"]
|
|
||||||
include:
|
|
||||||
- image: "latest"
|
|
||||||
build_arg: ""
|
|
||||||
tag_favor: ""
|
|
||||||
- image: "ffmpeg"
|
|
||||||
build_arg: INSTALL_FFMPEG=true
|
|
||||||
tag_favor: "suffix=-ffmpeg,onlatest=true"
|
|
||||||
- image: "aria2"
|
|
||||||
build_arg: INSTALL_ARIA2=true
|
|
||||||
tag_favor: "suffix=-aria2,onlatest=true"
|
|
||||||
- image: "aio"
|
|
||||||
build_arg: |
|
|
||||||
INSTALL_FFMPEG=true
|
|
||||||
INSTALL_ARIA2=true
|
|
||||||
tag_favor: "suffix=-aio,onlatest=true"
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
- uses: actions/download-artifact@v4
|
|
||||||
with:
|
|
||||||
name: ${{ env.ARTIFACT_NAME }}
|
|
||||||
path: 'build/'
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v3
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v3
|
|
||||||
|
|
||||||
- name: Login to DockerHub
|
|
||||||
if: env.IMAGE_PUSH == 'true'
|
|
||||||
uses: docker/login-action@v3
|
|
||||||
with:
|
|
||||||
logout: true
|
|
||||||
username: ${{ env.REGISTRY_USERNAME }}
|
|
||||||
password: ${{ env.REGISTRY_PASSWORD }}
|
|
||||||
|
|
||||||
- name: Docker meta
|
- name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v4
|
||||||
with:
|
with:
|
||||||
images: ${{ env.REGISTRY }}
|
images: xhofe/alist
|
||||||
tags: ${{ env.IMAGE_IS_PROD == 'true' && '' || env.IMAGE_TAGS_BETA }}
|
|
||||||
flavor: |
|
- name: Set up QEMU
|
||||||
${{ env.IMAGE_IS_PROD == 'true' && 'latest=true' || '' }}
|
uses: docker/setup-qemu-action@v2
|
||||||
${{ matrix.tag_favor }}
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v2
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
uses: docker/login-action@v2
|
||||||
|
with:
|
||||||
|
username: xhofe
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
id: docker_build
|
id: docker_build
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v4
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
file: Dockerfile.ci
|
push: true
|
||||||
push: ${{ env.IMAGE_PUSH == 'true' }}
|
|
||||||
build-args: ${{ matrix.build_arg }}
|
|
||||||
tags: ${{ steps.meta.outputs.tags }}
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
labels: ${{ steps.meta.outputs.labels }}
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
platforms: ${{ env.RELEASE_PLATFORMS }}
|
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/s390x
|
||||||
|
|
||||||
|
release_docker_with_aria2:
|
||||||
|
needs: release_docker
|
||||||
|
name: Release docker with aria2
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout repo
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
with:
|
||||||
|
repository: alist-org/with_aria2
|
||||||
|
ref: main
|
||||||
|
persist-credentials: false
|
||||||
|
fetch-depth: 0
|
||||||
|
|
||||||
|
- name: Add tag
|
||||||
|
run: |
|
||||||
|
git config --local user.email "bot@nn.ci"
|
||||||
|
git config --local user.name "IlaBot"
|
||||||
|
git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}"
|
||||||
|
|
||||||
|
- name: Push tags
|
||||||
|
uses: ad-m/github-push-action@master
|
||||||
|
with:
|
||||||
|
github_token: ${{ secrets.MY_TOKEN }}
|
||||||
|
branch: main
|
||||||
|
repository: alist-org/with_aria2
|
||||||
|
34
.github/workflows/release_freebsd.yml
vendored
34
.github/workflows/release_freebsd.yml
vendored
@ -1,34 +0,0 @@
|
|||||||
name: release_freebsd
|
|
||||||
|
|
||||||
on:
|
|
||||||
release:
|
|
||||||
types: [ published ]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release_freebsd:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
platform: [ ubuntu-latest ]
|
|
||||||
go-version: [ '1.21' ]
|
|
||||||
name: Release
|
|
||||||
runs-on: ${{ matrix.platform }}
|
|
||||||
steps:
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v5
|
|
||||||
with:
|
|
||||||
go-version: ${{ matrix.go-version }}
|
|
||||||
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v4
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Build
|
|
||||||
run: |
|
|
||||||
bash build.sh release freebsd
|
|
||||||
|
|
||||||
- name: Upload assets
|
|
||||||
uses: softprops/action-gh-release@v2
|
|
||||||
with:
|
|
||||||
files: build/compress/*
|
|
8
.github/workflows/release_linux_musl.yml
vendored
8
.github/workflows/release_linux_musl.yml
vendored
@ -9,18 +9,18 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [ ubuntu-latest ]
|
platform: [ ubuntu-latest ]
|
||||||
go-version: [ '1.21' ]
|
go-version: [ '1.20' ]
|
||||||
name: Release
|
name: Release
|
||||||
runs-on: ${{ matrix.platform }}
|
runs-on: ${{ matrix.platform }}
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go-version }}
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@ -29,6 +29,6 @@ jobs:
|
|||||||
bash build.sh release linux_musl
|
bash build.sh release linux_musl
|
||||||
|
|
||||||
- name: Upload assets
|
- name: Upload assets
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v1
|
||||||
with:
|
with:
|
||||||
files: build/compress/*
|
files: build/compress/*
|
||||||
|
8
.github/workflows/release_linux_musl_arm.yml
vendored
8
.github/workflows/release_linux_musl_arm.yml
vendored
@ -9,18 +9,18 @@ jobs:
|
|||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
platform: [ ubuntu-latest ]
|
platform: [ ubuntu-latest ]
|
||||||
go-version: [ '1.21' ]
|
go-version: [ '1.20' ]
|
||||||
name: Release
|
name: Release
|
||||||
runs-on: ${{ matrix.platform }}
|
runs-on: ${{ matrix.platform }}
|
||||||
steps:
|
steps:
|
||||||
|
|
||||||
- name: Setup Go
|
- name: Setup Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go-version }}
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v3
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
@ -29,6 +29,6 @@ jobs:
|
|||||||
bash build.sh release linux_musl_arm
|
bash build.sh release linux_musl_arm
|
||||||
|
|
||||||
- name: Upload assets
|
- name: Upload assets
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v1
|
||||||
with:
|
with:
|
||||||
files: build/compress/*
|
files: build/compress/*
|
||||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -24,7 +24,6 @@ output/
|
|||||||
*.json
|
*.json
|
||||||
/build
|
/build
|
||||||
/data/
|
/data/
|
||||||
/tmp/
|
|
||||||
/log/
|
/log/
|
||||||
/lang/
|
/lang/
|
||||||
/daemon/
|
/daemon/
|
||||||
|
43
Dockerfile
43
Dockerfile
@ -1,43 +1,18 @@
|
|||||||
FROM alpine:edge as builder
|
FROM alpine:3.18 as builder
|
||||||
LABEL stage=go-builder
|
LABEL stage=go-builder
|
||||||
WORKDIR /app/
|
WORKDIR /app/
|
||||||
RUN apk add --no-cache bash curl gcc git go musl-dev
|
|
||||||
COPY go.mod go.sum ./
|
|
||||||
RUN go mod download
|
|
||||||
COPY ./ ./
|
COPY ./ ./
|
||||||
RUN bash build.sh release docker
|
RUN apk add --no-cache bash curl gcc git go musl-dev; \
|
||||||
|
bash build.sh release docker
|
||||||
|
|
||||||
FROM alpine:edge
|
FROM alpine:3.18
|
||||||
|
|
||||||
ARG INSTALL_FFMPEG=false
|
|
||||||
ARG INSTALL_ARIA2=false
|
|
||||||
LABEL MAINTAINER="i@nn.ci"
|
LABEL MAINTAINER="i@nn.ci"
|
||||||
|
VOLUME /opt/alist/data/
|
||||||
WORKDIR /opt/alist/
|
WORKDIR /opt/alist/
|
||||||
|
|
||||||
RUN apk update && \
|
|
||||||
apk upgrade --no-cache && \
|
|
||||||
apk add --no-cache bash ca-certificates su-exec tzdata; \
|
|
||||||
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
|
|
||||||
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
|
|
||||||
mkdir -p /opt/aria2/.aria2 && \
|
|
||||||
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
|
|
||||||
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
|
|
||||||
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
|
|
||||||
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
|
|
||||||
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
|
|
||||||
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
|
|
||||||
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
|
|
||||||
touch /opt/aria2/.aria2/aria2.session && \
|
|
||||||
/opt/aria2/.aria2/tracker.sh ; \
|
|
||||||
rm -rf /var/cache/apk/*
|
|
||||||
|
|
||||||
COPY --from=builder /app/bin/alist ./
|
COPY --from=builder /app/bin/alist ./
|
||||||
COPY entrypoint.sh /entrypoint.sh
|
COPY entrypoint.sh /entrypoint.sh
|
||||||
RUN chmod +x /opt/alist/alist && \
|
RUN apk add --no-cache bash ca-certificates su-exec tzdata; \
|
||||||
chmod +x /entrypoint.sh && /entrypoint.sh version
|
chmod +x /entrypoint.sh
|
||||||
|
ENV PUID=0 PGID=0 UMASK=022
|
||||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
|
||||||
VOLUME /opt/alist/data/
|
|
||||||
EXPOSE 5244 5245
|
EXPOSE 5244 5245
|
||||||
CMD [ "/entrypoint.sh" ]
|
CMD [ "/entrypoint.sh" ]
|
||||||
|
@ -1,35 +0,0 @@
|
|||||||
FROM alpine:edge
|
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
ARG INSTALL_FFMPEG=false
|
|
||||||
ARG INSTALL_ARIA2=false
|
|
||||||
LABEL MAINTAINER="i@nn.ci"
|
|
||||||
|
|
||||||
WORKDIR /opt/alist/
|
|
||||||
|
|
||||||
RUN apk update && \
|
|
||||||
apk upgrade --no-cache && \
|
|
||||||
apk add --no-cache bash ca-certificates su-exec tzdata; \
|
|
||||||
[ "$INSTALL_FFMPEG" = "true" ] && apk add --no-cache ffmpeg; \
|
|
||||||
[ "$INSTALL_ARIA2" = "true" ] && apk add --no-cache curl aria2 && \
|
|
||||||
mkdir -p /opt/aria2/.aria2 && \
|
|
||||||
wget https://github.com/P3TERX/aria2.conf/archive/refs/heads/master.tar.gz -O /tmp/aria-conf.tar.gz && \
|
|
||||||
tar -zxvf /tmp/aria-conf.tar.gz -C /opt/aria2/.aria2 --strip-components=1 && rm -f /tmp/aria-conf.tar.gz && \
|
|
||||||
sed -i 's|rpc-secret|#rpc-secret|g' /opt/aria2/.aria2/aria2.conf && \
|
|
||||||
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/aria2.conf && \
|
|
||||||
sed -i 's|/root/.aria2|/opt/aria2/.aria2|g' /opt/aria2/.aria2/script.conf && \
|
|
||||||
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/aria2.conf && \
|
|
||||||
sed -i 's|/root|/opt/aria2|g' /opt/aria2/.aria2/script.conf && \
|
|
||||||
touch /opt/aria2/.aria2/aria2.session && \
|
|
||||||
/opt/aria2/.aria2/tracker.sh ; \
|
|
||||||
rm -rf /var/cache/apk/*
|
|
||||||
|
|
||||||
COPY /build/${TARGETPLATFORM}/alist ./
|
|
||||||
COPY entrypoint.sh /entrypoint.sh
|
|
||||||
RUN chmod +x /opt/alist/alist && \
|
|
||||||
chmod +x /entrypoint.sh && /entrypoint.sh version
|
|
||||||
|
|
||||||
ENV PUID=0 PGID=0 UMASK=022 RUN_ARIA2=${INSTALL_ARIA2}
|
|
||||||
VOLUME /opt/alist/data/
|
|
||||||
EXPOSE 5244 5245
|
|
||||||
CMD [ "/entrypoint.sh" ]
|
|
36
README.md
36
README.md
@ -1,17 +1,17 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
<a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||||
<p><em>🗂️A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
|
<p><em>🗂️A file list program that supports multiple storages, powered by Gin and Solidjs.</em></p>
|
||||||
<div>
|
<div>
|
||||||
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
||||||
<img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
|
<img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/blob/main/LICENSE">
|
<a href="https://github.com/Xhofe/alist/blob/main/LICENSE">
|
||||||
<img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
|
<img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/actions?query=workflow%3ABuild">
|
<a href="https://github.com/Xhofe/alist/actions?query=workflow%3ABuild">
|
||||||
<img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
|
<img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/releases">
|
<a href="https://github.com/Xhofe/alist/releases">
|
||||||
<img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
|
<img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
|
||||||
</a>
|
</a>
|
||||||
<a title="Crowdin" target="_blank" href="https://crwd.in/alist">
|
<a title="Crowdin" target="_blank" href="https://crwd.in/alist">
|
||||||
@ -19,13 +19,13 @@
|
|||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
<a href="https://github.com/alist-org/alist/discussions">
|
<a href="https://github.com/Xhofe/alist/discussions">
|
||||||
<img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
|
<img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://discord.gg/F4ymsH4xv2">
|
<a href="https://discord.gg/F4ymsH4xv2">
|
||||||
<img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
|
<img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/releases">
|
<a href="https://github.com/Xhofe/alist/releases">
|
||||||
<img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
|
<img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/xhofe/alist">
|
<a href="https://hub.docker.com/r/xhofe/alist">
|
||||||
@ -39,13 +39,13 @@
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
|
English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
- [x] Multiple storages
|
- [x] Multiple storages
|
||||||
- [x] Local storage
|
- [x] Local storage
|
||||||
- [x] [Aliyundrive](https://www.alipan.com/)
|
- [x] [Aliyundrive](https://www.aliyundrive.com/)
|
||||||
- [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
|
- [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
|
||||||
- [x] [189cloud](https://cloud.189.cn) (Personal, Family)
|
- [x] [189cloud](https://cloud.189.cn) (Personal, Family)
|
||||||
- [x] [GoogleDrive](https://drive.google.com/)
|
- [x] [GoogleDrive](https://drive.google.com/)
|
||||||
@ -58,7 +58,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
|||||||
- [x] WebDav(Support OneDrive/SharePoint without API)
|
- [x] WebDav(Support OneDrive/SharePoint without API)
|
||||||
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
|
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
|
||||||
- [x] [Mediatrack](https://www.mediatrack.cn/)
|
- [x] [Mediatrack](https://www.mediatrack.cn/)
|
||||||
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
|
- [x] [139yun](https://yun.139.com/) (Personal, Family)
|
||||||
- [x] [YandexDisk](https://disk.yandex.com/)
|
- [x] [YandexDisk](https://disk.yandex.com/)
|
||||||
- [x] [BaiduNetdisk](http://pan.baidu.com/)
|
- [x] [BaiduNetdisk](http://pan.baidu.com/)
|
||||||
- [x] [Terabox](https://www.terabox.com/main)
|
- [x] [Terabox](https://www.terabox.com/main)
|
||||||
@ -66,8 +66,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
|||||||
- [x] [Quark](https://pan.quark.cn)
|
- [x] [Quark](https://pan.quark.cn)
|
||||||
- [x] [Thunder](https://pan.xunlei.com)
|
- [x] [Thunder](https://pan.xunlei.com)
|
||||||
- [x] [Lanzou](https://www.lanzou.com/)
|
- [x] [Lanzou](https://www.lanzou.com/)
|
||||||
- [x] [ILanzou](https://www.ilanzou.com/)
|
- [x] [Aliyundrive share](https://www.aliyundrive.com/)
|
||||||
- [x] [Aliyundrive share](https://www.alipan.com/)
|
|
||||||
- [x] [Google photo](https://photos.google.com/)
|
- [x] [Google photo](https://photos.google.com/)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [Baidu photo](https://photo.baidu.com/)
|
- [x] [Baidu photo](https://photo.baidu.com/)
|
||||||
@ -75,9 +74,6 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
|||||||
- [x] [115](https://115.com/)
|
- [x] [115](https://115.com/)
|
||||||
- [X] Cloudreve
|
- [X] Cloudreve
|
||||||
- [x] [Dropbox](https://www.dropbox.com/)
|
- [x] [Dropbox](https://www.dropbox.com/)
|
||||||
- [x] [FeijiPan](https://www.feijipan.com/)
|
|
||||||
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
|
||||||
- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
|
|
||||||
- [x] Easy to deploy and out-of-the-box
|
- [x] Easy to deploy and out-of-the-box
|
||||||
- [x] File preview (PDF, markdown, code, plain text, ...)
|
- [x] File preview (PDF, markdown, code, plain text, ...)
|
||||||
- [x] Image preview in gallery mode
|
- [x] Image preview in gallery mode
|
||||||
@ -99,7 +95,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
|||||||
|
|
||||||
## Document
|
## Document
|
||||||
|
|
||||||
<https://alistgo.com/>
|
<https://alist.nn.ci/>
|
||||||
|
|
||||||
## Demo
|
## Demo
|
||||||
|
|
||||||
@ -107,7 +103,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
|
|||||||
|
|
||||||
## Discussion
|
## Discussion
|
||||||
|
|
||||||
Please go to our [discussion forum](https://github.com/alist-org/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**
|
Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**
|
||||||
|
|
||||||
## Sponsor
|
## Sponsor
|
||||||
|
|
||||||
@ -116,9 +112,9 @@ https://alist.nn.ci/guide/sponsor.html
|
|||||||
|
|
||||||
### Special sponsors
|
### Special sponsors
|
||||||
|
|
||||||
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
|
- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
|
||||||
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
|
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
|
||||||
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
|
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
|
||||||
|
|
||||||
## Contributors
|
## Contributors
|
||||||
|
|
||||||
@ -139,4 +135,4 @@ The `AList` is open-source software licensed under the AGPL-3.0 license.
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
> [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
|
> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/Xhofe) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
|
||||||
|
31
README_cn.md
31
README_cn.md
@ -1,17 +1,17 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
<a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||||
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
|
<p><em>🗂一个支持多存储的文件列表程序,使用 Gin 和 Solidjs。</em></p>
|
||||||
<div>
|
<div>
|
||||||
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
||||||
<img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
|
<img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/blob/main/LICENSE">
|
<a href="https://github.com/Xhofe/alist/blob/main/LICENSE">
|
||||||
<img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
|
<img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/actions?query=workflow%3ABuild">
|
<a href="https://github.com/Xhofe/alist/actions?query=workflow%3ABuild">
|
||||||
<img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
|
<img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/releases">
|
<a href="https://github.com/Xhofe/alist/releases">
|
||||||
<img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
|
<img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
|
||||||
</a>
|
</a>
|
||||||
<a title="Crowdin" target="_blank" href="https://crwd.in/alist">
|
<a title="Crowdin" target="_blank" href="https://crwd.in/alist">
|
||||||
@ -19,13 +19,13 @@
|
|||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
<a href="https://github.com/alist-org/alist/discussions">
|
<a href="https://github.com/Xhofe/alist/discussions">
|
||||||
<img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
|
<img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://discord.gg/F4ymsH4xv2">
|
<a href="https://discord.gg/F4ymsH4xv2">
|
||||||
<img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
|
<img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/releases">
|
<a href="https://github.com/Xhofe/alist/releases">
|
||||||
<img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
|
<img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/xhofe/alist">
|
<a href="https://hub.docker.com/r/xhofe/alist">
|
||||||
@ -45,7 +45,7 @@
|
|||||||
|
|
||||||
- [x] 多种存储
|
- [x] 多种存储
|
||||||
- [x] 本地存储
|
- [x] 本地存储
|
||||||
- [x] [阿里云盘](https://www.alipan.com/)
|
- [x] [阿里云盘](https://www.aliyundrive.com/)
|
||||||
- [x] OneDrive / Sharepoint([国际版](https://www.office.com/), [世纪互联](https://portal.partner.microsoftonline.cn),de,us)
|
- [x] OneDrive / Sharepoint([国际版](https://www.office.com/), [世纪互联](https://portal.partner.microsoftonline.cn),de,us)
|
||||||
- [x] [天翼云盘](https://cloud.189.cn) (个人云, 家庭云)
|
- [x] [天翼云盘](https://cloud.189.cn) (个人云, 家庭云)
|
||||||
- [x] [GoogleDrive](https://drive.google.com/)
|
- [x] [GoogleDrive](https://drive.google.com/)
|
||||||
@ -58,15 +58,14 @@
|
|||||||
- [x] WebDav(支持无API的OneDrive/SharePoint)
|
- [x] WebDav(支持无API的OneDrive/SharePoint)
|
||||||
- [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
|
- [x] Teambition([中国](https://www.teambition.com/ ),[国际](https://us.teambition.com/ ))
|
||||||
- [x] [分秒帧](https://www.mediatrack.cn/)
|
- [x] [分秒帧](https://www.mediatrack.cn/)
|
||||||
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云,共享群组)
|
- [x] [和彩云](https://yun.139.com/) (个人云, 家庭云)
|
||||||
- [x] [Yandex.Disk](https://disk.yandex.com/)
|
- [x] [Yandex.Disk](https://disk.yandex.com/)
|
||||||
- [x] [百度网盘](http://pan.baidu.com/)
|
- [x] [百度网盘](http://pan.baidu.com/)
|
||||||
- [x] [UC网盘](https://drive.uc.cn)
|
- [x] [UC网盘](https://drive.uc.cn)
|
||||||
- [x] [夸克网盘](https://pan.quark.cn)
|
- [x] [夸克网盘](https://pan.quark.cn)
|
||||||
- [x] [迅雷网盘](https://pan.xunlei.com)
|
- [x] [迅雷网盘](https://pan.xunlei.com)
|
||||||
- [x] [蓝奏云](https://www.lanzou.com/)
|
- [x] [蓝奏云](https://www.lanzou.com/)
|
||||||
- [x] [蓝奏云优享版](https://www.ilanzou.com/)
|
- [x] [阿里云盘分享](https://www.aliyundrive.com/)
|
||||||
- [x] [阿里云盘分享](https://www.alipan.com/)
|
|
||||||
- [x] [谷歌相册](https://photos.google.com/)
|
- [x] [谷歌相册](https://photos.google.com/)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [一刻相册](https://photo.baidu.com/)
|
- [x] [一刻相册](https://photo.baidu.com/)
|
||||||
@ -74,8 +73,6 @@
|
|||||||
- [x] [115](https://115.com/)
|
- [x] [115](https://115.com/)
|
||||||
- [X] Cloudreve
|
- [X] Cloudreve
|
||||||
- [x] [Dropbox](https://www.dropbox.com/)
|
- [x] [Dropbox](https://www.dropbox.com/)
|
||||||
- [x] [飞机盘](https://www.feijipan.com/)
|
|
||||||
- [x] [多吉云](https://www.dogecloud.com/product/oss)
|
|
||||||
- [x] 部署方便,开箱即用
|
- [x] 部署方便,开箱即用
|
||||||
- [x] 文件预览(PDF、markdown、代码、纯文本……)
|
- [x] 文件预览(PDF、markdown、代码、纯文本……)
|
||||||
- [x] 画廊模式下的图像预览
|
- [x] 画廊模式下的图像预览
|
||||||
@ -105,7 +102,7 @@
|
|||||||
|
|
||||||
## 讨论
|
## 讨论
|
||||||
|
|
||||||
一般问题请到[讨论论坛](https://github.com/alist-org/alist/discussions) ,**issue仅针对错误报告和功能请求。**
|
一般问题请到[讨论论坛](https://github.com/Xhofe/alist/discussions) ,**issue仅针对错误报告和功能请求。**
|
||||||
|
|
||||||
## 赞助
|
## 赞助
|
||||||
|
|
||||||
@ -113,9 +110,9 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我
|
|||||||
|
|
||||||
### 特别赞助
|
### 特别赞助
|
||||||
|
|
||||||
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - 苹果生态下优雅的网盘视频播放器,iPhone,iPad,Mac,Apple TV全平台支持。
|
- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (国内API服务器赞助)
|
||||||
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
|
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
|
||||||
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
|
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
|
||||||
|
|
||||||
## 贡献者
|
## 贡献者
|
||||||
|
|
||||||
@ -136,4 +133,4 @@ Thanks goes to these wonderful people:
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
> [@博客](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@Telegram群](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
|
> [@博客](https://nn.ci/) · [@GitHub](https://github.com/Xhofe) · [@Telegram群](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
|
||||||
|
31
README_ja.md
31
README_ja.md
@ -1,17 +1,17 @@
|
|||||||
<div align="center">
|
<div align="center">
|
||||||
<a href="https://alist.nn.ci"><img width="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
<a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
|
||||||
<p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
|
<p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
|
||||||
<div>
|
<div>
|
||||||
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
<a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
|
||||||
<img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
|
<img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/blob/main/LICENSE">
|
<a href="https://github.com/Xhofe/alist/blob/main/LICENSE">
|
||||||
<img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
|
<img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/actions?query=workflow%3ABuild">
|
<a href="https://github.com/Xhofe/alist/actions?query=workflow%3ABuild">
|
||||||
<img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
|
<img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/releases">
|
<a href="https://github.com/Xhofe/alist/releases">
|
||||||
<img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
|
<img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
|
||||||
</a>
|
</a>
|
||||||
<a title="Crowdin" target="_blank" href="https://crwd.in/alist">
|
<a title="Crowdin" target="_blank" href="https://crwd.in/alist">
|
||||||
@ -19,13 +19,13 @@
|
|||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
<a href="https://github.com/alist-org/alist/discussions">
|
<a href="https://github.com/Xhofe/alist/discussions">
|
||||||
<img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
|
<img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://discord.gg/F4ymsH4xv2">
|
<a href="https://discord.gg/F4ymsH4xv2">
|
||||||
<img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
|
<img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://github.com/alist-org/alist/releases">
|
<a href="https://github.com/Xhofe/alist/releases">
|
||||||
<img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
|
<img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://hub.docker.com/r/xhofe/alist">
|
<a href="https://hub.docker.com/r/xhofe/alist">
|
||||||
@ -45,7 +45,7 @@
|
|||||||
|
|
||||||
- [x] マルチストレージ
|
- [x] マルチストレージ
|
||||||
- [x] ローカルストレージ
|
- [x] ローカルストレージ
|
||||||
- [x] [Aliyundrive](https://www.alipan.com/)
|
- [x] [Aliyundrive](https://www.aliyundrive.com/)
|
||||||
- [x] OneDrive / Sharepoint ([グローバル](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
|
- [x] OneDrive / Sharepoint ([グローバル](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
|
||||||
- [x] [189cloud](https://cloud.189.cn) (Personal, Family)
|
- [x] [189cloud](https://cloud.189.cn) (Personal, Family)
|
||||||
- [x] [GoogleDrive](https://drive.google.com/)
|
- [x] [GoogleDrive](https://drive.google.com/)
|
||||||
@ -58,7 +58,7 @@
|
|||||||
- [x] WebDav(Support OneDrive/SharePoint without API)
|
- [x] WebDav(Support OneDrive/SharePoint without API)
|
||||||
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
|
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
|
||||||
- [x] [Mediatrack](https://www.mediatrack.cn/)
|
- [x] [Mediatrack](https://www.mediatrack.cn/)
|
||||||
- [x] [139yun](https://yun.139.com/) (Personal, Family, Group)
|
- [x] [139yun](https://yun.139.com/) (Personal, Family)
|
||||||
- [x] [YandexDisk](https://disk.yandex.com/)
|
- [x] [YandexDisk](https://disk.yandex.com/)
|
||||||
- [x] [BaiduNetdisk](http://pan.baidu.com/)
|
- [x] [BaiduNetdisk](http://pan.baidu.com/)
|
||||||
- [x] [Terabox](https://www.terabox.com/main)
|
- [x] [Terabox](https://www.terabox.com/main)
|
||||||
@ -66,8 +66,7 @@
|
|||||||
- [x] [Quark](https://pan.quark.cn)
|
- [x] [Quark](https://pan.quark.cn)
|
||||||
- [x] [Thunder](https://pan.xunlei.com)
|
- [x] [Thunder](https://pan.xunlei.com)
|
||||||
- [x] [Lanzou](https://www.lanzou.com/)
|
- [x] [Lanzou](https://www.lanzou.com/)
|
||||||
- [x] [ILanzou](https://www.ilanzou.com/)
|
- [x] [Aliyundrive share](https://www.aliyundrive.com/)
|
||||||
- [x] [Aliyundrive share](https://www.alipan.com/)
|
|
||||||
- [x] [Google photo](https://photos.google.com/)
|
- [x] [Google photo](https://photos.google.com/)
|
||||||
- [x] [Mega.nz](https://mega.nz)
|
- [x] [Mega.nz](https://mega.nz)
|
||||||
- [x] [Baidu photo](https://photo.baidu.com/)
|
- [x] [Baidu photo](https://photo.baidu.com/)
|
||||||
@ -75,8 +74,6 @@
|
|||||||
- [x] [115](https://115.com/)
|
- [x] [115](https://115.com/)
|
||||||
- [X] Cloudreve
|
- [X] Cloudreve
|
||||||
- [x] [Dropbox](https://www.dropbox.com/)
|
- [x] [Dropbox](https://www.dropbox.com/)
|
||||||
- [x] [FeijiPan](https://www.feijipan.com/)
|
|
||||||
- [x] [dogecloud](https://www.dogecloud.com/product/oss)
|
|
||||||
- [x] デプロイが簡単で、すぐに使える
|
- [x] デプロイが簡単で、すぐに使える
|
||||||
- [x] ファイルプレビュー (PDF, マークダウン, コード, プレーンテキスト, ...)
|
- [x] ファイルプレビュー (PDF, マークダウン, コード, プレーンテキスト, ...)
|
||||||
- [x] ギャラリーモードでの画像プレビュー
|
- [x] ギャラリーモードでの画像プレビュー
|
||||||
@ -106,7 +103,7 @@
|
|||||||
|
|
||||||
## ディスカッション
|
## ディスカッション
|
||||||
|
|
||||||
一般的なご質問は[ディスカッションフォーラム](https://github.com/alist-org/alist/discussions)をご利用ください。**問題はバグレポートと機能リクエストのみです。**
|
一般的なご質問は[ディスカッションフォーラム](https://github.com/Xhofe/alist/discussions)をご利用ください。**問題はバグレポートと機能リクエストのみです。**
|
||||||
|
|
||||||
## スポンサー
|
## スポンサー
|
||||||
|
|
||||||
@ -115,9 +112,9 @@ https://alist.nn.ci/guide/sponsor.html
|
|||||||
|
|
||||||
### スペシャルスポンサー
|
### スペシャルスポンサー
|
||||||
|
|
||||||
- [VidHub](https://apps.apple.com/app/apple-store/id1659622164?pt=118612019&ct=alist&mt=8) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
|
- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
|
||||||
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
|
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
|
||||||
- [找资源](http://zhaoziyuan2.cc/) - 阿里云盘资源搜索引擎
|
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
|
||||||
|
|
||||||
## コントリビューター
|
## コントリビューター
|
||||||
|
|
||||||
@ -138,4 +135,4 @@ https://alist.nn.ci/guide/sponsor.html
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/alist-org) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
|
> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/Xhofe) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
|
||||||
|
142
build.sh
142
build.sh
@ -1,16 +1,13 @@
|
|||||||
appName="alist"
|
appName="alist"
|
||||||
builtAt="$(date +'%F %T %z')"
|
builtAt="$(date +'%F %T %z')"
|
||||||
|
goVersion=$(go version | sed 's/go version //')
|
||||||
gitAuthor="Xhofe <i@nn.ci>"
|
gitAuthor="Xhofe <i@nn.ci>"
|
||||||
gitCommit=$(git log --pretty=format:"%h" -1)
|
gitCommit=$(git log --pretty=format:"%h" -1)
|
||||||
|
|
||||||
if [ "$1" = "dev" ]; then
|
if [ "$1" = "dev" ]; then
|
||||||
version="dev"
|
version="dev"
|
||||||
webVersion="dev"
|
webVersion="dev"
|
||||||
elif [ "$1" = "beta" ]; then
|
|
||||||
version="beta"
|
|
||||||
webVersion="dev"
|
|
||||||
else
|
else
|
||||||
git tag -d beta
|
|
||||||
version=$(git describe --abbrev=0 --tags)
|
version=$(git describe --abbrev=0 --tags)
|
||||||
webVersion=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist-web/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
|
webVersion=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist-web/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
|
||||||
fi
|
fi
|
||||||
@ -21,6 +18,7 @@ echo "frontend version: $webVersion"
|
|||||||
ldflags="\
|
ldflags="\
|
||||||
-w -s \
|
-w -s \
|
||||||
-X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
|
-X 'github.com/alist-org/alist/v3/internal/conf.BuiltAt=$builtAt' \
|
||||||
|
-X 'github.com/alist-org/alist/v3/internal/conf.GoVersion=$goVersion' \
|
||||||
-X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
|
-X 'github.com/alist-org/alist/v3/internal/conf.GitAuthor=$gitAuthor' \
|
||||||
-X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
|
-X 'github.com/alist-org/alist/v3/internal/conf.GitCommit=$gitCommit' \
|
||||||
-X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \
|
-X 'github.com/alist-org/alist/v3/internal/conf.Version=$version' \
|
||||||
@ -51,7 +49,6 @@ BuildWinArm64() {
|
|||||||
export GOARCH=arm64
|
export GOARCH=arm64
|
||||||
export CC=$(pwd)/wrapper/zcc-arm64
|
export CC=$(pwd)/wrapper/zcc-arm64
|
||||||
export CXX=$(pwd)/wrapper/zcxx-arm64
|
export CXX=$(pwd)/wrapper/zcxx-arm64
|
||||||
export CGO_ENABLED=1
|
|
||||||
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
|
go build -o "$1" -ldflags="$ldflags" -tags=jsoniter .
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -78,7 +75,7 @@ BuildDev() {
|
|||||||
export CGO_ENABLED=1
|
export CGO_ENABLED=1
|
||||||
go build -o ./dist/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
|
go build -o ./dist/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
|
||||||
done
|
done
|
||||||
xgo -targets=windows/amd64,darwin/amd64,darwin/arm64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
|
xgo -targets=windows/amd64,darwin/amd64 -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
|
||||||
mv alist-* dist
|
mv alist-* dist
|
||||||
cd dist
|
cd dist
|
||||||
cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
|
cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
|
||||||
@ -91,57 +88,6 @@ BuildDocker() {
|
|||||||
go build -o ./bin/alist -ldflags="$ldflags" -tags=jsoniter .
|
go build -o ./bin/alist -ldflags="$ldflags" -tags=jsoniter .
|
||||||
}
|
}
|
||||||
|
|
||||||
PrepareBuildDockerMusl() {
|
|
||||||
mkdir -p build/musl-libs
|
|
||||||
BASE="https://musl.cc/"
|
|
||||||
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross i486-linux-musl-cross s390x-linux-musl-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross riscv64-linux-musl-cross powerpc64le-linux-musl-cross)
|
|
||||||
for i in "${FILES[@]}"; do
|
|
||||||
url="${BASE}${i}.tgz"
|
|
||||||
lib_tgz="build/${i}.tgz"
|
|
||||||
curl -L -o "${lib_tgz}" "${url}"
|
|
||||||
tar xf "${lib_tgz}" --strip-components 1 -C build/musl-libs
|
|
||||||
rm -f "${lib_tgz}"
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
BuildDockerMultiplatform() {
|
|
||||||
go mod download
|
|
||||||
|
|
||||||
# run PrepareBuildDockerMusl before build
|
|
||||||
export PATH=$PATH:$PWD/build/musl-libs/bin
|
|
||||||
|
|
||||||
docker_lflags="--extldflags '-static -fpic' $ldflags"
|
|
||||||
export CGO_ENABLED=1
|
|
||||||
|
|
||||||
OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-s390x linux-riscv64 linux-ppc64le)
|
|
||||||
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc s390x-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc)
|
|
||||||
for i in "${!OS_ARCHES[@]}"; do
|
|
||||||
os_arch=${OS_ARCHES[$i]}
|
|
||||||
cgo_cc=${CGO_ARGS[$i]}
|
|
||||||
os=${os_arch%%-*}
|
|
||||||
arch=${os_arch##*-}
|
|
||||||
export GOOS=$os
|
|
||||||
export GOARCH=$arch
|
|
||||||
export CC=${cgo_cc}
|
|
||||||
echo "building for $os_arch"
|
|
||||||
go build -o build/$os/$arch/alist -ldflags="$docker_lflags" -tags=jsoniter .
|
|
||||||
done
|
|
||||||
|
|
||||||
DOCKER_ARM_ARCHES=(linux-arm/v6 linux-arm/v7)
|
|
||||||
CGO_ARGS=(armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc)
|
|
||||||
GO_ARM=(6 7)
|
|
||||||
export GOOS=linux
|
|
||||||
export GOARCH=arm
|
|
||||||
for i in "${!DOCKER_ARM_ARCHES[@]}"; do
|
|
||||||
docker_arch=${DOCKER_ARM_ARCHES[$i]}
|
|
||||||
cgo_cc=${CGO_ARGS[$i]}
|
|
||||||
export GOARM=${GO_ARM[$i]}
|
|
||||||
export CC=${cgo_cc}
|
|
||||||
echo "building for $docker_arch"
|
|
||||||
go build -o build/${docker_arch%%-*}/${docker_arch##*-}/alist -ldflags="$docker_lflags" -tags=jsoniter .
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
BuildRelease() {
|
BuildRelease() {
|
||||||
rm -rf .git/
|
rm -rf .git/
|
||||||
mkdir -p "build"
|
mkdir -p "build"
|
||||||
@ -213,50 +159,6 @@ BuildReleaseLinuxMuslArm() {
|
|||||||
done
|
done
|
||||||
}
|
}
|
||||||
|
|
||||||
BuildReleaseAndroid() {
|
|
||||||
rm -rf .git/
|
|
||||||
mkdir -p "build"
|
|
||||||
wget https://dl.google.com/android/repository/android-ndk-r26b-linux.zip
|
|
||||||
unzip android-ndk-r26b-linux.zip
|
|
||||||
rm android-ndk-r26b-linux.zip
|
|
||||||
OS_ARCHES=(amd64 arm64 386 arm)
|
|
||||||
CGO_ARGS=(x86_64-linux-android24-clang aarch64-linux-android24-clang i686-linux-android24-clang armv7a-linux-androideabi24-clang)
|
|
||||||
for i in "${!OS_ARCHES[@]}"; do
|
|
||||||
os_arch=${OS_ARCHES[$i]}
|
|
||||||
cgo_cc=$(realpath android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/${CGO_ARGS[$i]})
|
|
||||||
echo building for android-${os_arch}
|
|
||||||
export GOOS=android
|
|
||||||
export GOARCH=${os_arch##*-}
|
|
||||||
export CC=${cgo_cc}
|
|
||||||
export CGO_ENABLED=1
|
|
||||||
go build -o ./build/$appName-android-$os_arch -ldflags="$ldflags" -tags=jsoniter .
|
|
||||||
android-ndk-r26b/toolchains/llvm/prebuilt/linux-x86_64/bin/llvm-strip ./build/$appName-android-$os_arch
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
BuildReleaseFreeBSD() {
|
|
||||||
rm -rf .git/
|
|
||||||
mkdir -p "build/freebsd"
|
|
||||||
OS_ARCHES=(amd64 arm64 i386)
|
|
||||||
GO_ARCHES=(amd64 arm64 386)
|
|
||||||
CGO_ARGS=(x86_64-unknown-freebsd14.1 aarch64-unknown-freebsd14.1 i386-unknown-freebsd14.1)
|
|
||||||
for i in "${!OS_ARCHES[@]}"; do
|
|
||||||
os_arch=${OS_ARCHES[$i]}
|
|
||||||
cgo_cc="clang --target=${CGO_ARGS[$i]} --sysroot=/opt/freebsd/${os_arch}"
|
|
||||||
echo building for freebsd-${os_arch}
|
|
||||||
sudo mkdir -p "/opt/freebsd/${os_arch}"
|
|
||||||
wget -q https://download.freebsd.org/releases/${os_arch}/14.1-RELEASE/base.txz
|
|
||||||
sudo tar -xf ./base.txz -C /opt/freebsd/${os_arch}
|
|
||||||
rm base.txz
|
|
||||||
export GOOS=freebsd
|
|
||||||
export GOARCH=${GO_ARCHES[$i]}
|
|
||||||
export CC=${cgo_cc}
|
|
||||||
export CGO_ENABLED=1
|
|
||||||
export CGO_LDFLAGS="-fuse-ld=lld"
|
|
||||||
go build -o ./build/$appName-freebsd-$os_arch -ldflags="$ldflags" -tags=jsoniter .
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
MakeRelease() {
|
MakeRelease() {
|
||||||
cd build
|
cd build
|
||||||
mkdir compress
|
mkdir compress
|
||||||
@ -264,22 +166,12 @@ MakeRelease() {
|
|||||||
cp "$i" alist
|
cp "$i" alist
|
||||||
tar -czvf compress/"$i".tar.gz alist
|
tar -czvf compress/"$i".tar.gz alist
|
||||||
rm -f alist
|
rm -f alist
|
||||||
done
|
|
||||||
for i in $(find . -type f -name "$appName-android-*"); do
|
|
||||||
cp "$i" alist
|
|
||||||
tar -czvf compress/"$i".tar.gz alist
|
|
||||||
rm -f alist
|
|
||||||
done
|
done
|
||||||
for i in $(find . -type f -name "$appName-darwin-*"); do
|
for i in $(find . -type f -name "$appName-darwin-*"); do
|
||||||
cp "$i" alist
|
cp "$i" alist
|
||||||
tar -czvf compress/"$i".tar.gz alist
|
tar -czvf compress/"$i".tar.gz alist
|
||||||
rm -f alist
|
rm -f alist
|
||||||
done
|
done
|
||||||
for i in $(find . -type f -name "$appName-freebsd-*"); do
|
|
||||||
cp "$i" alist
|
|
||||||
tar -czvf compress/"$i".tar.gz alist
|
|
||||||
rm -f alist
|
|
||||||
done
|
|
||||||
for i in $(find . -type f -name "$appName-windows-*"); do
|
for i in $(find . -type f -name "$appName-windows-*"); do
|
||||||
cp "$i" alist.exe
|
cp "$i" alist.exe
|
||||||
zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe
|
zip compress/$(echo $i | sed 's/\.[^.]*$//').zip alist.exe
|
||||||
@ -295,47 +187,23 @@ if [ "$1" = "dev" ]; then
|
|||||||
FetchWebDev
|
FetchWebDev
|
||||||
if [ "$2" = "docker" ]; then
|
if [ "$2" = "docker" ]; then
|
||||||
BuildDocker
|
BuildDocker
|
||||||
elif [ "$2" = "docker-multiplatform" ]; then
|
|
||||||
BuildDockerMultiplatform
|
|
||||||
elif [ "$2" = "web" ]; then
|
|
||||||
echo "web only"
|
|
||||||
else
|
else
|
||||||
BuildDev
|
BuildDev
|
||||||
fi
|
fi
|
||||||
elif [ "$1" = "release" -o "$1" = "beta" ]; then
|
elif [ "$1" = "release" ]; then
|
||||||
if [ "$1" = "beta" ]; then
|
FetchWebRelease
|
||||||
FetchWebDev
|
|
||||||
else
|
|
||||||
FetchWebRelease
|
|
||||||
fi
|
|
||||||
if [ "$2" = "docker" ]; then
|
if [ "$2" = "docker" ]; then
|
||||||
BuildDocker
|
BuildDocker
|
||||||
elif [ "$2" = "docker-multiplatform" ]; then
|
|
||||||
BuildDockerMultiplatform
|
|
||||||
elif [ "$2" = "linux_musl_arm" ]; then
|
elif [ "$2" = "linux_musl_arm" ]; then
|
||||||
BuildReleaseLinuxMuslArm
|
BuildReleaseLinuxMuslArm
|
||||||
MakeRelease "md5-linux-musl-arm.txt"
|
MakeRelease "md5-linux-musl-arm.txt"
|
||||||
elif [ "$2" = "linux_musl" ]; then
|
elif [ "$2" = "linux_musl" ]; then
|
||||||
BuildReleaseLinuxMusl
|
BuildReleaseLinuxMusl
|
||||||
MakeRelease "md5-linux-musl.txt"
|
MakeRelease "md5-linux-musl.txt"
|
||||||
elif [ "$2" = "android" ]; then
|
|
||||||
BuildReleaseAndroid
|
|
||||||
MakeRelease "md5-android.txt"
|
|
||||||
elif [ "$2" = "freebsd" ]; then
|
|
||||||
BuildReleaseFreeBSD
|
|
||||||
MakeRelease "md5-freebsd.txt"
|
|
||||||
elif [ "$2" = "web" ]; then
|
|
||||||
echo "web only"
|
|
||||||
else
|
else
|
||||||
BuildRelease
|
BuildRelease
|
||||||
MakeRelease "md5.txt"
|
MakeRelease "md5.txt"
|
||||||
fi
|
fi
|
||||||
elif [ "$1" = "prepare" ]; then
|
|
||||||
if [ "$2" = "docker-multiplatform" ]; then
|
|
||||||
PrepareBuildDockerMusl
|
|
||||||
fi
|
|
||||||
elif [ "$1" = "zip" ]; then
|
|
||||||
MakeRelease "$2".txt
|
|
||||||
else
|
else
|
||||||
echo -e "Parameter error"
|
echo -e "Parameter error"
|
||||||
fi
|
fi
|
||||||
|
@ -17,9 +17,7 @@ func Init() {
|
|||||||
bootstrap.Log()
|
bootstrap.Log()
|
||||||
bootstrap.InitDB()
|
bootstrap.InitDB()
|
||||||
data.InitData()
|
data.InitData()
|
||||||
bootstrap.InitStreamLimit()
|
|
||||||
bootstrap.InitIndex()
|
bootstrap.InitIndex()
|
||||||
bootstrap.InitUpgradePatch()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func Release() {
|
func Release() {
|
||||||
|
54
cmd/kill.go
54
cmd/kill.go
@ -1,54 +0,0 @@
|
|||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
// KillCmd represents the kill command
|
|
||||||
var KillCmd = &cobra.Command{
|
|
||||||
Use: "kill",
|
|
||||||
Short: "Force kill alist server process by daemon/pid file",
|
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
|
||||||
kill()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func kill() {
|
|
||||||
initDaemon()
|
|
||||||
if pid == -1 {
|
|
||||||
log.Info("Seems not have been started. Try use `alist start` to start server.")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
process, err := os.FindProcess(pid)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to find process by pid: %d, reason: %v", pid, process)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err = process.Kill()
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to kill process %d: %v", pid, err)
|
|
||||||
} else {
|
|
||||||
log.Info("killed process: ", pid)
|
|
||||||
}
|
|
||||||
err = os.Remove(pidFile)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to remove pid file")
|
|
||||||
}
|
|
||||||
pid = -1
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
RootCmd.AddCommand(KillCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// stopCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
@ -12,7 +12,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers"
|
_ "github.com/alist-org/alist/v3/drivers"
|
||||||
"github.com/alist-org/alist/v3/internal/bootstrap"
|
|
||||||
"github.com/alist-org/alist/v3/internal/bootstrap/data"
|
"github.com/alist-org/alist/v3/internal/bootstrap/data"
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
@ -138,10 +137,9 @@ var LangCmd = &cobra.Command{
|
|||||||
Use: "lang",
|
Use: "lang",
|
||||||
Short: "Generate language json file",
|
Short: "Generate language json file",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
bootstrap.InitConfig()
|
|
||||||
err := os.MkdirAll("lang", 0777)
|
err := os.MkdirAll("lang", 0777)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Log.Fatalf("failed create folder: %s", err.Error())
|
utils.Log.Fatal("failed create folder: %s", err.Error())
|
||||||
}
|
}
|
||||||
generateDriversJson()
|
generateDriversJson()
|
||||||
generateSettingsJson()
|
generateSettingsJson()
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/alist-org/alist/v3/cmd/flags"
|
"github.com/alist-org/alist/v3/cmd/flags"
|
||||||
_ "github.com/alist-org/alist/v3/drivers"
|
_ "github.com/alist-org/alist/v3/drivers"
|
||||||
_ "github.com/alist-org/alist/v3/internal/archive"
|
|
||||||
_ "github.com/alist-org/alist/v3/internal/offline_download"
|
_ "github.com/alist-org/alist/v3/internal/offline_download"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
@ -2,11 +2,7 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
ftpserver "github.com/KirCute/ftpserverlib-pasvportmap"
|
|
||||||
"github.com/KirCute/sftpd-alist"
|
|
||||||
"github.com/alist-org/alist/v3/internal/fs"
|
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
@ -40,7 +36,6 @@ the address is defined in config file`,
|
|||||||
}
|
}
|
||||||
bootstrap.InitOfflineDownloadTools()
|
bootstrap.InitOfflineDownloadTools()
|
||||||
bootstrap.LoadStorages()
|
bootstrap.LoadStorages()
|
||||||
bootstrap.InitTaskManager()
|
|
||||||
if !flags.Debug && !flags.Dev {
|
if !flags.Debug && !flags.Dev {
|
||||||
gin.SetMode(gin.ReleaseMode)
|
gin.SetMode(gin.ReleaseMode)
|
||||||
}
|
}
|
||||||
@ -54,7 +49,7 @@ the address is defined in config file`,
|
|||||||
httpSrv = &http.Server{Addr: httpBase, Handler: r}
|
httpSrv = &http.Server{Addr: httpBase, Handler: r}
|
||||||
go func() {
|
go func() {
|
||||||
err := httpSrv.ListenAndServe()
|
err := httpSrv.ListenAndServe()
|
||||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
if err != nil && err != http.ErrServerClosed {
|
||||||
utils.Log.Fatalf("failed to start http: %s", err.Error())
|
utils.Log.Fatalf("failed to start http: %s", err.Error())
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@ -65,7 +60,7 @@ the address is defined in config file`,
|
|||||||
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
|
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
|
||||||
go func() {
|
go func() {
|
||||||
err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
|
err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
|
||||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
if err != nil && err != http.ErrServerClosed {
|
||||||
utils.Log.Fatalf("failed to start https: %s", err.Error())
|
utils.Log.Fatalf("failed to start https: %s", err.Error())
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@ -89,68 +84,11 @@ the address is defined in config file`,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
err = unixSrv.Serve(listener)
|
err = unixSrv.Serve(listener)
|
||||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
if err != nil && err != http.ErrServerClosed {
|
||||||
utils.Log.Fatalf("failed to start unix: %s", err.Error())
|
utils.Log.Fatalf("failed to start unix: %s", err.Error())
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
if conf.Conf.S3.Port != -1 && conf.Conf.S3.Enable {
|
|
||||||
s3r := gin.New()
|
|
||||||
s3r.Use(gin.LoggerWithWriter(log.StandardLogger().Out), gin.RecoveryWithWriter(log.StandardLogger().Out))
|
|
||||||
server.InitS3(s3r)
|
|
||||||
s3Base := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.S3.Port)
|
|
||||||
utils.Log.Infof("start S3 server @ %s", s3Base)
|
|
||||||
go func() {
|
|
||||||
var err error
|
|
||||||
if conf.Conf.S3.SSL {
|
|
||||||
httpsSrv = &http.Server{Addr: s3Base, Handler: s3r}
|
|
||||||
err = httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
|
|
||||||
}
|
|
||||||
if !conf.Conf.S3.SSL {
|
|
||||||
httpSrv = &http.Server{Addr: s3Base, Handler: s3r}
|
|
||||||
err = httpSrv.ListenAndServe()
|
|
||||||
}
|
|
||||||
if err != nil && !errors.Is(err, http.ErrServerClosed) {
|
|
||||||
utils.Log.Fatalf("failed to start s3 server: %s", err.Error())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
var ftpDriver *server.FtpMainDriver
|
|
||||||
var ftpServer *ftpserver.FtpServer
|
|
||||||
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable {
|
|
||||||
var err error
|
|
||||||
ftpDriver, err = server.NewMainDriver()
|
|
||||||
if err != nil {
|
|
||||||
utils.Log.Fatalf("failed to start ftp driver: %s", err.Error())
|
|
||||||
} else {
|
|
||||||
utils.Log.Infof("start ftp server on %s", conf.Conf.FTP.Listen)
|
|
||||||
go func() {
|
|
||||||
ftpServer = ftpserver.NewFtpServer(ftpDriver)
|
|
||||||
err = ftpServer.ListenAndServe()
|
|
||||||
if err != nil {
|
|
||||||
utils.Log.Fatalf("problem ftp server listening: %s", err.Error())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var sftpDriver *server.SftpDriver
|
|
||||||
var sftpServer *sftpd.SftpServer
|
|
||||||
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable {
|
|
||||||
var err error
|
|
||||||
sftpDriver, err = server.NewSftpDriver()
|
|
||||||
if err != nil {
|
|
||||||
utils.Log.Fatalf("failed to start sftp driver: %s", err.Error())
|
|
||||||
} else {
|
|
||||||
utils.Log.Infof("start sftp server on %s", conf.Conf.SFTP.Listen)
|
|
||||||
go func() {
|
|
||||||
sftpServer = sftpd.NewSftpServer(sftpDriver)
|
|
||||||
err = sftpServer.RunServer()
|
|
||||||
if err != nil {
|
|
||||||
utils.Log.Fatalf("problem sftp server listening: %s", err.Error())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Wait for interrupt signal to gracefully shutdown the server with
|
// Wait for interrupt signal to gracefully shutdown the server with
|
||||||
// a timeout of 1 second.
|
// a timeout of 1 second.
|
||||||
quit := make(chan os.Signal, 1)
|
quit := make(chan os.Signal, 1)
|
||||||
@ -160,7 +98,6 @@ the address is defined in config file`,
|
|||||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||||
<-quit
|
<-quit
|
||||||
utils.Log.Println("Shutdown server...")
|
utils.Log.Println("Shutdown server...")
|
||||||
fs.ArchiveContentUploadTaskManager.RemoveAll()
|
|
||||||
Release()
|
Release()
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
@ -192,25 +129,6 @@ the address is defined in config file`,
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
if conf.Conf.FTP.Listen != "" && conf.Conf.FTP.Enable && ftpServer != nil && ftpDriver != nil {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
ftpDriver.Stop()
|
|
||||||
if err := ftpServer.Stop(); err != nil {
|
|
||||||
utils.Log.Fatal("FTP server shutdown err: ", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
if conf.Conf.SFTP.Listen != "" && conf.Conf.SFTP.Enable && sftpServer != nil && sftpDriver != nil {
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
if err := sftpServer.Close(); err != nil {
|
|
||||||
utils.Log.Fatal("SFTP server shutdown err: ", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
utils.Log.Println("Server exit")
|
utils.Log.Println("Server exit")
|
||||||
},
|
},
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
//go:build !windows
|
/*
|
||||||
|
Copyright © 2022 NAME HERE <EMAIL ADDRESS>
|
||||||
|
*/
|
||||||
package cmd
|
package cmd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"syscall"
|
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@ -30,11 +30,11 @@ func stop() {
|
|||||||
log.Errorf("failed to find process by pid: %d, reason: %v", pid, process)
|
log.Errorf("failed to find process by pid: %d, reason: %v", pid, process)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
err = process.Signal(syscall.SIGTERM)
|
err = process.Kill()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to terminate process %d: %v", pid, err)
|
log.Errorf("failed to kill process %d: %v", pid, err)
|
||||||
} else {
|
} else {
|
||||||
log.Info("terminated process: ", pid)
|
log.Info("killed process: ", pid)
|
||||||
}
|
}
|
||||||
err = os.Remove(pidFile)
|
err = os.Remove(pidFile)
|
||||||
if err != nil {
|
if err != nil {
|
@ -1,34 +0,0 @@
|
|||||||
//go:build windows
|
|
||||||
|
|
||||||
package cmd
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StopCmd represents the stop command
|
|
||||||
var StopCmd = &cobra.Command{
|
|
||||||
Use: "stop",
|
|
||||||
Short: "Same as the kill command",
|
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
|
||||||
stop()
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func stop() {
|
|
||||||
kill()
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
RootCmd.AddCommand(StopCmd)
|
|
||||||
|
|
||||||
// Here you will define your flags and configuration settings.
|
|
||||||
|
|
||||||
// Cobra supports Persistent Flags which will work for this command
|
|
||||||
// and all subcommands, e.g.:
|
|
||||||
// stopCmd.PersistentFlags().String("foo", "", "A help for foo")
|
|
||||||
|
|
||||||
// Cobra supports local flags which will only run when this command
|
|
||||||
// is called directly, e.g.:
|
|
||||||
// stopCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
|
|
||||||
}
|
|
@ -6,7 +6,6 @@ package cmd
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@ -17,15 +16,14 @@ var VersionCmd = &cobra.Command{
|
|||||||
Use: "version",
|
Use: "version",
|
||||||
Short: "Show current version of AList",
|
Short: "Show current version of AList",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
goVersion := fmt.Sprintf("%s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)
|
|
||||||
|
|
||||||
fmt.Printf(`Built At: %s
|
fmt.Printf(`Built At: %s
|
||||||
Go Version: %s
|
Go Version: %s
|
||||||
Author: %s
|
Author: %s
|
||||||
Commit ID: %s
|
Commit ID: %s
|
||||||
Version: %s
|
Version: %s
|
||||||
WebVersion: %s
|
WebVersion: %s
|
||||||
`, conf.BuiltAt, goVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
|
`,
|
||||||
|
conf.BuiltAt, conf.GoVersion, conf.GitAuthor, conf.GitCommit, conf.Version, conf.WebVersion)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@ -1,43 +0,0 @@
|
|||||||
package _115
|
|
||||||
|
|
||||||
import (
|
|
||||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
md5Salt = "Qclm8MGWUv59TnrR0XPg"
|
|
||||||
appVer = "27.0.5.7"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (d *Pan115) getAppVersion() ([]driver115.AppVersion, error) {
|
|
||||||
result := driver115.VersionResp{}
|
|
||||||
resp, err := base.RestyClient.R().Get(driver115.ApiGetVersion)
|
|
||||||
|
|
||||||
err = driver115.CheckErr(err, &result, resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return result.Data.GetAppVersions(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) getAppVer() string {
|
|
||||||
// todo add some cache?
|
|
||||||
vers, err := d.getAppVersion()
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("[115] get app version failed: %v", err)
|
|
||||||
return appVer
|
|
||||||
}
|
|
||||||
for _, ver := range vers {
|
|
||||||
if ver.AppName == "win" {
|
|
||||||
return ver.Version
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return appVer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) initAppVer() {
|
|
||||||
appVer = d.getAppVer()
|
|
||||||
}
|
|
@ -3,7 +3,6 @@ package _115
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
@ -17,9 +16,8 @@ import (
|
|||||||
type Pan115 struct {
|
type Pan115 struct {
|
||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
client *driver115.Pan115Client
|
client *driver115.Pan115Client
|
||||||
limiter *rate.Limiter
|
limiter *rate.Limiter
|
||||||
appVerOnce sync.Once
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) Config() driver.Config {
|
func (d *Pan115) Config() driver.Config {
|
||||||
@ -31,7 +29,6 @@ func (d *Pan115) GetAddition() driver.Additional {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) Init(ctx context.Context) error {
|
func (d *Pan115) Init(ctx context.Context) error {
|
||||||
d.appVerOnce.Do(d.initAppVer)
|
|
||||||
if d.LimitRate > 0 {
|
if d.LimitRate > 0 {
|
||||||
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
|
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
|
||||||
}
|
}
|
||||||
@ -66,9 +63,8 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
|||||||
if err := d.WaitLimit(ctx); err != nil {
|
if err := d.WaitLimit(ctx); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
userAgent := args.Header.Get("User-Agent")
|
downloadInfo, err := d.client.
|
||||||
downloadInfo, err := d.
|
DownloadWithUA(file.(*FileObj).PickCode, driver115.UA115Browser)
|
||||||
DownloadWithUA(file.(*FileObj).PickCode, userAgent)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -79,60 +75,28 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
|||||||
return link, nil
|
return link, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
if err := d.WaitLimit(ctx); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
|
||||||
result := driver115.MkdirResp{}
|
return err
|
||||||
form := map[string]string{
|
|
||||||
"pid": parentDir.GetID(),
|
|
||||||
"cname": dirName,
|
|
||||||
}
|
}
|
||||||
req := d.client.NewRequest().
|
return nil
|
||||||
SetFormData(form).
|
|
||||||
SetResult(&result).
|
|
||||||
ForceContentType("application/json;charset=UTF-8")
|
|
||||||
|
|
||||||
resp, err := req.Post(driver115.ApiDirAdd)
|
|
||||||
|
|
||||||
err = driver115.CheckErr(err, &result, resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err := d.getNewFile(result.FileID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
if err := d.WaitLimit(ctx); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if err := d.client.Move(dstDir.GetID(), srcObj.GetID()); err != nil {
|
return d.client.Move(dstDir.GetID(), srcObj.GetID())
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err := d.getNewFile(srcObj.GetID())
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
if err := d.WaitLimit(ctx); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if err := d.client.Rename(srcObj.GetID(), newName); err != nil {
|
return d.client.Rename(srcObj.GetID(), newName)
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err := d.getNewFile((srcObj.GetID()))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
@ -149,9 +113,9 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
return d.client.Delete(obj.GetID())
|
return d.client.Delete(obj.GetID())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
if err := d.WaitLimit(ctx); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -160,10 +124,10 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
)
|
)
|
||||||
|
|
||||||
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
|
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
|
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
|
||||||
return nil, driver115.ErrUploadTooLarge
|
return driver115.ErrUploadTooLarge
|
||||||
}
|
}
|
||||||
//if digest, err = d.client.GetDigestResult(stream); err != nil {
|
//if digest, err = d.client.GetDigestResult(stream); err != nil {
|
||||||
// return err
|
// return err
|
||||||
@ -176,22 +140,22 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
}
|
}
|
||||||
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
|
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
preHash, err := utils.HashReader(utils.SHA1, reader)
|
preHash, err := utils.HashReader(utils.SHA1, reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
preHash = strings.ToUpper(preHash)
|
preHash = strings.ToUpper(preHash)
|
||||||
fullHash := stream.GetHash().GetHash(utils.SHA1)
|
fullHash := stream.GetHash().GetHash(utils.SHA1)
|
||||||
if len(fullHash) <= 0 {
|
if len(fullHash) <= 0 {
|
||||||
tmpF, err := stream.CacheFullInTempFile()
|
tmpF, err := stream.CacheFullInTempFile()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
|
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
fullHash = strings.ToUpper(fullHash)
|
fullHash = strings.ToUpper(fullHash)
|
||||||
@ -200,52 +164,21 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
|
|||||||
// note that 115 add timeout for rapid-upload,
|
// note that 115 add timeout for rapid-upload,
|
||||||
// and "sig invalid" err is thrown even when the hash is correct after timeout.
|
// and "sig invalid" err is thrown even when the hash is correct after timeout.
|
||||||
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
|
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if matched, err := fastInfo.Ok(); err != nil {
|
if matched, err := fastInfo.Ok(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
} else if matched {
|
} else if matched {
|
||||||
f, err := d.getNewFileByPickCode(fastInfo.PickCode)
|
return nil
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var uploadResult *UploadResult
|
|
||||||
// 闪传失败,上传
|
// 闪传失败,上传
|
||||||
if stream.GetSize() <= 10*utils.MB { // 文件大小小于10MB,改用普通模式上传
|
if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传
|
||||||
if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
|
return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// 分片上传
|
|
||||||
if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
// 分片上传
|
||||||
|
return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
|
||||||
|
|
||||||
file, err := d.getNewFile(uploadResult.Data.FileID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return file, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) OfflineList(ctx context.Context) ([]*driver115.OfflineTask, error) {
|
|
||||||
resp, err := d.client.ListOfflineTask(0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp.Tasks, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
|
|
||||||
return d.client.AddOfflineTaskURIs(uris, dstDir.GetID(), driver115.WithAppVer(appVer))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) DeleteOfflineTasks(ctx context.Context, hashes []string, deleteFiles bool) error {
|
|
||||||
return d.client.DeleteOfflineTasks(hashes, deleteFiles)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*Pan115)(nil)
|
var _ driver.Driver = (*Pan115)(nil)
|
||||||
|
@ -6,20 +6,19 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Addition struct {
|
type Addition struct {
|
||||||
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
|
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
|
||||||
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
|
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
|
||||||
QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
|
PageSize int64 `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
|
||||||
PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
|
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
|
||||||
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate ([limit]r/1s)"`
|
|
||||||
driver.RootID
|
driver.RootID
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "115 Cloud",
|
Name: "115 Cloud",
|
||||||
DefaultRoot: "0",
|
DefaultRoot: "0",
|
||||||
// OnlyProxy: true,
|
OnlyProxy: true,
|
||||||
// OnlyLocal: true,
|
//OnlyLocal: true,
|
||||||
// NoOverwriteUpload: true,
|
NoOverwriteUpload: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -1,11 +1,10 @@
|
|||||||
package _115
|
package _115
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/SheltonZhu/115driver/pkg/driver"
|
"github.com/SheltonZhu/115driver/pkg/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ model.Obj = (*FileObj)(nil)
|
var _ model.Obj = (*FileObj)(nil)
|
||||||
@ -21,18 +20,3 @@ func (f *FileObj) CreateTime() time.Time {
|
|||||||
func (f *FileObj) GetHash() utils.HashInfo {
|
func (f *FileObj) GetHash() utils.HashInfo {
|
||||||
return utils.NewHashInfo(utils.SHA1, f.Sha1)
|
return utils.NewHashInfo(utils.SHA1, f.Sha1)
|
||||||
}
|
}
|
||||||
|
|
||||||
type UploadResult struct {
|
|
||||||
driver.BasicResp
|
|
||||||
Data struct {
|
|
||||||
PickCode string `json:"pick_code"`
|
|
||||||
FileSize int `json:"file_size"`
|
|
||||||
FileID string `json:"file_id"`
|
|
||||||
ThumbURL string `json:"thumb_url"`
|
|
||||||
Sha1 string `json:"sha1"`
|
|
||||||
Aid int `json:"aid"`
|
|
||||||
FileName string `json:"file_name"`
|
|
||||||
Cid string `json:"cid"`
|
|
||||||
IsVideo int `json:"is_video"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
@ -2,56 +2,51 @@ package _115
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||||
|
"github.com/orzogc/fake115uploader/cipher"
|
||||||
|
"io"
|
||||||
|
"net/url"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
cipher "github.com/SheltonZhu/115driver/pkg/crypto/ec115"
|
"github.com/SheltonZhu/115driver/pkg/driver"
|
||||||
crypto "github.com/SheltonZhu/115driver/pkg/crypto/m115"
|
|
||||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
||||||
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// var UserAgent = driver115.UA115Browser
|
var UserAgent = driver.UA115Desktop
|
||||||
|
|
||||||
func (d *Pan115) login() error {
|
func (d *Pan115) login() error {
|
||||||
var err error
|
var err error
|
||||||
opts := []driver115.Option{
|
opts := []driver.Option{
|
||||||
driver115.UA(d.getUA()),
|
driver.UA(UserAgent),
|
||||||
func(c *driver115.Pan115Client) {
|
func(c *driver.Pan115Client) {
|
||||||
c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
d.client = driver115.New(opts...)
|
d.client = driver.New(opts...)
|
||||||
cr := &driver115.Credential{}
|
cr := &driver.Credential{}
|
||||||
if d.QRCodeToken != "" {
|
if d.Addition.QRCodeToken != "" {
|
||||||
s := &driver115.QRCodeSession{
|
s := &driver.QRCodeSession{
|
||||||
UID: d.QRCodeToken,
|
UID: d.Addition.QRCodeToken,
|
||||||
}
|
}
|
||||||
if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
|
if cr, err = d.client.QRCodeLogin(s); err != nil {
|
||||||
return errors.Wrap(err, "failed to login by qrcode")
|
return errors.Wrap(err, "failed to login by qrcode")
|
||||||
}
|
}
|
||||||
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
|
d.Addition.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
|
||||||
d.QRCodeToken = ""
|
d.Addition.QRCodeToken = ""
|
||||||
} else if d.Cookie != "" {
|
} else if d.Addition.Cookie != "" {
|
||||||
if err = cr.FromCookie(d.Cookie); err != nil {
|
if err = cr.FromCookie(d.Addition.Cookie); err != nil {
|
||||||
return errors.Wrap(err, "failed to login by cookies")
|
return errors.Wrap(err, "failed to login by cookies")
|
||||||
}
|
}
|
||||||
d.client.ImportCredential(cr)
|
d.client.ImportCredential(cr)
|
||||||
@ -64,9 +59,9 @@ func (d *Pan115) login() error {
|
|||||||
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
|
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
|
||||||
res := make([]FileObj, 0)
|
res := make([]FileObj, 0)
|
||||||
if d.PageSize <= 0 {
|
if d.PageSize <= 0 {
|
||||||
d.PageSize = driver115.FileListLimit
|
d.PageSize = driver.FileListLimit
|
||||||
}
|
}
|
||||||
files, err := d.client.ListWithLimit(fileId, d.PageSize, driver115.WithMultiUrls())
|
files, err := d.client.ListWithLimit(fileId, d.PageSize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -76,98 +71,9 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
|
|||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan115) getNewFile(fileId string) (*FileObj, error) {
|
const (
|
||||||
file, err := d.client.GetFile(fileId)
|
appVer = "2.0.3.6"
|
||||||
if err != nil {
|
)
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &FileObj{*file}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) getNewFileByPickCode(pickCode string) (*FileObj, error) {
|
|
||||||
result := driver115.GetFileInfoResponse{}
|
|
||||||
req := d.client.NewRequest().
|
|
||||||
SetQueryParam("pick_code", pickCode).
|
|
||||||
ForceContentType("application/json;charset=UTF-8").
|
|
||||||
SetResult(&result)
|
|
||||||
resp, err := req.Get(driver115.ApiFileInfo)
|
|
||||||
if err := driver115.CheckErr(err, &result, resp); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if len(result.Files) == 0 {
|
|
||||||
return nil, errors.New("not get file info")
|
|
||||||
}
|
|
||||||
fileInfo := result.Files[0]
|
|
||||||
|
|
||||||
f := &FileObj{}
|
|
||||||
f.From(fileInfo)
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) getUA() string {
|
|
||||||
return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
|
|
||||||
key := crypto.GenerateKey()
|
|
||||||
result := driver115.DownloadResp{}
|
|
||||||
params, err := utils.Json.Marshal(map[string]string{"pick_code": pickCode})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
data := crypto.Encode(params, key)
|
|
||||||
|
|
||||||
bodyReader := strings.NewReader(url.Values{"data": []string{data}}.Encode())
|
|
||||||
reqUrl := fmt.Sprintf("%s?t=%s", driver115.AndroidApiDownloadGetUrl, driver115.Now().String())
|
|
||||||
req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader)
|
|
||||||
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
|
||||||
req.Header.Set("Cookie", d.Cookie)
|
|
||||||
req.Header.Set("User-Agent", ua)
|
|
||||||
|
|
||||||
resp, err := d.client.Client.GetClient().Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
body, err := io.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := utils.Json.Unmarshal(body, &result); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = result.Err(string(body)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := crypto.Decode(string(result.EncodedData), key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
downloadInfo := struct {
|
|
||||||
Url string `json:"url"`
|
|
||||||
}{}
|
|
||||||
if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info := &driver115.DownloadInfo{}
|
|
||||||
info.PickCode = pickCode
|
|
||||||
info.Header = resp.Request.Header
|
|
||||||
info.Url.Url = downloadInfo.Url
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string {
|
|
||||||
userID := strconv.FormatInt(c.client.UserID, 10)
|
|
||||||
userIDMd5 := md5.Sum([]byte(userID))
|
|
||||||
tokenMd5 := md5.Sum([]byte(md5Salt + fileID + fileSize + signKey + signVal + userID + timeStamp + hex.EncodeToString(userIDMd5[:]) + appVer))
|
|
||||||
return hex.EncodeToString(tokenMd5[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
|
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
|
||||||
var (
|
var (
|
||||||
@ -198,7 +104,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri
|
|||||||
|
|
||||||
signKey, signVal := "", ""
|
signKey, signVal := "", ""
|
||||||
for retry := true; retry; {
|
for retry := true; retry; {
|
||||||
t := driver115.NowMilli()
|
t := driver115.Now()
|
||||||
|
|
||||||
if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
|
if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -209,7 +115,7 @@ func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
form.Set("t", t.String())
|
form.Set("t", t.String())
|
||||||
form.Set("token", d.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
|
form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
|
||||||
if signKey != "" && signVal != "" {
|
if signKey != "" && signVal != "" {
|
||||||
form.Set("sign_key", signKey)
|
form.Set("sign_key", signKey)
|
||||||
form.Set("sign_val", signVal)
|
form.Set("sign_val", signVal)
|
||||||
@ -262,9 +168,6 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
|
|||||||
|
|
||||||
length := end - start + 1
|
length := end - start + 1
|
||||||
reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
|
reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
hashStr, err := utils.HashReader(utils.SHA1, reader)
|
hashStr, err := utils.HashReader(utils.SHA1, reader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@ -273,43 +176,8 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadByOSS use aliyun sdk to upload
|
|
||||||
func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
|
|
||||||
ossToken, err := c.client.GetOSSToken()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ossClient, err := oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
bucket, err := ossClient.Bucket(params.Bucket)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var bodyBytes []byte
|
|
||||||
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
|
|
||||||
Reader: s,
|
|
||||||
UpdateProgress: up,
|
|
||||||
})
|
|
||||||
if err = bucket.PutObject(params.Object, r, append(
|
|
||||||
driver115.OssOption(params, ossToken),
|
|
||||||
oss.CallbackResult(&bodyBytes),
|
|
||||||
)...); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var uploadResult UploadResult
|
|
||||||
if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &uploadResult, uploadResult.Err(string(bodyBytes))
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadByMultipart upload by mutipart blocks
|
// UploadByMultipart upload by mutipart blocks
|
||||||
func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
|
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
|
||||||
dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
|
|
||||||
var (
|
var (
|
||||||
chunks []oss.FileChunk
|
chunks []oss.FileChunk
|
||||||
parts []oss.UploadPart
|
parts []oss.UploadPart
|
||||||
@ -317,13 +185,12 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
|
|||||||
ossClient *oss.Client
|
ossClient *oss.Client
|
||||||
bucket *oss.Bucket
|
bucket *oss.Bucket
|
||||||
ossToken *driver115.UploadOSSTokenResp
|
ossToken *driver115.UploadOSSTokenResp
|
||||||
bodyBytes []byte
|
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
tmpF, err := s.CacheFullInTempFile()
|
tmpF, err := stream.CacheFullInTempFile()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
options := driver115.DefalutUploadMultipartOptions()
|
options := driver115.DefalutUploadMultipartOptions()
|
||||||
@ -332,19 +199,17 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
|
|||||||
f(options)
|
f(options)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// oss 启用Sequential必须按顺序上传
|
|
||||||
options.ThreadsNum = 1
|
|
||||||
|
|
||||||
if ossToken, err = d.client.GetOSSToken(); err != nil {
|
if ossToken, err = d.client.GetOSSToken(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret, oss.EnableMD5(true), oss.EnableCRC(true)); err != nil {
|
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
|
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// ossToken一小时后就会失效,所以每50分钟重新获取一次
|
// ossToken一小时后就会失效,所以每50分钟重新获取一次
|
||||||
@ -354,15 +219,14 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
|
|||||||
timeout := time.NewTimer(options.Timeout)
|
timeout := time.NewTimer(options.Timeout)
|
||||||
|
|
||||||
if chunks, err = SplitFile(fileSize); err != nil {
|
if chunks, err = SplitFile(fileSize); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if imur, err = bucket.InitiateMultipartUpload(params.Object,
|
if imur, err = bucket.InitiateMultipartUpload(params.Object,
|
||||||
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
|
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
|
||||||
oss.UserAgentHeader(driver115.OSSUserAgent),
|
oss.UserAgentHeader(driver115.OSSUserAgent),
|
||||||
oss.EnableSha1(), oss.Sequential(),
|
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
@ -380,41 +244,37 @@ func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.Upload
|
|||||||
quit <- struct{}{}
|
quit <- struct{}{}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
completedNum := atomic.Int32{}
|
|
||||||
// consumers
|
// consumers
|
||||||
for i := 0; i < options.ThreadsNum; i++ {
|
for i := 0; i < options.ThreadsNum; i++ {
|
||||||
go func(threadId int) {
|
go func(threadId int) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
errCh <- fmt.Errorf("recovered in %v", r)
|
errCh <- fmt.Errorf("Recovered in %v", r)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
for chunk := range chunksCh {
|
for chunk := range chunksCh {
|
||||||
var part oss.UploadPart // 出现错误就继续尝试,共尝试3次
|
var part oss.UploadPart // 出现错误就继续尝试,共尝试3次
|
||||||
for retry := 0; retry < 3; retry++ {
|
for retry := 0; retry < 3; retry++ {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
|
||||||
break
|
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken
|
if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken
|
||||||
errCh <- errors.Wrap(err, "刷新token时出现错误")
|
errCh <- errors.Wrap(err, "刷新token时出现错误")
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
buf := make([]byte, chunk.Size)
|
buf := make([]byte, chunk.Size)
|
||||||
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
|
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if part, err = bucket.UploadPart(imur, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(buf)),
|
|
||||||
chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
|
b := bytes.NewBuffer(buf)
|
||||||
|
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
|
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
|
||||||
} else {
|
|
||||||
num := completedNum.Add(1)
|
|
||||||
up(float64(num) * 100.0 / float64(len(chunks)))
|
|
||||||
}
|
}
|
||||||
UploadedPartsCh <- part
|
UploadedPartsCh <- part
|
||||||
}
|
}
|
||||||
@ -433,38 +293,51 @@ LOOP:
|
|||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
// 到时重新获取ossToken
|
// 到时重新获取ossToken
|
||||||
if ossToken, err = d.client.GetOSSToken(); err != nil {
|
if ossToken, err = d.client.GetOSSToken(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
case <-quit:
|
case <-quit:
|
||||||
break LOOP
|
break LOOP
|
||||||
case <-errCh:
|
case <-errCh:
|
||||||
return nil, err
|
return err
|
||||||
case <-timeout.C:
|
case <-timeout.C:
|
||||||
return nil, fmt.Errorf("time out")
|
return fmt.Errorf("time out")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 不知道啥原因,oss那边分片上传不计算sha1,导致115服务器校验错误
|
// EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的
|
||||||
// params.Callback.Callback = strings.ReplaceAll(params.Callback.Callback, "${sha1}", params.SHA1)
|
if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
|
||||||
if _, err := bucket.CompleteMultipartUpload(imur, parts, append(
|
// 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的
|
||||||
driver115.OssOption(params, ossToken),
|
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
|
||||||
oss.CallbackResult(&bodyBytes),
|
return err
|
||||||
)...); err != nil {
|
}
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
return d.checkUploadStatus(dirID, params.SHA1)
|
||||||
var uploadResult UploadResult
|
|
||||||
if err = json.Unmarshal(bodyBytes, &uploadResult); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &uploadResult, uploadResult.Err(string(bodyBytes))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
|
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
|
||||||
for _, chunk := range chunks {
|
for _, chunk := range chunks {
|
||||||
ch <- chunk
|
ch <- chunk
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
|
||||||
|
// 验证上传是否成功
|
||||||
|
req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
|
||||||
|
opts := []driver115.GetFileOptions{
|
||||||
|
driver115.WithOrder(driver115.FileOrderByTime),
|
||||||
|
driver115.WithShowDirEnable(false),
|
||||||
|
driver115.WithAsc(false),
|
||||||
|
driver115.WithLimit(500),
|
||||||
|
}
|
||||||
|
fResp, err := driver115.GetFiles(req, dirID, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, fileInfo := range fResp.Files {
|
||||||
|
if fileInfo.Sha1 == sha1 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return driver115.ErrUploadFailed
|
||||||
|
}
|
||||||
|
|
||||||
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
|
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
|
||||||
for i := int64(1); i < 10; i++ {
|
for i := int64(1); i < 10; i++ {
|
||||||
@ -501,8 +374,8 @@ func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var chunks []oss.FileChunk
|
var chunks []oss.FileChunk
|
||||||
chunk := oss.FileChunk{}
|
var chunk = oss.FileChunk{}
|
||||||
chunkN := (int64)(chunkNum)
|
var chunkN = (int64)(chunkNum)
|
||||||
for i := int64(0); i < chunkN; i++ {
|
for i := int64(0); i < chunkN; i++ {
|
||||||
chunk.Number = int(i + 1)
|
chunk.Number = int(i + 1)
|
||||||
chunk.Offset = i * (fileSize / chunkN)
|
chunk.Offset = i * (fileSize / chunkN)
|
||||||
@ -524,13 +397,13 @@ func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, erro
|
|||||||
return nil, errors.New("chunkSize invalid")
|
return nil, errors.New("chunkSize invalid")
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkN := fileSize / chunkSize
|
var chunkN = fileSize / chunkSize
|
||||||
if chunkN >= 10000 {
|
if chunkN >= 10000 {
|
||||||
return nil, errors.New("Too many parts, please increase part size")
|
return nil, errors.New("Too many parts, please increase part size")
|
||||||
}
|
}
|
||||||
|
|
||||||
var chunks []oss.FileChunk
|
var chunks []oss.FileChunk
|
||||||
chunk := oss.FileChunk{}
|
var chunk = oss.FileChunk{}
|
||||||
for i := int64(0); i < chunkN; i++ {
|
for i := int64(0); i < chunkN; i++ {
|
||||||
chunk.Number = int(i + 1)
|
chunk.Number = int(i + 1)
|
||||||
chunk.Offset = i * chunkSize
|
chunk.Offset = i * chunkSize
|
||||||
|
@ -1,299 +0,0 @@
|
|||||||
package _115_open
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/cmd/flags"
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
sdk "github.com/xhofe/115-sdk-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Open115 struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
client *sdk.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Config() driver.Config {
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Init(ctx context.Context) error {
|
|
||||||
d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken),
|
|
||||||
sdk.WithAccessToken(d.Addition.AccessToken),
|
|
||||||
sdk.WithOnRefreshToken(func(s1, s2 string) {
|
|
||||||
d.Addition.AccessToken = s1
|
|
||||||
d.Addition.RefreshToken = s2
|
|
||||||
op.MustSaveDriverStorage(d)
|
|
||||||
}))
|
|
||||||
if flags.Debug || flags.Dev {
|
|
||||||
d.client.SetDebug(true)
|
|
||||||
}
|
|
||||||
_, err := d.client.UserInfo(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Drop(ctx context.Context) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
var res []model.Obj
|
|
||||||
pageSize := int64(200)
|
|
||||||
offset := int64(0)
|
|
||||||
for {
|
|
||||||
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
|
|
||||||
CID: dir.GetID(),
|
|
||||||
Limit: pageSize,
|
|
||||||
Offset: offset,
|
|
||||||
ASC: d.Addition.OrderDirection == "asc",
|
|
||||||
O: d.Addition.OrderBy,
|
|
||||||
// Cur: 1,
|
|
||||||
ShowDir: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
|
|
||||||
obj := Obj(src)
|
|
||||||
return &obj
|
|
||||||
})...)
|
|
||||||
if len(res) >= int(resp.Count) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
offset += pageSize
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
var ua string
|
|
||||||
if args.Header != nil {
|
|
||||||
ua = args.Header.Get("User-Agent")
|
|
||||||
}
|
|
||||||
if ua == "" {
|
|
||||||
ua = base.UserAgent
|
|
||||||
}
|
|
||||||
obj, ok := file.(*Obj)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("can't convert obj")
|
|
||||||
}
|
|
||||||
pc := obj.Pc
|
|
||||||
resp, err := d.client.DownURL(ctx, pc, ua)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
u, ok := resp[obj.GetID()]
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("can't get link")
|
|
||||||
}
|
|
||||||
return &model.Link{
|
|
||||||
URL: u.URL.URL,
|
|
||||||
Header: http.Header{
|
|
||||||
"User-Agent": []string{ua},
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
|
||||||
resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Obj{
|
|
||||||
Fid: resp.FileID,
|
|
||||||
Pid: parentDir.GetID(),
|
|
||||||
Fn: dirName,
|
|
||||||
Fc: "0",
|
|
||||||
Upt: time.Now().Unix(),
|
|
||||||
Uet: time.Now().Unix(),
|
|
||||||
UpPt: time.Now().Unix(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
|
||||||
_, err := d.client.Move(ctx, &sdk.MoveReq{
|
|
||||||
FileIDs: srcObj.GetID(),
|
|
||||||
ToCid: dstDir.GetID(),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return srcObj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
|
||||||
_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
|
|
||||||
FileID: srcObj.GetID(),
|
|
||||||
FileNma: newName,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
obj, ok := srcObj.(*Obj)
|
|
||||||
if ok {
|
|
||||||
obj.Fn = newName
|
|
||||||
}
|
|
||||||
return srcObj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
|
||||||
_, err := d.client.Copy(ctx, &sdk.CopyReq{
|
|
||||||
PID: dstDir.GetID(),
|
|
||||||
FileID: srcObj.GetID(),
|
|
||||||
NoDupli: "1",
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return srcObj, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
_obj, ok := obj.(*Obj)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("can't convert obj")
|
|
||||||
}
|
|
||||||
_, err := d.client.DelFile(ctx, &sdk.DelFileReq{
|
|
||||||
FileIDs: _obj.GetID(),
|
|
||||||
ParentID: _obj.Pid,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
|
||||||
tempF, err := file.CacheFullInTempFile()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// cal full sha1
|
|
||||||
sha1, err := utils.HashReader(utils.SHA1, tempF)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = tempF.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// pre 128k sha1
|
|
||||||
sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = tempF.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// 1. Init
|
|
||||||
resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
|
|
||||||
FileName: file.GetName(),
|
|
||||||
FileSize: file.GetSize(),
|
|
||||||
Target: dstDir.GetID(),
|
|
||||||
FileID: strings.ToUpper(sha1),
|
|
||||||
PreID: strings.ToUpper(sha1128k),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Status == 2 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// 2. two way verify
|
|
||||||
if utils.SliceContains([]int{6, 7, 8}, resp.Status) {
|
|
||||||
signCheck := strings.Split(resp.SignCheck, "-") //"sign_check": "2392148-2392298" 取2392148-2392298之间的内容(包含2392148、2392298)的sha1
|
|
||||||
start, err := strconv.ParseInt(signCheck[0], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
end, err := strconv.ParseInt(signCheck[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = tempF.Seek(start, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = tempF.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
|
|
||||||
FileName: file.GetName(),
|
|
||||||
FileSize: file.GetSize(),
|
|
||||||
Target: dstDir.GetID(),
|
|
||||||
FileID: strings.ToUpper(sha1),
|
|
||||||
PreID: strings.ToUpper(sha1128k),
|
|
||||||
SignKey: resp.SignKey,
|
|
||||||
SignVal: strings.ToUpper(signVal),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Status == 2 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// 3. get upload token
|
|
||||||
tokenResp, err := d.client.UploadGetToken(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// 4. upload
|
|
||||||
err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
|
||||||
// // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
|
|
||||||
// return nil, errs.NotImplement
|
|
||||||
// }
|
|
||||||
|
|
||||||
// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
|
||||||
// // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
|
|
||||||
// return nil, errs.NotImplement
|
|
||||||
// }
|
|
||||||
|
|
||||||
// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
|
||||||
// // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
|
|
||||||
// return nil, errs.NotImplement
|
|
||||||
// }
|
|
||||||
|
|
||||||
// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
|
|
||||||
// // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
|
|
||||||
// // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
|
|
||||||
// // return errs.NotImplement to use an internal archive tool
|
|
||||||
// return nil, errs.NotImplement
|
|
||||||
// }
|
|
||||||
|
|
||||||
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
|
||||||
// return nil, errs.NotSupport
|
|
||||||
//}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*Open115)(nil)
|
|
@ -1,36 +0,0 @@
|
|||||||
package _115_open
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Addition struct {
|
|
||||||
// Usually one of two
|
|
||||||
driver.RootID
|
|
||||||
// define other
|
|
||||||
RefreshToken string `json:"refresh_token" required:"true"`
|
|
||||||
OrderBy string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
|
|
||||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
|
|
||||||
AccessToken string
|
|
||||||
}
|
|
||||||
|
|
||||||
var config = driver.Config{
|
|
||||||
Name: "115 Open",
|
|
||||||
LocalSort: false,
|
|
||||||
OnlyLocal: false,
|
|
||||||
OnlyProxy: false,
|
|
||||||
NoCache: false,
|
|
||||||
NoUpload: false,
|
|
||||||
NeedMs: false,
|
|
||||||
DefaultRoot: "0",
|
|
||||||
CheckStatus: false,
|
|
||||||
Alert: "",
|
|
||||||
NoOverwriteUpload: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
op.RegisterDriver(func() driver.Driver {
|
|
||||||
return &Open115{}
|
|
||||||
})
|
|
||||||
}
|
|
@ -1,59 +0,0 @@
|
|||||||
package _115_open
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
sdk "github.com/xhofe/115-sdk-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Obj sdk.GetFilesResp_File
|
|
||||||
|
|
||||||
// Thumb implements model.Thumb.
|
|
||||||
func (o *Obj) Thumb() string {
|
|
||||||
return o.Thumbnail
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateTime implements model.Obj.
|
|
||||||
func (o *Obj) CreateTime() time.Time {
|
|
||||||
return time.Unix(o.UpPt, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHash implements model.Obj.
|
|
||||||
func (o *Obj) GetHash() utils.HashInfo {
|
|
||||||
return utils.NewHashInfo(utils.SHA1, o.Sha1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetID implements model.Obj.
|
|
||||||
func (o *Obj) GetID() string {
|
|
||||||
return o.Fid
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetName implements model.Obj.
|
|
||||||
func (o *Obj) GetName() string {
|
|
||||||
return o.Fn
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPath implements model.Obj.
|
|
||||||
func (o *Obj) GetPath() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSize implements model.Obj.
|
|
||||||
func (o *Obj) GetSize() int64 {
|
|
||||||
return o.FS
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsDir implements model.Obj.
|
|
||||||
func (o *Obj) IsDir() bool {
|
|
||||||
return o.Fc == "0"
|
|
||||||
}
|
|
||||||
|
|
||||||
// ModTime implements model.Obj.
|
|
||||||
func (o *Obj) ModTime() time.Time {
|
|
||||||
return time.Unix(o.Upt, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ model.Obj = (*Obj)(nil)
|
|
||||||
var _ model.Thumb = (*Obj)(nil)
|
|
@ -1,140 +0,0 @@
|
|||||||
package _115_open
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
|
||||||
"github.com/avast/retry-go"
|
|
||||||
sdk "github.com/xhofe/115-sdk-go"
|
|
||||||
)
|
|
||||||
|
|
||||||
func calPartSize(fileSize int64) int64 {
|
|
||||||
var partSize int64 = 20 * utils.MB
|
|
||||||
if fileSize > partSize {
|
|
||||||
if fileSize > 1*utils.TB { // file Size over 1TB
|
|
||||||
partSize = 5 * utils.GB // file part size 5GB
|
|
||||||
} else if fileSize > 768*utils.GB { // over 768GB
|
|
||||||
partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
|
|
||||||
} else if fileSize > 512*utils.GB { // over 512GB
|
|
||||||
partSize = 82463373 // ≈ 78.6432MB
|
|
||||||
} else if fileSize > 384*utils.GB { // over 384GB
|
|
||||||
partSize = 54975582 // ≈ 52.4288MB
|
|
||||||
} else if fileSize > 256*utils.GB { // over 256GB
|
|
||||||
partSize = 41231687 // ≈ 39.3216MB
|
|
||||||
} else if fileSize > 128*utils.GB { // over 128GB
|
|
||||||
partSize = 27487791 // ≈ 26.2144MB
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return partSize
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
|
|
||||||
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
bucket, err := ossClient.Bucket(initResp.Bucket)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = bucket.PutObject(initResp.Object, tempF,
|
|
||||||
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
|
|
||||||
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
|
|
||||||
)
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// type CallbackResult struct {
|
|
||||||
// State bool `json:"state"`
|
|
||||||
// Code int `json:"code"`
|
|
||||||
// Message string `json:"message"`
|
|
||||||
// Data struct {
|
|
||||||
// PickCode string `json:"pick_code"`
|
|
||||||
// FileName string `json:"file_name"`
|
|
||||||
// FileSize int64 `json:"file_size"`
|
|
||||||
// FileID string `json:"file_id"`
|
|
||||||
// ThumbURL string `json:"thumb_url"`
|
|
||||||
// Sha1 string `json:"sha1"`
|
|
||||||
// Aid int `json:"aid"`
|
|
||||||
// Cid string `json:"cid"`
|
|
||||||
// } `json:"data"`
|
|
||||||
// }
|
|
||||||
|
|
||||||
func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
|
|
||||||
fileSize := stream.GetSize()
|
|
||||||
chunkSize := calPartSize(fileSize)
|
|
||||||
|
|
||||||
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
bucket, err := ossClient.Bucket(initResp.Bucket)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
|
|
||||||
parts := make([]oss.UploadPart, partNum)
|
|
||||||
offset := int64(0)
|
|
||||||
for i := int64(1); i <= partNum; i++ {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
partSize := chunkSize
|
|
||||||
if i == partNum {
|
|
||||||
partSize = fileSize - (i-1)*chunkSize
|
|
||||||
}
|
|
||||||
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
|
|
||||||
err = retry.Do(func() error {
|
|
||||||
_ = rd.Reset()
|
|
||||||
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
|
|
||||||
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
parts[i-1] = part
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
retry.Attempts(3),
|
|
||||||
retry.DelayType(retry.BackOffDelay),
|
|
||||||
retry.Delay(time.Second))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if i == partNum {
|
|
||||||
offset = fileSize
|
|
||||||
} else {
|
|
||||||
offset += partSize
|
|
||||||
}
|
|
||||||
up(float64(offset) / float64(fileSize))
|
|
||||||
}
|
|
||||||
|
|
||||||
// callbackRespBytes := make([]byte, 1024)
|
|
||||||
_, err = bucket.CompleteMultipartUpload(
|
|
||||||
imur,
|
|
||||||
parts,
|
|
||||||
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
|
|
||||||
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
|
|
||||||
// oss.CallbackResult(&callbackRespBytes),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,3 +0,0 @@
|
|||||||
package _115_open
|
|
||||||
|
|
||||||
// do others that not defined in Driver interface
|
|
@ -1,112 +0,0 @@
|
|||||||
package _115_share
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
"golang.org/x/time/rate"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Pan115Share struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
client *driver115.Pan115Client
|
|
||||||
limiter *rate.Limiter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Config() driver.Config {
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Init(ctx context.Context) error {
|
|
||||||
if d.LimitRate > 0 {
|
|
||||||
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.login()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) WaitLimit(ctx context.Context) error {
|
|
||||||
if d.limiter != nil {
|
|
||||||
return d.limiter.Wait(ctx)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Drop(ctx context.Context) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
files := make([]driver115.ShareFile, 0)
|
|
||||||
fileResp, err := d.client.GetShareSnap(d.ShareCode, d.ReceiveCode, dir.GetID(), driver115.QueryLimit(int(d.PageSize)))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
files = append(files, fileResp.Data.List...)
|
|
||||||
total := fileResp.Data.Count
|
|
||||||
count := len(fileResp.Data.List)
|
|
||||||
for total > count {
|
|
||||||
fileResp, err := d.client.GetShareSnap(
|
|
||||||
d.ShareCode, d.ReceiveCode, dir.GetID(),
|
|
||||||
driver115.QueryLimit(int(d.PageSize)), driver115.QueryOffset(count),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
files = append(files, fileResp.Data.List...)
|
|
||||||
count += len(fileResp.Data.List)
|
|
||||||
}
|
|
||||||
|
|
||||||
return utils.SliceConvert(files, transFunc)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
if err := d.WaitLimit(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
downloadInfo, err := d.client.DownloadByShareCode(d.ShareCode, d.ReceiveCode, file.GetID())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &model.Link{URL: downloadInfo.URL.URL}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan115Share) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*Pan115Share)(nil)
|
|
@ -1,34 +0,0 @@
|
|||||||
package _115_share
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Addition struct {
|
|
||||||
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
|
|
||||||
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
|
|
||||||
QRCodeSource string `json:"qrcode_source" type:"select" options:"web,android,ios,tv,alipaymini,wechatmini,qandroid" default:"linux" help:"select the QR code device, default linux"`
|
|
||||||
PageSize int64 `json:"page_size" type:"number" default:"1000" help:"list api per page size of 115 driver"`
|
|
||||||
LimitRate float64 `json:"limit_rate" type:"float" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
|
|
||||||
ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"`
|
|
||||||
ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"`
|
|
||||||
driver.RootID
|
|
||||||
}
|
|
||||||
|
|
||||||
var config = driver.Config{
|
|
||||||
Name: "115 Share",
|
|
||||||
DefaultRoot: "0",
|
|
||||||
// OnlyProxy: true,
|
|
||||||
// OnlyLocal: true,
|
|
||||||
CheckStatus: false,
|
|
||||||
Alert: "",
|
|
||||||
NoOverwriteUpload: true,
|
|
||||||
NoUpload: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
op.RegisterDriver(func() driver.Driver {
|
|
||||||
return &Pan115Share{}
|
|
||||||
})
|
|
||||||
}
|
|
@ -1,111 +0,0 @@
|
|||||||
package _115_share
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ model.Obj = (*FileObj)(nil)
|
|
||||||
|
|
||||||
type FileObj struct {
|
|
||||||
Size int64
|
|
||||||
Sha1 string
|
|
||||||
Utm time.Time
|
|
||||||
FileName string
|
|
||||||
isDir bool
|
|
||||||
FileID string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) CreateTime() time.Time {
|
|
||||||
return f.Utm
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) GetHash() utils.HashInfo {
|
|
||||||
return utils.NewHashInfo(utils.SHA1, f.Sha1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) GetSize() int64 {
|
|
||||||
return f.Size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) GetName() string {
|
|
||||||
return f.FileName
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) ModTime() time.Time {
|
|
||||||
return f.Utm
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) IsDir() bool {
|
|
||||||
return f.isDir
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) GetID() string {
|
|
||||||
return f.FileID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileObj) GetPath() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func transFunc(sf driver115.ShareFile) (model.Obj, error) {
|
|
||||||
timeInt, err := strconv.ParseInt(sf.UpdateTime, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
utm = time.Unix(timeInt, 0)
|
|
||||||
isDir = (sf.IsFile == 0)
|
|
||||||
fileID = string(sf.FileID)
|
|
||||||
)
|
|
||||||
if isDir {
|
|
||||||
fileID = string(sf.CategoryID)
|
|
||||||
}
|
|
||||||
return &FileObj{
|
|
||||||
Size: int64(sf.Size),
|
|
||||||
Sha1: sf.Sha1,
|
|
||||||
Utm: utm,
|
|
||||||
FileName: string(sf.FileName),
|
|
||||||
isDir: isDir,
|
|
||||||
FileID: fileID,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var UserAgent = driver115.UA115Browser
|
|
||||||
|
|
||||||
func (d *Pan115Share) login() error {
|
|
||||||
var err error
|
|
||||||
opts := []driver115.Option{
|
|
||||||
driver115.UA(UserAgent),
|
|
||||||
}
|
|
||||||
d.client = driver115.New(opts...)
|
|
||||||
if _, err := d.client.GetShareSnap(d.ShareCode, d.ReceiveCode, ""); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to get share snap")
|
|
||||||
}
|
|
||||||
cr := &driver115.Credential{}
|
|
||||||
if d.QRCodeToken != "" {
|
|
||||||
s := &driver115.QRCodeSession{
|
|
||||||
UID: d.QRCodeToken,
|
|
||||||
}
|
|
||||||
if cr, err = d.client.QRCodeLoginWithApp(s, driver115.LoginApp(d.QRCodeSource)); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to login by qrcode")
|
|
||||||
}
|
|
||||||
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s;KID=%s", cr.UID, cr.CID, cr.SEID, cr.KID)
|
|
||||||
d.QRCodeToken = ""
|
|
||||||
} else if d.Cookie != "" {
|
|
||||||
if err = cr.FromCookie(d.Cookie); err != nil {
|
|
||||||
return errors.Wrap(err, "failed to login by cookies")
|
|
||||||
}
|
|
||||||
d.client.ImportCredential(cr)
|
|
||||||
} else {
|
|
||||||
return errors.New("missing cookie or qrcode account")
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.client.LoginCheck()
|
|
||||||
}
|
|
@ -6,14 +6,6 @@ import (
|
|||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/time/rate"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
@ -25,12 +17,14 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Pan123 struct {
|
type Pan123 struct {
|
||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
apiRateLimit sync.Map
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) Config() driver.Config {
|
func (d *Pan123) Config() driver.Config {
|
||||||
@ -42,19 +36,19 @@ func (d *Pan123) GetAddition() driver.Additional {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) Init(ctx context.Context) error {
|
func (d *Pan123) Init(ctx context.Context) error {
|
||||||
_, err := d.Request(UserInfo, http.MethodGet, nil, nil)
|
_, err := d.request(UserInfo, http.MethodGet, nil, nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) Drop(ctx context.Context) error {
|
func (d *Pan123) Drop(ctx context.Context) error {
|
||||||
_, _ = d.Request(Logout, http.MethodPost, func(req *resty.Request) {
|
_, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{})
|
req.SetBody(base.Json{})
|
||||||
}, nil)
|
}, nil)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
func (d *Pan123) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
files, err := d.getFiles(ctx, dir.GetID(), dir.GetName())
|
files, err := d.getFiles(dir.GetID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -82,8 +76,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
|||||||
"size": f.Size,
|
"size": f.Size,
|
||||||
"type": f.Type,
|
"type": f.Type,
|
||||||
}
|
}
|
||||||
resp, err := d.Request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
|
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
|
||||||
|
|
||||||
req.SetBody(data).SetHeaders(headers)
|
req.SetBody(data).SetHeaders(headers)
|
||||||
}, nil)
|
}, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -136,7 +129,7 @@ func (d *Pan123) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
|
|||||||
"size": 0,
|
"size": 0,
|
||||||
"type": 1,
|
"type": 1,
|
||||||
}
|
}
|
||||||
_, err := d.Request(Mkdir, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(Mkdir, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, nil)
|
}, nil)
|
||||||
return err
|
return err
|
||||||
@ -147,7 +140,7 @@ func (d *Pan123) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
"fileIdList": []base.Json{{"FileId": srcObj.GetID()}},
|
"fileIdList": []base.Json{{"FileId": srcObj.GetID()}},
|
||||||
"parentFileId": dstDir.GetID(),
|
"parentFileId": dstDir.GetID(),
|
||||||
}
|
}
|
||||||
_, err := d.Request(Move, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(Move, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, nil)
|
}, nil)
|
||||||
return err
|
return err
|
||||||
@ -159,7 +152,7 @@ func (d *Pan123) Rename(ctx context.Context, srcObj model.Obj, newName string) e
|
|||||||
"fileId": srcObj.GetID(),
|
"fileId": srcObj.GetID(),
|
||||||
"fileName": newName,
|
"fileName": newName,
|
||||||
}
|
}
|
||||||
_, err := d.Request(Rename, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(Rename, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, nil)
|
}, nil)
|
||||||
return err
|
return err
|
||||||
@ -176,7 +169,7 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
"operation": true,
|
"operation": true,
|
||||||
"fileTrashInfoList": []File{f},
|
"fileTrashInfoList": []File{f},
|
||||||
}
|
}
|
||||||
_, err := d.Request(Trash, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(Trash, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, nil)
|
}, nil)
|
||||||
return err
|
return err
|
||||||
@ -185,39 +178,36 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
etag := file.GetHash().GetHash(utils.MD5)
|
// const DEFAULT int64 = 10485760
|
||||||
if len(etag) < utils.MD5.Width {
|
h := md5.New()
|
||||||
// const DEFAULT int64 = 10485760
|
// need to calculate md5 of the full content
|
||||||
h := md5.New()
|
tempFile, err := stream.CacheFullInTempFile()
|
||||||
// need to calculate md5 of the full content
|
if err != nil {
|
||||||
tempFile, err := file.CacheFullInTempFile()
|
return err
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
_ = tempFile.Close()
|
|
||||||
}()
|
|
||||||
if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = tempFile.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
etag = hex.EncodeToString(h.Sum(nil))
|
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = tempFile.Close()
|
||||||
|
}()
|
||||||
|
if _, err = io.Copy(h, tempFile); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = tempFile.Seek(0, io.SeekStart)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
etag := hex.EncodeToString(h.Sum(nil))
|
||||||
data := base.Json{
|
data := base.Json{
|
||||||
"driveId": 0,
|
"driveId": 0,
|
||||||
"duplicate": 2, // 2->覆盖 1->重命名 0->默认
|
"duplicate": 2, // 2->覆盖 1->重命名 0->默认
|
||||||
"etag": etag,
|
"etag": etag,
|
||||||
"fileName": file.GetName(),
|
"fileName": stream.GetName(),
|
||||||
"parentFileId": dstDir.GetID(),
|
"parentFileId": dstDir.GetID(),
|
||||||
"size": file.GetSize(),
|
"size": stream.GetSize(),
|
||||||
"type": 0,
|
"type": 0,
|
||||||
}
|
}
|
||||||
var resp UploadResp
|
var resp UploadResp
|
||||||
res, err := d.Request(UploadRequest, http.MethodPost, func(req *resty.Request) {
|
res, err := d.request(UploadRequest, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data).SetContext(ctx)
|
req.SetBody(data).SetContext(ctx)
|
||||||
}, &resp)
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -228,7 +218,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
|
if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
|
||||||
err = d.newUpload(ctx, &resp, file, up)
|
err = d.newUpload(ctx, &resp, stream, tempFile, up)
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
cfg := &aws.Config{
|
cfg := &aws.Config{
|
||||||
@ -242,23 +232,17 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
uploader := s3manager.NewUploader(s)
|
uploader := s3manager.NewUploader(s)
|
||||||
if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
|
|
||||||
uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
|
|
||||||
}
|
|
||||||
input := &s3manager.UploadInput{
|
input := &s3manager.UploadInput{
|
||||||
Bucket: &resp.Data.Bucket,
|
Bucket: &resp.Data.Bucket,
|
||||||
Key: &resp.Data.Key,
|
Key: &resp.Data.Key,
|
||||||
Body: driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
|
Body: tempFile,
|
||||||
Reader: file,
|
|
||||||
UpdateProgress: up,
|
|
||||||
}),
|
|
||||||
}
|
}
|
||||||
_, err = uploader.UploadWithContext(ctx, input)
|
_, err = uploader.UploadWithContext(ctx, input)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
_, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = d.request(UploadComplete, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"fileId": resp.Data.FileId,
|
"fileId": resp.Data.FileId,
|
||||||
}).SetContext(ctx)
|
}).SetContext(ctx)
|
||||||
@ -266,12 +250,4 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStrea
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) APIRateLimit(ctx context.Context, api string) error {
|
|
||||||
value, _ := d.apiRateLimit.LoadOrStore(api,
|
|
||||||
rate.NewLimiter(rate.Every(700*time.Millisecond), 1))
|
|
||||||
limiter := value.(*rate.Limiter)
|
|
||||||
|
|
||||||
return limiter.Wait(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*Pan123)(nil)
|
var _ driver.Driver = (*Pan123)(nil)
|
||||||
|
@ -9,15 +9,14 @@ type Addition struct {
|
|||||||
Username string `json:"username" required:"true"`
|
Username string `json:"username" required:"true"`
|
||||||
Password string `json:"password" required:"true"`
|
Password string `json:"password" required:"true"`
|
||||||
driver.RootID
|
driver.RootID
|
||||||
//OrderBy string `json:"order_by" type:"select" options:"file_id,file_name,size,update_at" default:"file_name"`
|
OrderBy string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
|
||||||
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||||
AccessToken string
|
AccessToken string
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "123Pan",
|
Name: "123Pan",
|
||||||
DefaultRoot: "0",
|
DefaultRoot: "0",
|
||||||
LocalSort: true,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -87,9 +87,8 @@ var _ model.Thumb = (*File)(nil)
|
|||||||
type Files struct {
|
type Files struct {
|
||||||
//BaseResp
|
//BaseResp
|
||||||
Data struct {
|
Data struct {
|
||||||
Next string `json:"Next"`
|
|
||||||
Total int `json:"Total"`
|
|
||||||
InfoList []File `json:"InfoList"`
|
InfoList []File `json:"InfoList"`
|
||||||
|
Next string `json:"Next"`
|
||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star
|
|||||||
"StorageNode": upReq.Data.StorageNode,
|
"StorageNode": upReq.Data.StorageNode,
|
||||||
}
|
}
|
||||||
var s3PreSignedUrls S3PreSignedURLs
|
var s3PreSignedUrls S3PreSignedURLs
|
||||||
_, err := d.Request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(S3PreSignedUrls, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data).SetContext(ctx)
|
req.SetBody(data).SetContext(ctx)
|
||||||
}, &s3PreSignedUrls)
|
}, &s3PreSignedUrls)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -44,7 +44,7 @@ func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end in
|
|||||||
"uploadId": upReq.Data.UploadId,
|
"uploadId": upReq.Data.UploadId,
|
||||||
}
|
}
|
||||||
var s3PreSignedUrls S3PreSignedURLs
|
var s3PreSignedUrls S3PreSignedURLs
|
||||||
_, err := d.Request(S3Auth, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data).SetContext(ctx)
|
req.SetBody(data).SetContext(ctx)
|
||||||
}, &s3PreSignedUrls)
|
}, &s3PreSignedUrls)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -63,13 +63,13 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
|
|||||||
"key": upReq.Data.Key,
|
"key": upReq.Data.Key,
|
||||||
"uploadId": upReq.Data.UploadId,
|
"uploadId": upReq.Data.UploadId,
|
||||||
}
|
}
|
||||||
_, err := d.Request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
|
_, err := d.request(UploadCompleteV2, http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data).SetContext(ctx)
|
req.SetBody(data).SetContext(ctx)
|
||||||
}, nil)
|
}, nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
|
func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
|
||||||
chunkSize := int64(1024 * 1024 * 16)
|
chunkSize := int64(1024 * 1024 * 16)
|
||||||
// fetch s3 pre signed urls
|
// fetch s3 pre signed urls
|
||||||
chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
|
chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
|
||||||
@ -81,7 +81,6 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
|||||||
batchSize = 10
|
batchSize = 10
|
||||||
getS3UploadUrl = d.getS3PreSignedUrls
|
getS3UploadUrl = d.getS3PreSignedUrls
|
||||||
}
|
}
|
||||||
limited := driver.NewLimitedUploadStream(ctx, file)
|
|
||||||
for i := 1; i <= chunkCount; i += batchSize {
|
for i := 1; i <= chunkCount; i += batchSize {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
@ -104,7 +103,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
|
|||||||
if j == chunkCount {
|
if j == chunkCount {
|
||||||
curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
|
curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
|
||||||
}
|
}
|
||||||
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(limited, chunkSize), curSize, false, getS3UploadUrl)
|
err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -1,23 +1,15 @@
|
|||||||
package _123
|
package _123
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash/crc32"
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
jsoniter "github.com/json-iterator/go"
|
jsoniter "github.com/json-iterator/go"
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// do others that not defined in Driver interface
|
// do others that not defined in Driver interface
|
||||||
@ -26,9 +18,8 @@ const (
|
|||||||
Api = "https://www.123pan.com/api"
|
Api = "https://www.123pan.com/api"
|
||||||
AApi = "https://www.123pan.com/a/api"
|
AApi = "https://www.123pan.com/a/api"
|
||||||
BApi = "https://www.123pan.com/b/api"
|
BApi = "https://www.123pan.com/b/api"
|
||||||
LoginApi = "https://login.123pan.com/api"
|
MainApi = Api
|
||||||
MainApi = BApi
|
SignIn = MainApi + "/user/sign_in"
|
||||||
SignIn = LoginApi + "/user/sign_in"
|
|
||||||
Logout = MainApi + "/user/logout"
|
Logout = MainApi + "/user/logout"
|
||||||
UserInfo = MainApi + "/user/info"
|
UserInfo = MainApi + "/user/info"
|
||||||
FileList = MainApi + "/file/list/new"
|
FileList = MainApi + "/file/list/new"
|
||||||
@ -46,104 +37,6 @@ const (
|
|||||||
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
|
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
|
||||||
)
|
)
|
||||||
|
|
||||||
func signPath(path string, os string, version string) (k string, v string) {
|
|
||||||
table := []byte{'a', 'd', 'e', 'f', 'g', 'h', 'l', 'm', 'y', 'i', 'j', 'n', 'o', 'p', 'k', 'q', 'r', 's', 't', 'u', 'b', 'c', 'v', 'w', 's', 'z'}
|
|
||||||
random := fmt.Sprintf("%.f", math.Round(1e7*rand.Float64()))
|
|
||||||
now := time.Now().In(time.FixedZone("CST", 8*3600))
|
|
||||||
timestamp := fmt.Sprint(now.Unix())
|
|
||||||
nowStr := []byte(now.Format("200601021504"))
|
|
||||||
for i := 0; i < len(nowStr); i++ {
|
|
||||||
nowStr[i] = table[nowStr[i]-48]
|
|
||||||
}
|
|
||||||
timeSign := fmt.Sprint(crc32.ChecksumIEEE(nowStr))
|
|
||||||
data := strings.Join([]string{timestamp, random, path, os, version, timeSign}, "|")
|
|
||||||
dataSign := fmt.Sprint(crc32.ChecksumIEEE([]byte(data)))
|
|
||||||
return timeSign, strings.Join([]string{timestamp, random, dataSign}, "-")
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetApi(rawUrl string) string {
|
|
||||||
u, _ := url.Parse(rawUrl)
|
|
||||||
query := u.Query()
|
|
||||||
query.Add(signPath(u.Path, "web", "3"))
|
|
||||||
u.RawQuery = query.Encode()
|
|
||||||
return u.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
//func GetApi(url string) string {
|
|
||||||
// vm := js.New()
|
|
||||||
// vm.Set("url", url[22:])
|
|
||||||
// r, err := vm.RunString(`
|
|
||||||
// (function(e){
|
|
||||||
// function A(t, e) {
|
|
||||||
// e = 1 < arguments.length && void 0 !== e ? e : 10;
|
|
||||||
// for (var n = function() {
|
|
||||||
// for (var t = [], e = 0; e < 256; e++) {
|
|
||||||
// for (var n = e, r = 0; r < 8; r++)
|
|
||||||
// n = 1 & n ? 3988292384 ^ n >>> 1 : n >>> 1;
|
|
||||||
// t[e] = n
|
|
||||||
// }
|
|
||||||
// return t
|
|
||||||
// }(), r = function(t) {
|
|
||||||
// t = t.replace(/\\r\\n/g, "\\n");
|
|
||||||
// for (var e = "", n = 0; n < t.length; n++) {
|
|
||||||
// var r = t.charCodeAt(n);
|
|
||||||
// r < 128 ? e += String.fromCharCode(r) : e = 127 < r && r < 2048 ? (e += String.fromCharCode(r >> 6 | 192)) + String.fromCharCode(63 & r | 128) : (e = (e += String.fromCharCode(r >> 12 | 224)) + String.fromCharCode(r >> 6 & 63 | 128)) + String.fromCharCode(63 & r | 128)
|
|
||||||
// }
|
|
||||||
// return e
|
|
||||||
// }(t), a = -1, i = 0; i < r.length; i++)
|
|
||||||
// a = a >>> 8 ^ n[255 & (a ^ r.charCodeAt(i))];
|
|
||||||
// return (a = (-1 ^ a) >>> 0).toString(e)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// function v(t) {
|
|
||||||
// return (v = "function" == typeof Symbol && "symbol" == typeof Symbol.iterator ? function(t) {
|
|
||||||
// return typeof t
|
|
||||||
// }
|
|
||||||
// : function(t) {
|
|
||||||
// return t && "function" == typeof Symbol && t.constructor === Symbol && t !== Symbol.prototype ? "symbol" : typeof t
|
|
||||||
// }
|
|
||||||
// )(t)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// for (p in a = Math.round(1e7 * Math.random()),
|
|
||||||
// o = Math.round(((new Date).getTime() + 60 * (new Date).getTimezoneOffset() * 1e3 + 288e5) / 1e3).toString(),
|
|
||||||
// m = ["a", "d", "e", "f", "g", "h", "l", "m", "y", "i", "j", "n", "o", "p", "k", "q", "r", "s", "t", "u", "b", "c", "v", "w", "s", "z"],
|
|
||||||
// u = function(t, e, n) {
|
|
||||||
// var r;
|
|
||||||
// n = 2 < arguments.length && void 0 !== n ? n : 8;
|
|
||||||
// return 0 === arguments.length ? null : (r = "object" === v(t) ? t : (10 === "".concat(t).length && (t = 1e3 * Number.parseInt(t)),
|
|
||||||
// new Date(t)),
|
|
||||||
// t += 6e4 * new Date(t).getTimezoneOffset(),
|
|
||||||
// {
|
|
||||||
// y: (r = new Date(t + 36e5 * n)).getFullYear(),
|
|
||||||
// m: r.getMonth() + 1 < 10 ? "0".concat(r.getMonth() + 1) : r.getMonth() + 1,
|
|
||||||
// d: r.getDate() < 10 ? "0".concat(r.getDate()) : r.getDate(),
|
|
||||||
// h: r.getHours() < 10 ? "0".concat(r.getHours()) : r.getHours(),
|
|
||||||
// f: r.getMinutes() < 10 ? "0".concat(r.getMinutes()) : r.getMinutes()
|
|
||||||
// })
|
|
||||||
// }(o),
|
|
||||||
// h = u.y,
|
|
||||||
// g = u.m,
|
|
||||||
// l = u.d,
|
|
||||||
// c = u.h,
|
|
||||||
// u = u.f,
|
|
||||||
// d = [h, g, l, c, u].join(""),
|
|
||||||
// f = [],
|
|
||||||
// d)
|
|
||||||
// f.push(m[Number(d[p])]);
|
|
||||||
// return h = A(f.join("")),
|
|
||||||
// g = A("".concat(o, "|").concat(a, "|").concat(e, "|").concat("web", "|").concat("3", "|").concat(h)),
|
|
||||||
// "".concat(h, "=").concat(o, "-").concat(a, "-").concat(g);
|
|
||||||
// })(url)
|
|
||||||
// `)
|
|
||||||
// if err != nil {
|
|
||||||
// fmt.Println(err)
|
|
||||||
// return url
|
|
||||||
// }
|
|
||||||
// v, _ := r.Export().(string)
|
|
||||||
// return url + "?" + v
|
|
||||||
//}
|
|
||||||
|
|
||||||
func (d *Pan123) login() error {
|
func (d *Pan123) login() error {
|
||||||
var body base.Json
|
var body base.Json
|
||||||
if utils.IsEmailFormat(d.Username) {
|
if utils.IsEmailFormat(d.Username) {
|
||||||
@ -163,9 +56,9 @@ func (d *Pan123) login() error {
|
|||||||
SetHeaders(map[string]string{
|
SetHeaders(map[string]string{
|
||||||
"origin": "https://www.123pan.com",
|
"origin": "https://www.123pan.com",
|
||||||
"referer": "https://www.123pan.com/",
|
"referer": "https://www.123pan.com/",
|
||||||
"user-agent": "Dart/2.19(dart:io)-alist",
|
"user-agent": "Dart/2.19(dart:io)",
|
||||||
"platform": "web",
|
"platform": "android",
|
||||||
"app-version": "3",
|
"app-version": "36",
|
||||||
//"user-agent": base.UserAgent,
|
//"user-agent": base.UserAgent,
|
||||||
}).
|
}).
|
||||||
SetBody(body).Post(SignIn)
|
SetBody(body).Post(SignIn)
|
||||||
@ -194,17 +87,15 @@ func (d *Pan123) login() error {
|
|||||||
// return &authKey, nil
|
// return &authKey, nil
|
||||||
//}
|
//}
|
||||||
|
|
||||||
func (d *Pan123) Request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
isRetry := false
|
|
||||||
do:
|
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"origin": "https://www.123pan.com",
|
"origin": "https://www.123pan.com",
|
||||||
"referer": "https://www.123pan.com/",
|
"referer": "https://www.123pan.com/",
|
||||||
"authorization": "Bearer " + d.AccessToken,
|
"authorization": "Bearer " + d.AccessToken,
|
||||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
|
"user-agent": "Dart/2.19(dart:io)",
|
||||||
"platform": "web",
|
"platform": "android",
|
||||||
"app-version": "3",
|
"app-version": "36",
|
||||||
//"user-agent": base.UserAgent,
|
//"user-agent": base.UserAgent,
|
||||||
})
|
})
|
||||||
if callback != nil {
|
if callback != nil {
|
||||||
@ -218,67 +109,51 @@ do:
|
|||||||
// return nil, err
|
// return nil, err
|
||||||
//}
|
//}
|
||||||
//req.SetQueryParam("auth-key", *authKey)
|
//req.SetQueryParam("auth-key", *authKey)
|
||||||
res, err := req.Execute(method, GetApi(url))
|
res, err := req.Execute(method, url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
body := res.Body()
|
body := res.Body()
|
||||||
code := utils.Json.Get(body, "code").ToInt()
|
code := utils.Json.Get(body, "code").ToInt()
|
||||||
if code != 0 {
|
if code != 0 {
|
||||||
if !isRetry && code == 401 {
|
if code == 401 {
|
||||||
err := d.login()
|
err := d.login()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
isRetry = true
|
return d.request(url, method, callback, resp)
|
||||||
goto do
|
|
||||||
}
|
}
|
||||||
return nil, errors.New(jsoniter.Get(body, "message").ToString())
|
return nil, errors.New(jsoniter.Get(body, "message").ToString())
|
||||||
}
|
}
|
||||||
return body, nil
|
return body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123) getFiles(ctx context.Context, parentId string, name string) ([]File, error) {
|
func (d *Pan123) getFiles(parentId string) ([]File, error) {
|
||||||
page := 1
|
page := 1
|
||||||
total := 0
|
|
||||||
res := make([]File, 0)
|
res := make([]File, 0)
|
||||||
// 2024-02-06 fix concurrency by 123pan
|
|
||||||
for {
|
for {
|
||||||
if err := d.APIRateLimit(ctx, FileList); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var resp Files
|
var resp Files
|
||||||
query := map[string]string{
|
query := map[string]string{
|
||||||
"driveId": "0",
|
"driveId": "0",
|
||||||
"limit": "100",
|
"limit": "100",
|
||||||
"next": "0",
|
"next": "0",
|
||||||
"orderBy": "file_id",
|
"orderBy": d.OrderBy,
|
||||||
"orderDirection": "desc",
|
"orderDirection": d.OrderDirection,
|
||||||
"parentFileId": parentId,
|
"parentFileId": parentId,
|
||||||
"trashed": "false",
|
"trashed": "false",
|
||||||
"SearchData": "",
|
"Page": strconv.Itoa(page),
|
||||||
"Page": strconv.Itoa(page),
|
|
||||||
"OnlyLookAbnormalFile": "0",
|
|
||||||
"event": "homeListFile",
|
|
||||||
"operateType": "4",
|
|
||||||
"inDirectSpace": "false",
|
|
||||||
}
|
}
|
||||||
_res, err := d.Request(FileList, http.MethodGet, func(req *resty.Request) {
|
_, err := d.request(FileList, http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetQueryParams(query)
|
req.SetQueryParams(query)
|
||||||
}, &resp)
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
log.Debug(string(_res))
|
|
||||||
page++
|
page++
|
||||||
res = append(res, resp.Data.InfoList...)
|
res = append(res, resp.Data.InfoList...)
|
||||||
total = resp.Data.Total
|
|
||||||
if len(resp.Data.InfoList) == 0 || resp.Data.Next == "-1" {
|
if len(resp.Data.InfoList) == 0 || resp.Data.Next == "-1" {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(res) != total {
|
|
||||||
log.Warnf("incorrect file count from remote at %s: expected %d, got %d", name, total, len(res))
|
|
||||||
}
|
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
@ -6,12 +6,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"golang.org/x/time/rate"
|
|
||||||
|
|
||||||
_123 "github.com/alist-org/alist/v3/drivers/123"
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
@ -24,8 +19,6 @@ import (
|
|||||||
type Pan123Share struct {
|
type Pan123Share struct {
|
||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
apiRateLimit sync.Map
|
|
||||||
ref *_123.Pan123
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123Share) Config() driver.Config {
|
func (d *Pan123Share) Config() driver.Config {
|
||||||
@ -42,23 +35,13 @@ func (d *Pan123Share) Init(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123Share) InitReference(storage driver.Driver) error {
|
|
||||||
refStorage, ok := storage.(*_123.Pan123)
|
|
||||||
if ok {
|
|
||||||
d.ref = refStorage
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fmt.Errorf("ref: storage is not 123Pan")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan123Share) Drop(ctx context.Context) error {
|
func (d *Pan123Share) Drop(ctx context.Context) error {
|
||||||
d.ref = nil
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
// TODO return the files list, required
|
// TODO return the files list, required
|
||||||
files, err := d.getFiles(ctx, dir.GetID())
|
files, err := d.getFiles(dir.GetID())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -163,12 +146,4 @@ func (d *Pan123Share) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
|
|||||||
// return nil, errs.NotSupport
|
// return nil, errs.NotSupport
|
||||||
//}
|
//}
|
||||||
|
|
||||||
func (d *Pan123Share) APIRateLimit(ctx context.Context, api string) error {
|
|
||||||
value, _ := d.apiRateLimit.LoadOrStore(api,
|
|
||||||
rate.NewLimiter(rate.Every(700*time.Millisecond), 1))
|
|
||||||
limiter := value.(*rate.Limiter)
|
|
||||||
|
|
||||||
return limiter.Wait(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*Pan123Share)(nil)
|
var _ driver.Driver = (*Pan123Share)(nil)
|
||||||
|
@ -7,11 +7,10 @@ import (
|
|||||||
|
|
||||||
type Addition struct {
|
type Addition struct {
|
||||||
ShareKey string `json:"sharekey" required:"true"`
|
ShareKey string `json:"sharekey" required:"true"`
|
||||||
SharePwd string `json:"sharepassword"`
|
SharePwd string `json:"sharepassword" required:"true"`
|
||||||
driver.RootID
|
driver.RootID
|
||||||
//OrderBy string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
|
OrderBy string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
|
||||||
//OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||||
AccessToken string `json:"accesstoken" type:"text"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
|
@ -1,17 +1,9 @@
|
|||||||
package _123Share
|
package _123Share
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"hash/crc32"
|
|
||||||
"math"
|
|
||||||
"math/rand"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
@ -23,48 +15,20 @@ const (
|
|||||||
Api = "https://www.123pan.com/api"
|
Api = "https://www.123pan.com/api"
|
||||||
AApi = "https://www.123pan.com/a/api"
|
AApi = "https://www.123pan.com/a/api"
|
||||||
BApi = "https://www.123pan.com/b/api"
|
BApi = "https://www.123pan.com/b/api"
|
||||||
MainApi = BApi
|
MainApi = Api
|
||||||
FileList = MainApi + "/share/get"
|
FileList = MainApi + "/share/get"
|
||||||
DownloadInfo = MainApi + "/share/download/info"
|
DownloadInfo = MainApi + "/share/download/info"
|
||||||
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
|
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
|
||||||
)
|
)
|
||||||
|
|
||||||
func signPath(path string, os string, version string) (k string, v string) {
|
|
||||||
table := []byte{'a', 'd', 'e', 'f', 'g', 'h', 'l', 'm', 'y', 'i', 'j', 'n', 'o', 'p', 'k', 'q', 'r', 's', 't', 'u', 'b', 'c', 'v', 'w', 's', 'z'}
|
|
||||||
random := fmt.Sprintf("%.f", math.Round(1e7*rand.Float64()))
|
|
||||||
now := time.Now().In(time.FixedZone("CST", 8*3600))
|
|
||||||
timestamp := fmt.Sprint(now.Unix())
|
|
||||||
nowStr := []byte(now.Format("200601021504"))
|
|
||||||
for i := 0; i < len(nowStr); i++ {
|
|
||||||
nowStr[i] = table[nowStr[i]-48]
|
|
||||||
}
|
|
||||||
timeSign := fmt.Sprint(crc32.ChecksumIEEE(nowStr))
|
|
||||||
data := strings.Join([]string{timestamp, random, path, os, version, timeSign}, "|")
|
|
||||||
dataSign := fmt.Sprint(crc32.ChecksumIEEE([]byte(data)))
|
|
||||||
return timeSign, strings.Join([]string{timestamp, random, dataSign}, "-")
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetApi(rawUrl string) string {
|
|
||||||
u, _ := url.Parse(rawUrl)
|
|
||||||
query := u.Query()
|
|
||||||
query.Add(signPath(u.Path, "web", "3"))
|
|
||||||
u.RawQuery = query.Encode()
|
|
||||||
return u.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
if d.ref != nil {
|
|
||||||
return d.ref.Request(url, method, callback, resp)
|
|
||||||
}
|
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"origin": "https://www.123pan.com",
|
"origin": "https://www.123pan.com",
|
||||||
"referer": "https://www.123pan.com/",
|
"referer": "https://www.123pan.com/",
|
||||||
"authorization": "Bearer " + d.AccessToken,
|
"user-agent": "Dart/2.19(dart:io)",
|
||||||
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) alist-client",
|
"platform": "android",
|
||||||
"platform": "web",
|
"app-version": "36",
|
||||||
"app-version": "3",
|
|
||||||
//"user-agent": base.UserAgent,
|
|
||||||
})
|
})
|
||||||
if callback != nil {
|
if callback != nil {
|
||||||
callback(req)
|
callback(req)
|
||||||
@ -72,7 +36,7 @@ func (d *Pan123Share) request(url string, method string, callback base.ReqCallba
|
|||||||
if resp != nil {
|
if resp != nil {
|
||||||
req.SetResult(resp)
|
req.SetResult(resp)
|
||||||
}
|
}
|
||||||
res, err := req.Execute(method, GetApi(url))
|
res, err := req.Execute(method, url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -84,19 +48,16 @@ func (d *Pan123Share) request(url string, method string, callback base.ReqCallba
|
|||||||
return body, nil
|
return body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Pan123Share) getFiles(ctx context.Context, parentId string) ([]File, error) {
|
func (d *Pan123Share) getFiles(parentId string) ([]File, error) {
|
||||||
page := 1
|
page := 1
|
||||||
res := make([]File, 0)
|
res := make([]File, 0)
|
||||||
for {
|
for {
|
||||||
if err := d.APIRateLimit(ctx, FileList); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var resp Files
|
var resp Files
|
||||||
query := map[string]string{
|
query := map[string]string{
|
||||||
"limit": "100",
|
"limit": "100",
|
||||||
"next": "0",
|
"next": "0",
|
||||||
"orderBy": "file_id",
|
"orderBy": d.OrderBy,
|
||||||
"orderDirection": "desc",
|
"orderDirection": d.OrderDirection,
|
||||||
"parentFileId": parentId,
|
"parentFileId": parentId,
|
||||||
"Page": strconv.Itoa(page),
|
"Page": strconv.Itoa(page),
|
||||||
"shareKey": d.ShareKey,
|
"shareKey": d.ShareKey,
|
||||||
|
File diff suppressed because it is too large
Load Diff
@ -9,22 +9,17 @@ type Addition struct {
|
|||||||
//Account string `json:"account" required:"true"`
|
//Account string `json:"account" required:"true"`
|
||||||
Authorization string `json:"authorization" type:"text" required:"true"`
|
Authorization string `json:"authorization" type:"text" required:"true"`
|
||||||
driver.RootID
|
driver.RootID
|
||||||
Type string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
|
Type string `json:"type" type:"select" options:"personal,family" default:"personal"`
|
||||||
CloudID string `json:"cloud_id"`
|
CloudID string `json:"cloud_id"`
|
||||||
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
|
|
||||||
ReportRealSize bool `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "139Yun",
|
Name: "139Yun",
|
||||||
LocalSort: true,
|
LocalSort: true,
|
||||||
ProxyRangeOption: true,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
op.RegisterDriver(func() driver.Driver {
|
op.RegisterDriver(func() driver.Driver {
|
||||||
d := &Yun139{}
|
return &Yun139{}
|
||||||
d.ProxyRange = true
|
|
||||||
return d
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -1,16 +1,5 @@
|
|||||||
package _139
|
package _139
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/xml"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
MetaPersonal string = "personal"
|
|
||||||
MetaFamily string = "family"
|
|
||||||
MetaGroup string = "group"
|
|
||||||
MetaPersonalNew string = "personal_new"
|
|
||||||
)
|
|
||||||
|
|
||||||
type BaseResp struct {
|
type BaseResp struct {
|
||||||
Success bool `json:"success"`
|
Success bool `json:"success"`
|
||||||
Code string `json:"code"`
|
Code string `json:"code"`
|
||||||
@ -55,7 +44,6 @@ type Content struct {
|
|||||||
//ContentDesc string `json:"contentDesc"`
|
//ContentDesc string `json:"contentDesc"`
|
||||||
//ContentType int `json:"contentType"`
|
//ContentType int `json:"contentType"`
|
||||||
//ContentOrigin int `json:"contentOrigin"`
|
//ContentOrigin int `json:"contentOrigin"`
|
||||||
CreateTime string `json:"createTime"`
|
|
||||||
UpdateTime string `json:"updateTime"`
|
UpdateTime string `json:"updateTime"`
|
||||||
//CommentCount int `json:"commentCount"`
|
//CommentCount int `json:"commentCount"`
|
||||||
ThumbnailURL string `json:"thumbnailURL"`
|
ThumbnailURL string `json:"thumbnailURL"`
|
||||||
@ -143,13 +131,6 @@ type UploadResp struct {
|
|||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type InterLayerUploadResult struct {
|
|
||||||
XMLName xml.Name `xml:"result"`
|
|
||||||
Text string `xml:",chardata"`
|
|
||||||
ResultCode int `xml:"resultCode"`
|
|
||||||
Msg string `xml:"msg"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type CloudContent struct {
|
type CloudContent struct {
|
||||||
ContentID string `json:"contentID"`
|
ContentID string `json:"contentID"`
|
||||||
//Modifier string `json:"modifier"`
|
//Modifier string `json:"modifier"`
|
||||||
@ -204,92 +185,3 @@ type QueryContentListResp struct {
|
|||||||
RecallContent interface{} `json:"recallContent"`
|
RecallContent interface{} `json:"recallContent"`
|
||||||
} `json:"data"`
|
} `json:"data"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type QueryGroupContentListResp struct {
|
|
||||||
BaseResp
|
|
||||||
Data struct {
|
|
||||||
Result struct {
|
|
||||||
ResultCode string `json:"resultCode"`
|
|
||||||
ResultDesc string `json:"resultDesc"`
|
|
||||||
} `json:"result"`
|
|
||||||
GetGroupContentResult struct {
|
|
||||||
ParentCatalogID string `json:"parentCatalogID"` // 根目录是"0"
|
|
||||||
CatalogList []struct {
|
|
||||||
Catalog
|
|
||||||
Path string `json:"path"`
|
|
||||||
} `json:"catalogList"`
|
|
||||||
ContentList []Content `json:"contentList"`
|
|
||||||
NodeCount int `json:"nodeCount"` // 文件+文件夹数量
|
|
||||||
CtlgCnt int `json:"ctlgCnt"` // 文件夹数量
|
|
||||||
ContCnt int `json:"contCnt"` // 文件数量
|
|
||||||
} `json:"getGroupContentResult"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ParallelHashCtx struct {
|
|
||||||
PartOffset int64 `json:"partOffset"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PartInfo struct {
|
|
||||||
PartNumber int64 `json:"partNumber"`
|
|
||||||
PartSize int64 `json:"partSize"`
|
|
||||||
ParallelHashCtx ParallelHashCtx `json:"parallelHashCtx"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PersonalThumbnail struct {
|
|
||||||
Style string `json:"style"`
|
|
||||||
Url string `json:"url"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PersonalFileItem struct {
|
|
||||||
FileId string `json:"fileId"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Size int64 `json:"size"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
CreatedAt string `json:"createdAt"`
|
|
||||||
UpdatedAt string `json:"updatedAt"`
|
|
||||||
Thumbnails []PersonalThumbnail `json:"thumbnailUrls"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PersonalListResp struct {
|
|
||||||
BaseResp
|
|
||||||
Data struct {
|
|
||||||
Items []PersonalFileItem `json:"items"`
|
|
||||||
NextPageCursor string `json:"nextPageCursor"`
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type PersonalPartInfo struct {
|
|
||||||
PartNumber int `json:"partNumber"`
|
|
||||||
UploadUrl string `json:"uploadUrl"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type PersonalUploadResp struct {
|
|
||||||
BaseResp
|
|
||||||
Data struct {
|
|
||||||
FileId string `json:"fileId"`
|
|
||||||
FileName string `json:"fileName"`
|
|
||||||
PartInfos []PersonalPartInfo `json:"partInfos"`
|
|
||||||
Exist bool `json:"exist"`
|
|
||||||
RapidUpload bool `json:"rapidUpload"`
|
|
||||||
UploadId string `json:"uploadId"`
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type PersonalUploadUrlResp struct {
|
|
||||||
BaseResp
|
|
||||||
Data struct {
|
|
||||||
FileId string `json:"fileId"`
|
|
||||||
UploadId string `json:"uploadId"`
|
|
||||||
PartInfos []PersonalPartInfo `json:"partInfos"`
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type RefreshTokenResp struct {
|
|
||||||
XMLName xml.Name `xml:"root"`
|
|
||||||
Return string `xml:"return"`
|
|
||||||
Token string `xml:"token"`
|
|
||||||
Expiretime int32 `xml:"expiretime"`
|
|
||||||
AccessToken string `xml:"accessToken"`
|
|
||||||
Desc string `xml:"desc"`
|
|
||||||
}
|
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -14,7 +13,6 @@ import (
|
|||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils/random"
|
"github.com/alist-org/alist/v3/pkg/utils/random"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
@ -54,55 +52,6 @@ func getTime(t string) time.Time {
|
|||||||
return stamp
|
return stamp
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) refreshToken() error {
|
|
||||||
if d.ref != nil {
|
|
||||||
return d.ref.refreshToken()
|
|
||||||
}
|
|
||||||
decode, err := base64.StdEncoding.DecodeString(d.Authorization)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("authorization decode failed: %s", err)
|
|
||||||
}
|
|
||||||
decodeStr := string(decode)
|
|
||||||
splits := strings.Split(decodeStr, ":")
|
|
||||||
if len(splits) < 3 {
|
|
||||||
return fmt.Errorf("authorization is invalid, splits < 3")
|
|
||||||
}
|
|
||||||
strs := strings.Split(splits[2], "|")
|
|
||||||
if len(strs) < 4 {
|
|
||||||
return fmt.Errorf("authorization is invalid, strs < 4")
|
|
||||||
}
|
|
||||||
expiration, err := strconv.ParseInt(strs[3], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("authorization is invalid")
|
|
||||||
}
|
|
||||||
expiration -= time.Now().UnixMilli()
|
|
||||||
if expiration > 1000*60*60*24*15 {
|
|
||||||
// Authorization有效期大于15天无需刷新
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if expiration < 0 {
|
|
||||||
return fmt.Errorf("authorization has expired")
|
|
||||||
}
|
|
||||||
|
|
||||||
url := "https://aas.caiyun.feixin.10086.cn:443/tellin/authTokenRefresh.do"
|
|
||||||
var resp RefreshTokenResp
|
|
||||||
reqBody := "<root><token>" + splits[2] + "</token><account>" + splits[1] + "</account><clienttype>656</clienttype></root>"
|
|
||||||
_, err = base.RestyClient.R().
|
|
||||||
ForceContentType("application/xml").
|
|
||||||
SetBody(reqBody).
|
|
||||||
SetResult(&resp).
|
|
||||||
Post(url)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Return != "0" {
|
|
||||||
return fmt.Errorf("failed to refresh token: %s", resp.Desc)
|
|
||||||
}
|
|
||||||
d.Authorization = base64.StdEncoding.EncodeToString([]byte(splits[0] + ":" + splits[1] + ":" + resp.Token))
|
|
||||||
op.MustSaveDriverStorage(d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Yun139) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
func (d *Yun139) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
url := "https://yun.139.com" + pathname
|
url := "https://yun.139.com" + pathname
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
@ -123,22 +72,21 @@ func (d *Yun139) request(pathname string, method string, callback base.ReqCallba
|
|||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"Accept": "application/json, text/plain, */*",
|
"Accept": "application/json, text/plain, */*",
|
||||||
"CMS-DEVICE": "default",
|
"CMS-DEVICE": "default",
|
||||||
"Authorization": "Basic " + d.getAuthorization(),
|
"Authorization": "Basic " + d.Authorization,
|
||||||
"mcloud-channel": "1000101",
|
"mcloud-channel": "1000101",
|
||||||
"mcloud-client": "10701",
|
"mcloud-client": "10701",
|
||||||
//"mcloud-route": "001",
|
//"mcloud-route": "001",
|
||||||
"mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
|
"mcloud-sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
|
||||||
//"mcloud-skey":"",
|
//"mcloud-skey":"",
|
||||||
"mcloud-version": "7.14.0",
|
"mcloud-version": "6.6.0",
|
||||||
"Origin": "https://yun.139.com",
|
"Origin": "https://yun.139.com",
|
||||||
"Referer": "https://yun.139.com/w/",
|
"Referer": "https://yun.139.com/w/",
|
||||||
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
|
"x-DeviceInfo": "||9|6.6.0|chrome|95.0.4638.69|uwIy75obnsRPIwlJSd7D9GhUvFwG96ce||macos 10.15.2||zh-CN|||",
|
||||||
"x-huawei-channelSrc": "10000034",
|
"x-huawei-channelSrc": "10000034",
|
||||||
"x-inner-ntwk": "2",
|
"x-inner-ntwk": "2",
|
||||||
"x-m4c-caller": "PC",
|
"x-m4c-caller": "PC",
|
||||||
"x-m4c-src": "10002",
|
"x-m4c-src": "10002",
|
||||||
"x-SvcType": svcType,
|
"x-SvcType": svcType,
|
||||||
"Inner-Hcy-Router-Https": "1",
|
|
||||||
})
|
})
|
||||||
|
|
||||||
var e BaseResp
|
var e BaseResp
|
||||||
@ -176,7 +124,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
|
|||||||
"catalogSortType": 0,
|
"catalogSortType": 0,
|
||||||
"contentSortType": 0,
|
"contentSortType": 0,
|
||||||
"commonAccountInfo": base.Json{
|
"commonAccountInfo": base.Json{
|
||||||
"account": d.getAccount(),
|
"account": d.Account,
|
||||||
"accountType": 1,
|
"accountType": 1,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -224,7 +172,7 @@ func (d *Yun139) newJson(data map[string]interface{}) base.Json {
|
|||||||
"cloudID": d.CloudID,
|
"cloudID": d.CloudID,
|
||||||
"cloudType": 1,
|
"cloudType": 1,
|
||||||
"commonAccountInfo": base.Json{
|
"commonAccountInfo": base.Json{
|
||||||
"account": d.getAccount(),
|
"account": d.Account,
|
||||||
"accountType": 1,
|
"accountType": 1,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -245,11 +193,10 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
|||||||
"sortDirection": 1,
|
"sortDirection": 1,
|
||||||
})
|
})
|
||||||
var resp QueryContentListResp
|
var resp QueryContentListResp
|
||||||
_, err := d.post("/orchestration/familyCloud-rebuild/content/v1.2/queryContentList", data, &resp)
|
_, err := d.post("/orchestration/familyCloud/content/v1.0/queryContentList", data, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
path := resp.Data.Path
|
|
||||||
for _, catalog := range resp.Data.CloudCatalogList {
|
for _, catalog := range resp.Data.CloudCatalogList {
|
||||||
f := model.Object{
|
f := model.Object{
|
||||||
ID: catalog.CatalogID,
|
ID: catalog.CatalogID,
|
||||||
@ -258,7 +205,6 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
|||||||
IsFolder: true,
|
IsFolder: true,
|
||||||
Modified: getTime(catalog.LastUpdateTime),
|
Modified: getTime(catalog.LastUpdateTime),
|
||||||
Ctime: getTime(catalog.CreateTime),
|
Ctime: getTime(catalog.CreateTime),
|
||||||
Path: path, // 文件夹上一级的Path
|
|
||||||
}
|
}
|
||||||
files = append(files, &f)
|
files = append(files, &f)
|
||||||
}
|
}
|
||||||
@ -270,14 +216,13 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
|||||||
Size: content.ContentSize,
|
Size: content.ContentSize,
|
||||||
Modified: getTime(content.LastUpdateTime),
|
Modified: getTime(content.LastUpdateTime),
|
||||||
Ctime: getTime(content.CreateTime),
|
Ctime: getTime(content.CreateTime),
|
||||||
Path: path, // 文件所在目录的Path
|
|
||||||
},
|
},
|
||||||
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
|
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
|
||||||
//Thumbnail: content.BigthumbnailURL,
|
//Thumbnail: content.BigthumbnailURL,
|
||||||
}
|
}
|
||||||
files = append(files, &f)
|
files = append(files, &f)
|
||||||
}
|
}
|
||||||
if resp.Data.TotalCount == 0 {
|
if 100*pageNum > resp.Data.TotalCount {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
pageNum++
|
pageNum++
|
||||||
@ -285,67 +230,12 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
|
|||||||
return files, nil
|
return files, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) groupGetFiles(catalogID string) ([]model.Obj, error) {
|
|
||||||
pageNum := 1
|
|
||||||
files := make([]model.Obj, 0)
|
|
||||||
for {
|
|
||||||
data := d.newJson(base.Json{
|
|
||||||
"groupID": d.CloudID,
|
|
||||||
"catalogID": path.Base(catalogID),
|
|
||||||
"contentSortType": 0,
|
|
||||||
"sortDirection": 1,
|
|
||||||
"startNumber": pageNum,
|
|
||||||
"endNumber": pageNum + 99,
|
|
||||||
"path": path.Join(d.RootFolderID, catalogID),
|
|
||||||
})
|
|
||||||
|
|
||||||
var resp QueryGroupContentListResp
|
|
||||||
_, err := d.post("/orchestration/group-rebuild/content/v1.0/queryGroupContentList", data, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
path := resp.Data.GetGroupContentResult.ParentCatalogID
|
|
||||||
for _, catalog := range resp.Data.GetGroupContentResult.CatalogList {
|
|
||||||
f := model.Object{
|
|
||||||
ID: catalog.CatalogID,
|
|
||||||
Name: catalog.CatalogName,
|
|
||||||
Size: 0,
|
|
||||||
IsFolder: true,
|
|
||||||
Modified: getTime(catalog.UpdateTime),
|
|
||||||
Ctime: getTime(catalog.CreateTime),
|
|
||||||
Path: catalog.Path, // 文件夹的真实Path, root:/开头
|
|
||||||
}
|
|
||||||
files = append(files, &f)
|
|
||||||
}
|
|
||||||
for _, content := range resp.Data.GetGroupContentResult.ContentList {
|
|
||||||
f := model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
ID: content.ContentID,
|
|
||||||
Name: content.ContentName,
|
|
||||||
Size: content.ContentSize,
|
|
||||||
Modified: getTime(content.UpdateTime),
|
|
||||||
Ctime: getTime(content.CreateTime),
|
|
||||||
Path: path, // 文件所在目录的Path
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
|
|
||||||
//Thumbnail: content.BigthumbnailURL,
|
|
||||||
}
|
|
||||||
files = append(files, &f)
|
|
||||||
}
|
|
||||||
if (pageNum + 99) > resp.Data.GetGroupContentResult.NodeCount {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
pageNum = pageNum + 100
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Yun139) getLink(contentId string) (string, error) {
|
func (d *Yun139) getLink(contentId string) (string, error) {
|
||||||
data := base.Json{
|
data := base.Json{
|
||||||
"appName": "",
|
"appName": "",
|
||||||
"contentID": contentId,
|
"contentID": contentId,
|
||||||
"commonAccountInfo": base.Json{
|
"commonAccountInfo": base.Json{
|
||||||
"account": d.getAccount(),
|
"account": d.Account,
|
||||||
"accountType": 1,
|
"accountType": 1,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@ -356,199 +246,9 @@ func (d *Yun139) getLink(contentId string) (string, error) {
|
|||||||
}
|
}
|
||||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
||||||
}
|
}
|
||||||
func (d *Yun139) familyGetLink(contentId string, path string) (string, error) {
|
|
||||||
data := d.newJson(base.Json{
|
|
||||||
"contentID": contentId,
|
|
||||||
"path": path,
|
|
||||||
})
|
|
||||||
res, err := d.post("/orchestration/familyCloud-rebuild/content/v1.0/getFileDownLoadURL",
|
|
||||||
data, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Yun139) groupGetLink(contentId string, path string) (string, error) {
|
|
||||||
data := d.newJson(base.Json{
|
|
||||||
"contentID": contentId,
|
|
||||||
"groupID": d.CloudID,
|
|
||||||
"path": path,
|
|
||||||
})
|
|
||||||
res, err := d.post("/orchestration/group-rebuild/groupManage/v1.0/getGroupFileDownLoadURL",
|
|
||||||
data, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return jsoniter.Get(res, "data", "downloadURL").ToString(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unicode(str string) string {
|
func unicode(str string) string {
|
||||||
textQuoted := strconv.QuoteToASCII(str)
|
textQuoted := strconv.QuoteToASCII(str)
|
||||||
textUnquoted := textQuoted[1 : len(textQuoted)-1]
|
textUnquoted := textQuoted[1 : len(textQuoted)-1]
|
||||||
return textUnquoted
|
return textUnquoted
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Yun139) personalRequest(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
|
||||||
url := "https://personal-kd-njs.yun.139.com" + pathname
|
|
||||||
req := base.RestyClient.R()
|
|
||||||
randStr := random.String(16)
|
|
||||||
ts := time.Now().Format("2006-01-02 15:04:05")
|
|
||||||
if callback != nil {
|
|
||||||
callback(req)
|
|
||||||
}
|
|
||||||
body, err := utils.Json.Marshal(req.Body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sign := calSign(string(body), ts, randStr)
|
|
||||||
svcType := "1"
|
|
||||||
if d.isFamily() {
|
|
||||||
svcType = "2"
|
|
||||||
}
|
|
||||||
req.SetHeaders(map[string]string{
|
|
||||||
"Accept": "application/json, text/plain, */*",
|
|
||||||
"Authorization": "Basic " + d.getAuthorization(),
|
|
||||||
"Caller": "web",
|
|
||||||
"Cms-Device": "default",
|
|
||||||
"Mcloud-Channel": "1000101",
|
|
||||||
"Mcloud-Client": "10701",
|
|
||||||
"Mcloud-Route": "001",
|
|
||||||
"Mcloud-Sign": fmt.Sprintf("%s,%s,%s", ts, randStr, sign),
|
|
||||||
"Mcloud-Version": "7.14.0",
|
|
||||||
"Origin": "https://yun.139.com",
|
|
||||||
"Referer": "https://yun.139.com/w/",
|
|
||||||
"x-DeviceInfo": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||",
|
|
||||||
"x-huawei-channelSrc": "10000034",
|
|
||||||
"x-inner-ntwk": "2",
|
|
||||||
"x-m4c-caller": "PC",
|
|
||||||
"x-m4c-src": "10002",
|
|
||||||
"x-SvcType": svcType,
|
|
||||||
"X-Yun-Api-Version": "v1",
|
|
||||||
"X-Yun-App-Channel": "10000034",
|
|
||||||
"X-Yun-Channel-Source": "10000034",
|
|
||||||
"X-Yun-Client-Info": "||9|7.14.0|chrome|120.0.0.0|||windows 10||zh-CN|||dW5kZWZpbmVk||",
|
|
||||||
"X-Yun-Module-Type": "100",
|
|
||||||
"X-Yun-Svc-Type": "1",
|
|
||||||
})
|
|
||||||
|
|
||||||
var e BaseResp
|
|
||||||
req.SetResult(&e)
|
|
||||||
res, err := req.Execute(method, url)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
log.Debugln(res.String())
|
|
||||||
if !e.Success {
|
|
||||||
return nil, errors.New(e.Message)
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
err = utils.Json.Unmarshal(res.Body(), resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res.Body(), nil
|
|
||||||
}
|
|
||||||
func (d *Yun139) personalPost(pathname string, data interface{}, resp interface{}) ([]byte, error) {
|
|
||||||
return d.personalRequest(pathname, http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetBody(data)
|
|
||||||
}, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getPersonalTime(t string) time.Time {
|
|
||||||
stamp, err := time.ParseInLocation("2006-01-02T15:04:05.999-07:00", t, utils.CNLoc)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return stamp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Yun139) personalGetFiles(fileId string) ([]model.Obj, error) {
|
|
||||||
files := make([]model.Obj, 0)
|
|
||||||
nextPageCursor := ""
|
|
||||||
for {
|
|
||||||
data := base.Json{
|
|
||||||
"imageThumbnailStyleList": []string{"Small", "Large"},
|
|
||||||
"orderBy": "updated_at",
|
|
||||||
"orderDirection": "DESC",
|
|
||||||
"pageInfo": base.Json{
|
|
||||||
"pageCursor": nextPageCursor,
|
|
||||||
"pageSize": 100,
|
|
||||||
},
|
|
||||||
"parentFileId": fileId,
|
|
||||||
}
|
|
||||||
var resp PersonalListResp
|
|
||||||
_, err := d.personalPost("/hcy/file/list", data, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
nextPageCursor = resp.Data.NextPageCursor
|
|
||||||
for _, item := range resp.Data.Items {
|
|
||||||
var isFolder = (item.Type == "folder")
|
|
||||||
var f model.Obj
|
|
||||||
if isFolder {
|
|
||||||
f = &model.Object{
|
|
||||||
ID: item.FileId,
|
|
||||||
Name: item.Name,
|
|
||||||
Size: 0,
|
|
||||||
Modified: getPersonalTime(item.UpdatedAt),
|
|
||||||
Ctime: getPersonalTime(item.CreatedAt),
|
|
||||||
IsFolder: isFolder,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var Thumbnails = item.Thumbnails
|
|
||||||
var ThumbnailUrl string
|
|
||||||
if len(Thumbnails) > 0 {
|
|
||||||
ThumbnailUrl = Thumbnails[len(Thumbnails)-1].Url
|
|
||||||
}
|
|
||||||
f = &model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
ID: item.FileId,
|
|
||||||
Name: item.Name,
|
|
||||||
Size: item.Size,
|
|
||||||
Modified: getPersonalTime(item.UpdatedAt),
|
|
||||||
Ctime: getPersonalTime(item.CreatedAt),
|
|
||||||
IsFolder: isFolder,
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{Thumbnail: ThumbnailUrl},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
files = append(files, f)
|
|
||||||
}
|
|
||||||
if len(nextPageCursor) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Yun139) personalGetLink(fileId string) (string, error) {
|
|
||||||
data := base.Json{
|
|
||||||
"fileId": fileId,
|
|
||||||
}
|
|
||||||
res, err := d.personalPost("/hcy/file/getDownloadUrl",
|
|
||||||
data, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
var cdnUrl = jsoniter.Get(res, "data", "cdnUrl").ToString()
|
|
||||||
if cdnUrl != "" {
|
|
||||||
return cdnUrl, nil
|
|
||||||
} else {
|
|
||||||
return jsoniter.Get(res, "data", "url").ToString(), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Yun139) getAuthorization() string {
|
|
||||||
if d.ref != nil {
|
|
||||||
return d.ref.getAuthorization()
|
|
||||||
}
|
|
||||||
return d.Authorization
|
|
||||||
}
|
|
||||||
func (d *Yun139) getAccount() string {
|
|
||||||
if d.ref != nil {
|
|
||||||
return d.ref.getAccount()
|
|
||||||
}
|
|
||||||
return d.Account
|
|
||||||
}
|
|
||||||
|
@ -365,7 +365,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
|
|||||||
log.Debugf("uploadData: %+v", uploadData)
|
log.Debugf("uploadData: %+v", uploadData)
|
||||||
requestURL := uploadData.RequestURL
|
requestURL := uploadData.RequestURL
|
||||||
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
|
uploadHeaders := strings.Split(decodeURIComponent(uploadData.RequestHeader), "&")
|
||||||
req, err := http.NewRequest(http.MethodPut, requestURL, driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)))
|
req, err := http.NewRequest(http.MethodPut, requestURL, bytes.NewReader(byteData))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -375,11 +375,11 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
|
|||||||
req.Header.Set(v[0:i], v[i+1:])
|
req.Header.Set(v[0:i], v[i+1:])
|
||||||
}
|
}
|
||||||
r, err := base.HttpClient.Do(req)
|
r, err := base.HttpClient.Do(req)
|
||||||
|
log.Debugf("%+v %+v", r, r.Request.Header)
|
||||||
|
r.Body.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("%+v %+v", r, r.Request.Header)
|
|
||||||
_ = r.Body.Close()
|
|
||||||
up(float64(i) * 100 / float64(count))
|
up(float64(i) * 100 / float64(count))
|
||||||
}
|
}
|
||||||
fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
|
fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
|
||||||
|
@ -2,7 +2,6 @@ package _189pc
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -14,7 +13,6 @@ import (
|
|||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
"github.com/google/uuid"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type Cloud189PC struct {
|
type Cloud189PC struct {
|
||||||
@ -30,11 +28,7 @@ type Cloud189PC struct {
|
|||||||
|
|
||||||
uploadThread int
|
uploadThread int
|
||||||
|
|
||||||
familyTransferFolder *Cloud189Folder
|
|
||||||
cleanFamilyTransferFile func()
|
|
||||||
|
|
||||||
storageConfig driver.Config
|
storageConfig driver.Config
|
||||||
ref *Cloud189PC
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Config() driver.Config {
|
func (y *Cloud189PC) Config() driver.Config {
|
||||||
@ -49,24 +43,16 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Init(ctx context.Context) (err error) {
|
func (y *Cloud189PC) Init(ctx context.Context) (err error) {
|
||||||
y.storageConfig = config
|
// 兼容旧上传接口
|
||||||
if y.isFamily() {
|
y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
|
||||||
// 兼容旧上传接口
|
|
||||||
if y.Addition.RapidUpload || y.Addition.UploadMethod == "old" {
|
|
||||||
y.storageConfig.NoOverwriteUpload = true
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// 家庭云转存,不支持覆盖上传
|
|
||||||
if y.Addition.FamilyTransfer {
|
|
||||||
y.storageConfig.NoOverwriteUpload = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// 处理个人云和家庭云参数
|
// 处理个人云和家庭云参数
|
||||||
if y.isFamily() && y.RootFolderID == "-11" {
|
if y.isFamily() && y.RootFolderID == "-11" {
|
||||||
y.RootFolderID = ""
|
y.RootFolderID = ""
|
||||||
}
|
}
|
||||||
if !y.isFamily() && y.RootFolderID == "" {
|
if !y.isFamily() && y.RootFolderID == "" {
|
||||||
y.RootFolderID = "-11"
|
y.RootFolderID = "-11"
|
||||||
|
y.FamilyID = ""
|
||||||
}
|
}
|
||||||
|
|
||||||
// 限制上传线程数
|
// 限制上传线程数
|
||||||
@ -75,64 +61,38 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
|
|||||||
y.uploadThread, y.UploadThread = 3, "3"
|
y.uploadThread, y.UploadThread = 3, "3"
|
||||||
}
|
}
|
||||||
|
|
||||||
if y.ref == nil {
|
// 初始化请求客户端
|
||||||
// 初始化请求客户端
|
if y.client == nil {
|
||||||
if y.client == nil {
|
y.client = base.NewRestyClient().SetHeaders(map[string]string{
|
||||||
y.client = base.NewRestyClient().SetHeaders(map[string]string{
|
"Accept": "application/json;charset=UTF-8",
|
||||||
"Accept": "application/json;charset=UTF-8",
|
"Referer": WEB_URL,
|
||||||
"Referer": WEB_URL,
|
})
|
||||||
})
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// 避免重复登陆
|
// 避免重复登陆
|
||||||
identity := utils.GetMD5EncodeStr(y.Username + y.Password)
|
identity := utils.GetMD5EncodeStr(y.Username + y.Password)
|
||||||
if !y.isLogin() || y.identity != identity {
|
if !y.isLogin() || y.identity != identity {
|
||||||
y.identity = identity
|
y.identity = identity
|
||||||
if err = y.login(); err != nil {
|
if err = y.login(); err != nil {
|
||||||
return
|
return
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 处理家庭云ID
|
// 处理家庭云ID
|
||||||
if y.FamilyID == "" {
|
if y.isFamily() && y.FamilyID == "" {
|
||||||
if y.FamilyID, err = y.getFamilyID(); err != nil {
|
if y.FamilyID, err = y.getFamilyID(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 创建中转文件夹
|
|
||||||
if y.FamilyTransfer {
|
|
||||||
if err := y.createFamilyTransferFolder(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 清理转存文件节流
|
|
||||||
y.cleanFamilyTransferFile = utils.NewThrottle2(time.Minute, func() {
|
|
||||||
if err := y.cleanFamilyTransfer(context.TODO()); err != nil {
|
|
||||||
utils.Log.Errorf("cleanFamilyTransferFolderError:%s", err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Cloud189PC) InitReference(storage driver.Driver) error {
|
|
||||||
refStorage, ok := storage.(*Cloud189PC)
|
|
||||||
if ok {
|
|
||||||
d.ref = refStorage
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (y *Cloud189PC) Drop(ctx context.Context) error {
|
func (y *Cloud189PC) Drop(ctx context.Context) error {
|
||||||
y.ref = nil
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
func (y *Cloud189PC) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
return y.getFiles(ctx, dir.GetID(), y.isFamily())
|
return y.getFiles(ctx, dir.GetID())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
@ -140,9 +100,8 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
|
|||||||
URL string `json:"fileDownloadUrl"`
|
URL string `json:"fileDownloadUrl"`
|
||||||
}
|
}
|
||||||
|
|
||||||
isFamily := y.isFamily()
|
|
||||||
fullUrl := API_URL
|
fullUrl := API_URL
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
fullUrl += "/family/file"
|
fullUrl += "/family/file"
|
||||||
}
|
}
|
||||||
fullUrl += "/getFileDownloadUrl.action"
|
fullUrl += "/getFileDownloadUrl.action"
|
||||||
@ -150,7 +109,7 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
|
|||||||
_, err := y.get(fullUrl, func(r *resty.Request) {
|
_, err := y.get(fullUrl, func(r *resty.Request) {
|
||||||
r.SetContext(ctx)
|
r.SetContext(ctx)
|
||||||
r.SetQueryParam("fileId", file.GetID())
|
r.SetQueryParam("fileId", file.GetID())
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
r.SetQueryParams(map[string]string{
|
r.SetQueryParams(map[string]string{
|
||||||
"familyId": y.FamilyID,
|
"familyId": y.FamilyID,
|
||||||
})
|
})
|
||||||
@ -160,7 +119,7 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
|
|||||||
"flag": "1",
|
"flag": "1",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &downloadUrl, isFamily)
|
}, &downloadUrl)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -197,9 +156,8 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
||||||
isFamily := y.isFamily()
|
|
||||||
fullUrl := API_URL
|
fullUrl := API_URL
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
fullUrl += "/family/file"
|
fullUrl += "/family/file"
|
||||||
}
|
}
|
||||||
fullUrl += "/createFolder.action"
|
fullUrl += "/createFolder.action"
|
||||||
@ -211,7 +169,7 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
|
|||||||
"folderName": dirName,
|
"folderName": dirName,
|
||||||
"relativePath": "",
|
"relativePath": "",
|
||||||
})
|
})
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
req.SetQueryParams(map[string]string{
|
req.SetQueryParams(map[string]string{
|
||||||
"familyId": y.FamilyID,
|
"familyId": y.FamilyID,
|
||||||
"parentId": parentDir.GetID(),
|
"parentId": parentDir.GetID(),
|
||||||
@ -221,7 +179,7 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
|
|||||||
"parentFolderId": parentDir.GetID(),
|
"parentFolderId": parentDir.GetID(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &newFolder, isFamily)
|
}, &newFolder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -229,14 +187,27 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
||||||
isFamily := y.isFamily()
|
var resp CreateBatchTaskResp
|
||||||
other := map[string]string{"targetFileName": dstDir.GetName()}
|
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
resp, err := y.CreateBatchTask("MOVE", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, BatchTaskInfo{
|
req.SetFormData(map[string]string{
|
||||||
FileId: srcObj.GetID(),
|
"type": "MOVE",
|
||||||
FileName: srcObj.GetName(),
|
"taskInfos": MustString(utils.Json.MarshalToString(
|
||||||
IsFolder: BoolToNumber(srcObj.IsDir()),
|
[]BatchTaskInfo{
|
||||||
})
|
{
|
||||||
|
FileId: srcObj.GetID(),
|
||||||
|
FileName: srcObj.GetName(),
|
||||||
|
IsFolder: BoolToNumber(srcObj.IsDir()),
|
||||||
|
},
|
||||||
|
})),
|
||||||
|
"targetFolderId": dstDir.GetID(),
|
||||||
|
})
|
||||||
|
if y.isFamily() {
|
||||||
|
req.SetFormData(map[string]string{
|
||||||
|
"familyId": y.FamilyID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -247,11 +218,10 @@ func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
||||||
isFamily := y.isFamily()
|
|
||||||
queryParam := make(map[string]string)
|
queryParam := make(map[string]string)
|
||||||
fullUrl := API_URL
|
fullUrl := API_URL
|
||||||
method := http.MethodPost
|
method := http.MethodPost
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
fullUrl += "/family/file"
|
fullUrl += "/family/file"
|
||||||
method = http.MethodGet
|
method = http.MethodGet
|
||||||
queryParam["familyId"] = y.FamilyID
|
queryParam["familyId"] = y.FamilyID
|
||||||
@ -275,7 +245,7 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
|
|||||||
|
|
||||||
_, err := y.request(fullUrl, method, func(req *resty.Request) {
|
_, err := y.request(fullUrl, method, func(req *resty.Request) {
|
||||||
req.SetContext(ctx).SetQueryParams(queryParam)
|
req.SetContext(ctx).SetQueryParams(queryParam)
|
||||||
}, nil, newObj, isFamily)
|
}, nil, newObj)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -283,15 +253,28 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
isFamily := y.isFamily()
|
var resp CreateBatchTaskResp
|
||||||
other := map[string]string{"targetFileName": dstDir.GetName()}
|
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
resp, err := y.CreateBatchTask("COPY", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, BatchTaskInfo{
|
req.SetFormData(map[string]string{
|
||||||
FileId: srcObj.GetID(),
|
"type": "COPY",
|
||||||
FileName: srcObj.GetName(),
|
"taskInfos": MustString(utils.Json.MarshalToString(
|
||||||
IsFolder: BoolToNumber(srcObj.IsDir()),
|
[]BatchTaskInfo{
|
||||||
})
|
{
|
||||||
|
FileId: srcObj.GetID(),
|
||||||
|
FileName: srcObj.GetName(),
|
||||||
|
IsFolder: BoolToNumber(srcObj.IsDir()),
|
||||||
|
},
|
||||||
|
})),
|
||||||
|
"targetFolderId": dstDir.GetID(),
|
||||||
|
"targetFileName": dstDir.GetName(),
|
||||||
|
})
|
||||||
|
if y.isFamily() {
|
||||||
|
req.SetFormData(map[string]string{
|
||||||
|
"familyId": y.FamilyID,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -299,13 +282,27 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
|
func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
|
||||||
isFamily := y.isFamily()
|
var resp CreateBatchTaskResp
|
||||||
|
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
req.SetFormData(map[string]string{
|
||||||
|
"type": "DELETE",
|
||||||
|
"taskInfos": MustString(utils.Json.MarshalToString(
|
||||||
|
[]*BatchTaskInfo{
|
||||||
|
{
|
||||||
|
FileId: obj.GetID(),
|
||||||
|
FileName: obj.GetName(),
|
||||||
|
IsFolder: BoolToNumber(obj.IsDir()),
|
||||||
|
},
|
||||||
|
})),
|
||||||
|
})
|
||||||
|
|
||||||
resp, err := y.CreateBatchTask("DELETE", IF(isFamily, y.FamilyID, ""), "", nil, BatchTaskInfo{
|
if y.isFamily() {
|
||||||
FileId: obj.GetID(),
|
req.SetFormData(map[string]string{
|
||||||
FileName: obj.GetName(),
|
"familyId": y.FamilyID,
|
||||||
IsFolder: BoolToNumber(obj.IsDir()),
|
})
|
||||||
})
|
}
|
||||||
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -313,87 +310,25 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
|
return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (newObj model.Obj, err error) {
|
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
overwrite := true
|
|
||||||
isFamily := y.isFamily()
|
|
||||||
|
|
||||||
// 响应时间长,按需启用
|
// 响应时间长,按需启用
|
||||||
if y.Addition.RapidUpload && !stream.IsForceStreamUpload() {
|
if y.Addition.RapidUpload {
|
||||||
if newObj, err := y.RapidUpload(ctx, dstDir, stream, isFamily, overwrite); err == nil {
|
if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
|
||||||
return newObj, nil
|
return newObj, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadMethod := y.UploadMethod
|
switch y.UploadMethod {
|
||||||
if stream.IsForceStreamUpload() {
|
case "old":
|
||||||
uploadMethod = "stream"
|
return y.OldUpload(ctx, dstDir, stream, up)
|
||||||
}
|
|
||||||
|
|
||||||
// 旧版上传家庭云也有限制
|
|
||||||
if uploadMethod == "old" {
|
|
||||||
return y.OldUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 开启家庭云转存
|
|
||||||
if !isFamily && y.FamilyTransfer {
|
|
||||||
// 修改上传目标为家庭云文件夹
|
|
||||||
transferDstDir := dstDir
|
|
||||||
dstDir = y.familyTransferFolder
|
|
||||||
|
|
||||||
// 使用临时文件名
|
|
||||||
srcName := stream.GetName()
|
|
||||||
stream = &WrapFileStreamer{
|
|
||||||
FileStreamer: stream,
|
|
||||||
Name: fmt.Sprintf("0%s.transfer", uuid.NewString()),
|
|
||||||
}
|
|
||||||
|
|
||||||
// 使用家庭云上传
|
|
||||||
isFamily = true
|
|
||||||
overwrite = false
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if newObj != nil {
|
|
||||||
// 转存家庭云文件到个人云
|
|
||||||
err = y.SaveFamilyFileToPersonCloud(context.TODO(), y.FamilyID, newObj, transferDstDir, true)
|
|
||||||
// 删除家庭云源文件
|
|
||||||
go y.Delete(context.TODO(), y.FamilyID, newObj)
|
|
||||||
// 批量任务有概率删不掉
|
|
||||||
go y.cleanFamilyTransferFile()
|
|
||||||
// 转存失败返回错误
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// 查找转存文件
|
|
||||||
var file *Cloud189File
|
|
||||||
file, err = y.findFileByName(context.TODO(), newObj.GetName(), transferDstDir.GetID(), false)
|
|
||||||
if err != nil {
|
|
||||||
if err == errs.ObjectNotFound {
|
|
||||||
err = fmt.Errorf("unknown error: No transfer file obtained %s", newObj.GetName())
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// 重命名转存文件
|
|
||||||
newObj, err = y.Rename(context.TODO(), file, srcName)
|
|
||||||
if err != nil {
|
|
||||||
// 重命名失败删除源文件
|
|
||||||
_ = y.Delete(context.TODO(), "", file)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch uploadMethod {
|
|
||||||
case "rapid":
|
case "rapid":
|
||||||
return y.FastUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
return y.FastUpload(ctx, dstDir, stream, up)
|
||||||
case "stream":
|
case "stream":
|
||||||
if stream.GetSize() == 0 {
|
if stream.GetSize() == 0 {
|
||||||
return y.FastUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
return y.FastUpload(ctx, dstDir, stream, up)
|
||||||
}
|
}
|
||||||
fallthrough
|
fallthrough
|
||||||
default:
|
default:
|
||||||
return y.StreamUpload(ctx, dstDir, stream, up, isFamily, overwrite)
|
return y.StreamUpload(ctx, dstDir, stream, up)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -18,7 +18,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils/random"
|
"github.com/alist-org/alist/v3/pkg/utils/random"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -193,28 +192,3 @@ func partSize(size int64) int64 {
|
|||||||
}
|
}
|
||||||
return DEFAULT
|
return DEFAULT
|
||||||
}
|
}
|
||||||
|
|
||||||
func isBool(bs ...bool) bool {
|
|
||||||
for _, b := range bs {
|
|
||||||
if b {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func IF[V any](o bool, t V, f V) V {
|
|
||||||
if o {
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
type WrapFileStreamer struct {
|
|
||||||
model.FileStreamer
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *WrapFileStreamer) GetName() string {
|
|
||||||
return w.Name
|
|
||||||
}
|
|
||||||
|
@ -16,7 +16,6 @@ type Addition struct {
|
|||||||
FamilyID string `json:"family_id"`
|
FamilyID string `json:"family_id"`
|
||||||
UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
|
UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
|
||||||
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
||||||
FamilyTransfer bool `json:"family_transfer"`
|
|
||||||
RapidUpload bool `json:"rapid_upload"`
|
RapidUpload bool `json:"rapid_upload"`
|
||||||
NoUseOcr bool `json:"no_use_ocr"`
|
NoUseOcr bool `json:"no_use_ocr"`
|
||||||
}
|
}
|
||||||
|
@ -3,11 +3,10 @@ package _189pc
|
|||||||
import (
|
import (
|
||||||
"encoding/xml"
|
"encoding/xml"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// 居然有四种返回方式
|
// 居然有四种返回方式
|
||||||
@ -143,7 +142,7 @@ type FamilyInfoListResp struct {
|
|||||||
type FamilyInfoResp struct {
|
type FamilyInfoResp struct {
|
||||||
Count int `json:"count"`
|
Count int `json:"count"`
|
||||||
CreateTime string `json:"createTime"`
|
CreateTime string `json:"createTime"`
|
||||||
FamilyID int64 `json:"familyId"`
|
FamilyID int `json:"familyId"`
|
||||||
RemarkName string `json:"remarkName"`
|
RemarkName string `json:"remarkName"`
|
||||||
Type int `json:"type"`
|
Type int `json:"type"`
|
||||||
UseFlag int `json:"useFlag"`
|
UseFlag int `json:"useFlag"`
|
||||||
@ -243,12 +242,7 @@ type BatchTaskInfo struct {
|
|||||||
// IsFolder 是否是文件夹,0-否,1-是
|
// IsFolder 是否是文件夹,0-否,1-是
|
||||||
IsFolder int `json:"isFolder"`
|
IsFolder int `json:"isFolder"`
|
||||||
// SrcParentId 文件所在父目录ID
|
// SrcParentId 文件所在父目录ID
|
||||||
SrcParentId string `json:"srcParentId,omitempty"`
|
//SrcParentId string `json:"srcParentId"`
|
||||||
|
|
||||||
/* 冲突管理 */
|
|
||||||
// 1 -> 跳过 2 -> 保留 3 -> 覆盖
|
|
||||||
DealWay int `json:"dealWay,omitempty"`
|
|
||||||
IsConflict int `json:"isConflict,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* 上传部分 */
|
/* 上传部分 */
|
||||||
@ -361,14 +355,6 @@ type BatchTaskStateResp struct {
|
|||||||
TaskStatus int `json:"taskStatus"` //1 初始化 2 存在冲突 3 执行中,4 完成
|
TaskStatus int `json:"taskStatus"` //1 初始化 2 存在冲突 3 执行中,4 完成
|
||||||
}
|
}
|
||||||
|
|
||||||
type BatchTaskConflictTaskInfoResp struct {
|
|
||||||
SessionKey string `json:"sessionKey"`
|
|
||||||
TargetFolderID int `json:"targetFolderId"`
|
|
||||||
TaskID string `json:"taskId"`
|
|
||||||
TaskInfos []BatchTaskInfo
|
|
||||||
TaskType int `json:"taskType"`
|
|
||||||
}
|
|
||||||
|
|
||||||
/* query 加密参数*/
|
/* query 加密参数*/
|
||||||
type Params map[string]string
|
type Params map[string]string
|
||||||
|
|
||||||
|
@ -19,12 +19,9 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/sync/semaphore"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
"github.com/alist-org/alist/v3/internal/setting"
|
"github.com/alist-org/alist/v3/internal/setting"
|
||||||
@ -57,13 +54,13 @@ const (
|
|||||||
CHANNEL_ID = "web_cloud.189.cn"
|
CHANNEL_ID = "web_cloud.189.cn"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string {
|
func (y *Cloud189PC) SignatureHeader(url, method, params string) map[string]string {
|
||||||
dateOfGmt := getHttpDateStr()
|
dateOfGmt := getHttpDateStr()
|
||||||
sessionKey := y.getTokenInfo().SessionKey
|
sessionKey := y.tokenInfo.SessionKey
|
||||||
sessionSecret := y.getTokenInfo().SessionSecret
|
sessionSecret := y.tokenInfo.SessionSecret
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
sessionKey = y.getTokenInfo().FamilySessionKey
|
sessionKey = y.tokenInfo.FamilySessionKey
|
||||||
sessionSecret = y.getTokenInfo().FamilySessionSecret
|
sessionSecret = y.tokenInfo.FamilySessionSecret
|
||||||
}
|
}
|
||||||
|
|
||||||
header := map[string]string{
|
header := map[string]string{
|
||||||
@ -75,10 +72,10 @@ func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool)
|
|||||||
return header
|
return header
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
|
func (y *Cloud189PC) EncryptParams(params Params) string {
|
||||||
sessionSecret := y.getTokenInfo().SessionSecret
|
sessionSecret := y.tokenInfo.SessionSecret
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
sessionSecret = y.getTokenInfo().FamilySessionSecret
|
sessionSecret = y.tokenInfo.FamilySessionSecret
|
||||||
}
|
}
|
||||||
if params != nil {
|
if params != nil {
|
||||||
return AesECBEncrypt(params.Encode(), sessionSecret[:16])
|
return AesECBEncrypt(params.Encode(), sessionSecret[:16])
|
||||||
@ -86,17 +83,17 @@ func (y *Cloud189PC) EncryptParams(params Params, isFamily bool) string {
|
|||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}, isFamily ...bool) ([]byte, error) {
|
func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, params Params, resp interface{}) ([]byte, error) {
|
||||||
req := y.getClient().R().SetQueryParams(clientSuffix())
|
req := y.client.R().SetQueryParams(clientSuffix())
|
||||||
|
|
||||||
// 设置params
|
// 设置params
|
||||||
paramsData := y.EncryptParams(params, isBool(isFamily...))
|
paramsData := y.EncryptParams(params)
|
||||||
if paramsData != "" {
|
if paramsData != "" {
|
||||||
req.SetQueryParam("params", paramsData)
|
req.SetQueryParam("params", paramsData)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Signature
|
// Signature
|
||||||
req.SetHeaders(y.SignatureHeader(url, method, paramsData, isBool(isFamily...)))
|
req.SetHeaders(y.SignatureHeader(url, method, paramsData))
|
||||||
|
|
||||||
var erron RespErr
|
var erron RespErr
|
||||||
req.SetError(&erron)
|
req.SetError(&erron)
|
||||||
@ -116,33 +113,31 @@ func (y *Cloud189PC) request(url, method string, callback base.ReqCallback, para
|
|||||||
if err = y.refreshSession(); err != nil {
|
if err = y.refreshSession(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return y.request(url, method, callback, params, resp, isFamily...)
|
return y.request(url, method, callback, params, resp)
|
||||||
}
|
|
||||||
|
|
||||||
// if erron.ErrorCode == "InvalidSessionKey" || erron.Code == "InvalidSessionKey" {
|
|
||||||
if strings.Contains(res.String(), "InvalidSessionKey") {
|
|
||||||
if err = y.refreshSession(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return y.request(url, method, callback, params, resp, isFamily...)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 处理错误
|
// 处理错误
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
|
if erron.ErrorCode == "InvalidSessionKey" {
|
||||||
|
if err = y.refreshSession(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return y.request(url, method, callback, params, resp)
|
||||||
|
}
|
||||||
return nil, &erron
|
return nil, &erron
|
||||||
}
|
}
|
||||||
return res.Body(), nil
|
return res.Body(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) get(url string, callback base.ReqCallback, resp interface{}, isFamily ...bool) ([]byte, error) {
|
func (y *Cloud189PC) get(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
return y.request(url, http.MethodGet, callback, nil, resp, isFamily...)
|
return y.request(url, http.MethodGet, callback, nil, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) post(url string, callback base.ReqCallback, resp interface{}, isFamily ...bool) ([]byte, error) {
|
func (y *Cloud189PC) post(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
||||||
return y.request(url, http.MethodPost, callback, nil, resp, isFamily...)
|
return y.request(url, http.MethodPost, callback, nil, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]string, sign bool, file io.Reader, isFamily bool) ([]byte, error) {
|
func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]string, sign bool, file io.Reader) ([]byte, error) {
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, file)
|
req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -159,7 +154,7 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
|
|
||||||
if sign {
|
if sign {
|
||||||
for key, value := range y.SignatureHeader(url, http.MethodPut, "", isFamily) {
|
for key, value := range y.SignatureHeader(url, http.MethodPut, "") {
|
||||||
req.Header.Add(key, value)
|
req.Header.Add(key, value)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -176,8 +171,8 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
|
|
||||||
var erron RespErr
|
var erron RespErr
|
||||||
_ = jsoniter.Unmarshal(body, &erron)
|
jsoniter.Unmarshal(body, &erron)
|
||||||
_ = xml.Unmarshal(body, &erron)
|
xml.Unmarshal(body, &erron)
|
||||||
if erron.HasError() {
|
if erron.HasError() {
|
||||||
return nil, &erron
|
return nil, &erron
|
||||||
}
|
}
|
||||||
@ -186,10 +181,40 @@ func (y *Cloud189PC) put(ctx context.Context, url string, headers map[string]str
|
|||||||
}
|
}
|
||||||
return body, nil
|
return body, nil
|
||||||
}
|
}
|
||||||
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool) ([]model.Obj, error) {
|
func (y *Cloud189PC) getFiles(ctx context.Context, fileId string) ([]model.Obj, error) {
|
||||||
res := make([]model.Obj, 0, 100)
|
fullUrl := API_URL
|
||||||
|
if y.isFamily() {
|
||||||
|
fullUrl += "/family/file"
|
||||||
|
}
|
||||||
|
fullUrl += "/listFiles.action"
|
||||||
|
|
||||||
|
res := make([]model.Obj, 0, 130)
|
||||||
for pageNum := 1; ; pageNum++ {
|
for pageNum := 1; ; pageNum++ {
|
||||||
resp, err := y.getFilesWithPage(ctx, fileId, isFamily, pageNum, 1000, y.OrderBy, y.OrderDirection)
|
var resp Cloud189FilesResp
|
||||||
|
_, err := y.get(fullUrl, func(r *resty.Request) {
|
||||||
|
r.SetContext(ctx)
|
||||||
|
r.SetQueryParams(map[string]string{
|
||||||
|
"folderId": fileId,
|
||||||
|
"fileType": "0",
|
||||||
|
"mediaAttr": "0",
|
||||||
|
"iconOption": "5",
|
||||||
|
"pageNum": fmt.Sprint(pageNum),
|
||||||
|
"pageSize": "130",
|
||||||
|
})
|
||||||
|
if y.isFamily() {
|
||||||
|
r.SetQueryParams(map[string]string{
|
||||||
|
"familyId": y.FamilyID,
|
||||||
|
"orderBy": toFamilyOrderBy(y.OrderBy),
|
||||||
|
"descending": toDesc(y.OrderDirection),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
r.SetQueryParams(map[string]string{
|
||||||
|
"recursive": "0",
|
||||||
|
"orderBy": y.OrderBy,
|
||||||
|
"descending": toDesc(y.OrderDirection),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -208,63 +233,6 @@ func (y *Cloud189PC) getFiles(ctx context.Context, fileId string, isFamily bool)
|
|||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) getFilesWithPage(ctx context.Context, fileId string, isFamily bool, pageNum int, pageSize int, orderBy string, orderDirection string) (*Cloud189FilesResp, error) {
|
|
||||||
fullUrl := API_URL
|
|
||||||
if isFamily {
|
|
||||||
fullUrl += "/family/file"
|
|
||||||
}
|
|
||||||
fullUrl += "/listFiles.action"
|
|
||||||
|
|
||||||
var resp Cloud189FilesResp
|
|
||||||
_, err := y.get(fullUrl, func(r *resty.Request) {
|
|
||||||
r.SetContext(ctx)
|
|
||||||
r.SetQueryParams(map[string]string{
|
|
||||||
"folderId": fileId,
|
|
||||||
"fileType": "0",
|
|
||||||
"mediaAttr": "0",
|
|
||||||
"iconOption": "5",
|
|
||||||
"pageNum": fmt.Sprint(pageNum),
|
|
||||||
"pageSize": fmt.Sprint(pageSize),
|
|
||||||
})
|
|
||||||
if isFamily {
|
|
||||||
r.SetQueryParams(map[string]string{
|
|
||||||
"familyId": y.FamilyID,
|
|
||||||
"orderBy": toFamilyOrderBy(orderBy),
|
|
||||||
"descending": toDesc(orderDirection),
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
r.SetQueryParams(map[string]string{
|
|
||||||
"recursive": "0",
|
|
||||||
"orderBy": orderBy,
|
|
||||||
"descending": toDesc(orderDirection),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}, &resp, isFamily)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, folderId string, isFamily bool) (*Cloud189File, error) {
|
|
||||||
for pageNum := 1; ; pageNum++ {
|
|
||||||
resp, err := y.getFilesWithPage(ctx, folderId, isFamily, pageNum, 10, "filename", "asc")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// 获取完毕跳出
|
|
||||||
if resp.FileListAO.Count == 0 {
|
|
||||||
return nil, errs.ObjectNotFound
|
|
||||||
}
|
|
||||||
for i := 0; i < len(resp.FileListAO.FileList); i++ {
|
|
||||||
file := resp.FileListAO.FileList[i]
|
|
||||||
if file.Name == searchName {
|
|
||||||
return &file, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (y *Cloud189PC) login() (err error) {
|
func (y *Cloud189PC) login() (err error) {
|
||||||
// 初始化登陆所需参数
|
// 初始化登陆所需参数
|
||||||
if y.loginParam == nil {
|
if y.loginParam == nil {
|
||||||
@ -432,9 +400,6 @@ func (y *Cloud189PC) initLoginParam() error {
|
|||||||
|
|
||||||
// 刷新会话
|
// 刷新会话
|
||||||
func (y *Cloud189PC) refreshSession() (err error) {
|
func (y *Cloud189PC) refreshSession() (err error) {
|
||||||
if y.ref != nil {
|
|
||||||
return y.ref.refreshSession()
|
|
||||||
}
|
|
||||||
var erron RespErr
|
var erron RespErr
|
||||||
var userSessionResp UserSessionResp
|
var userSessionResp UserSessionResp
|
||||||
_, err = y.client.R().
|
_, err = y.client.R().
|
||||||
@ -472,7 +437,7 @@ func (y *Cloud189PC) refreshSession() (err error) {
|
|||||||
|
|
||||||
// 普通上传
|
// 普通上传
|
||||||
// 无法上传大小为0的文件
|
// 无法上传大小为0的文件
|
||||||
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
var sliceSize = partSize(file.GetSize())
|
var sliceSize = partSize(file.GetSize())
|
||||||
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
|
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
|
||||||
lastPartSize := file.GetSize() % sliceSize
|
lastPartSize := file.GetSize() % sliceSize
|
||||||
@ -489,7 +454,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
}
|
}
|
||||||
|
|
||||||
fullUrl := UPLOAD_URL
|
fullUrl := UPLOAD_URL
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
params.Set("familyId", y.FamilyID)
|
params.Set("familyId", y.FamilyID)
|
||||||
fullUrl += "/family"
|
fullUrl += "/family"
|
||||||
} else {
|
} else {
|
||||||
@ -501,7 +466,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
var initMultiUpload InitMultiUploadResp
|
var initMultiUpload InitMultiUploadResp
|
||||||
_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
}, params, &initMultiUpload, isFamily)
|
}, params, &initMultiUpload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -510,7 +475,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
retry.Attempts(3),
|
retry.Attempts(3),
|
||||||
retry.Delay(time.Second),
|
retry.Delay(time.Second),
|
||||||
retry.DelayType(retry.BackOffDelay))
|
retry.DelayType(retry.BackOffDelay))
|
||||||
sem := semaphore.NewWeighted(3)
|
|
||||||
|
|
||||||
fileMd5 := md5.New()
|
fileMd5 := md5.New()
|
||||||
silceMd5 := md5.New()
|
silceMd5 := md5.New()
|
||||||
@ -520,6 +484,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
if utils.IsCanceled(upCtx) {
|
if utils.IsCanceled(upCtx) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
byteData := make([]byte, sliceSize)
|
byteData := make([]byte, sliceSize)
|
||||||
if i == count {
|
if i == count {
|
||||||
byteData = byteData[:lastPartSize]
|
byteData = byteData[:lastPartSize]
|
||||||
@ -528,7 +493,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
// 读取块
|
// 读取块
|
||||||
silceMd5.Reset()
|
silceMd5.Reset()
|
||||||
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
|
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
|
||||||
sem.Release(1)
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -538,19 +502,14 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
|
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
|
||||||
|
|
||||||
threadG.Go(func(ctx context.Context) error {
|
threadG.Go(func(ctx context.Context) error {
|
||||||
if err = sem.Acquire(ctx, 1); err != nil {
|
uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer sem.Release(1)
|
|
||||||
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// step.4 上传切片
|
// step.4 上传切片
|
||||||
uploadUrl := uploadUrls[0]
|
uploadUrl := uploadUrls[0]
|
||||||
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false,
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
|
||||||
driver.NewLimitedUploadStream(ctx, bytes.NewReader(byteData)), isFamily)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -579,21 +538,21 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
"sliceMd5": sliceMd5Hex,
|
"sliceMd5": sliceMd5Hex,
|
||||||
"lazyCheck": "1",
|
"lazyCheck": "1",
|
||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
"opertype": IF(overwrite, "3", "1"),
|
"opertype": "3",
|
||||||
}, &resp, isFamily)
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return resp.toFile(), nil
|
return resp.toFile(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
|
||||||
fileMd5 := stream.GetHash().GetHash(utils.MD5)
|
fileMd5 := stream.GetHash().GetHash(utils.MD5)
|
||||||
if len(fileMd5) < utils.MD5.Width {
|
if len(fileMd5) < utils.MD5.Width {
|
||||||
return nil, errors.New("invalid hash")
|
return nil, errors.New("invalid hash")
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()), isFamily)
|
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -602,11 +561,11 @@ func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream m
|
|||||||
return nil, errors.New("rapid upload fail")
|
return nil, errors.New("rapid upload fail")
|
||||||
}
|
}
|
||||||
|
|
||||||
return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId, isFamily, overwrite)
|
return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 快传
|
// 快传
|
||||||
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
tempFile, err := file.CacheFullInTempFile()
|
tempFile, err := file.CacheFullInTempFile()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -635,7 +594,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
}
|
}
|
||||||
|
|
||||||
silceMd5.Reset()
|
silceMd5.Reset()
|
||||||
if _, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
|
if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
md5Byte := silceMd5.Sum(nil)
|
md5Byte := silceMd5.Sum(nil)
|
||||||
@ -650,7 +609,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
}
|
}
|
||||||
|
|
||||||
fullUrl := UPLOAD_URL
|
fullUrl := UPLOAD_URL
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
fullUrl += "/family"
|
fullUrl += "/family"
|
||||||
} else {
|
} else {
|
||||||
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
|
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
|
||||||
@ -658,7 +617,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 尝试恢复进度
|
// 尝试恢复进度
|
||||||
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.getTokenInfo().SessionKey, fileMd5Hex)
|
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
|
||||||
if !ok {
|
if !ok {
|
||||||
//step.2 预上传
|
//step.2 预上传
|
||||||
params := Params{
|
params := Params{
|
||||||
@ -669,13 +628,13 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
"sliceSize": fmt.Sprint(sliceSize),
|
"sliceSize": fmt.Sprint(sliceSize),
|
||||||
"sliceMd5": sliceMd5Hex,
|
"sliceMd5": sliceMd5Hex,
|
||||||
}
|
}
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
params.Set("familyId", y.FamilyID)
|
params.Set("familyId", y.FamilyID)
|
||||||
}
|
}
|
||||||
var uploadInfo InitMultiUploadResp
|
var uploadInfo InitMultiUploadResp
|
||||||
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
}, params, &uploadInfo, isFamily)
|
}, params, &uploadInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -700,7 +659,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
i, uploadPart := i, uploadPart
|
i, uploadPart := i, uploadPart
|
||||||
threadG.Go(func(ctx context.Context) error {
|
threadG.Go(func(ctx context.Context) error {
|
||||||
// step.3 获取上传链接
|
// step.3 获取上传链接
|
||||||
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, uploadInfo.UploadFileID, uploadPart)
|
uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -712,7 +671,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
}
|
}
|
||||||
|
|
||||||
// step.4 上传切片
|
// step.4 上传切片
|
||||||
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize), isFamily)
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -725,7 +684,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
if err = threadG.Wait(); err != nil {
|
if err = threadG.Wait(); err != nil {
|
||||||
if errors.Is(err, context.Canceled) {
|
if errors.Is(err, context.Canceled) {
|
||||||
uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
|
uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
|
||||||
base.SaveUploadProgress(y, uploadProgress, y.getTokenInfo().SessionKey, fileMd5Hex)
|
base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -739,8 +698,8 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
}, Params{
|
}, Params{
|
||||||
"uploadFileId": uploadInfo.UploadFileID,
|
"uploadFileId": uploadInfo.UploadFileID,
|
||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
"opertype": IF(overwrite, "3", "1"),
|
"opertype": "3",
|
||||||
}, &resp, isFamily)
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -749,9 +708,9 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
|
|||||||
|
|
||||||
// 获取上传切片信息
|
// 获取上传切片信息
|
||||||
// 对http body有大小限制,分片信息太多会出错
|
// 对http body有大小限制,分片信息太多会出错
|
||||||
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
|
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
|
||||||
fullUrl := UPLOAD_URL
|
fullUrl := UPLOAD_URL
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
fullUrl += "/family"
|
fullUrl += "/family"
|
||||||
} else {
|
} else {
|
||||||
fullUrl += "/person"
|
fullUrl += "/person"
|
||||||
@ -764,7 +723,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
|
|||||||
}, Params{
|
}, Params{
|
||||||
"uploadFileId": uploadFileId,
|
"uploadFileId": uploadFileId,
|
||||||
"partInfo": strings.Join(partInfo, ","),
|
"partInfo": strings.Join(partInfo, ","),
|
||||||
}, &uploadUrlsResp, isFamily)
|
}, &uploadUrlsResp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -793,7 +752,7 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, isFamily bool, uplo
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 旧版本上传,家庭云不支持覆盖
|
// 旧版本上传,家庭云不支持覆盖
|
||||||
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
tempFile, err := file.CacheFullInTempFile()
|
tempFile, err := file.CacheFullInTempFile()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -802,10 +761,9 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rateLimited := driver.NewLimitedUploadStream(ctx, io.NopCloser(tempFile))
|
|
||||||
|
|
||||||
// 创建上传会话
|
// 创建上传会话
|
||||||
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()), isFamily)
|
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -822,14 +780,14 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
"Expect": "100-continue",
|
"Expect": "100-continue",
|
||||||
}
|
}
|
||||||
|
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
header["FamilyId"] = fmt.Sprint(y.FamilyID)
|
header["FamilyId"] = fmt.Sprint(y.FamilyID)
|
||||||
header["UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
header["UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
||||||
} else {
|
} else {
|
||||||
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
header["Edrive-UploadFileId"] = fmt.Sprint(status.UploadFileId)
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err := y.put(ctx, status.FileUploadUrl, header, true, rateLimited, isFamily)
|
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
|
||||||
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -844,10 +802,10 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
"uploadFileId": fmt.Sprint(status.UploadFileId),
|
"uploadFileId": fmt.Sprint(status.UploadFileId),
|
||||||
"resumePolicy": "1",
|
"resumePolicy": "1",
|
||||||
})
|
})
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
req.SetQueryParam("familyId", fmt.Sprint(y.FamilyID))
|
req.SetQueryParam("familyId", fmt.Sprint(y.FamilyID))
|
||||||
}
|
}
|
||||||
}, &status, isFamily)
|
}, &status)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -857,20 +815,20 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
|
up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
|
||||||
}
|
}
|
||||||
|
|
||||||
return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId, isFamily, overwrite)
|
return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 创建上传会话
|
// 创建上传会话
|
||||||
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string, isFamily bool) (*CreateUploadFileResp, error) {
|
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
|
||||||
var uploadInfo CreateUploadFileResp
|
var uploadInfo CreateUploadFileResp
|
||||||
|
|
||||||
fullUrl := API_URL + "/createUploadFile.action"
|
fullUrl := API_URL + "/createUploadFile.action"
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
fullUrl = API_URL + "/family/file/createFamilyFile.action"
|
fullUrl = API_URL + "/family/file/createFamilyFile.action"
|
||||||
}
|
}
|
||||||
_, err := y.post(fullUrl, func(req *resty.Request) {
|
_, err := y.post(fullUrl, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
req.SetQueryParams(map[string]string{
|
req.SetQueryParams(map[string]string{
|
||||||
"familyId": y.FamilyID,
|
"familyId": y.FamilyID,
|
||||||
"parentId": parentID,
|
"parentId": parentID,
|
||||||
@ -891,7 +849,7 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
|
|||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &uploadInfo, isFamily)
|
}, &uploadInfo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -900,11 +858,11 @@ func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 提交上传文件
|
// 提交上传文件
|
||||||
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64, isFamily bool, overwrite bool) (model.Obj, error) {
|
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
|
||||||
var resp OldCommitUploadFileResp
|
var resp OldCommitUploadFileResp
|
||||||
_, err := y.post(fileCommitUrl, func(req *resty.Request) {
|
_, err := y.post(fileCommitUrl, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
if isFamily {
|
if y.isFamily() {
|
||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"ResumePolicy": "1",
|
"ResumePolicy": "1",
|
||||||
"UploadFileId": fmt.Sprint(uploadFileID),
|
"UploadFileId": fmt.Sprint(uploadFileID),
|
||||||
@ -912,13 +870,13 @@ func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string,
|
|||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
req.SetFormData(map[string]string{
|
req.SetFormData(map[string]string{
|
||||||
"opertype": IF(overwrite, "3", "1"),
|
"opertype": "3",
|
||||||
"resumePolicy": "1",
|
"resumePolicy": "1",
|
||||||
"uploadFileId": fmt.Sprint(uploadFileID),
|
"uploadFileId": fmt.Sprint(uploadFileID),
|
||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &resp, isFamily)
|
}, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -937,79 +895,10 @@ func (y *Cloud189PC) isLogin() bool {
|
|||||||
return err == nil
|
return err == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 创建家庭云中转文件夹
|
|
||||||
func (y *Cloud189PC) createFamilyTransferFolder() error {
|
|
||||||
var rootFolder Cloud189Folder
|
|
||||||
_, err := y.post(API_URL+"/family/file/createFolder.action", func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(map[string]string{
|
|
||||||
"folderName": "FamilyTransferFolder",
|
|
||||||
"familyId": y.FamilyID,
|
|
||||||
})
|
|
||||||
}, &rootFolder, true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
y.familyTransferFolder = &rootFolder
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// 清理中转文件夹
|
|
||||||
func (y *Cloud189PC) cleanFamilyTransfer(ctx context.Context) error {
|
|
||||||
transferFolderId := y.familyTransferFolder.GetID()
|
|
||||||
for pageNum := 1; ; pageNum++ {
|
|
||||||
resp, err := y.getFilesWithPage(ctx, transferFolderId, true, pageNum, 100, "lastOpTime", "asc")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// 获取完毕跳出
|
|
||||||
if resp.FileListAO.Count == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
var tasks []BatchTaskInfo
|
|
||||||
for i := 0; i < len(resp.FileListAO.FolderList); i++ {
|
|
||||||
folder := resp.FileListAO.FolderList[i]
|
|
||||||
tasks = append(tasks, BatchTaskInfo{
|
|
||||||
FileId: folder.GetID(),
|
|
||||||
FileName: folder.GetName(),
|
|
||||||
IsFolder: BoolToNumber(folder.IsDir()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
for i := 0; i < len(resp.FileListAO.FileList); i++ {
|
|
||||||
file := resp.FileListAO.FileList[i]
|
|
||||||
tasks = append(tasks, BatchTaskInfo{
|
|
||||||
FileId: file.GetID(),
|
|
||||||
FileName: file.GetName(),
|
|
||||||
IsFolder: BoolToNumber(file.IsDir()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(tasks) > 0 {
|
|
||||||
// 删除
|
|
||||||
resp, err := y.CreateBatchTask("DELETE", y.FamilyID, "", nil, tasks...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// 永久删除
|
|
||||||
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", y.FamilyID, "", nil, tasks...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// 获取家庭云所有用户信息
|
// 获取家庭云所有用户信息
|
||||||
func (y *Cloud189PC) getFamilyInfoList() ([]FamilyInfoResp, error) {
|
func (y *Cloud189PC) getFamilyInfoList() ([]FamilyInfoResp, error) {
|
||||||
var resp FamilyInfoListResp
|
var resp FamilyInfoListResp
|
||||||
_, err := y.get(API_URL+"/family/manage/getFamilyList.action", nil, &resp, true)
|
_, err := y.get(API_URL+"/family/manage/getFamilyList.action", nil, &resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -1026,108 +915,13 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
|
|||||||
return "", fmt.Errorf("cannot get automatically,please input family_id")
|
return "", fmt.Errorf("cannot get automatically,please input family_id")
|
||||||
}
|
}
|
||||||
for _, info := range infos {
|
for _, info := range infos {
|
||||||
if strings.Contains(y.getTokenInfo().LoginName, info.RemarkName) {
|
if strings.Contains(y.tokenInfo.LoginName, info.RemarkName) {
|
||||||
return fmt.Sprint(info.FamilyID), nil
|
return fmt.Sprint(info.FamilyID), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return fmt.Sprint(infos[0].FamilyID), nil
|
return fmt.Sprint(infos[0].FamilyID), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 保存家庭云中的文件到个人云
|
|
||||||
func (y *Cloud189PC) SaveFamilyFileToPersonCloud(ctx context.Context, familyId string, srcObj, dstDir model.Obj, overwrite bool) error {
|
|
||||||
// _, err := y.post(API_URL+"/family/file/saveFileToMember.action", func(req *resty.Request) {
|
|
||||||
// req.SetQueryParams(map[string]string{
|
|
||||||
// "channelId": "home",
|
|
||||||
// "familyId": familyId,
|
|
||||||
// "destParentId": destParentId,
|
|
||||||
// "fileIdList": familyFileId,
|
|
||||||
// })
|
|
||||||
// }, nil)
|
|
||||||
// return err
|
|
||||||
|
|
||||||
task := BatchTaskInfo{
|
|
||||||
FileId: srcObj.GetID(),
|
|
||||||
FileName: srcObj.GetName(),
|
|
||||||
IsFolder: BoolToNumber(srcObj.IsDir()),
|
|
||||||
}
|
|
||||||
resp, err := y.CreateBatchTask("COPY", familyId, dstDir.GetID(), map[string]string{
|
|
||||||
"groupId": "null",
|
|
||||||
"copyType": "2",
|
|
||||||
"shareId": "null",
|
|
||||||
}, task)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
state, err := y.CheckBatchTask("COPY", resp.TaskID)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
switch state.TaskStatus {
|
|
||||||
case 2:
|
|
||||||
task.DealWay = IF(overwrite, 3, 2)
|
|
||||||
// 冲突时覆盖文件
|
|
||||||
if err := y.ManageBatchTask("COPY", resp.TaskID, dstDir.GetID(), task); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case 4:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
time.Sleep(time.Millisecond * 400)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// 永久删除文件
|
|
||||||
func (y *Cloud189PC) Delete(ctx context.Context, familyId string, srcObj model.Obj) error {
|
|
||||||
task := BatchTaskInfo{
|
|
||||||
FileId: srcObj.GetID(),
|
|
||||||
FileName: srcObj.GetName(),
|
|
||||||
IsFolder: BoolToNumber(srcObj.IsDir()),
|
|
||||||
}
|
|
||||||
// 删除源文件
|
|
||||||
resp, err := y.CreateBatchTask("DELETE", familyId, "", nil, task)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = y.WaitBatchTask("DELETE", resp.TaskID, time.Second)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// 清除回收站
|
|
||||||
resp, err = y.CreateBatchTask("CLEAR_RECYCLE", familyId, "", nil, task)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = y.WaitBatchTask("CLEAR_RECYCLE", resp.TaskID, time.Second)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (y *Cloud189PC) CreateBatchTask(aType string, familyID string, targetFolderId string, other map[string]string, taskInfos ...BatchTaskInfo) (*CreateBatchTaskResp, error) {
|
|
||||||
var resp CreateBatchTaskResp
|
|
||||||
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
|
|
||||||
req.SetFormData(map[string]string{
|
|
||||||
"type": aType,
|
|
||||||
"taskInfos": MustString(utils.Json.MarshalToString(taskInfos)),
|
|
||||||
})
|
|
||||||
if targetFolderId != "" {
|
|
||||||
req.SetFormData(map[string]string{"targetFolderId": targetFolderId})
|
|
||||||
}
|
|
||||||
if familyID != "" {
|
|
||||||
req.SetFormData(map[string]string{"familyId": familyID})
|
|
||||||
}
|
|
||||||
req.SetFormData(other)
|
|
||||||
}, &resp, familyID != "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// 检测任务状态
|
|
||||||
func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
|
func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
|
||||||
var resp BatchTaskStateResp
|
var resp BatchTaskStateResp
|
||||||
_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
|
_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
|
||||||
@ -1142,37 +936,6 @@ func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStat
|
|||||||
return &resp, nil
|
return &resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取冲突的任务信息
|
|
||||||
func (y *Cloud189PC) GetConflictTaskInfo(aType string, taskID string) (*BatchTaskConflictTaskInfoResp, error) {
|
|
||||||
var resp BatchTaskConflictTaskInfoResp
|
|
||||||
_, err := y.post(API_URL+"/batch/getConflictTaskInfo.action", func(req *resty.Request) {
|
|
||||||
req.SetFormData(map[string]string{
|
|
||||||
"type": aType,
|
|
||||||
"taskId": taskID,
|
|
||||||
})
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// 处理冲突
|
|
||||||
func (y *Cloud189PC) ManageBatchTask(aType string, taskID string, targetFolderId string, taskInfos ...BatchTaskInfo) error {
|
|
||||||
_, err := y.post(API_URL+"/batch/manageBatchTask.action", func(req *resty.Request) {
|
|
||||||
req.SetFormData(map[string]string{
|
|
||||||
"targetFolderId": targetFolderId,
|
|
||||||
"type": aType,
|
|
||||||
"taskId": taskID,
|
|
||||||
"taskInfos": MustString(utils.Json.MarshalToString(taskInfos)),
|
|
||||||
})
|
|
||||||
}, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var ErrIsConflict = errors.New("there is a conflict with the target object")
|
|
||||||
|
|
||||||
// 等待任务完成
|
|
||||||
func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
|
func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
|
||||||
for {
|
for {
|
||||||
state, err := y.CheckBatchTask(aType, taskID)
|
state, err := y.CheckBatchTask(aType, taskID)
|
||||||
@ -1181,24 +944,10 @@ func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration)
|
|||||||
}
|
}
|
||||||
switch state.TaskStatus {
|
switch state.TaskStatus {
|
||||||
case 2:
|
case 2:
|
||||||
return ErrIsConflict
|
return errors.New("there is a conflict with the target object")
|
||||||
case 4:
|
case 4:
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
time.Sleep(t)
|
time.Sleep(t)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) getTokenInfo() *AppSessionResp {
|
|
||||||
if y.ref != nil {
|
|
||||||
return y.ref.getTokenInfo()
|
|
||||||
}
|
|
||||||
return y.tokenInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (y *Cloud189PC) getClient() *resty.Client {
|
|
||||||
if y.ref != nil {
|
|
||||||
return y.ref.getClient()
|
|
||||||
}
|
|
||||||
return y.client
|
|
||||||
}
|
|
||||||
|
@ -3,12 +3,10 @@ package alias
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
stdpath "path"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
"github.com/alist-org/alist/v3/internal/fs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
)
|
)
|
||||||
@ -47,9 +45,6 @@ func (d *Alias) Init(ctx context.Context) error {
|
|||||||
d.oneKey = k
|
d.oneKey = k
|
||||||
}
|
}
|
||||||
d.autoFlatten = true
|
d.autoFlatten = true
|
||||||
} else {
|
|
||||||
d.oneKey = ""
|
|
||||||
d.autoFlatten = false
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -92,9 +87,8 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
|||||||
return nil, errs.ObjectNotFound
|
return nil, errs.ObjectNotFound
|
||||||
}
|
}
|
||||||
var objs []model.Obj
|
var objs []model.Obj
|
||||||
fsArgs := &fs.ListArgs{NoLog: true, Refresh: args.Refresh}
|
|
||||||
for _, dst := range dsts {
|
for _, dst := range dsts {
|
||||||
tmp, err := d.list(ctx, dst, sub, fsArgs)
|
tmp, err := d.list(ctx, dst, sub)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
objs = append(objs, tmp...)
|
objs = append(objs, tmp...)
|
||||||
}
|
}
|
||||||
@ -111,211 +105,10 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
for _, dst := range dsts {
|
for _, dst := range dsts {
|
||||||
link, err := d.link(ctx, dst, sub, args)
|
link, err := d.link(ctx, dst, sub, args)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if !args.Redirect && len(link.URL) > 0 {
|
|
||||||
// 正常情况下 多并发 仅支持返回URL的驱动
|
|
||||||
// alias套娃alias 可以让crypt、mega等驱动(不返回URL的) 支持并发
|
|
||||||
if d.DownloadConcurrency > 0 {
|
|
||||||
link.Concurrency = d.DownloadConcurrency
|
|
||||||
}
|
|
||||||
if d.DownloadPartSize > 0 {
|
|
||||||
link.PartSize = d.DownloadPartSize * utils.KB
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return link, nil
|
return link, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, errs.ObjectNotFound
|
return nil, errs.ObjectNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
reqPath, err := d.getReqPath(ctx, parentDir, true)
|
|
||||||
if err == nil {
|
|
||||||
return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName))
|
|
||||||
}
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name dirs cannot make sub-dir")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name files cannot be moved")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name dirs cannot be moved to")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return fs.Move(ctx, *srcPath, *dstPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
reqPath, err := d.getReqPath(ctx, srcObj, false)
|
|
||||||
if err == nil {
|
|
||||||
return fs.Rename(ctx, *reqPath, newName)
|
|
||||||
}
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name files cannot be Rename")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name files cannot be copied")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name dirs cannot be copied to")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = fs.Copy(ctx, *srcPath, *dstPath)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
reqPath, err := d.getReqPath(ctx, obj, false)
|
|
||||||
if err == nil {
|
|
||||||
return fs.Remove(ctx, *reqPath)
|
|
||||||
}
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name files cannot be Delete")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
reqPath, err := d.getReqPath(ctx, dstDir, true)
|
|
||||||
if err == nil {
|
|
||||||
return fs.PutDirectly(ctx, *reqPath, s)
|
|
||||||
}
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name dirs cannot be Put")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
reqPath, err := d.getReqPath(ctx, dstDir, true)
|
|
||||||
if err == nil {
|
|
||||||
return fs.PutURL(ctx, *reqPath, name, url)
|
|
||||||
}
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name files cannot offline download")
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
|
||||||
root, sub := d.getRootAndPath(obj.GetPath())
|
|
||||||
dsts, ok := d.pathMap[root]
|
|
||||||
if !ok {
|
|
||||||
return nil, errs.ObjectNotFound
|
|
||||||
}
|
|
||||||
for _, dst := range dsts {
|
|
||||||
meta, err := d.getArchiveMeta(ctx, dst, sub, args)
|
|
||||||
if err == nil {
|
|
||||||
return meta, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
|
||||||
root, sub := d.getRootAndPath(obj.GetPath())
|
|
||||||
dsts, ok := d.pathMap[root]
|
|
||||||
if !ok {
|
|
||||||
return nil, errs.ObjectNotFound
|
|
||||||
}
|
|
||||||
for _, dst := range dsts {
|
|
||||||
l, err := d.listArchive(ctx, dst, sub, args)
|
|
||||||
if err == nil {
|
|
||||||
return l, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
|
||||||
// alias的两个驱动,一个支持驱动提取,一个不支持,如何兼容?
|
|
||||||
// 如果访问的是不支持驱动提取的驱动内的压缩文件,GetArchiveMeta就会返回errs.NotImplement,提取URL前缀就会是/ae,Extract就不会被调用
|
|
||||||
// 如果访问的是支持驱动提取的驱动内的压缩文件,GetArchiveMeta就会返回有效值,提取URL前缀就会是/ad,Extract就会被调用
|
|
||||||
root, sub := d.getRootAndPath(obj.GetPath())
|
|
||||||
dsts, ok := d.pathMap[root]
|
|
||||||
if !ok {
|
|
||||||
return nil, errs.ObjectNotFound
|
|
||||||
}
|
|
||||||
for _, dst := range dsts {
|
|
||||||
link, err := d.extract(ctx, dst, sub, args)
|
|
||||||
if err == nil {
|
|
||||||
if !args.Redirect && len(link.URL) > 0 {
|
|
||||||
if d.DownloadConcurrency > 0 {
|
|
||||||
link.Concurrency = d.DownloadConcurrency
|
|
||||||
}
|
|
||||||
if d.DownloadPartSize > 0 {
|
|
||||||
link.PartSize = d.DownloadPartSize * utils.KB
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return link, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
|
|
||||||
if !d.Writable {
|
|
||||||
return errs.PermissionDenied
|
|
||||||
}
|
|
||||||
srcPath, err := d.getReqPath(ctx, srcObj, false)
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name files cannot be decompressed")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dstPath, err := d.getReqPath(ctx, dstDir, true)
|
|
||||||
if errs.IsNotImplement(err) {
|
|
||||||
return errors.New("same-name dirs cannot be decompressed to")
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*Alias)(nil)
|
var _ driver.Driver = (*Alias)(nil)
|
||||||
|
@ -9,28 +9,19 @@ type Addition struct {
|
|||||||
// Usually one of two
|
// Usually one of two
|
||||||
// driver.RootPath
|
// driver.RootPath
|
||||||
// define other
|
// define other
|
||||||
Paths string `json:"paths" required:"true" type:"text"`
|
Paths string `json:"paths" required:"true" type:"text"`
|
||||||
ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"`
|
|
||||||
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
|
|
||||||
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
|
|
||||||
Writable bool `json:"writable" type:"bool" default:"false"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "Alias",
|
Name: "Alias",
|
||||||
LocalSort: true,
|
LocalSort: true,
|
||||||
NoCache: true,
|
NoCache: true,
|
||||||
NoUpload: false,
|
NoUpload: true,
|
||||||
DefaultRoot: "/",
|
DefaultRoot: "/",
|
||||||
ProxyRangeOption: true,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
op.RegisterDriver(func() driver.Driver {
|
op.RegisterDriver(func() driver.Driver {
|
||||||
return &Alias{
|
return &Alias{}
|
||||||
Addition: Addition{
|
|
||||||
ProtectSameName: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -3,15 +3,11 @@ package alias
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
|
||||||
stdpath "path"
|
stdpath "path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/fs"
|
"github.com/alist-org/alist/v3/internal/fs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
"github.com/alist-org/alist/v3/internal/sign"
|
"github.com/alist-org/alist/v3/internal/sign"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/alist-org/alist/v3/server/common"
|
"github.com/alist-org/alist/v3/server/common"
|
||||||
@ -19,7 +15,7 @@ import (
|
|||||||
|
|
||||||
func (d *Alias) listRoot() []model.Obj {
|
func (d *Alias) listRoot() []model.Obj {
|
||||||
var objs []model.Obj
|
var objs []model.Obj
|
||||||
for k := range d.pathMap {
|
for k, _ := range d.pathMap {
|
||||||
obj := model.Object{
|
obj := model.Object{
|
||||||
Name: k,
|
Name: k,
|
||||||
IsFolder: true,
|
IsFolder: true,
|
||||||
@ -65,12 +61,11 @@ func (d *Alias) get(ctx context.Context, path string, dst, sub string) (model.Ob
|
|||||||
Size: obj.GetSize(),
|
Size: obj.GetSize(),
|
||||||
Modified: obj.ModTime(),
|
Modified: obj.ModTime(),
|
||||||
IsFolder: obj.IsDir(),
|
IsFolder: obj.IsDir(),
|
||||||
HashInfo: obj.GetHash(),
|
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([]model.Obj, error) {
|
func (d *Alias) list(ctx context.Context, dst, sub string) ([]model.Obj, error) {
|
||||||
objs, err := fs.List(ctx, stdpath.Join(dst, sub), args)
|
objs, err := fs.List(ctx, stdpath.Join(dst, sub), &fs.ListArgs{NoLog: true})
|
||||||
// the obj must implement the model.SetPath interface
|
// the obj must implement the model.SetPath interface
|
||||||
// return objs, err
|
// return objs, err
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -98,128 +93,22 @@ func (d *Alias) list(ctx context.Context, dst, sub string, args *fs.ListArgs) ([
|
|||||||
|
|
||||||
func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
|
func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs) (*model.Link, error) {
|
||||||
reqPath := stdpath.Join(dst, sub)
|
reqPath := stdpath.Join(dst, sub)
|
||||||
// 参考 crypt 驱动
|
storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{})
|
||||||
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if _, ok := storage.(*Alias); !ok && !args.Redirect {
|
|
||||||
link, _, err := op.Link(ctx, storage, reqActualPath, args)
|
|
||||||
return link, err
|
|
||||||
}
|
|
||||||
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
|
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if common.ShouldProxy(storage, stdpath.Base(sub)) {
|
if common.ShouldProxy(storage, stdpath.Base(sub)) {
|
||||||
link := &model.Link{
|
return &model.Link{
|
||||||
URL: fmt.Sprintf("%s/p%s?sign=%s",
|
URL: fmt.Sprintf("%s/p%s?sign=%s",
|
||||||
common.GetApiUrl(args.HttpReq),
|
common.GetApiUrl(args.HttpReq),
|
||||||
utils.EncodePath(reqPath, true),
|
utils.EncodePath(reqPath, true),
|
||||||
sign.Sign(reqPath)),
|
sign.Sign(reqPath)),
|
||||||
}
|
}, nil
|
||||||
if args.HttpReq != nil && d.ProxyRange {
|
|
||||||
link.RangeReadCloser = common.NoProxyRange
|
|
||||||
}
|
|
||||||
return link, nil
|
|
||||||
}
|
}
|
||||||
link, _, err := op.Link(ctx, storage, reqActualPath, args)
|
link, _, err := fs.Link(ctx, reqPath, args)
|
||||||
return link, err
|
return link, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) {
|
|
||||||
root, sub := d.getRootAndPath(obj.GetPath())
|
|
||||||
if sub == "" && !isParent {
|
|
||||||
return nil, errs.NotSupport
|
|
||||||
}
|
|
||||||
dsts, ok := d.pathMap[root]
|
|
||||||
if !ok {
|
|
||||||
return nil, errs.ObjectNotFound
|
|
||||||
}
|
|
||||||
var reqPath *string
|
|
||||||
for _, dst := range dsts {
|
|
||||||
path := stdpath.Join(dst, sub)
|
|
||||||
_, err := fs.Get(ctx, path, &fs.GetArgs{NoLog: true})
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !d.ProtectSameName {
|
|
||||||
return &path, nil
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
ok = false
|
|
||||||
} else {
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
reqPath = &path
|
|
||||||
}
|
|
||||||
if reqPath == nil {
|
|
||||||
return nil, errs.ObjectNotFound
|
|
||||||
}
|
|
||||||
return reqPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
|
||||||
reqPath := stdpath.Join(dst, sub)
|
|
||||||
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := storage.(driver.ArchiveReader); ok {
|
|
||||||
return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{
|
|
||||||
ArchiveArgs: args,
|
|
||||||
Refresh: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
|
||||||
reqPath := stdpath.Join(dst, sub)
|
|
||||||
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := storage.(driver.ArchiveReader); ok {
|
|
||||||
return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{
|
|
||||||
ArchiveInnerArgs: args,
|
|
||||||
Refresh: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
|
|
||||||
reqPath := stdpath.Join(dst, sub)
|
|
||||||
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := storage.(driver.ArchiveReader); ok {
|
|
||||||
if _, ok := storage.(*Alias); !ok && !args.Redirect {
|
|
||||||
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
|
|
||||||
return link, err
|
|
||||||
}
|
|
||||||
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if common.ShouldProxy(storage, stdpath.Base(sub)) {
|
|
||||||
link := &model.Link{
|
|
||||||
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
|
|
||||||
common.GetApiUrl(args.HttpReq),
|
|
||||||
utils.EncodePath(reqPath, true),
|
|
||||||
utils.EncodePath(args.InnerPath, true),
|
|
||||||
url.QueryEscape(args.Password),
|
|
||||||
sign.SignArchive(reqPath)),
|
|
||||||
}
|
|
||||||
if args.HttpReq != nil && d.ProxyRange {
|
|
||||||
link.RangeReadCloser = common.NoProxyRange
|
|
||||||
}
|
|
||||||
return link, nil
|
|
||||||
}
|
|
||||||
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
|
|
||||||
return link, err
|
|
||||||
}
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
@ -5,19 +5,17 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"path"
|
"path"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/alist-org/alist/v3/server/common"
|
"github.com/alist-org/alist/v3/server/common"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type AListV3 struct {
|
type AListV3 struct {
|
||||||
@ -36,29 +34,29 @@ func (d *AListV3) GetAddition() driver.Additional {
|
|||||||
func (d *AListV3) Init(ctx context.Context) error {
|
func (d *AListV3) Init(ctx context.Context) error {
|
||||||
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
|
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
|
||||||
var resp common.Resp[MeResp]
|
var resp common.Resp[MeResp]
|
||||||
_, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
|
_, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetResult(&resp)
|
req.SetResult(&resp)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// if the username is not empty and the username is not the same as the current username, then login again
|
// if the username is not empty and the username is not the same as the current username, then login again
|
||||||
if d.Username != resp.Data.Username {
|
if d.Username != "" && d.Username != resp.Data.Username {
|
||||||
err = d.login()
|
err = d.login()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// re-get the user info
|
// re-get the user info
|
||||||
_, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
|
_, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetResult(&resp)
|
req.SetResult(&resp)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if resp.Data.Role == model.GUEST {
|
if resp.Data.Role == model.GUEST {
|
||||||
u := d.Address + "/api/public/settings"
|
url := d.Address + "/api/public/settings"
|
||||||
res, err := base.RestyClient.R().Get(u)
|
res, err := base.RestyClient.R().Get(url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -76,7 +74,7 @@ func (d *AListV3) Drop(ctx context.Context) error {
|
|||||||
|
|
||||||
func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
||||||
var resp common.Resp[FsListResp]
|
var resp common.Resp[FsListResp]
|
||||||
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetResult(&resp).SetBody(ListReq{
|
req.SetResult(&resp).SetBody(ListReq{
|
||||||
PageReq: model.PageReq{
|
PageReq: model.PageReq{
|
||||||
Page: 1,
|
Page: 1,
|
||||||
@ -110,19 +108,11 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
|
|||||||
|
|
||||||
func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
var resp common.Resp[FsGetResp]
|
var resp common.Resp[FsGetResp]
|
||||||
// if PassUAToUpsteam is true, then pass the user-agent to the upstream
|
_, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
|
||||||
userAgent := base.UserAgent
|
|
||||||
if d.PassUAToUpsteam {
|
|
||||||
userAgent = args.Header.Get("user-agent")
|
|
||||||
if userAgent == "" {
|
|
||||||
userAgent = base.UserAgent
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetResult(&resp).SetBody(FsGetReq{
|
req.SetResult(&resp).SetBody(FsGetReq{
|
||||||
Path: file.GetPath(),
|
Path: file.GetPath(),
|
||||||
Password: d.MetaPassword,
|
Password: d.MetaPassword,
|
||||||
}).SetHeader("user-agent", userAgent)
|
})
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -133,7 +123,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
_, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(MkdirOrLinkReq{
|
req.SetBody(MkdirOrLinkReq{
|
||||||
Path: path.Join(parentDir.GetPath(), dirName),
|
Path: path.Join(parentDir.GetPath(), dirName),
|
||||||
})
|
})
|
||||||
@ -142,7 +132,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
_, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(MoveCopyReq{
|
req.SetBody(MoveCopyReq{
|
||||||
SrcDir: path.Dir(srcObj.GetPath()),
|
SrcDir: path.Dir(srcObj.GetPath()),
|
||||||
DstDir: dstDir.GetPath(),
|
DstDir: dstDir.GetPath(),
|
||||||
@ -153,7 +143,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||||
_, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(RenameReq{
|
req.SetBody(RenameReq{
|
||||||
Path: srcObj.GetPath(),
|
Path: srcObj.GetPath(),
|
||||||
Name: newName,
|
Name: newName,
|
||||||
@ -163,7 +153,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
_, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(MoveCopyReq{
|
req.SetBody(MoveCopyReq{
|
||||||
SrcDir: path.Dir(srcObj.GetPath()),
|
SrcDir: path.Dir(srcObj.GetPath()),
|
||||||
DstDir: dstDir.GetPath(),
|
DstDir: dstDir.GetPath(),
|
||||||
@ -174,7 +164,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
|
func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
|
||||||
_, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(RemoveReq{
|
req.SetBody(RemoveReq{
|
||||||
Dir: path.Dir(obj.GetPath()),
|
Dir: path.Dir(obj.GetPath()),
|
||||||
Names: []string{obj.GetName()},
|
Names: []string{obj.GetName()},
|
||||||
@ -183,174 +173,13 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
|
func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
||||||
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
|
_, err := d.request("/fs/put", http.MethodPut, func(req *resty.Request) {
|
||||||
Reader: s,
|
req.SetHeader("File-Path", path.Join(dstDir.GetPath(), stream.GetName())).
|
||||||
UpdateProgress: up,
|
SetHeader("Password", d.MetaPassword).
|
||||||
})
|
SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", reader)
|
SetContentLength(true).
|
||||||
if err != nil {
|
SetBody(io.ReadCloser(stream))
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set("Authorization", d.Token)
|
|
||||||
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
|
|
||||||
req.Header.Set("Password", d.MetaPassword)
|
|
||||||
if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
|
|
||||||
req.Header.Set("X-File-Md5", md5)
|
|
||||||
}
|
|
||||||
if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
|
|
||||||
req.Header.Set("X-File-Sha1", sha1)
|
|
||||||
}
|
|
||||||
if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
|
|
||||||
req.Header.Set("X-File-Sha256", sha256)
|
|
||||||
}
|
|
||||||
|
|
||||||
req.ContentLength = s.GetSize()
|
|
||||||
// client := base.NewHttpClient()
|
|
||||||
// client.Timeout = time.Hour * 6
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
bytes, err := io.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugf("[alist_v3] response body: %s", string(bytes))
|
|
||||||
if res.StatusCode >= 400 {
|
|
||||||
return fmt.Errorf("request failed, status: %s", res.Status)
|
|
||||||
}
|
|
||||||
code := utils.Json.Get(bytes, "code").ToInt()
|
|
||||||
if code != 200 {
|
|
||||||
if code == 401 || code == 403 {
|
|
||||||
err = d.login()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(bytes, "message").ToString())
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
|
|
||||||
if !d.ForwardArchiveReq {
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
var resp common.Resp[ArchiveMetaResp]
|
|
||||||
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetResult(&resp).SetBody(ArchiveMetaReq{
|
|
||||||
ArchivePass: args.Password,
|
|
||||||
Password: d.MetaPassword,
|
|
||||||
Path: obj.GetPath(),
|
|
||||||
Refresh: false,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
if code == 202 {
|
|
||||||
return nil, errs.WrongArchivePassword
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var tree []model.ObjTree
|
|
||||||
if resp.Data.Content != nil {
|
|
||||||
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
|
|
||||||
for _, content := range resp.Data.Content {
|
|
||||||
tree = append(tree, &content)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &model.ArchiveMetaInfo{
|
|
||||||
Comment: resp.Data.Comment,
|
|
||||||
Encrypted: resp.Data.Encrypted,
|
|
||||||
Tree: tree,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
|
|
||||||
if !d.ForwardArchiveReq {
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
var resp common.Resp[ArchiveListResp]
|
|
||||||
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetResult(&resp).SetBody(ArchiveListReq{
|
|
||||||
ArchiveMetaReq: ArchiveMetaReq{
|
|
||||||
ArchivePass: args.Password,
|
|
||||||
Password: d.MetaPassword,
|
|
||||||
Path: obj.GetPath(),
|
|
||||||
Refresh: false,
|
|
||||||
},
|
|
||||||
PageReq: model.PageReq{
|
|
||||||
Page: 1,
|
|
||||||
PerPage: 0,
|
|
||||||
},
|
|
||||||
InnerPath: args.InnerPath,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
if code == 202 {
|
|
||||||
return nil, errs.WrongArchivePassword
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var files []model.Obj
|
|
||||||
for _, f := range resp.Data.Content {
|
|
||||||
file := model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
Name: f.Name,
|
|
||||||
Modified: f.Modified,
|
|
||||||
Ctime: f.Created,
|
|
||||||
Size: f.Size,
|
|
||||||
IsFolder: f.IsDir,
|
|
||||||
HashInfo: utils.FromString(f.HashInfo),
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
|
|
||||||
}
|
|
||||||
files = append(files, &file)
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
|
|
||||||
if !d.ForwardArchiveReq {
|
|
||||||
return nil, errs.NotSupport
|
|
||||||
}
|
|
||||||
var resp common.Resp[ArchiveMetaResp]
|
|
||||||
_, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetResult(&resp).SetBody(ArchiveMetaReq{
|
|
||||||
ArchivePass: args.Password,
|
|
||||||
Password: d.MetaPassword,
|
|
||||||
Path: obj.GetPath(),
|
|
||||||
Refresh: false,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &model.Link{
|
|
||||||
URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s",
|
|
||||||
resp.Data.RawURL,
|
|
||||||
utils.EncodePath(args.InnerPath, true),
|
|
||||||
url.QueryEscape(args.Password),
|
|
||||||
resp.Data.Sign),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
|
|
||||||
if !d.ForwardArchiveReq {
|
|
||||||
return errs.NotImplement
|
|
||||||
}
|
|
||||||
dir, name := path.Split(srcObj.GetPath())
|
|
||||||
_, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetBody(DecompressReq{
|
|
||||||
ArchivePass: args.Password,
|
|
||||||
CacheFull: args.CacheFull,
|
|
||||||
DstDir: dstDir.GetPath(),
|
|
||||||
InnerPath: args.InnerPath,
|
|
||||||
Name: []string{name},
|
|
||||||
PutIntoNewDir: args.PutIntoNewDir,
|
|
||||||
SrcDir: dir,
|
|
||||||
})
|
|
||||||
})
|
})
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -7,21 +7,18 @@ import (
|
|||||||
|
|
||||||
type Addition struct {
|
type Addition struct {
|
||||||
driver.RootPath
|
driver.RootPath
|
||||||
Address string `json:"url" required:"true"`
|
Address string `json:"url" required:"true"`
|
||||||
MetaPassword string `json:"meta_password"`
|
MetaPassword string `json:"meta_password"`
|
||||||
Username string `json:"username"`
|
Username string `json:"username"`
|
||||||
Password string `json:"password"`
|
Password string `json:"password"`
|
||||||
Token string `json:"token"`
|
Token string `json:"token"`
|
||||||
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
|
|
||||||
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
Name: "AList V3",
|
Name: "AList V3",
|
||||||
LocalSort: true,
|
LocalSort: true,
|
||||||
DefaultRoot: "/",
|
DefaultRoot: "/",
|
||||||
CheckStatus: true,
|
CheckStatus: true,
|
||||||
ProxyRangeOption: true,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -4,7 +4,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type ListReq struct {
|
type ListReq struct {
|
||||||
@ -82,89 +81,3 @@ type MeResp struct {
|
|||||||
SsoId string `json:"sso_id"`
|
SsoId string `json:"sso_id"`
|
||||||
Otp bool `json:"otp"`
|
Otp bool `json:"otp"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type ArchiveMetaReq struct {
|
|
||||||
ArchivePass string `json:"archive_pass"`
|
|
||||||
Password string `json:"password"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Refresh bool `json:"refresh"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type TreeResp struct {
|
|
||||||
ObjResp
|
|
||||||
Children []TreeResp `json:"children"`
|
|
||||||
hashCache *utils.HashInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) GetSize() int64 {
|
|
||||||
return t.Size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) GetName() string {
|
|
||||||
return t.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) ModTime() time.Time {
|
|
||||||
return t.Modified
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) CreateTime() time.Time {
|
|
||||||
return t.Created
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) IsDir() bool {
|
|
||||||
return t.ObjResp.IsDir
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) GetHash() utils.HashInfo {
|
|
||||||
return utils.FromString(t.HashInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) GetID() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) GetPath() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) GetChildren() []model.ObjTree {
|
|
||||||
ret := make([]model.ObjTree, 0, len(t.Children))
|
|
||||||
for _, child := range t.Children {
|
|
||||||
ret = append(ret, &child)
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TreeResp) Thumb() string {
|
|
||||||
return t.ObjResp.Thumb
|
|
||||||
}
|
|
||||||
|
|
||||||
type ArchiveMetaResp struct {
|
|
||||||
Comment string `json:"comment"`
|
|
||||||
Encrypted bool `json:"encrypted"`
|
|
||||||
Content []TreeResp `json:"content"`
|
|
||||||
RawURL string `json:"raw_url"`
|
|
||||||
Sign string `json:"sign"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ArchiveListReq struct {
|
|
||||||
model.PageReq
|
|
||||||
ArchiveMetaReq
|
|
||||||
InnerPath string `json:"inner_path"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ArchiveListResp struct {
|
|
||||||
Content []ObjResp `json:"content"`
|
|
||||||
Total int64 `json:"total"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DecompressReq struct {
|
|
||||||
ArchivePass string `json:"archive_pass"`
|
|
||||||
CacheFull bool `json:"cache_full"`
|
|
||||||
DstDir string `json:"dst_dir"`
|
|
||||||
InnerPath string `json:"inner_path"`
|
|
||||||
Name []string `json:"name"`
|
|
||||||
PutIntoNewDir bool `json:"put_into_new_dir"`
|
|
||||||
SrcDir string `json:"src_dir"`
|
|
||||||
}
|
|
||||||
|
@ -13,11 +13,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func (d *AListV3) login() error {
|
func (d *AListV3) login() error {
|
||||||
if d.Username == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var resp common.Resp[LoginResp]
|
var resp common.Resp[LoginResp]
|
||||||
_, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetResult(&resp).SetBody(base.Json{
|
req.SetResult(&resp).SetBody(base.Json{
|
||||||
"username": d.Username,
|
"username": d.Username,
|
||||||
"password": d.Password,
|
"password": d.Password,
|
||||||
@ -31,7 +28,7 @@ func (d *AListV3) login() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) {
|
func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
|
||||||
url := d.Address + "/api" + api
|
url := d.Address + "/api" + api
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
req.SetHeader("Authorization", d.Token)
|
req.SetHeader("Authorization", d.Token)
|
||||||
@ -40,26 +37,22 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
|
|||||||
}
|
}
|
||||||
res, err := req.Execute(method, url)
|
res, err := req.Execute(method, url)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
code := 0
|
return nil, err
|
||||||
if res != nil {
|
|
||||||
code = res.StatusCode()
|
|
||||||
}
|
|
||||||
return nil, code, err
|
|
||||||
}
|
}
|
||||||
log.Debugf("[alist_v3] response body: %s", res.String())
|
log.Debugf("[alist_v3] response body: %s", res.String())
|
||||||
if res.StatusCode() >= 400 {
|
if res.StatusCode() >= 400 {
|
||||||
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
|
return nil, fmt.Errorf("request failed, status: %s", res.Status())
|
||||||
}
|
}
|
||||||
code := utils.Json.Get(res.Body(), "code").ToInt()
|
code := utils.Json.Get(res.Body(), "code").ToInt()
|
||||||
if code != 200 {
|
if code != 200 {
|
||||||
if (code == 401 || code == 403) && !utils.IsBool(retry...) {
|
if (code == 401 || code == 403) && !utils.IsBool(retry...) {
|
||||||
err = d.login()
|
err = d.login()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, code, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return d.request(api, method, callback, true)
|
return d.request(api, method, callback, true)
|
||||||
}
|
}
|
||||||
return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
|
return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
|
||||||
}
|
}
|
||||||
return res.Body(), 200, nil
|
return res.Body(), nil
|
||||||
}
|
}
|
||||||
|
@ -14,12 +14,13 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/alist-org/alist/v3/internal/stream"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/stream"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/cron"
|
"github.com/alist-org/alist/v3/pkg/cron"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
@ -51,7 +52,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// get driver id
|
// get driver id
|
||||||
res, err, _ := d.request("https://api.alipan.com/v2/user/get", http.MethodPost, nil, nil)
|
res, err, _ := d.request("https://api.aliyundrive.com/v2/user/get", http.MethodPost, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -105,7 +106,7 @@ func (d *AliDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs
|
|||||||
"file_id": file.GetID(),
|
"file_id": file.GetID(),
|
||||||
"expire_sec": 14400,
|
"expire_sec": 14400,
|
||||||
}
|
}
|
||||||
res, err, _ := d.request("https://api.alipan.com/v2/file/get_download_url", http.MethodPost, func(req *resty.Request) {
|
res, err, _ := d.request("https://api.aliyundrive.com/v2/file/get_download_url", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, nil)
|
}, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -113,14 +114,14 @@ func (d *AliDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs
|
|||||||
}
|
}
|
||||||
return &model.Link{
|
return &model.Link{
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
"Referer": []string{"https://www.alipan.com/"},
|
"Referer": []string{"https://www.aliyundrive.com/"},
|
||||||
},
|
},
|
||||||
URL: utils.Json.Get(res, "url").ToString(),
|
URL: utils.Json.Get(res, "url").ToString(),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
func (d *AliDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
_, err, _ := d.request("https://api.alipan.com/adrive/v2/file/createWithFolders", http.MethodPost, func(req *resty.Request) {
|
_, err, _ := d.request("https://api.aliyundrive.com/adrive/v2/file/createWithFolders", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"check_name_mode": "refuse",
|
"check_name_mode": "refuse",
|
||||||
"drive_id": d.DriveId,
|
"drive_id": d.DriveId,
|
||||||
@ -138,7 +139,7 @@ func (d *AliDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
func (d *AliDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||||
_, err, _ := d.request("https://api.alipan.com/v3/file/update", http.MethodPost, func(req *resty.Request) {
|
_, err, _ := d.request("https://api.aliyundrive.com/v3/file/update", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"check_name_mode": "refuse",
|
"check_name_mode": "refuse",
|
||||||
"drive_id": d.DriveId,
|
"drive_id": d.DriveId,
|
||||||
@ -155,7 +156,7 @@ func (d *AliDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
|
func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
|
||||||
_, err, _ := d.request("https://api.alipan.com/v2/recyclebin/trash", http.MethodPost, func(req *resty.Request) {
|
_, err, _ := d.request("https://api.aliyundrive.com/v2/recyclebin/trash", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"drive_id": d.DriveId,
|
"drive_id": d.DriveId,
|
||||||
"file_id": obj.GetID(),
|
"file_id": obj.GetID(),
|
||||||
@ -193,10 +194,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
}
|
}
|
||||||
if d.RapidUpload {
|
if d.RapidUpload {
|
||||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
_, err := utils.CopyWithBufferN(buf, file, 1024)
|
io.CopyN(buf, file, 1024)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
|
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
|
||||||
if localFile != nil {
|
if localFile != nil {
|
||||||
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
|
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
|
||||||
@ -218,7 +216,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
}
|
}
|
||||||
|
|
||||||
var resp UploadResp
|
var resp UploadResp
|
||||||
_, err, e := d.request("https://api.alipan.com/adrive/v2/file/createWithFolders", http.MethodPost, func(req *resty.Request) {
|
_, err, e := d.request("https://api.aliyundrive.com/adrive/v2/file/createWithFolders", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(reqBody)
|
req.SetBody(reqBody)
|
||||||
}, &resp)
|
}, &resp)
|
||||||
|
|
||||||
@ -272,7 +270,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
n, _ := io.NewSectionReader(localFile, o.Int64(), 8).Read(buf[:8])
|
n, _ := io.NewSectionReader(localFile, o.Int64(), 8).Read(buf[:8])
|
||||||
reqBody["proof_code"] = base64.StdEncoding.EncodeToString(buf[:n])
|
reqBody["proof_code"] = base64.StdEncoding.EncodeToString(buf[:n])
|
||||||
|
|
||||||
_, err, e := d.request("https://api.alipan.com/adrive/v2/file/createWithFolders", http.MethodPost, func(req *resty.Request) {
|
_, err, e := d.request("https://api.aliyundrive.com/adrive/v2/file/createWithFolders", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(reqBody)
|
req.SetBody(reqBody)
|
||||||
}, &resp)
|
}, &resp)
|
||||||
if err != nil && e.Code != "PreHashMatched" {
|
if err != nil && e.Code != "PreHashMatched" {
|
||||||
@ -288,7 +286,6 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
file.Reader = localFile
|
file.Reader = localFile
|
||||||
}
|
}
|
||||||
|
|
||||||
rateLimited := driver.NewLimitedUploadStream(ctx, file)
|
|
||||||
for i, partInfo := range resp.PartInfoList {
|
for i, partInfo := range resp.PartInfoList {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
@ -297,7 +294,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
if d.InternalUpload {
|
if d.InternalUpload {
|
||||||
url = partInfo.InternalUploadUrl
|
url = partInfo.InternalUploadUrl
|
||||||
}
|
}
|
||||||
req, err := http.NewRequest("PUT", url, io.LimitReader(rateLimited, DEFAULT))
|
req, err := http.NewRequest("PUT", url, io.LimitReader(file, DEFAULT))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -306,13 +303,13 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_ = res.Body.Close()
|
res.Body.Close()
|
||||||
if count > 0 {
|
if count > 0 {
|
||||||
up(float64(i) * 100 / float64(count))
|
up(float64(i) * 100 / float64(count))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var resp2 base.Json
|
var resp2 base.Json
|
||||||
_, err, e = d.request("https://api.alipan.com/v2/file/complete", http.MethodPost, func(req *resty.Request) {
|
_, err, e = d.request("https://api.aliyundrive.com/v2/file/complete", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"drive_id": d.DriveId,
|
"drive_id": d.DriveId,
|
||||||
"file_id": resp.FileId,
|
"file_id": resp.FileId,
|
||||||
@ -337,10 +334,10 @@ func (d *AliDrive) Other(ctx context.Context, args model.OtherArgs) (interface{}
|
|||||||
}
|
}
|
||||||
switch args.Method {
|
switch args.Method {
|
||||||
case "doc_preview":
|
case "doc_preview":
|
||||||
url = "https://api.alipan.com/v2/file/get_office_preview_url"
|
url = "https://api.aliyundrive.com/v2/file/get_office_preview_url"
|
||||||
data["access_token"] = d.AccessToken
|
data["access_token"] = d.AccessToken
|
||||||
case "video_preview":
|
case "video_preview":
|
||||||
url = "https://api.alipan.com/v2/file/get_video_preview_play_info"
|
url = "https://api.aliyundrive.com/v2/file/get_video_preview_play_info"
|
||||||
data["category"] = "live_transcoding"
|
data["category"] = "live_transcoding"
|
||||||
data["url_expire_sec"] = 14400
|
data["url_expire_sec"] = 14400
|
||||||
default:
|
default:
|
||||||
|
@ -26,7 +26,7 @@ func (d *AliDrive) createSession() error {
|
|||||||
state.retry = 0
|
state.retry = 0
|
||||||
return fmt.Errorf("createSession failed after three retries")
|
return fmt.Errorf("createSession failed after three retries")
|
||||||
}
|
}
|
||||||
_, err, _ := d.request("https://api.alipan.com/users/v1/users/device/create_session", http.MethodPost, func(req *resty.Request) {
|
_, err, _ := d.request("https://api.aliyundrive.com/users/v1/users/device/create_session", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"deviceName": "samsung",
|
"deviceName": "samsung",
|
||||||
"modelName": "SM-G9810",
|
"modelName": "SM-G9810",
|
||||||
@ -42,7 +42,7 @@ func (d *AliDrive) createSession() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// func (d *AliDrive) renewSession() error {
|
// func (d *AliDrive) renewSession() error {
|
||||||
// _, err, _ := d.request("https://api.alipan.com/users/v1/users/device/renew_session", http.MethodPost, nil, nil)
|
// _, err, _ := d.request("https://api.aliyundrive.com/users/v1/users/device/renew_session", http.MethodPost, nil, nil)
|
||||||
// return err
|
// return err
|
||||||
// }
|
// }
|
||||||
|
|
||||||
@ -58,7 +58,7 @@ func (d *AliDrive) sign() {
|
|||||||
// do others that not defined in Driver interface
|
// do others that not defined in Driver interface
|
||||||
|
|
||||||
func (d *AliDrive) refreshToken() error {
|
func (d *AliDrive) refreshToken() error {
|
||||||
url := "https://auth.alipan.com/v2/account/token"
|
url := "https://auth.aliyundrive.com/v2/account/token"
|
||||||
var resp base.TokenResp
|
var resp base.TokenResp
|
||||||
var e RespErr
|
var e RespErr
|
||||||
_, err := base.RestyClient.R().
|
_, err := base.RestyClient.R().
|
||||||
@ -85,7 +85,7 @@ func (d *AliDrive) request(url, method string, callback base.ReqCallback, resp i
|
|||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
state, ok := global.Load(d.UserID)
|
state, ok := global.Load(d.UserID)
|
||||||
if !ok {
|
if !ok {
|
||||||
if url == "https://api.alipan.com/v2/user/get" {
|
if url == "https://api.aliyundrive.com/v2/user/get" {
|
||||||
state = &State{}
|
state = &State{}
|
||||||
} else {
|
} else {
|
||||||
return nil, fmt.Errorf("can't load user state, user_id: %s", d.UserID), RespErr{}
|
return nil, fmt.Errorf("can't load user state, user_id: %s", d.UserID), RespErr{}
|
||||||
@ -94,8 +94,8 @@ func (d *AliDrive) request(url, method string, callback base.ReqCallback, resp i
|
|||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"Authorization": "Bearer\t" + d.AccessToken,
|
"Authorization": "Bearer\t" + d.AccessToken,
|
||||||
"content-type": "application/json",
|
"content-type": "application/json",
|
||||||
"origin": "https://www.alipan.com",
|
"origin": "https://www.aliyundrive.com",
|
||||||
"Referer": "https://alipan.com/",
|
"Referer": "https://aliyundrive.com/",
|
||||||
"X-Signature": state.signature,
|
"X-Signature": state.signature,
|
||||||
"x-request-id": uuid.NewString(),
|
"x-request-id": uuid.NewString(),
|
||||||
"X-Canary": "client=Android,app=adrive,version=v4.1.0",
|
"X-Canary": "client=Android,app=adrive,version=v4.1.0",
|
||||||
@ -158,7 +158,7 @@ func (d *AliDrive) getFiles(fileId string) ([]File, error) {
|
|||||||
"video_thumbnail_process": "video/snapshot,t_0,f_jpg,ar_auto,w_300",
|
"video_thumbnail_process": "video/snapshot,t_0,f_jpg,ar_auto,w_300",
|
||||||
"url_expire_sec": 14400,
|
"url_expire_sec": 14400,
|
||||||
}
|
}
|
||||||
_, err, _ := d.request("https://api.alipan.com/v2/file/list", http.MethodPost, func(req *resty.Request) {
|
_, err, _ := d.request("https://api.aliyundrive.com/v2/file/list", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(data)
|
req.SetBody(data)
|
||||||
}, &resp)
|
}, &resp)
|
||||||
|
|
||||||
@ -172,7 +172,7 @@ func (d *AliDrive) getFiles(fileId string) ([]File, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliDrive) batch(srcId, dstId string, url string) error {
|
func (d *AliDrive) batch(srcId, dstId string, url string) error {
|
||||||
res, err, _ := d.request("https://api.alipan.com/v3/batch", http.MethodPost, func(req *resty.Request) {
|
res, err, _ := d.request("https://api.aliyundrive.com/v3/batch", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetBody(base.Json{
|
req.SetBody(base.Json{
|
||||||
"requests": []base.Json{
|
"requests": []base.Json{
|
||||||
{
|
{
|
||||||
|
@ -19,12 +19,12 @@ import (
|
|||||||
type AliyundriveOpen struct {
|
type AliyundriveOpen struct {
|
||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
|
base string
|
||||||
|
|
||||||
DriveId string
|
DriveId string
|
||||||
|
|
||||||
limitList func(ctx context.Context, data base.Json) (*Files, error)
|
limitList func(ctx context.Context, data base.Json) (*Files, error)
|
||||||
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
|
limitLink func(ctx context.Context, file model.Obj) (*model.Link, error)
|
||||||
ref *AliyundriveOpen
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliyundriveOpen) Config() driver.Config {
|
func (d *AliyundriveOpen) Config() driver.Config {
|
||||||
@ -58,17 +58,7 @@ func (d *AliyundriveOpen) Init(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliyundriveOpen) InitReference(storage driver.Driver) error {
|
|
||||||
refStorage, ok := storage.(*AliyundriveOpen)
|
|
||||||
if ok {
|
|
||||||
d.ref = refStorage
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return errs.NotSupport
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AliyundriveOpen) Drop(ctx context.Context) error {
|
func (d *AliyundriveOpen) Drop(ctx context.Context) error {
|
||||||
d.ref = nil
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -103,7 +93,7 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
|
|||||||
}
|
}
|
||||||
url = utils.Json.Get(res, "streamsUrl", d.LIVPDownloadFormat).ToString()
|
url = utils.Json.Get(res, "streamsUrl", d.LIVPDownloadFormat).ToString()
|
||||||
}
|
}
|
||||||
exp := time.Minute
|
exp := time.Hour
|
||||||
return &model.Link{
|
return &model.Link{
|
||||||
URL: url,
|
URL: url,
|
||||||
Expiration: &exp,
|
Expiration: &exp,
|
||||||
|
@ -6,7 +6,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Addition struct {
|
type Addition struct {
|
||||||
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"resource"`
|
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
|
||||||
driver.RootID
|
driver.RootID
|
||||||
RefreshToken string `json:"refresh_token" required:"true"`
|
RefreshToken string `json:"refresh_token" required:"true"`
|
||||||
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
|
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
|
||||||
@ -32,10 +32,11 @@ var config = driver.Config{
|
|||||||
DefaultRoot: "root",
|
DefaultRoot: "root",
|
||||||
NoOverwriteUpload: true,
|
NoOverwriteUpload: true,
|
||||||
}
|
}
|
||||||
var API_URL = "https://openapi.alipan.com"
|
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
op.RegisterDriver(func() driver.Driver {
|
op.RegisterDriver(func() driver.Driver {
|
||||||
return &AliyundriveOpen{}
|
return &AliyundriveOpen{
|
||||||
|
base: "https://openapi.aliyundrive.com",
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -77,7 +77,7 @@ func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_ = res.Body.Close()
|
res.Body.Close()
|
||||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
|
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
|
||||||
return fmt.Errorf("upload status: %d", res.StatusCode)
|
return fmt.Errorf("upload status: %d", res.StatusCode)
|
||||||
}
|
}
|
||||||
@ -126,7 +126,7 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
|
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
|
||||||
proofRange, err := getProofRange(d.getAccessToken(), stream.GetSize())
|
proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -136,7 +136,7 @@ func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
_, err = utils.CopyWithBufferN(buf, reader, length)
|
_, err = io.CopyN(buf, reader, length)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -164,7 +164,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
|
|||||||
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
|
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
|
||||||
createData["part_info_list"] = makePartInfos(count)
|
createData["part_info_list"] = makePartInfos(count)
|
||||||
// rapid upload
|
// rapid upload
|
||||||
rapidUpload := !stream.IsForceStreamUpload() && stream.GetSize() > 100*utils.KB && d.RapidUpload
|
rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
|
||||||
if rapidUpload {
|
if rapidUpload {
|
||||||
log.Debugf("[aliyundrive_open] start cal pre_hash")
|
log.Debugf("[aliyundrive_open] start cal pre_hash")
|
||||||
// read 1024 bytes to calculate pre hash
|
// read 1024 bytes to calculate pre hash
|
||||||
@ -242,18 +242,14 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
|
|||||||
if remain := stream.GetSize() - offset; length > remain {
|
if remain := stream.GetSize() - offset; length > remain {
|
||||||
length = remain
|
length = remain
|
||||||
}
|
}
|
||||||
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
|
//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
|
||||||
if rapidUpload {
|
rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
|
||||||
srd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
|
if err != nil {
|
||||||
if err != nil {
|
return nil, err
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
rd = utils.NewMultiReadable(srd)
|
|
||||||
}
|
}
|
||||||
err = retry.Do(func() error {
|
err = retry.Do(func() error {
|
||||||
_ = rd.Reset()
|
//rd.Reset()
|
||||||
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
|
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
|
||||||
return d.uploadPart(ctx, rateLimitedRd, createResp.PartInfoList[i])
|
|
||||||
},
|
},
|
||||||
retry.Attempts(3),
|
retry.Attempts(3),
|
||||||
retry.DelayType(retry.BackOffDelay),
|
retry.DelayType(retry.BackOffDelay),
|
||||||
|
@ -19,7 +19,7 @@ import (
|
|||||||
// do others that not defined in Driver interface
|
// do others that not defined in Driver interface
|
||||||
|
|
||||||
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
|
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
|
||||||
url := API_URL + "/oauth/access_token"
|
url := d.base + "/oauth/access_token"
|
||||||
if d.OauthTokenURL != "" && d.ClientID == "" {
|
if d.OauthTokenURL != "" && d.ClientID == "" {
|
||||||
url = d.OauthTokenURL
|
url = d.OauthTokenURL
|
||||||
}
|
}
|
||||||
@ -74,9 +74,6 @@ func getSub(token string) (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliyundriveOpen) refreshToken() error {
|
func (d *AliyundriveOpen) refreshToken() error {
|
||||||
if d.ref != nil {
|
|
||||||
return d.ref.refreshToken()
|
|
||||||
}
|
|
||||||
refresh, access, err := d._refreshToken()
|
refresh, access, err := d._refreshToken()
|
||||||
for i := 0; i < 3; i++ {
|
for i := 0; i < 3; i++ {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@ -103,7 +100,7 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback,
|
|||||||
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
|
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
// TODO check whether access_token is expired
|
// TODO check whether access_token is expired
|
||||||
req.SetHeader("Authorization", "Bearer "+d.getAccessToken())
|
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
||||||
if method == http.MethodPost {
|
if method == http.MethodPost {
|
||||||
req.SetHeader("Content-Type", "application/json")
|
req.SetHeader("Content-Type", "application/json")
|
||||||
}
|
}
|
||||||
@ -112,7 +109,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
|
|||||||
}
|
}
|
||||||
var e ErrResp
|
var e ErrResp
|
||||||
req.SetError(&e)
|
req.SetError(&e)
|
||||||
res, err := req.Execute(method, API_URL+uri)
|
res, err := req.Execute(method, d.base+uri)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if res != nil {
|
if res != nil {
|
||||||
log.Errorf("[aliyundrive_open] request error: %s", res.String())
|
log.Errorf("[aliyundrive_open] request error: %s", res.String())
|
||||||
@ -121,7 +118,7 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
|
|||||||
}
|
}
|
||||||
isRetry := len(retry) > 0 && retry[0]
|
isRetry := len(retry) > 0 && retry[0]
|
||||||
if e.Code != "" {
|
if e.Code != "" {
|
||||||
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.getAccessToken() == "") {
|
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") {
|
||||||
err = d.refreshToken()
|
err = d.refreshToken()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err, nil
|
return nil, err, nil
|
||||||
@ -179,10 +176,3 @@ func getNowTime() (time.Time, string) {
|
|||||||
nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
|
nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
|
||||||
return nowTime, nowTimeStr
|
return nowTime, nowTimeStr
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *AliyundriveOpen) getAccessToken() string {
|
|
||||||
if d.ref != nil {
|
|
||||||
return d.ref.getAccessToken()
|
|
||||||
}
|
|
||||||
return d.AccessToken
|
|
||||||
}
|
|
||||||
|
@ -105,7 +105,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
|
|||||||
"share_id": d.ShareId,
|
"share_id": d.ShareId,
|
||||||
}
|
}
|
||||||
var resp ShareLinkResp
|
var resp ShareLinkResp
|
||||||
_, err := d.request("https://api.alipan.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
|
_, err := d.request("https://api.aliyundrive.com/v2/file/get_share_link_download_url", http.MethodPost, func(req *resty.Request) {
|
||||||
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
|
req.SetHeader(CanaryHeaderKey, CanaryHeaderValue).SetBody(data).SetResult(&resp)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -113,7 +113,7 @@ func (d *AliyundriveShare) link(ctx context.Context, file model.Obj) (*model.Lin
|
|||||||
}
|
}
|
||||||
return &model.Link{
|
return &model.Link{
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
"Referer": []string{"https://www.alipan.com/"},
|
"Referer": []string{"https://www.aliyundrive.com/"},
|
||||||
},
|
},
|
||||||
URL: resp.DownloadUrl,
|
URL: resp.DownloadUrl,
|
||||||
}, nil
|
}, nil
|
||||||
@ -128,9 +128,9 @@ func (d *AliyundriveShare) Other(ctx context.Context, args model.OtherArgs) (int
|
|||||||
}
|
}
|
||||||
switch args.Method {
|
switch args.Method {
|
||||||
case "doc_preview":
|
case "doc_preview":
|
||||||
url = "https://api.alipan.com/v2/file/get_office_preview_url"
|
url = "https://api.aliyundrive.com/v2/file/get_office_preview_url"
|
||||||
case "video_preview":
|
case "video_preview":
|
||||||
url = "https://api.alipan.com/v2/file/get_video_preview_play_info"
|
url = "https://api.aliyundrive.com/v2/file/get_video_preview_play_info"
|
||||||
data["category"] = "live_transcoding"
|
data["category"] = "live_transcoding"
|
||||||
default:
|
default:
|
||||||
return nil, errs.NotSupport
|
return nil, errs.NotSupport
|
||||||
|
@ -16,7 +16,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func (d *AliyundriveShare) refreshToken() error {
|
func (d *AliyundriveShare) refreshToken() error {
|
||||||
url := "https://auth.alipan.com/v2/account/token"
|
url := "https://auth.aliyundrive.com/v2/account/token"
|
||||||
var resp base.TokenResp
|
var resp base.TokenResp
|
||||||
var e ErrorResp
|
var e ErrorResp
|
||||||
_, err := base.RestyClient.R().
|
_, err := base.RestyClient.R().
|
||||||
@ -47,7 +47,7 @@ func (d *AliyundriveShare) getShareToken() error {
|
|||||||
var resp ShareTokenResp
|
var resp ShareTokenResp
|
||||||
_, err := base.RestyClient.R().
|
_, err := base.RestyClient.R().
|
||||||
SetResult(&resp).SetError(&e).SetBody(data).
|
SetResult(&resp).SetError(&e).SetBody(data).
|
||||||
Post("https://api.alipan.com/v2/share_link/get_share_token")
|
Post("https://api.aliyundrive.com/v2/share_link/get_share_token")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -116,7 +116,7 @@ func (d *AliyundriveShare) getFiles(fileId string) ([]File, error) {
|
|||||||
SetHeader("x-share-token", d.ShareToken).
|
SetHeader("x-share-token", d.ShareToken).
|
||||||
SetHeader(CanaryHeaderKey, CanaryHeaderValue).
|
SetHeader(CanaryHeaderKey, CanaryHeaderValue).
|
||||||
SetResult(&resp).SetError(&e).SetBody(data).
|
SetResult(&resp).SetError(&e).SetBody(data).
|
||||||
Post("https://api.alipan.com/adrive/v3/file/list")
|
Post("https://api.aliyundrive.com/adrive/v3/file/list")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -2,8 +2,6 @@ package drivers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
_ "github.com/alist-org/alist/v3/drivers/115"
|
_ "github.com/alist-org/alist/v3/drivers/115"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/115_open"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/115_share"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/123"
|
_ "github.com/alist-org/alist/v3/drivers/123"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/123_link"
|
_ "github.com/alist-org/alist/v3/drivers/123_link"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/123_share"
|
_ "github.com/alist-org/alist/v3/drivers/123_share"
|
||||||
@ -16,41 +14,26 @@ import (
|
|||||||
_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
|
_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
|
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
|
_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/azure_blob"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
|
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
|
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
|
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
|
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/crypt"
|
_ "github.com/alist-org/alist/v3/drivers/crypt"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/doubao"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/dropbox"
|
_ "github.com/alist-org/alist/v3/drivers/dropbox"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/febbox"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/ftp"
|
_ "github.com/alist-org/alist/v3/drivers/ftp"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/github"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/github_releases"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/google_drive"
|
_ "github.com/alist-org/alist/v3/drivers/google_drive"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/google_photo"
|
_ "github.com/alist-org/alist/v3/drivers/google_photo"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/halalcloud"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/ilanzou"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/ipfs_api"
|
_ "github.com/alist-org/alist/v3/drivers/ipfs_api"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/kodbox"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/lanzou"
|
_ "github.com/alist-org/alist/v3/drivers/lanzou"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/lenovonas_share"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/local"
|
_ "github.com/alist-org/alist/v3/drivers/local"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/mediatrack"
|
_ "github.com/alist-org/alist/v3/drivers/mediatrack"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/mega"
|
_ "github.com/alist-org/alist/v3/drivers/mega"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/misskey"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/mopan"
|
_ "github.com/alist-org/alist/v3/drivers/mopan"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/netease_music"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/onedrive"
|
_ "github.com/alist-org/alist/v3/drivers/onedrive"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/onedrive_app"
|
_ "github.com/alist-org/alist/v3/drivers/onedrive_app"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/onedrive_sharelink"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/pikpak"
|
_ "github.com/alist-org/alist/v3/drivers/pikpak"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/pikpak_share"
|
_ "github.com/alist-org/alist/v3/drivers/pikpak_share"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/quark_uc"
|
_ "github.com/alist-org/alist/v3/drivers/quark_uc"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/quark_uc_tv"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/quqi"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/s3"
|
_ "github.com/alist-org/alist/v3/drivers/s3"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/seafile"
|
_ "github.com/alist-org/alist/v3/drivers/seafile"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/sftp"
|
_ "github.com/alist-org/alist/v3/drivers/sftp"
|
||||||
@ -58,13 +41,10 @@ import (
|
|||||||
_ "github.com/alist-org/alist/v3/drivers/teambition"
|
_ "github.com/alist-org/alist/v3/drivers/teambition"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/terabox"
|
_ "github.com/alist-org/alist/v3/drivers/terabox"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/thunder"
|
_ "github.com/alist-org/alist/v3/drivers/thunder"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/thunder_browser"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/thunderx"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/trainbit"
|
_ "github.com/alist-org/alist/v3/drivers/trainbit"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/url_tree"
|
_ "github.com/alist-org/alist/v3/drivers/url_tree"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/uss"
|
_ "github.com/alist-org/alist/v3/drivers/uss"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/virtual"
|
_ "github.com/alist-org/alist/v3/drivers/virtual"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/vtencent"
|
|
||||||
_ "github.com/alist-org/alist/v3/drivers/webdav"
|
_ "github.com/alist-org/alist/v3/drivers/webdav"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/weiyun"
|
_ "github.com/alist-org/alist/v3/drivers/weiyun"
|
||||||
_ "github.com/alist-org/alist/v3/drivers/wopan"
|
_ "github.com/alist-org/alist/v3/drivers/wopan"
|
||||||
|
@ -1,313 +0,0 @@
|
|||||||
package azure_blob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
)
|
|
||||||
// Azure Blob Storage based on the blob APIs
|
|
||||||
// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
|
|
||||||
type AzureBlob struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
client *azblob.Client
|
|
||||||
containerClient *container.Client
|
|
||||||
config driver.Config
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config returns the driver configuration.
|
|
||||||
func (d *AzureBlob) Config() driver.Config {
|
|
||||||
return d.config
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAddition returns additional settings specific to Azure Blob Storage.
|
|
||||||
func (d *AzureBlob) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init initializes the Azure Blob Storage client using shared key authentication.
|
|
||||||
func (d *AzureBlob) Init(ctx context.Context) error {
|
|
||||||
// Validate the endpoint URL
|
|
||||||
accountName := extractAccountName(d.Addition.Endpoint)
|
|
||||||
if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
|
|
||||||
return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only")
|
|
||||||
}
|
|
||||||
|
|
||||||
credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create credential: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if Endpoint is just account name
|
|
||||||
endpoint := d.Addition.Endpoint
|
|
||||||
if accountName == endpoint {
|
|
||||||
endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
|
|
||||||
}
|
|
||||||
// Initialize Azure Blob client with retry policy
|
|
||||||
client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
|
|
||||||
&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
|
|
||||||
Retry: policy.RetryOptions{
|
|
||||||
MaxRetries: MaxRetries,
|
|
||||||
RetryDelay: RetryDelay,
|
|
||||||
},
|
|
||||||
}})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create client: %w", err)
|
|
||||||
}
|
|
||||||
d.client = client
|
|
||||||
|
|
||||||
// Ensure container exists or create it
|
|
||||||
containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
|
|
||||||
if containerName == "" {
|
|
||||||
return fmt.Errorf("container name cannot be empty")
|
|
||||||
}
|
|
||||||
return d.createContainerIfNotExists(ctx, containerName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drop releases resources associated with the Azure Blob client.
|
|
||||||
func (d *AzureBlob) Drop(ctx context.Context) error {
|
|
||||||
d.client = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// List retrieves blobs and directories under the specified path.
|
|
||||||
func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
prefix := ensureTrailingSlash(dir.GetPath())
|
|
||||||
|
|
||||||
pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
|
|
||||||
Prefix: &prefix,
|
|
||||||
})
|
|
||||||
|
|
||||||
var objs []model.Obj
|
|
||||||
for pager.More() {
|
|
||||||
page, err := pager.NextPage(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to list blobs: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process directories
|
|
||||||
for _, blobPrefix := range page.Segment.BlobPrefixes {
|
|
||||||
objs = append(objs, &model.Object{
|
|
||||||
Name: path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
|
|
||||||
Path: *blobPrefix.Name,
|
|
||||||
Modified: *blobPrefix.Properties.LastModified,
|
|
||||||
Ctime: *blobPrefix.Properties.CreationTime,
|
|
||||||
IsFolder: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process files
|
|
||||||
for _, blob := range page.Segment.BlobItems {
|
|
||||||
if strings.HasSuffix(*blob.Name, "/") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
objs = append(objs, &model.Object{
|
|
||||||
Name: path.Base(*blob.Name),
|
|
||||||
Path: *blob.Name,
|
|
||||||
Size: *blob.Properties.ContentLength,
|
|
||||||
Modified: *blob.Properties.LastModified,
|
|
||||||
Ctime: *blob.Properties.CreationTime,
|
|
||||||
IsFolder: false,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return objs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Link generates a temporary SAS URL for accessing a blob.
|
|
||||||
func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
blobClient := d.containerClient.NewBlobClient(file.GetPath())
|
|
||||||
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
|
|
||||||
|
|
||||||
sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
|
|
||||||
}
|
|
||||||
return &model.Link{URL: sasURL}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MakeDir creates a virtual directory by uploading an empty blob as a marker.
|
|
||||||
func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
|
|
||||||
dirPath := path.Join(parentDir.GetPath(), dirName)
|
|
||||||
if err := d.mkDir(ctx, dirPath); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create directory marker: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &model.Object{
|
|
||||||
Path: dirPath,
|
|
||||||
Name: dirName,
|
|
||||||
IsFolder: true,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move relocates an object (file or directory) to a new directory.
|
|
||||||
func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
|
||||||
srcPath := srcObj.GetPath()
|
|
||||||
dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())
|
|
||||||
|
|
||||||
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
|
|
||||||
return nil, fmt.Errorf("move operation failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &model.Object{
|
|
||||||
Path: dstPath,
|
|
||||||
Name: srcObj.GetName(),
|
|
||||||
Modified: time.Now(),
|
|
||||||
IsFolder: srcObj.IsDir(),
|
|
||||||
Size: srcObj.GetSize(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rename changes the name of an existing object.
|
|
||||||
func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
|
|
||||||
srcPath := srcObj.GetPath()
|
|
||||||
dstPath := path.Join(path.Dir(srcPath), newName)
|
|
||||||
|
|
||||||
if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
|
|
||||||
return nil, fmt.Errorf("rename operation failed: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &model.Object{
|
|
||||||
Path: dstPath,
|
|
||||||
Name: newName,
|
|
||||||
Modified: time.Now(),
|
|
||||||
IsFolder: srcObj.IsDir(),
|
|
||||||
Size: srcObj.GetSize(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy duplicates an object (file or directory) to a specified destination
// directory. Directories are copied blob-by-blob via a flat listing; files
// are copied with a single server-side copy.
func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())

	// Handle directory copying using flat listing
	if srcObj.IsDir() {
		srcPrefix := srcObj.GetPath()
		srcPrefix = ensureTrailingSlash(srcPrefix)

		// Get all blobs under the source directory
		blobs, err := d.flattenListBlobs(ctx, srcPrefix)
		if err != nil {
			return nil, fmt.Errorf("failed to list source directory contents: %w", err)
		}

		// Process each blob - copy to destination
		for _, blob := range blobs {
			// Skip the directory marker itself
			if *blob.Name == srcPrefix {
				continue
			}

			// Calculate relative path from source
			relPath := strings.TrimPrefix(*blob.Name, srcPrefix)
			itemDstPath := path.Join(dstPath, relPath)

			// NOTE(review): path.Join strips trailing slashes, so the
			// HasSuffix branch below can likely only fire via the
			// "hdi_isfolder" metadata check — confirm.
			if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") {
				// Create directory marker at destination
				err := d.mkDir(ctx, itemDstPath)
				if err != nil {
					return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
				}
			} else {
				// Copy the blob (server-side copy via copyFile)
				if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil {
					return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err)
				}
			}

		}

		// Create directory marker at destination if the source was empty
		if len(blobs) == 0 {
			err := d.mkDir(ctx, dstPath)
			if err != nil {
				return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
			}
		}

		return &model.Object{
			Path:     dstPath,
			Name:     srcObj.GetName(),
			Modified: time.Now(),
			IsFolder: true,
		}, nil
	}

	// Copy a single file
	if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil {
		return nil, fmt.Errorf("failed to copy blob: %w", err)
	}
	return &model.Object{
		Path:     dstPath,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: time.Now(),
		IsFolder: false,
	}, nil
}
|
|
||||||
|
|
||||||
// Remove deletes a specified blob or recursively deletes a directory and its contents.
|
|
||||||
func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
path := obj.GetPath()
|
|
||||||
|
|
||||||
// Handle recursive directory deletion
|
|
||||||
if obj.IsDir() {
|
|
||||||
return d.deleteFolder(ctx, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete single file
|
|
||||||
return d.deleteFile(ctx, path, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put uploads a file stream to Azure Blob Storage with progress tracking.
// Upload block size and concurrency are chosen from the stream size, and
// progress is reported through the supplied driver.UpdateProgress callback.
func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	blobPath := path.Join(dstDir.GetPath(), stream.GetName())
	blobClient := d.containerClient.NewBlockBlobClient(blobPath)

	// Determine optimal upload options based on file size
	options := optimizedUploadOptions(stream.GetSize())

	// Track upload progress via an io.Writer that counts bytes
	progressTracker := &progressTracker{
		total:          stream.GetSize(),
		updateProgress: up,
	}

	// Wrap stream to handle context cancellation and progress tracking
	limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker))

	// Upload the stream to Azure Blob Storage
	_, err := blobClient.UploadStream(ctx, limitedStream, options)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file: %w", err)
	}

	return &model.Object{
		Path:     blobPath,
		Name:     stream.GetName(),
		Size:     stream.GetSize(),
		Modified: time.Now(),
		IsFolder: false,
	}, nil
}
|
|
||||||
|
|
||||||
// The following methods related to archive handling are not implemented yet.
|
|
||||||
// func (d *AzureBlob) GetArchiveMeta(...) {...}
|
|
||||||
// func (d *AzureBlob) ListArchive(...) {...}
|
|
||||||
// func (d *AzureBlob) Extract(...) {...}
|
|
||||||
// func (d *AzureBlob) ArchiveDecompress(...) {...}
|
|
||||||
|
|
||||||
// Ensure AzureBlob implements the driver.Driver interface.
|
|
||||||
var _ driver.Driver = (*AzureBlob)(nil)
|
|
@ -1,27 +0,0 @@
|
|||||||
package azure_blob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Addition holds the user-configurable settings for the Azure Blob driver.
// The struct tags drive the admin UI form and JSON (de)serialization.
type Addition struct {
	// Full service endpoint URL, including the storage account name.
	Endpoint string `json:"endpoint" required:"true" default:"https://<accountname>.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."`
	// Shared-key credential used to authenticate against the account.
	AccessKey string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"`
	// Name of an existing container inside the account.
	ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
	// Lifetime, in hours, of generated SAS URLs.
	SignURLExpire int `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
}
|
|
||||||
|
|
||||||
// config describes this driver to the alist driver registry:
// its display name and capability flags.
var config = driver.Config{
	Name:        "Azure Blob Storage",
	LocalSort:   true, // listings are sorted locally, not by the backend
	CheckStatus: true,
}
|
|
||||||
|
|
||||||
// init registers a factory for the Azure Blob driver with the op registry
// so it becomes selectable as a storage backend.
func init() {
	op.RegisterDriver(func() driver.Driver {
		return &AzureBlob{
			config: config,
		}
	})
}
|
|
@ -1,20 +0,0 @@
|
|||||||
package azure_blob
|
|
||||||
|
|
||||||
import "github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
|
|
||||||
// progressTracker is used to track upload progress.
// It implements io.Writer so it can sit on the write side of an
// io.TeeReader and count the bytes flowing through an upload.
type progressTracker struct {
	total          int64                 // total bytes expected for the upload
	current        int64                 // bytes observed so far
	updateProgress driver.UpdateProgress // callback receiving percent complete; may be nil
}
|
|
||||||
|
|
||||||
// Write implements io.Writer to track progress
|
|
||||||
func (pt *progressTracker) Write(p []byte) (n int, err error) {
|
|
||||||
n = len(p)
|
|
||||||
pt.current += int64(n)
|
|
||||||
if pt.updateProgress != nil && pt.total > 0 {
|
|
||||||
pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
@ -1,401 +0,0 @@
|
|||||||
package azure_blob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"path"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
|
||||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// MaxRetries defines the maximum number of retry attempts for Azure operations
|
|
||||||
MaxRetries = 3
|
|
||||||
// RetryDelay defines the base delay between retries
|
|
||||||
RetryDelay = 3 * time.Second
|
|
||||||
// MaxBatchSize defines the maximum number of operations in a single batch request
|
|
||||||
MaxBatchSize = 128
|
|
||||||
)
|
|
||||||
|
|
||||||
// extractAccountName extracts the storage account name from an Azure
// Storage endpoint URL, e.g. "https://myacct.blob.core.windows.net/"
// yields "myacct". The result is lowercased to match Azure account-name
// rules. An empty input yields an empty string.
func extractAccountName(endpoint string) string {
	// Strip the protocol prefix.
	endpoint = strings.TrimPrefix(endpoint, "https://")
	endpoint = strings.TrimPrefix(endpoint, "http://")

	// The account name is everything before the first dot.
	// (The previous len-check branch was dead code: strings.Split always
	// returns at least one element. strings.Cut avoids splitting the
	// whole host just to keep the first label.)
	name, _, _ := strings.Cut(endpoint, ".")
	return strings.ToLower(name)
}
|
|
||||||
|
|
||||||
// isNotFoundError checks if the error is a "not found" type error
|
|
||||||
func isNotFoundError(err error) bool {
|
|
||||||
var storageErr *azcore.ResponseError
|
|
||||||
if errors.As(err, &storageErr) {
|
|
||||||
return storageErr.StatusCode == 404
|
|
||||||
}
|
|
||||||
// Fallback to string matching for backwards compatibility
|
|
||||||
return err != nil && strings.Contains(err.Error(), "BlobNotFound")
|
|
||||||
}
|
|
||||||
|
|
||||||
// flattenListBlobs lists every blob under prefix using a flat (non-
// hierarchical) pager, returning all items across all pages. Metadata is
// requested so callers can inspect directory-marker keys.
func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
	// Standardize prefix format
	prefix = ensureTrailingSlash(prefix)

	var blobItems []container.BlobItem
	pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
		Prefix: &prefix,
		Include: container.ListBlobsInclude{
			Metadata: true,
		},
	})

	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to list blobs: %w", err)
		}

		for _, blob := range page.Segment.BlobItems {
			blobItems = append(blobItems, *blob)
		}
	}

	return blobItems, nil
}
|
|
||||||
|
|
||||||
// batchDeleteBlobs deletes the given blob paths using the batch API,
// submitting at most MaxBatchSize delete operations per request.
// Per-blob "not found" responses are tolerated; any other per-blob
// failure aborts with an error.
func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
	if len(blobPaths) == 0 {
		return nil
	}

	// Process in batches of MaxBatchSize
	for i := 0; i < len(blobPaths); i += MaxBatchSize {
		end := min(i+MaxBatchSize, len(blobPaths))
		currentBatch := blobPaths[i:end]

		// Create batch builder
		batchBuilder, err := d.containerClient.NewBatchBuilder()
		if err != nil {
			return fmt.Errorf("failed to create batch builder: %w", err)
		}

		// Add delete operations
		for _, blobPath := range currentBatch {
			if err := batchBuilder.Delete(blobPath, nil); err != nil {
				return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
			}
		}

		// Submit batch
		responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
		if err != nil {
			return fmt.Errorf("batch delete request failed: %w", err)
		}

		// Check responses
		for _, resp := range responses.Responses {
			if resp.Error != nil && !isNotFoundError(resp.Error) {
				// Recover the blob name so the error message is useful.
				blobName := "unknown"
				if resp.BlobName != nil {
					blobName = *resp.BlobName
				}
				return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
			}
		}
	}

	return nil
}
|
|
||||||
|
|
||||||
// deleteFolder recursively deletes a directory and all its contents.
// Files are batch-deleted first, then directory markers are removed from
// the deepest level up (so no marker is deleted while children remain),
// and finally the folder's own marker is removed.
func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
	// Ensure directory path ends with slash
	prefix = ensureTrailingSlash(prefix)

	// Get all blobs under the directory using flattenListBlobs
	globs, err := d.flattenListBlobs(ctx, prefix)
	if err != nil {
		return fmt.Errorf("failed to list blobs for deletion: %w", err)
	}

	// If there are blobs in the directory, delete them
	if len(globs) > 0 {
		// Separate file blobs from directory-marker blobs.
		var filePaths []string
		var dirPaths []string

		for _, blob := range globs {
			blobName := *blob.Name
			if isDirectory(blob) {
				// remove trailing slash for directory names
				dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
			} else {
				filePaths = append(filePaths, blobName)
			}
		}

		// Delete files first, then directory markers.
		if len(filePaths) > 0 {
			if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
				return err
			}
		}
		if len(dirPaths) > 0 {
			// Group directory markers by path depth.
			depthMap := make(map[int][]string)
			for _, dir := range dirPaths {
				depth := strings.Count(dir, "/") // number of "/" = depth
				depthMap[depth] = append(depthMap[depth], dir)
			}

			// Sort depths from deepest to shallowest.
			var depths []int
			for depth := range depthMap {
				depths = append(depths, depth)
			}
			sort.Sort(sort.Reverse(sort.IntSlice(depths)))

			// Batch-delete one depth level at a time, deepest first.
			for _, depth := range depths {
				batch := depthMap[depth]
				if err := d.batchDeleteBlobs(ctx, batch); err != nil {
					return err
				}
			}
		}
	}

	// Finally delete the directory marker itself.
	return d.deleteEmptyDirectory(ctx, prefix)
}
|
|
||||||
|
|
||||||
// deleteFile deletes a single file or blob with better error handling
|
|
||||||
func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
|
|
||||||
blobClient := d.containerClient.NewBlobClient(path)
|
|
||||||
_, err := blobClient.Delete(ctx, nil)
|
|
||||||
if err != nil && !(isDir && isNotFoundError(err)) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// copyFile copies a single blob from source path to destination path
|
|
||||||
func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
|
|
||||||
srcBlob := d.containerClient.NewBlobClient(srcPath)
|
|
||||||
dstBlob := d.containerClient.NewBlobClient(dstPath)
|
|
||||||
|
|
||||||
// Use configured expiration time for SAS URL
|
|
||||||
expireDuration := time.Hour * time.Duration(d.SignURLExpire)
|
|
||||||
srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to generate source SAS URL: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
|
|
||||||
return err
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// createContainerIfNotExists - Create container if not exists
|
|
||||||
// Clean up commented code
|
|
||||||
func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
|
|
||||||
serviceClient := d.client.ServiceClient()
|
|
||||||
containerClient := serviceClient.NewContainerClient(containerName)
|
|
||||||
|
|
||||||
var options = service.CreateContainerOptions{}
|
|
||||||
_, err := containerClient.Create(ctx, &options)
|
|
||||||
if err != nil {
|
|
||||||
var responseErr *azcore.ResponseError
|
|
||||||
if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
|
|
||||||
return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
d.containerClient = containerClient
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
|
|
||||||
func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
|
|
||||||
dirPath := ensureTrailingSlash(fullDirName)
|
|
||||||
blobClient := d.containerClient.NewBlockBlobClient(dirPath)
|
|
||||||
|
|
||||||
// Upload an empty blob with metadata indicating it's a directory
|
|
||||||
_, err := blobClient.Upload(ctx, struct {
|
|
||||||
*bytes.Reader
|
|
||||||
io.Closer
|
|
||||||
}{
|
|
||||||
Reader: bytes.NewReader([]byte{}),
|
|
||||||
Closer: io.NopCloser(nil),
|
|
||||||
}, &blockblob.UploadOptions{
|
|
||||||
Metadata: map[string]*string{
|
|
||||||
"hdi_isfolder": to.Ptr("true"),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureTrailingSlash returns path guaranteed to end with a "/",
// appending one only when it is missing.
func ensureTrailingSlash(path string) string {
	if strings.HasSuffix(path, "/") {
		return path
	}
	return path + "/"
}
|
|
||||||
|
|
||||||
// moveOrRename moves or renames blobs or directories from srcPath to
// dstPath. Azure has no server-side rename, so this is implemented as
// copy-then-delete: directories are recreated blob-by-blob at the
// destination, then the source tree is deleted (with one retry);
// single files are copied then the source blob is deleted.
func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
	if isDir {
		// Normalize paths for directory operations
		srcPath = ensureTrailingSlash(srcPath)
		dstPath = ensureTrailingSlash(dstPath)

		// List all blobs under the source directory
		blobs, err := d.flattenListBlobs(ctx, srcPath)
		if err != nil {
			return fmt.Errorf("failed to list blobs: %w", err)
		}

		// Iterate and copy each blob to the destination
		for _, item := range blobs {
			srcBlobName := *item.Name
			relPath := strings.TrimPrefix(srcBlobName, srcPath)
			itemDstPath := path.Join(dstPath, relPath)

			if isDirectory(item) {
				// Create directory marker at destination
				if err := d.mkDir(ctx, itemDstPath); err != nil {
					return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
				}
			} else {
				// Copy file blob to destination
				if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
					return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
				}
			}
		}

		// Handle empty directories by creating a marker at destination
		if len(blobs) == 0 {
			if err := d.mkDir(ctx, dstPath); err != nil {
				return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
			}
		}

		// Delete source directory and its contents
		if err := d.deleteFolder(ctx, srcPath); err != nil {
			log.Warnf("failed to delete source directory [%s]: %v\n, and try again", srcPath, err)
			// Retry deletion once more; a second failure is only logged,
			// so a stale source may remain but the move still succeeds.
			if err := d.deleteFolder(ctx, srcPath); err != nil {
				log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
			}
		}

		return nil
	}

	// Single file move or rename operation
	if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
		return fmt.Errorf("failed to copy file: %w", err)
	}

	// Delete source file after successful copy; failure is logged only.
	if err := d.deleteFile(ctx, srcPath, false); err != nil {
		log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
	}
	return nil
}
|
|
||||||
|
|
||||||
// optimizedUploadOptions returns the optimal upload options based on file size
|
|
||||||
func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
|
|
||||||
options := &azblob.UploadStreamOptions{
|
|
||||||
BlockSize: 4 * 1024 * 1024, // 4MB block size
|
|
||||||
Concurrency: 4, // Default concurrency
|
|
||||||
}
|
|
||||||
|
|
||||||
// For large files, increase block size and concurrency
|
|
||||||
if fileSize > 256*1024*1024 { // For files larger than 256MB
|
|
||||||
options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
|
|
||||||
options.Concurrency = 8 // More concurrent uploads
|
|
||||||
}
|
|
||||||
|
|
||||||
// For very large files (>1GB)
|
|
||||||
if fileSize > 1024*1024*1024 {
|
|
||||||
options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
|
|
||||||
options.Concurrency = 16 // Higher concurrency
|
|
||||||
}
|
|
||||||
|
|
||||||
return options
|
|
||||||
}
|
|
||||||
|
|
||||||
// isDirectory determines if a blob represents a directory.
// Checks multiple indicators: path suffix, metadata, and content type,
// because different Azure tools mark directories differently.
func isDirectory(blob container.BlobItem) bool {
	// Check path suffix: marker blobs are named with a trailing "/".
	if strings.HasSuffix(*blob.Name, "/") {
		return true
	}

	// Check metadata for directory marker
	if blob.Metadata != nil {
		if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
			return true
		}
		// Azure Storage Explorer and other tools may use different metadata keys
		if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
			return true
		}
	}

	// Check content type (some tools mark directories with specific content
	// types); only zero-length blobs qualify.
	if blob.Properties != nil && blob.Properties.ContentType != nil {
		contentType := strings.ToLower(*blob.Properties.ContentType)
		if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
			return true
		}
	}

	return false
}
|
|
||||||
|
|
||||||
// deleteEmptyDirectory removes a directory's marker blob. It first tries
// the name without a trailing slash, then (on "not found") with one, to
// cover both marker-naming conventions. "Not found" is not an error:
// the directory may never have had a marker.
func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
	// Try the marker named without the trailing slash first.
	blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
	_, err := blobClient.Delete(ctx, nil)

	// Also try deleting with trailing slash (for different directory marker formats)
	if err != nil && isNotFoundError(err) {
		blobClient = d.containerClient.NewBlobClient(dirPath)
		_, err = blobClient.Delete(ctx, nil)
	}

	// Ignore not found errors
	if err != nil && isNotFoundError(err) {
		log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
		return nil
	}

	return err
}
|
|
@ -12,8 +12,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/sync/semaphore"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
@ -78,8 +76,6 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
|
|||||||
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
if d.DownloadAPI == "crack" {
|
if d.DownloadAPI == "crack" {
|
||||||
return d.linkCrack(file, args)
|
return d.linkCrack(file, args)
|
||||||
} else if d.DownloadAPI == "crack_video" {
|
|
||||||
return d.linkCrackVideo(file, args)
|
|
||||||
}
|
}
|
||||||
return d.linkOfficial(file, args)
|
return d.linkOfficial(file, args)
|
||||||
}
|
}
|
||||||
@ -169,16 +165,9 @@ func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream mo
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// 修复时间,具体原因见 Put 方法注释的 **注意**
|
|
||||||
newFile.Ctime = stream.CreateTime().Unix()
|
|
||||||
newFile.Mtime = stream.ModTime().Unix()
|
|
||||||
return fileToObj(newFile), nil
|
return fileToObj(newFile), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put
|
|
||||||
//
|
|
||||||
// **注意**: 截至 2024/04/20 百度云盘 api 接口返回的时间永远是当前时间,而不是文件时间。
|
|
||||||
// 而实际上云盘存储的时间是文件时间,所以此处需要覆盖时间,保证缓存与云盘的数据一致
|
|
||||||
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
// rapid upload
|
// rapid upload
|
||||||
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
|
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
|
||||||
@ -191,7 +180,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
}
|
}
|
||||||
|
|
||||||
streamSize := stream.GetSize()
|
streamSize := stream.GetSize()
|
||||||
sliceSize := d.getSliceSize(streamSize)
|
sliceSize := d.getSliceSize()
|
||||||
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
|
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
|
||||||
lastBlockSize := streamSize % sliceSize
|
lastBlockSize := streamSize % sliceSize
|
||||||
if streamSize > 0 && lastBlockSize == 0 {
|
if streamSize > 0 && lastBlockSize == 0 {
|
||||||
@ -199,7 +188,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
}
|
}
|
||||||
|
|
||||||
//cal md5 for first 256k data
|
//cal md5 for first 256k data
|
||||||
const SliceSize int64 = 256 * utils.KB
|
const SliceSize int64 = 256 * 1024
|
||||||
// cal md5
|
// cal md5
|
||||||
blockList := make([]string, 0, count)
|
blockList := make([]string, 0, count)
|
||||||
byteSize := sliceSize
|
byteSize := sliceSize
|
||||||
@ -215,7 +204,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
if i == count {
|
if i == count {
|
||||||
byteSize = lastBlockSize
|
byteSize = lastBlockSize
|
||||||
}
|
}
|
||||||
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
|
_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -256,18 +245,17 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
log.Debugf("%+v", precreateResp)
|
log.Debugf("%+v", precreateResp)
|
||||||
if precreateResp.ReturnType == 2 {
|
if precreateResp.ReturnType == 2 {
|
||||||
//rapid upload, since got md5 match from baidu server
|
//rapid upload, since got md5 match from baidu server
|
||||||
// 修复时间,具体原因见 Put 方法注释的 **注意**
|
if err != nil {
|
||||||
precreateResp.File.Ctime = ctime
|
return nil, err
|
||||||
precreateResp.File.Mtime = mtime
|
}
|
||||||
return fileToObj(precreateResp.File), nil
|
return fileToObj(precreateResp.File), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// step.2 上传分片
|
// step.2 上传分片
|
||||||
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
|
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
|
||||||
retry.Attempts(1),
|
retry.Attempts(3),
|
||||||
retry.Delay(time.Second),
|
retry.Delay(time.Second),
|
||||||
retry.DelayType(retry.BackOffDelay))
|
retry.DelayType(retry.BackOffDelay))
|
||||||
sem := semaphore.NewWeighted(3)
|
|
||||||
for i, partseq := range precreateResp.BlockList {
|
for i, partseq := range precreateResp.BlockList {
|
||||||
if utils.IsCanceled(upCtx) {
|
if utils.IsCanceled(upCtx) {
|
||||||
break
|
break
|
||||||
@ -278,10 +266,6 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
byteSize = lastBlockSize
|
byteSize = lastBlockSize
|
||||||
}
|
}
|
||||||
threadG.Go(func(ctx context.Context) error {
|
threadG.Go(func(ctx context.Context) error {
|
||||||
if err = sem.Acquire(ctx, 1); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer sem.Release(1)
|
|
||||||
params := map[string]string{
|
params := map[string]string{
|
||||||
"method": "upload",
|
"method": "upload",
|
||||||
"access_token": d.AccessToken,
|
"access_token": d.AccessToken,
|
||||||
@ -290,8 +274,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
"uploadid": precreateResp.Uploadid,
|
"uploadid": precreateResp.Uploadid,
|
||||||
"partseq": strconv.Itoa(partseq),
|
"partseq": strconv.Itoa(partseq),
|
||||||
}
|
}
|
||||||
err := d.uploadSlice(ctx, params, stream.GetName(),
|
err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
|
||||||
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -315,9 +298,6 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// 修复时间,具体原因见 Put 方法注释的 **注意**
|
|
||||||
newFile.Ctime = ctime
|
|
||||||
newFile.Mtime = mtime
|
|
||||||
return fileToObj(newFile), nil
|
return fileToObj(newFile), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,18 +8,15 @@ import (
|
|||||||
type Addition struct {
|
type Addition struct {
|
||||||
RefreshToken string `json:"refresh_token" required:"true"`
|
RefreshToken string `json:"refresh_token" required:"true"`
|
||||||
driver.RootPath
|
driver.RootPath
|
||||||
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
|
OrderBy string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
|
||||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
|
||||||
DownloadAPI string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
|
DownloadAPI string `json:"download_api" type:"select" options:"official,crack" default:"official"`
|
||||||
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
||||||
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
||||||
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
|
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
|
||||||
AccessToken string
|
AccessToken string
|
||||||
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
||||||
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
|
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
|
||||||
CustomUploadPartSize int64 `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
|
|
||||||
LowBandwithUploadMode bool `json:"low_bandwith_upload_mode" default:"false"`
|
|
||||||
OnlyListVideoFile bool `json:"only_list_video_file" default:"false"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type TokenErrResp struct {
|
type TokenErrResp struct {
|
||||||
@ -17,7 +16,7 @@ type TokenErrResp struct {
|
|||||||
type File struct {
|
type File struct {
|
||||||
//TkbindId int `json:"tkbind_id"`
|
//TkbindId int `json:"tkbind_id"`
|
||||||
//OwnerType int `json:"owner_type"`
|
//OwnerType int `json:"owner_type"`
|
||||||
Category int `json:"category"`
|
//Category int `json:"category"`
|
||||||
//RealCategory string `json:"real_category"`
|
//RealCategory string `json:"real_category"`
|
||||||
FsId int64 `json:"fs_id"`
|
FsId int64 `json:"fs_id"`
|
||||||
//OperId int `json:"oper_id"`
|
//OperId int `json:"oper_id"`
|
||||||
@ -56,11 +55,11 @@ func fileToObj(f File) *model.ObjThumb {
|
|||||||
if f.ServerFilename == "" {
|
if f.ServerFilename == "" {
|
||||||
f.ServerFilename = path.Base(f.Path)
|
f.ServerFilename = path.Base(f.Path)
|
||||||
}
|
}
|
||||||
if f.ServerCtime == 0 {
|
if f.LocalCtime == 0 {
|
||||||
f.ServerCtime = f.Ctime
|
f.LocalCtime = f.Ctime
|
||||||
}
|
}
|
||||||
if f.ServerMtime == 0 {
|
if f.LocalMtime == 0 {
|
||||||
f.ServerMtime = f.Mtime
|
f.LocalMtime = f.Mtime
|
||||||
}
|
}
|
||||||
return &model.ObjThumb{
|
return &model.ObjThumb{
|
||||||
Object: model.Object{
|
Object: model.Object{
|
||||||
@ -68,12 +67,12 @@ func fileToObj(f File) *model.ObjThumb {
|
|||||||
Path: f.Path,
|
Path: f.Path,
|
||||||
Name: f.ServerFilename,
|
Name: f.ServerFilename,
|
||||||
Size: f.Size,
|
Size: f.Size,
|
||||||
Modified: time.Unix(f.ServerMtime, 0),
|
Modified: time.Unix(f.LocalMtime, 0),
|
||||||
Ctime: time.Unix(f.ServerCtime, 0),
|
Ctime: time.Unix(f.LocalCtime, 0),
|
||||||
IsFolder: f.Isdir == 1,
|
IsFolder: f.Isdir == 1,
|
||||||
|
|
||||||
// 直接获取的MD5是错误的
|
// 直接获取的MD5是错误的
|
||||||
HashInfo: utils.NewHashInfo(utils.MD5, DecryptMd5(f.Md5)),
|
// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
|
||||||
},
|
},
|
||||||
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
|
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
|
||||||
}
|
}
|
||||||
|
@ -1,14 +1,11 @@
|
|||||||
package baidu_netdisk
|
package baidu_netdisk
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
@ -79,12 +76,6 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
|
|||||||
return retry.Unrecoverable(err2)
|
return retry.Unrecoverable(err2)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if 31023 == errno && d.DownloadAPI == "crack_video" {
|
|
||||||
result = res.Body()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
|
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
|
||||||
}
|
}
|
||||||
result = res.Body()
|
result = res.Body()
|
||||||
@ -137,21 +128,12 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
|
|||||||
if len(resp.List) == 0 {
|
if len(resp.List) == 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
res = append(res, resp.List...)
|
||||||
if d.OnlyListVideoFile {
|
|
||||||
for _, file := range resp.List {
|
|
||||||
if file.Isdir == 1 || file.Category == 1 {
|
|
||||||
res = append(res, file)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
res = append(res, resp.List...)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
|
func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
var resp DownloadResp
|
var resp DownloadResp
|
||||||
params := map[string]string{
|
params := map[string]string{
|
||||||
"method": "filemetas",
|
"method": "filemetas",
|
||||||
@ -171,6 +153,8 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
|
|||||||
u = res.Header().Get("location")
|
u = res.Header().Get("location")
|
||||||
//}
|
//}
|
||||||
|
|
||||||
|
updateObjMd5(file, "pan.baidu.com", u)
|
||||||
|
|
||||||
return &model.Link{
|
return &model.Link{
|
||||||
URL: u,
|
URL: u,
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
@ -179,7 +163,7 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
|
func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
||||||
var resp DownloadResp2
|
var resp DownloadResp2
|
||||||
param := map[string]string{
|
param := map[string]string{
|
||||||
"target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
|
"target": fmt.Sprintf("[\"%s\"]", file.GetPath()),
|
||||||
@ -194,6 +178,8 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
|
||||||
|
|
||||||
return &model.Link{
|
return &model.Link{
|
||||||
URL: resp.Info[0].Dlink,
|
URL: resp.Info[0].Dlink,
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
@ -202,34 +188,6 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
|
|
||||||
param := map[string]string{
|
|
||||||
"type": "VideoURL",
|
|
||||||
"path": fmt.Sprintf("%s", file.GetPath()),
|
|
||||||
"fs_id": file.GetID(),
|
|
||||||
"devuid": "0%1",
|
|
||||||
"clienttype": "1",
|
|
||||||
"channel": "android_15_25010PN30C_bd-netdisk_1523a",
|
|
||||||
"nom3u8": "1",
|
|
||||||
"dlink": "1",
|
|
||||||
"media": "1",
|
|
||||||
"origin": "dlna",
|
|
||||||
}
|
|
||||||
resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(param)
|
|
||||||
}, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &model.Link{
|
|
||||||
URL: utils.Json.Get(resp, "info", "dlink").ToString(),
|
|
||||||
Header: http.Header{
|
|
||||||
"User-Agent": []string{d.CustomCrackUA},
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
|
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
|
||||||
params := map[string]string{
|
params := map[string]string{
|
||||||
"method": "filemanager",
|
"method": "filemanager",
|
||||||
@ -271,74 +229,34 @@ func joinTime(form map[string]string, ctime, mtime int64) {
|
|||||||
form["local_ctime"] = strconv.FormatInt(ctime, 10)
|
form["local_ctime"] = strconv.FormatInt(ctime, 10)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func updateObjMd5(obj model.Obj, userAgent, u string) {
|
||||||
|
object := model.GetRawObject(obj)
|
||||||
|
if object != nil {
|
||||||
|
req, _ := http.NewRequest(http.MethodHead, u, nil)
|
||||||
|
req.Header.Add("User-Agent", userAgent)
|
||||||
|
resp, _ := base.HttpClient.Do(req)
|
||||||
|
if resp != nil {
|
||||||
|
contentMd5 := resp.Header.Get("Content-Md5")
|
||||||
|
object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
DefaultSliceSize int64 = 4 * utils.MB
|
DefaultSliceSize int64 = 4 * utils.MB
|
||||||
VipSliceSize int64 = 16 * utils.MB
|
VipSliceSize = 16 * utils.MB
|
||||||
SVipSliceSize int64 = 32 * utils.MB
|
SVipSliceSize = 32 * utils.MB
|
||||||
|
|
||||||
MaxSliceNum = 2048 // 文档写的是 1024/没写 ,但实际测试是 2048
|
|
||||||
SliceStep int64 = 1 * utils.MB
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
func (d *BaiduNetdisk) getSliceSize() int64 {
|
||||||
// 非会员固定为 4MB
|
|
||||||
if d.vipType == 0 {
|
|
||||||
if d.CustomUploadPartSize != 0 {
|
|
||||||
log.Warnf("CustomUploadPartSize is not supported for non-vip user, use DefaultSliceSize")
|
|
||||||
}
|
|
||||||
if filesize > MaxSliceNum*DefaultSliceSize {
|
|
||||||
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
|
|
||||||
}
|
|
||||||
|
|
||||||
return DefaultSliceSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.CustomUploadPartSize != 0 {
|
|
||||||
if d.CustomUploadPartSize < DefaultSliceSize {
|
|
||||||
log.Warnf("CustomUploadPartSize(%d) is less than DefaultSliceSize(%d), use DefaultSliceSize", d.CustomUploadPartSize, DefaultSliceSize)
|
|
||||||
return DefaultSliceSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.vipType == 1 && d.CustomUploadPartSize > VipSliceSize {
|
|
||||||
log.Warnf("CustomUploadPartSize(%d) is greater than VipSliceSize(%d), use VipSliceSize", d.CustomUploadPartSize, VipSliceSize)
|
|
||||||
return VipSliceSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if d.vipType == 2 && d.CustomUploadPartSize > SVipSliceSize {
|
|
||||||
log.Warnf("CustomUploadPartSize(%d) is greater than SVipSliceSize(%d), use SVipSliceSize", d.CustomUploadPartSize, SVipSliceSize)
|
|
||||||
return SVipSliceSize
|
|
||||||
}
|
|
||||||
|
|
||||||
return d.CustomUploadPartSize
|
|
||||||
}
|
|
||||||
|
|
||||||
maxSliceSize := DefaultSliceSize
|
|
||||||
|
|
||||||
switch d.vipType {
|
switch d.vipType {
|
||||||
case 1:
|
case 1:
|
||||||
maxSliceSize = VipSliceSize
|
return VipSliceSize
|
||||||
case 2:
|
case 2:
|
||||||
maxSliceSize = SVipSliceSize
|
return SVipSliceSize
|
||||||
|
default:
|
||||||
|
return DefaultSliceSize
|
||||||
}
|
}
|
||||||
|
|
||||||
// upload on low bandwidth
|
|
||||||
if d.LowBandwithUploadMode {
|
|
||||||
size := DefaultSliceSize
|
|
||||||
|
|
||||||
for size <= maxSliceSize {
|
|
||||||
if filesize <= MaxSliceNum*size {
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
size += SliceStep
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if filesize > MaxSliceNum*maxSliceSize {
|
|
||||||
log.Warnf("File size(%d) is too large, may cause upload failure", filesize)
|
|
||||||
}
|
|
||||||
|
|
||||||
return maxSliceSize
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// func encodeURIComponent(str string) string {
|
// func encodeURIComponent(str string) string {
|
||||||
@ -346,40 +264,3 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 {
|
|||||||
// r = strings.ReplaceAll(r, "+", "%20")
|
// r = strings.ReplaceAll(r, "+", "%20")
|
||||||
// return r
|
// return r
|
||||||
// }
|
// }
|
||||||
|
|
||||||
func DecryptMd5(encryptMd5 string) string {
|
|
||||||
if _, err := hex.DecodeString(encryptMd5); err == nil {
|
|
||||||
return encryptMd5
|
|
||||||
}
|
|
||||||
|
|
||||||
var out strings.Builder
|
|
||||||
out.Grow(len(encryptMd5))
|
|
||||||
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
|
|
||||||
if i == 9 {
|
|
||||||
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
|
|
||||||
} else {
|
|
||||||
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
|
|
||||||
}
|
|
||||||
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
|
|
||||||
}
|
|
||||||
|
|
||||||
encryptMd5 = out.String()
|
|
||||||
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
|
|
||||||
}
|
|
||||||
|
|
||||||
func EncryptMd5(originalMd5 string) string {
|
|
||||||
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
|
|
||||||
|
|
||||||
var out strings.Builder
|
|
||||||
out.Grow(len(reversed))
|
|
||||||
for i, n := 0, int64(0); i < len(reversed); i++ {
|
|
||||||
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
|
|
||||||
n ^= int64(15 & i)
|
|
||||||
if i == 9 {
|
|
||||||
out.WriteRune(rune(n) + 'g')
|
|
||||||
} else {
|
|
||||||
out.WriteString(strconv.FormatInt(n, 16))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out.String()
|
|
||||||
}
|
|
||||||
|
@ -13,8 +13,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"golang.org/x/sync/semaphore"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
@ -29,10 +27,9 @@ type BaiduPhoto struct {
|
|||||||
model.Storage
|
model.Storage
|
||||||
Addition
|
Addition
|
||||||
|
|
||||||
// AccessToken string
|
AccessToken string
|
||||||
Uk int64
|
Uk int64
|
||||||
bdstoken string
|
root model.Obj
|
||||||
root model.Obj
|
|
||||||
|
|
||||||
uploadThread int
|
uploadThread int
|
||||||
}
|
}
|
||||||
@ -51,9 +48,9 @@ func (d *BaiduPhoto) Init(ctx context.Context) error {
|
|||||||
d.uploadThread, d.UploadThread = 3, "3"
|
d.uploadThread, d.UploadThread = 3, "3"
|
||||||
}
|
}
|
||||||
|
|
||||||
// if err := d.refreshToken(); err != nil {
|
if err := d.refreshToken(); err != nil {
|
||||||
// return err
|
return err
|
||||||
// }
|
}
|
||||||
|
|
||||||
// root
|
// root
|
||||||
if d.AlbumID != "" {
|
if d.AlbumID != "" {
|
||||||
@ -76,10 +73,6 @@ func (d *BaiduPhoto) Init(ctx context.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
d.bdstoken, err = d.getBDStoken()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
d.Uk, err = strconv.ParseInt(info.YouaID, 10, 64)
|
d.Uk, err = strconv.ParseInt(info.YouaID, 10, 64)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -89,7 +82,7 @@ func (d *BaiduPhoto) GetRoot(ctx context.Context) (model.Obj, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *BaiduPhoto) Drop(ctx context.Context) error {
|
func (d *BaiduPhoto) Drop(ctx context.Context) error {
|
||||||
// d.AccessToken = ""
|
d.AccessToken = ""
|
||||||
d.Uk = 0
|
d.Uk = 0
|
||||||
d.root = nil
|
d.root = nil
|
||||||
return nil
|
return nil
|
||||||
@ -144,18 +137,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
|
|||||||
case *File:
|
case *File:
|
||||||
return d.linkFile(ctx, file, args)
|
return d.linkFile(ctx, file, args)
|
||||||
case *AlbumFile:
|
case *AlbumFile:
|
||||||
// 处理共享相册
|
f, err := d.CopyAlbumFile(ctx, file)
|
||||||
if d.Uk != file.Uk {
|
if err != nil {
|
||||||
// 有概率无法获取到链接
|
return nil, err
|
||||||
// return d.linkAlbum(ctx, file, args)
|
|
||||||
|
|
||||||
f, err := d.CopyAlbumFile(ctx, file)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return d.linkFile(ctx, f, args)
|
|
||||||
}
|
}
|
||||||
return d.linkFile(ctx, &file.File, args)
|
return d.linkFile(ctx, f, args)
|
||||||
|
// 有概率无法获取到链接
|
||||||
|
//return d.linkAlbum(ctx, file, args)
|
||||||
}
|
}
|
||||||
return nil, errs.NotFile
|
return nil, errs.NotFile
|
||||||
}
|
}
|
||||||
@ -273,7 +261,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
if i == count {
|
if i == count {
|
||||||
byteSize = lastBlockSize
|
byteSize = lastBlockSize
|
||||||
}
|
}
|
||||||
_, err := utils.CopyWithBufferN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
|
_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -298,12 +286,11 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 尝试获取之前的进度
|
// 尝试获取之前的进度
|
||||||
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, strconv.FormatInt(d.Uk, 10), contentMd5)
|
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
|
||||||
if !ok {
|
if !ok {
|
||||||
_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
|
_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
|
||||||
r.SetContext(ctx)
|
r.SetContext(ctx)
|
||||||
r.SetFormData(params)
|
r.SetFormData(params)
|
||||||
r.SetQueryParam("bdstoken", d.bdstoken)
|
|
||||||
}, &precreateResp)
|
}, &precreateResp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -316,7 +303,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
retry.Attempts(3),
|
retry.Attempts(3),
|
||||||
retry.Delay(time.Second),
|
retry.Delay(time.Second),
|
||||||
retry.DelayType(retry.BackOffDelay))
|
retry.DelayType(retry.BackOffDelay))
|
||||||
sem := semaphore.NewWeighted(3)
|
|
||||||
for i, partseq := range precreateResp.BlockList {
|
for i, partseq := range precreateResp.BlockList {
|
||||||
if utils.IsCanceled(upCtx) {
|
if utils.IsCanceled(upCtx) {
|
||||||
break
|
break
|
||||||
@ -328,22 +314,17 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
}
|
}
|
||||||
|
|
||||||
threadG.Go(func(ctx context.Context) error {
|
threadG.Go(func(ctx context.Context) error {
|
||||||
if err = sem.Acquire(ctx, 1); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer sem.Release(1)
|
|
||||||
uploadParams := map[string]string{
|
uploadParams := map[string]string{
|
||||||
"method": "upload",
|
"method": "upload",
|
||||||
"path": params["path"],
|
"path": params["path"],
|
||||||
"partseq": fmt.Sprint(partseq),
|
"partseq": fmt.Sprint(partseq),
|
||||||
"uploadid": precreateResp.UploadID,
|
"uploadid": precreateResp.UploadID,
|
||||||
"app_id": "16051585",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
|
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
|
||||||
r.SetContext(ctx)
|
r.SetContext(ctx)
|
||||||
r.SetQueryParams(uploadParams)
|
r.SetQueryParams(uploadParams)
|
||||||
r.SetFileReader("file", stream.GetName(),
|
r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
|
||||||
driver.NewLimitedUploadStream(ctx, io.NewSectionReader(tempFile, offset, byteSize)))
|
|
||||||
}, nil)
|
}, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -356,7 +337,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
if err = threadG.Wait(); err != nil {
|
if err = threadG.Wait(); err != nil {
|
||||||
if errors.Is(err, context.Canceled) {
|
if errors.Is(err, context.Canceled) {
|
||||||
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
|
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
|
||||||
base.SaveUploadProgress(d, strconv.FormatInt(d.Uk, 10), contentMd5)
|
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -366,7 +347,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
|
|||||||
_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
|
_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
|
||||||
r.SetContext(ctx)
|
r.SetContext(ctx)
|
||||||
r.SetFormData(params)
|
r.SetFormData(params)
|
||||||
r.SetQueryParam("bdstoken", d.bdstoken)
|
|
||||||
}, &precreateResp)
|
}, &precreateResp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -6,14 +6,13 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Addition struct {
|
type Addition struct {
|
||||||
// RefreshToken string `json:"refresh_token" required:"true"`
|
RefreshToken string `json:"refresh_token" required:"true"`
|
||||||
Cookie string `json:"cookie" required:"true"`
|
ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
|
||||||
ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
|
AlbumID string `json:"album_id"`
|
||||||
AlbumID string `json:"album_id"`
|
|
||||||
//AlbumPassword string `json:"album_password"`
|
//AlbumPassword string `json:"album_password"`
|
||||||
DeleteOrigin bool `json:"delete_origin"`
|
DeleteOrigin bool `json:"delete_origin"`
|
||||||
// ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
|
||||||
// ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
|
||||||
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -72,7 +72,7 @@ func (c *File) Thumb() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *File) GetHash() utils.HashInfo {
|
func (c *File) GetHash() utils.HashInfo {
|
||||||
return utils.NewHashInfo(utils.MD5, DecryptMd5(c.Md5))
|
return utils.NewHashInfo(utils.MD5, c.Md5)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*相册部分*/
|
/*相册部分*/
|
||||||
|
@ -2,15 +2,13 @@ package baiduphoto
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
)
|
)
|
||||||
@ -23,10 +21,9 @@ const (
|
|||||||
FILE_API_URL_V2 = API_URL + "/file/v2"
|
FILE_API_URL_V2 = API_URL + "/file/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
||||||
req := client.R().
|
req := base.RestyClient.R().
|
||||||
// SetQueryParam("access_token", d.AccessToken)
|
SetQueryParam("access_token", d.AccessToken)
|
||||||
SetHeader("Cookie", d.Cookie)
|
|
||||||
if callback != nil {
|
if callback != nil {
|
||||||
callback(req)
|
callback(req)
|
||||||
}
|
}
|
||||||
@ -48,10 +45,10 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c
|
|||||||
return nil, fmt.Errorf("no shared albums found")
|
return nil, fmt.Errorf("no shared albums found")
|
||||||
case 50100:
|
case 50100:
|
||||||
return nil, fmt.Errorf("illegal title, only supports 50 characters")
|
return nil, fmt.Errorf("illegal title, only supports 50 characters")
|
||||||
// case -6:
|
case -6:
|
||||||
// if err = d.refreshToken(); err != nil {
|
if err = d.refreshToken(); err != nil {
|
||||||
// return nil, err
|
return nil, err
|
||||||
// }
|
}
|
||||||
default:
|
default:
|
||||||
return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
|
return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
|
||||||
}
|
}
|
||||||
@ -66,36 +63,36 @@ func (d *BaiduPhoto) Request(client *resty.Client, furl string, method string, c
|
|||||||
// return res.Body(), nil
|
// return res.Body(), nil
|
||||||
//}
|
//}
|
||||||
|
|
||||||
// func (d *BaiduPhoto) refreshToken() error {
|
func (d *BaiduPhoto) refreshToken() error {
|
||||||
// u := "https://openapi.baidu.com/oauth/2.0/token"
|
u := "https://openapi.baidu.com/oauth/2.0/token"
|
||||||
// var resp base.TokenResp
|
var resp base.TokenResp
|
||||||
// var e TokenErrResp
|
var e TokenErrResp
|
||||||
// _, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{
|
_, err := base.RestyClient.R().SetResult(&resp).SetError(&e).SetQueryParams(map[string]string{
|
||||||
// "grant_type": "refresh_token",
|
"grant_type": "refresh_token",
|
||||||
// "refresh_token": d.RefreshToken,
|
"refresh_token": d.RefreshToken,
|
||||||
// "client_id": d.ClientID,
|
"client_id": d.ClientID,
|
||||||
// "client_secret": d.ClientSecret,
|
"client_secret": d.ClientSecret,
|
||||||
// }).Get(u)
|
}).Get(u)
|
||||||
// if err != nil {
|
if err != nil {
|
||||||
// return err
|
return err
|
||||||
// }
|
}
|
||||||
// if e.ErrorMsg != "" {
|
if e.ErrorMsg != "" {
|
||||||
// return &e
|
return &e
|
||||||
// }
|
}
|
||||||
// if resp.RefreshToken == "" {
|
if resp.RefreshToken == "" {
|
||||||
// return errs.EmptyToken
|
return errs.EmptyToken
|
||||||
// }
|
}
|
||||||
// d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken
|
d.AccessToken, d.RefreshToken = resp.AccessToken, resp.RefreshToken
|
||||||
// op.MustSaveDriverStorage(d)
|
op.MustSaveDriverStorage(d)
|
||||||
// return nil
|
return nil
|
||||||
// }
|
}
|
||||||
|
|
||||||
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
||||||
return d.Request(base.RestyClient, furl, http.MethodGet, callback, resp)
|
return d.Request(furl, http.MethodGet, callback, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
|
||||||
return d.Request(base.RestyClient, furl, http.MethodPost, callback, resp)
|
return d.Request(furl, http.MethodPost, callback, resp)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取所有文件
|
// 获取所有文件
|
||||||
@ -341,29 +338,24 @@ func (d *BaiduPhoto) linkAlbum(ctx context.Context, file *AlbumFile, args model.
|
|||||||
headers["X-Forwarded-For"] = args.IP
|
headers["X-Forwarded-For"] = args.IP
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := d.Request(base.NoRedirectClient, ALBUM_API_URL+"/download", http.MethodHead, func(r *resty.Request) {
|
res, err := base.NoRedirectClient.R().
|
||||||
r.SetContext(ctx)
|
SetContext(ctx).
|
||||||
r.SetHeaders(headers)
|
SetHeaders(headers).
|
||||||
r.SetQueryParams(map[string]string{
|
SetQueryParams(map[string]string{
|
||||||
"fsid": fmt.Sprint(file.Fsid),
|
"access_token": d.AccessToken,
|
||||||
"album_id": file.AlbumID,
|
"fsid": fmt.Sprint(file.Fsid),
|
||||||
"tid": fmt.Sprint(file.Tid),
|
"album_id": file.AlbumID,
|
||||||
"uk": fmt.Sprint(file.Uk),
|
"tid": fmt.Sprint(file.Tid),
|
||||||
})
|
"uk": fmt.Sprint(file.Uk),
|
||||||
}, nil)
|
}).
|
||||||
|
Head(ALBUM_API_URL + "/download")
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if resp.StatusCode() != 302 {
|
|
||||||
return nil, fmt.Errorf("not found 302 redirect")
|
|
||||||
}
|
|
||||||
|
|
||||||
location := resp.Header().Get("Location")
|
|
||||||
|
|
||||||
link := &model.Link{
|
link := &model.Link{
|
||||||
URL: location,
|
URL: res.Header().Get("location"),
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
"User-Agent": []string{headers["User-Agent"]},
|
"User-Agent": []string{headers["User-Agent"]},
|
||||||
"Referer": []string{"https://photo.baidu.com/"},
|
"Referer": []string{"https://photo.baidu.com/"},
|
||||||
@ -393,24 +385,10 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
|
|||||||
"fsid": fmt.Sprint(file.Fsid),
|
"fsid": fmt.Sprint(file.Fsid),
|
||||||
})
|
})
|
||||||
}, &downloadUrl)
|
}, &downloadUrl)
|
||||||
|
|
||||||
// resp, err := d.Request(base.NoRedirectClient, FILE_API_URL_V1+"/download", http.MethodHead, func(r *resty.Request) {
|
|
||||||
// r.SetContext(ctx)
|
|
||||||
// r.SetHeaders(headers)
|
|
||||||
// r.SetQueryParams(map[string]string{
|
|
||||||
// "fsid": fmt.Sprint(file.Fsid),
|
|
||||||
// })
|
|
||||||
// }, nil)
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// if resp.StatusCode() != 302 {
|
|
||||||
// return nil, fmt.Errorf("not found 302 redirect")
|
|
||||||
// }
|
|
||||||
|
|
||||||
// location := resp.Header().Get("Location")
|
|
||||||
link := &model.Link{
|
link := &model.Link{
|
||||||
URL: downloadUrl.Dlink,
|
URL: downloadUrl.Dlink,
|
||||||
Header: http.Header{
|
Header: http.Header{
|
||||||
@ -475,55 +453,3 @@ func (d *BaiduPhoto) uInfo() (*UInfo, error) {
|
|||||||
}
|
}
|
||||||
return &info, nil
|
return &info, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *BaiduPhoto) getBDStoken() (string, error) {
|
|
||||||
var info struct {
|
|
||||||
Result struct {
|
|
||||||
Bdstoken string `json:"bdstoken"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
Uk int64 `json:"uk"`
|
|
||||||
} `json:"result"`
|
|
||||||
}
|
|
||||||
_, err := d.Get("https://pan.baidu.com/api/gettemplatevariable?fields=[%22bdstoken%22,%22token%22,%22uk%22]", nil, &info)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return info.Result.Bdstoken, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func DecryptMd5(encryptMd5 string) string {
|
|
||||||
if _, err := hex.DecodeString(encryptMd5); err == nil {
|
|
||||||
return encryptMd5
|
|
||||||
}
|
|
||||||
|
|
||||||
var out strings.Builder
|
|
||||||
out.Grow(len(encryptMd5))
|
|
||||||
for i, n := 0, int64(0); i < len(encryptMd5); i++ {
|
|
||||||
if i == 9 {
|
|
||||||
n = int64(unicode.ToLower(rune(encryptMd5[i])) - 'g')
|
|
||||||
} else {
|
|
||||||
n, _ = strconv.ParseInt(encryptMd5[i:i+1], 16, 64)
|
|
||||||
}
|
|
||||||
out.WriteString(strconv.FormatInt(n^int64(15&i), 16))
|
|
||||||
}
|
|
||||||
|
|
||||||
encryptMd5 = out.String()
|
|
||||||
return encryptMd5[8:16] + encryptMd5[:8] + encryptMd5[24:32] + encryptMd5[16:24]
|
|
||||||
}
|
|
||||||
|
|
||||||
func EncryptMd5(originalMd5 string) string {
|
|
||||||
reversed := originalMd5[8:16] + originalMd5[:8] + originalMd5[24:32] + originalMd5[16:24]
|
|
||||||
|
|
||||||
var out strings.Builder
|
|
||||||
out.Grow(len(reversed))
|
|
||||||
for i, n := 0, int64(0); i < len(reversed); i++ {
|
|
||||||
n, _ = strconv.ParseInt(reversed[i:i+1], 16, 64)
|
|
||||||
n ^= int64(15 & i)
|
|
||||||
if i == 9 {
|
|
||||||
out.WriteRune(rune(n) + 'g')
|
|
||||||
} else {
|
|
||||||
out.WriteString(strconv.FormatInt(n, 16))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out.String()
|
|
||||||
}
|
|
||||||
|
@ -6,7 +6,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/net"
|
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -27,15 +26,24 @@ func InitClient() {
|
|||||||
NoRedirectClient.SetHeader("user-agent", UserAgent)
|
NoRedirectClient.SetHeader("user-agent", UserAgent)
|
||||||
|
|
||||||
RestyClient = NewRestyClient()
|
RestyClient = NewRestyClient()
|
||||||
HttpClient = net.NewHttpClient()
|
HttpClient = NewHttpClient()
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewRestyClient() *resty.Client {
|
func NewRestyClient() *resty.Client {
|
||||||
client := resty.New().
|
client := resty.New().
|
||||||
SetHeader("user-agent", UserAgent).
|
SetHeader("user-agent", UserAgent).
|
||||||
SetRetryCount(3).
|
SetRetryCount(3).
|
||||||
SetRetryResetReaders(true).
|
|
||||||
SetTimeout(DefaultTimeout).
|
SetTimeout(DefaultTimeout).
|
||||||
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||||
return client
|
return client
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewHttpClient() *http.Client {
|
||||||
|
return &http.Client{
|
||||||
|
Timeout: time.Hour * 48,
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Proxy: http.ProxyFromEnvironment,
|
||||||
|
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -1,306 +0,0 @@
|
|||||||
package chaoxing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/cron"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
"google.golang.org/appengine/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ChaoXing struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
cron *cron.Cron
|
|
||||||
config driver.Config
|
|
||||||
conf Conf
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Config() driver.Config {
|
|
||||||
return d.config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) refreshCookie() error {
|
|
||||||
cookie, err := d.Login()
|
|
||||||
if err != nil {
|
|
||||||
d.Status = err.Error()
|
|
||||||
op.MustSaveDriverStorage(d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
d.Addition.Cookie = cookie
|
|
||||||
op.MustSaveDriverStorage(d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Init(ctx context.Context) error {
|
|
||||||
err := d.refreshCookie()
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf(ctx, err.Error())
|
|
||||||
}
|
|
||||||
d.cron = cron.NewCron(time.Hour * 12)
|
|
||||||
d.cron.Do(func() {
|
|
||||||
err = d.refreshCookie()
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf(ctx, err.Error())
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Drop(ctx context.Context) error {
|
|
||||||
if d.cron != nil {
|
|
||||||
d.cron.Stop()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
files, err := d.GetFiles(dir.GetID())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
|
||||||
return fileToObj(src), nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
var resp DownResp
|
|
||||||
ua := d.conf.ua
|
|
||||||
fileId := strings.Split(file.GetID(), "$")[1]
|
|
||||||
_, err := d.requestDownload("/screen/note_note/files/status/"+fileId, http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetHeader("User-Agent", ua)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
u := resp.Download
|
|
||||||
return &model.Link{
|
|
||||||
URL: u,
|
|
||||||
Header: http.Header{
|
|
||||||
"Cookie": []string{d.Cookie},
|
|
||||||
"Referer": []string{d.conf.referer},
|
|
||||||
"User-Agent": []string{ua},
|
|
||||||
},
|
|
||||||
Concurrency: 2,
|
|
||||||
PartSize: 10 * utils.MB,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
|
||||||
query := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"name": dirName,
|
|
||||||
"pid": parentDir.GetID(),
|
|
||||||
}
|
|
||||||
var resp ListFileResp
|
|
||||||
_, err := d.request("/pc/resource/addResourceFolder", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Result != 1 {
|
|
||||||
msg := fmt.Sprintf("error:%s", resp.Msg)
|
|
||||||
return errors.New(msg)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
query := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"folderIds": srcObj.GetID(),
|
|
||||||
"targetId": dstDir.GetID(),
|
|
||||||
}
|
|
||||||
if !srcObj.IsDir() {
|
|
||||||
query = map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"recIds": strings.Split(srcObj.GetID(), "$")[0],
|
|
||||||
"targetId": dstDir.GetID(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
var resp ListFileResp
|
|
||||||
_, err := d.request("/pc/resource/moveResource", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !resp.Status {
|
|
||||||
msg := fmt.Sprintf("error:%s", resp.Msg)
|
|
||||||
return errors.New(msg)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
|
||||||
query := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"folderId": srcObj.GetID(),
|
|
||||||
"name": newName,
|
|
||||||
}
|
|
||||||
path := "/pc/resource/updateResourceFolderName"
|
|
||||||
if !srcObj.IsDir() {
|
|
||||||
// path = "/pc/resource/updateResourceFileName"
|
|
||||||
// query = map[string]string{
|
|
||||||
// "bbsid": d.Addition.Bbsid,
|
|
||||||
// "recIds": strings.Split(srcObj.GetID(), "$")[0],
|
|
||||||
// "name": newName,
|
|
||||||
// }
|
|
||||||
return errors.New("此网盘不支持修改文件名")
|
|
||||||
}
|
|
||||||
var resp ListFileResp
|
|
||||||
_, err := d.request(path, http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Result != 1 {
|
|
||||||
msg := fmt.Sprintf("error:%s", resp.Msg)
|
|
||||||
return errors.New(msg)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
// TODO copy obj, optional
|
|
||||||
return errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
query := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"folderIds": obj.GetID(),
|
|
||||||
}
|
|
||||||
path := "/pc/resource/deleteResourceFolder"
|
|
||||||
var resp ListFileResp
|
|
||||||
if !obj.IsDir() {
|
|
||||||
path = "/pc/resource/deleteResourceFile"
|
|
||||||
query = map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"recIds": strings.Split(obj.GetID(), "$")[0],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err := d.request(path, http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Result != 1 {
|
|
||||||
msg := fmt.Sprintf("error:%s", resp.Msg)
|
|
||||||
return errors.New(msg)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
|
|
||||||
var resp UploadDataRsp
|
|
||||||
_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.Result != 1 {
|
|
||||||
return errors.New("get upload data error")
|
|
||||||
}
|
|
||||||
body := &bytes.Buffer{}
|
|
||||||
writer := multipart.NewWriter(body)
|
|
||||||
filePart, err := writer.CreateFormFile("file", file.GetName())
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, err = utils.CopyWithBuffer(filePart, file)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = writer.WriteField("_token", resp.Msg.Token)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println("Error writing param2 to request body:", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = writer.Close()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
r := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
|
|
||||||
Reader: &driver.SimpleReaderWithSize{
|
|
||||||
Reader: body,
|
|
||||||
Size: int64(body.Len()),
|
|
||||||
},
|
|
||||||
UpdateProgress: up,
|
|
||||||
})
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
|
||||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
|
|
||||||
resps, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resps.Body.Close()
|
|
||||||
bodys, err := io.ReadAll(resps.Body)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var fileRsp UploadFileDataRsp
|
|
||||||
err = json.Unmarshal(bodys, &fileRsp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if fileRsp.Msg != "success" {
|
|
||||||
return errors.New(fileRsp.Msg)
|
|
||||||
}
|
|
||||||
uploadDoneParam := UploadDoneParam{Key: fileRsp.ObjectID, Cataid: "100000019", Param: fileRsp.Data}
|
|
||||||
params, err := json.Marshal(uploadDoneParam)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
query := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"pid": dstDir.GetID(),
|
|
||||||
"type": "yunpan",
|
|
||||||
"params": url.QueryEscape("[" + string(params) + "]"),
|
|
||||||
}
|
|
||||||
var respd ListFileResp
|
|
||||||
_, err = d.request("/pc/resource/addResource", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &respd)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if respd.Result != 1 {
|
|
||||||
msg := fmt.Sprintf("error:%v", resp.Msg)
|
|
||||||
return errors.New(msg)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*ChaoXing)(nil)
|
|
@ -1,47 +0,0 @@
|
|||||||
package chaoxing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
// 此程序挂载的是超星小组网盘,需要代理才能使用;
|
|
||||||
// 登录超星后进入个人空间,进入小组,新建小组,点击进去。
|
|
||||||
// url中就有bbsid的参数,系统限制单文件大小2G,没有总容量限制
|
|
||||||
type Addition struct {
|
|
||||||
// 超星用户名及密码
|
|
||||||
UserName string `json:"user_name" required:"true"`
|
|
||||||
Password string `json:"password" required:"true"`
|
|
||||||
// 从自己新建的小组url里获取
|
|
||||||
Bbsid string `json:"bbsid" required:"true"`
|
|
||||||
driver.RootID
|
|
||||||
// 可不填,程序会自动登录获取
|
|
||||||
Cookie string `json:"cookie"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Conf struct {
|
|
||||||
ua string
|
|
||||||
referer string
|
|
||||||
api string
|
|
||||||
DowloadApi string
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
op.RegisterDriver(func() driver.Driver {
|
|
||||||
return &ChaoXing{
|
|
||||||
config: driver.Config{
|
|
||||||
Name: "ChaoXingGroupDrive",
|
|
||||||
OnlyProxy: true,
|
|
||||||
OnlyLocal: false,
|
|
||||||
DefaultRoot: "-1",
|
|
||||||
NoOverwriteUpload: true,
|
|
||||||
},
|
|
||||||
conf: Conf{
|
|
||||||
ua: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) quark-cloud-drive/2.5.20 Chrome/100.0.4896.160 Electron/18.3.5.4-b478491100 Safari/537.36 Channel/pckk_other_ch",
|
|
||||||
referer: "https://chaoxing.com/",
|
|
||||||
api: "https://groupweb.chaoxing.com",
|
|
||||||
DowloadApi: "https://noteyd.chaoxing.com",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
@ -1,276 +0,0 @@
|
|||||||
package chaoxing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Resp struct {
|
|
||||||
Result int `json:"result"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type UserAuth struct {
|
|
||||||
GroupAuth struct {
|
|
||||||
AddData int `json:"addData"`
|
|
||||||
AddDataFolder int `json:"addDataFolder"`
|
|
||||||
AddLebel int `json:"addLebel"`
|
|
||||||
AddManager int `json:"addManager"`
|
|
||||||
AddMem int `json:"addMem"`
|
|
||||||
AddTopicFolder int `json:"addTopicFolder"`
|
|
||||||
AnonymousAddReply int `json:"anonymousAddReply"`
|
|
||||||
AnonymousAddTopic int `json:"anonymousAddTopic"`
|
|
||||||
BatchOperation int `json:"batchOperation"`
|
|
||||||
DelData int `json:"delData"`
|
|
||||||
DelDataFolder int `json:"delDataFolder"`
|
|
||||||
DelMem int `json:"delMem"`
|
|
||||||
DelTopicFolder int `json:"delTopicFolder"`
|
|
||||||
Dismiss int `json:"dismiss"`
|
|
||||||
ExamEnc string `json:"examEnc"`
|
|
||||||
GroupChat int `json:"groupChat"`
|
|
||||||
IsShowCircleChatButton int `json:"isShowCircleChatButton"`
|
|
||||||
IsShowCircleCloudButton int `json:"isShowCircleCloudButton"`
|
|
||||||
IsShowCompanyButton int `json:"isShowCompanyButton"`
|
|
||||||
Join int `json:"join"`
|
|
||||||
MemberShowRankSet int `json:"memberShowRankSet"`
|
|
||||||
ModifyDataFolder int `json:"modifyDataFolder"`
|
|
||||||
ModifyExpose int `json:"modifyExpose"`
|
|
||||||
ModifyName int `json:"modifyName"`
|
|
||||||
ModifyShowPic int `json:"modifyShowPic"`
|
|
||||||
ModifyTopicFolder int `json:"modifyTopicFolder"`
|
|
||||||
ModifyVisibleState int `json:"modifyVisibleState"`
|
|
||||||
OnlyMgrScoreSet int `json:"onlyMgrScoreSet"`
|
|
||||||
Quit int `json:"quit"`
|
|
||||||
SendNotice int `json:"sendNotice"`
|
|
||||||
ShowActivityManage int `json:"showActivityManage"`
|
|
||||||
ShowActivitySet int `json:"showActivitySet"`
|
|
||||||
ShowAttentionSet int `json:"showAttentionSet"`
|
|
||||||
ShowAutoClearStatus int `json:"showAutoClearStatus"`
|
|
||||||
ShowBarcode int `json:"showBarcode"`
|
|
||||||
ShowChatRoomSet int `json:"showChatRoomSet"`
|
|
||||||
ShowCircleActivitySet int `json:"showCircleActivitySet"`
|
|
||||||
ShowCircleSet int `json:"showCircleSet"`
|
|
||||||
ShowCmem int `json:"showCmem"`
|
|
||||||
ShowDataFolder int `json:"showDataFolder"`
|
|
||||||
ShowDelReason int `json:"showDelReason"`
|
|
||||||
ShowForward int `json:"showForward"`
|
|
||||||
ShowGroupChat int `json:"showGroupChat"`
|
|
||||||
ShowGroupChatSet int `json:"showGroupChatSet"`
|
|
||||||
ShowGroupSquareSet int `json:"showGroupSquareSet"`
|
|
||||||
ShowLockAddSet int `json:"showLockAddSet"`
|
|
||||||
ShowManager int `json:"showManager"`
|
|
||||||
ShowManagerIdentitySet int `json:"showManagerIdentitySet"`
|
|
||||||
ShowNeedDelReasonSet int `json:"showNeedDelReasonSet"`
|
|
||||||
ShowNotice int `json:"showNotice"`
|
|
||||||
ShowOnlyManagerReplySet int `json:"showOnlyManagerReplySet"`
|
|
||||||
ShowRank int `json:"showRank"`
|
|
||||||
ShowRank2 int `json:"showRank2"`
|
|
||||||
ShowRecycleBin int `json:"showRecycleBin"`
|
|
||||||
ShowReplyByClass int `json:"showReplyByClass"`
|
|
||||||
ShowReplyNeedCheck int `json:"showReplyNeedCheck"`
|
|
||||||
ShowSignbanSet int `json:"showSignbanSet"`
|
|
||||||
ShowSpeechSet int `json:"showSpeechSet"`
|
|
||||||
ShowTopicCheck int `json:"showTopicCheck"`
|
|
||||||
ShowTopicNeedCheck int `json:"showTopicNeedCheck"`
|
|
||||||
ShowTransferSet int `json:"showTransferSet"`
|
|
||||||
} `json:"groupAuth"`
|
|
||||||
OperationAuth struct {
|
|
||||||
Add int `json:"add"`
|
|
||||||
AddTopicToFolder int `json:"addTopicToFolder"`
|
|
||||||
ChoiceSet int `json:"choiceSet"`
|
|
||||||
DelTopicFromFolder int `json:"delTopicFromFolder"`
|
|
||||||
Delete int `json:"delete"`
|
|
||||||
Reply int `json:"reply"`
|
|
||||||
ScoreSet int `json:"scoreSet"`
|
|
||||||
TopSet int `json:"topSet"`
|
|
||||||
Update int `json:"update"`
|
|
||||||
} `json:"operationAuth"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// 手机端学习通上传的文件的json内容(content字段)与网页端上传的有所不同
|
|
||||||
// 网页端json `"puid": 54321, "size": 12345`
|
|
||||||
// 手机端json `"puid": "54321". "size": "12345"`
|
|
||||||
type int_str int
|
|
||||||
|
|
||||||
// json 字符串数字和纯数字解析
|
|
||||||
func (ios *int_str) UnmarshalJSON(data []byte) error {
|
|
||||||
intValue, err := strconv.Atoi(string(bytes.Trim(data, "\"")))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*ios = int_str(intValue)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type File struct {
|
|
||||||
Cataid int `json:"cataid"`
|
|
||||||
Cfid int `json:"cfid"`
|
|
||||||
Content struct {
|
|
||||||
Cfid int `json:"cfid"`
|
|
||||||
Pid int `json:"pid"`
|
|
||||||
FolderName string `json:"folderName"`
|
|
||||||
ShareType int `json:"shareType"`
|
|
||||||
Preview string `json:"preview"`
|
|
||||||
Filetype string `json:"filetype"`
|
|
||||||
PreviewURL string `json:"previewUrl"`
|
|
||||||
IsImg bool `json:"isImg"`
|
|
||||||
ParentPath string `json:"parentPath"`
|
|
||||||
Icon string `json:"icon"`
|
|
||||||
Suffix string `json:"suffix"`
|
|
||||||
Duration int `json:"duration"`
|
|
||||||
Pantype string `json:"pantype"`
|
|
||||||
Puid int_str `json:"puid"`
|
|
||||||
Filepath string `json:"filepath"`
|
|
||||||
Crc string `json:"crc"`
|
|
||||||
Isfile bool `json:"isfile"`
|
|
||||||
Residstr string `json:"residstr"`
|
|
||||||
ObjectID string `json:"objectId"`
|
|
||||||
Extinfo string `json:"extinfo"`
|
|
||||||
Thumbnail string `json:"thumbnail"`
|
|
||||||
Creator int `json:"creator"`
|
|
||||||
ResTypeValue int `json:"resTypeValue"`
|
|
||||||
UploadDateFormat string `json:"uploadDateFormat"`
|
|
||||||
DisableOpt bool `json:"disableOpt"`
|
|
||||||
DownPath string `json:"downPath"`
|
|
||||||
Sort int `json:"sort"`
|
|
||||||
Topsort int `json:"topsort"`
|
|
||||||
Restype string `json:"restype"`
|
|
||||||
Size int_str `json:"size"`
|
|
||||||
UploadDate int64 `json:"uploadDate"`
|
|
||||||
FileSize string `json:"fileSize"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
FileID string `json:"fileId"`
|
|
||||||
} `json:"content"`
|
|
||||||
CreatorID int `json:"creatorId"`
|
|
||||||
DesID string `json:"des_id"`
|
|
||||||
ID int `json:"id"`
|
|
||||||
Inserttime int64 `json:"inserttime"`
|
|
||||||
Key string `json:"key"`
|
|
||||||
Norder int `json:"norder"`
|
|
||||||
OwnerID int `json:"ownerId"`
|
|
||||||
OwnerType int `json:"ownerType"`
|
|
||||||
Path string `json:"path"`
|
|
||||||
Rid int `json:"rid"`
|
|
||||||
Status int `json:"status"`
|
|
||||||
Topsign int `json:"topsign"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ListFileResp struct {
|
|
||||||
Msg string `json:"msg"`
|
|
||||||
Result int `json:"result"`
|
|
||||||
Status bool `json:"status"`
|
|
||||||
UserAuth UserAuth `json:"userAuth"`
|
|
||||||
List []File `json:"list"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type DownResp struct {
|
|
||||||
Msg string `json:"msg"`
|
|
||||||
Duration int `json:"duration"`
|
|
||||||
Download string `json:"download"`
|
|
||||||
FileStatus string `json:"fileStatus"`
|
|
||||||
URL string `json:"url"`
|
|
||||||
Status bool `json:"status"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type UploadDataRsp struct {
|
|
||||||
Result int `json:"result"`
|
|
||||||
Msg struct {
|
|
||||||
Puid int `json:"puid"`
|
|
||||||
Token string `json:"token"`
|
|
||||||
} `json:"msg"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type UploadFileDataRsp struct {
|
|
||||||
Result bool `json:"result"`
|
|
||||||
Msg string `json:"msg"`
|
|
||||||
Crc string `json:"crc"`
|
|
||||||
ObjectID string `json:"objectId"`
|
|
||||||
Resid int64 `json:"resid"`
|
|
||||||
Puid int `json:"puid"`
|
|
||||||
Data struct {
|
|
||||||
DisableOpt bool `json:"disableOpt"`
|
|
||||||
Resid int64 `json:"resid"`
|
|
||||||
Crc string `json:"crc"`
|
|
||||||
Puid int `json:"puid"`
|
|
||||||
Isfile bool `json:"isfile"`
|
|
||||||
Pantype string `json:"pantype"`
|
|
||||||
Size int `json:"size"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
ObjectID string `json:"objectId"`
|
|
||||||
Restype string `json:"restype"`
|
|
||||||
UploadDate int64 `json:"uploadDate"`
|
|
||||||
ModifyDate int64 `json:"modifyDate"`
|
|
||||||
UploadDateFormat string `json:"uploadDateFormat"`
|
|
||||||
Residstr string `json:"residstr"`
|
|
||||||
Suffix string `json:"suffix"`
|
|
||||||
Preview string `json:"preview"`
|
|
||||||
Thumbnail string `json:"thumbnail"`
|
|
||||||
Creator int `json:"creator"`
|
|
||||||
Duration int `json:"duration"`
|
|
||||||
IsImg bool `json:"isImg"`
|
|
||||||
PreviewURL string `json:"previewUrl"`
|
|
||||||
Filetype string `json:"filetype"`
|
|
||||||
Filepath string `json:"filepath"`
|
|
||||||
Sort int `json:"sort"`
|
|
||||||
Topsort int `json:"topsort"`
|
|
||||||
ResTypeValue int `json:"resTypeValue"`
|
|
||||||
Extinfo string `json:"extinfo"`
|
|
||||||
} `json:"data"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type UploadDoneParam struct {
|
|
||||||
Cataid string `json:"cataid"`
|
|
||||||
Key string `json:"key"`
|
|
||||||
Param struct {
|
|
||||||
DisableOpt bool `json:"disableOpt"`
|
|
||||||
Resid int64 `json:"resid"`
|
|
||||||
Crc string `json:"crc"`
|
|
||||||
Puid int `json:"puid"`
|
|
||||||
Isfile bool `json:"isfile"`
|
|
||||||
Pantype string `json:"pantype"`
|
|
||||||
Size int `json:"size"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
ObjectID string `json:"objectId"`
|
|
||||||
Restype string `json:"restype"`
|
|
||||||
UploadDate int64 `json:"uploadDate"`
|
|
||||||
ModifyDate int64 `json:"modifyDate"`
|
|
||||||
UploadDateFormat string `json:"uploadDateFormat"`
|
|
||||||
Residstr string `json:"residstr"`
|
|
||||||
Suffix string `json:"suffix"`
|
|
||||||
Preview string `json:"preview"`
|
|
||||||
Thumbnail string `json:"thumbnail"`
|
|
||||||
Creator int `json:"creator"`
|
|
||||||
Duration int `json:"duration"`
|
|
||||||
IsImg bool `json:"isImg"`
|
|
||||||
PreviewURL string `json:"previewUrl"`
|
|
||||||
Filetype string `json:"filetype"`
|
|
||||||
Filepath string `json:"filepath"`
|
|
||||||
Sort int `json:"sort"`
|
|
||||||
Topsort int `json:"topsort"`
|
|
||||||
ResTypeValue int `json:"resTypeValue"`
|
|
||||||
Extinfo string `json:"extinfo"`
|
|
||||||
} `json:"param"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func fileToObj(f File) *model.Object {
|
|
||||||
if len(f.Content.FolderName) > 0 {
|
|
||||||
return &model.Object{
|
|
||||||
ID: fmt.Sprintf("%d", f.ID),
|
|
||||||
Name: f.Content.FolderName,
|
|
||||||
Size: 0,
|
|
||||||
Modified: time.UnixMilli(f.Inserttime),
|
|
||||||
IsFolder: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
paserTime := time.UnixMilli(f.Content.UploadDate)
|
|
||||||
return &model.Object{
|
|
||||||
ID: fmt.Sprintf("%d$%s", f.ID, f.Content.FileID),
|
|
||||||
Name: f.Content.Name,
|
|
||||||
Size: int64(f.Content.Size),
|
|
||||||
Modified: paserTime,
|
|
||||||
IsFolder: false,
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,183 +0,0 @@
|
|||||||
package chaoxing
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/aes"
|
|
||||||
"crypto/cipher"
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (d *ChaoXing) requestDownload(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
|
||||||
u := d.conf.DowloadApi + pathname
|
|
||||||
req := base.RestyClient.R()
|
|
||||||
req.SetHeaders(map[string]string{
|
|
||||||
"Cookie": d.Cookie,
|
|
||||||
"Accept": "application/json, text/plain, */*",
|
|
||||||
"Referer": d.conf.referer,
|
|
||||||
})
|
|
||||||
if callback != nil {
|
|
||||||
callback(req)
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
req.SetResult(resp)
|
|
||||||
}
|
|
||||||
var e Resp
|
|
||||||
req.SetError(&e)
|
|
||||||
res, err := req.Execute(method, u)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return res.Body(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
|
||||||
u := d.conf.api + pathname
|
|
||||||
if strings.Contains(pathname, "getUploadConfig") {
|
|
||||||
u = pathname
|
|
||||||
}
|
|
||||||
req := base.RestyClient.R()
|
|
||||||
req.SetHeaders(map[string]string{
|
|
||||||
"Cookie": d.Cookie,
|
|
||||||
"Accept": "application/json, text/plain, */*",
|
|
||||||
"Referer": d.conf.referer,
|
|
||||||
})
|
|
||||||
if callback != nil {
|
|
||||||
callback(req)
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
req.SetResult(resp)
|
|
||||||
}
|
|
||||||
var e Resp
|
|
||||||
req.SetError(&e)
|
|
||||||
res, err := req.Execute(method, u)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return res.Body(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) GetFiles(parent string) ([]File, error) {
|
|
||||||
files := make([]File, 0)
|
|
||||||
query := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"folderId": parent,
|
|
||||||
"recType": "1",
|
|
||||||
}
|
|
||||||
var resp ListFileResp
|
|
||||||
_, err := d.request("/pc/resource/getResourceList", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if resp.Result != 1 {
|
|
||||||
msg := fmt.Sprintf("error code is:%d", resp.Result)
|
|
||||||
return nil, errors.New(msg)
|
|
||||||
}
|
|
||||||
if len(resp.List) > 0 {
|
|
||||||
files = append(files, resp.List...)
|
|
||||||
}
|
|
||||||
querys := map[string]string{
|
|
||||||
"bbsid": d.Addition.Bbsid,
|
|
||||||
"folderId": parent,
|
|
||||||
"recType": "2",
|
|
||||||
}
|
|
||||||
var resps ListFileResp
|
|
||||||
_, err = d.request("/pc/resource/getResourceList", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(querys)
|
|
||||||
}, &resps)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, file := range resps.List {
|
|
||||||
// 手机端超星上传的文件没有fileID字段,但ObjectID与fileID相同,可代替
|
|
||||||
if file.Content.FileID == "" {
|
|
||||||
file.Content.FileID = file.Content.ObjectID
|
|
||||||
}
|
|
||||||
files = append(files, file)
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func EncryptByAES(message, key string) (string, error) {
|
|
||||||
aesKey := []byte(key)
|
|
||||||
plainText := []byte(message)
|
|
||||||
block, err := aes.NewCipher(aesKey)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
iv := aesKey[:aes.BlockSize]
|
|
||||||
mode := cipher.NewCBCEncrypter(block, iv)
|
|
||||||
padding := aes.BlockSize - len(plainText)%aes.BlockSize
|
|
||||||
paddedText := append(plainText, byte(padding))
|
|
||||||
for i := 0; i < padding-1; i++ {
|
|
||||||
paddedText = append(paddedText, byte(padding))
|
|
||||||
}
|
|
||||||
ciphertext := make([]byte, len(paddedText))
|
|
||||||
mode.CryptBlocks(ciphertext, paddedText)
|
|
||||||
encrypted := base64.StdEncoding.EncodeToString(ciphertext)
|
|
||||||
return encrypted, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func CookiesToString(cookies []*http.Cookie) string {
|
|
||||||
var cookieStr string
|
|
||||||
for _, cookie := range cookies {
|
|
||||||
cookieStr += cookie.Name + "=" + cookie.Value + "; "
|
|
||||||
}
|
|
||||||
if len(cookieStr) > 2 {
|
|
||||||
cookieStr = cookieStr[:len(cookieStr)-2]
|
|
||||||
}
|
|
||||||
return cookieStr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *ChaoXing) Login() (string, error) {
|
|
||||||
transferKey := "u2oh6Vu^HWe4_AES"
|
|
||||||
body := &bytes.Buffer{}
|
|
||||||
writer := multipart.NewWriter(body)
|
|
||||||
uname, err := EncryptByAES(d.Addition.UserName, transferKey)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
password, err := EncryptByAES(d.Addition.Password, transferKey)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
err = writer.WriteField("uname", uname)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
err = writer.WriteField("password", password)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
err = writer.WriteField("t", "true")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
err = writer.Close()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
// Create the request
|
|
||||||
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
|
||||||
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
|
|
||||||
resp, err := http.DefaultClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
return CookiesToString(resp.Cookies()), nil
|
|
||||||
|
|
||||||
}
|
|
@ -4,12 +4,11 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
@ -72,9 +71,6 @@ func (d *Cloudreve) Link(ctx context.Context, file model.Obj, args model.LinkArg
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(dUrl, "/api") {
|
|
||||||
dUrl = d.Address + dUrl
|
|
||||||
}
|
|
||||||
return &model.Link{
|
return &model.Link{
|
||||||
URL: dUrl,
|
URL: dUrl,
|
||||||
}, nil
|
}, nil
|
||||||
@ -91,7 +87,7 @@ func (d *Cloudreve) MakeDir(ctx context.Context, parentDir model.Obj, dirName st
|
|||||||
func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
func (d *Cloudreve) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
body := base.Json{
|
body := base.Json{
|
||||||
"action": "move",
|
"action": "move",
|
||||||
"src_dir": path.Dir(srcObj.GetPath()),
|
"src_dir": srcObj.GetPath(),
|
||||||
"dst": dstDir.GetPath(),
|
"dst": dstDir.GetPath(),
|
||||||
"src": convertSrc(srcObj),
|
"src": convertSrc(srcObj),
|
||||||
}
|
}
|
||||||
@ -113,7 +109,7 @@ func (d *Cloudreve) Rename(ctx context.Context, srcObj model.Obj, newName string
|
|||||||
|
|
||||||
func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
func (d *Cloudreve) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||||
body := base.Json{
|
body := base.Json{
|
||||||
"src_dir": path.Dir(srcObj.GetPath()),
|
"src_dir": srcObj.GetPath(),
|
||||||
"dst": dstDir.GetPath(),
|
"dst": dstDir.GetPath(),
|
||||||
"src": convertSrc(srcObj),
|
"src": convertSrc(srcObj),
|
||||||
}
|
}
|
||||||
@ -134,8 +130,6 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
|||||||
if io.ReadCloser(stream) == http.NoBody {
|
if io.ReadCloser(stream) == http.NoBody {
|
||||||
return d.create(ctx, dstDir, stream)
|
return d.create(ctx, dstDir, stream)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取存储策略
|
|
||||||
var r DirectoryResp
|
var r DirectoryResp
|
||||||
err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r)
|
err := d.request(http.MethodGet, "/directory"+dstDir.GetPath(), nil, &r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -146,10 +140,8 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
|||||||
"size": stream.GetSize(),
|
"size": stream.GetSize(),
|
||||||
"name": stream.GetName(),
|
"name": stream.GetName(),
|
||||||
"policy_id": r.Policy.Id,
|
"policy_id": r.Policy.Id,
|
||||||
"last_modified": stream.ModTime().UnixMilli(),
|
"last_modified": stream.ModTime().Unix(),
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取上传会话信息
|
|
||||||
var u UploadInfo
|
var u UploadInfo
|
||||||
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
|
err = d.request(http.MethodPut, "/file/upload", func(req *resty.Request) {
|
||||||
req.SetBody(uploadBody)
|
req.SetBody(uploadBody)
|
||||||
@ -157,26 +149,36 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
var chunkSize = u.ChunkSize
|
||||||
|
var buf []byte
|
||||||
|
var chunk int
|
||||||
|
for {
|
||||||
|
var n int
|
||||||
|
buf = make([]byte, chunkSize)
|
||||||
|
n, err = io.ReadAtLeast(stream, buf, chunkSize)
|
||||||
|
if err != nil && err != io.ErrUnexpectedEOF {
|
||||||
|
if err == io.EOF {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
buf = buf[:n]
|
||||||
|
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
|
||||||
|
req.SetHeader("Content-Type", "application/octet-stream")
|
||||||
|
req.SetHeader("Content-Length", strconv.Itoa(n))
|
||||||
|
req.SetBody(buf)
|
||||||
|
}, nil)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
chunk++
|
||||||
|
|
||||||
// 根据存储方式选择分片上传的方法
|
|
||||||
switch r.Policy.Type {
|
|
||||||
case "onedrive":
|
|
||||||
err = d.upOneDrive(ctx, stream, u, up)
|
|
||||||
case "s3":
|
|
||||||
err = d.upS3(ctx, stream, u, up)
|
|
||||||
case "remote": // 从机存储
|
|
||||||
err = d.upRemote(ctx, stream, u, up)
|
|
||||||
case "local": // 本机存储
|
|
||||||
err = d.upLocal(ctx, stream, u, up)
|
|
||||||
default:
|
|
||||||
err = errs.NotImplement
|
|
||||||
}
|
}
|
||||||
if err != nil {
|
return err
|
||||||
// 删除失败的会话
|
|
||||||
_ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error {
|
func (d *Cloudreve) create(ctx context.Context, dir model.Obj, file model.Obj) error {
|
||||||
|
@ -21,12 +21,9 @@ type Policy struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type UploadInfo struct {
|
type UploadInfo struct {
|
||||||
SessionID string `json:"sessionID"`
|
SessionID string `json:"sessionID"`
|
||||||
ChunkSize int `json:"chunkSize"`
|
ChunkSize int `json:"chunkSize"`
|
||||||
Expires int `json:"expires"`
|
Expires int `json:"expires"`
|
||||||
UploadURLs []string `json:"uploadURLs"`
|
|
||||||
Credential string `json:"credential,omitempty"` // local
|
|
||||||
CompleteURL string `json:"completeURL,omitempty"` // s3
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type DirectoryResp struct {
|
type DirectoryResp struct {
|
||||||
|
@ -1,23 +1,16 @@
|
|||||||
package cloudreve
|
package cloudreve
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/setting"
|
"github.com/alist-org/alist/v3/internal/setting"
|
||||||
"github.com/alist-org/alist/v3/pkg/cookie"
|
"github.com/alist-org/alist/v3/pkg/cookie"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
json "github.com/json-iterator/go"
|
json "github.com/json-iterator/go"
|
||||||
jsoniter "github.com/json-iterator/go"
|
jsoniter "github.com/json-iterator/go"
|
||||||
@ -27,20 +20,17 @@ import (
|
|||||||
|
|
||||||
const loginPath = "/user/session"
|
const loginPath = "/user/session"
|
||||||
|
|
||||||
func (d *Cloudreve) getUA() string {
|
|
||||||
if d.CustomUA != "" {
|
|
||||||
return d.CustomUA
|
|
||||||
}
|
|
||||||
return base.UserAgent
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
|
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
|
||||||
u := d.Address + "/api/v3" + path
|
u := d.Address + "/api/v3" + path
|
||||||
|
ua := d.CustomUA
|
||||||
|
if ua == "" {
|
||||||
|
ua = base.UserAgent
|
||||||
|
}
|
||||||
req := base.RestyClient.R()
|
req := base.RestyClient.R()
|
||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"Cookie": "cloudreve-session=" + d.Cookie,
|
"Cookie": "cloudreve-session=" + d.Cookie,
|
||||||
"Accept": "application/json, text/plain, */*",
|
"Accept": "application/json, text/plain, */*",
|
||||||
"User-Agent": d.getUA(),
|
"User-Agent": ua,
|
||||||
})
|
})
|
||||||
|
|
||||||
var r Resp
|
var r Resp
|
||||||
@ -103,7 +93,7 @@ func (d *Cloudreve) login() error {
|
|||||||
if err == nil {
|
if err == nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if err.Error() != "CAPTCHA not match." {
|
if err != nil && err.Error() != "CAPTCHA not match." {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -164,11 +154,15 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
|
|||||||
if !d.Addition.EnableThumbAndFolderSize {
|
if !d.Addition.EnableThumbAndFolderSize {
|
||||||
return model.Thumbnail{}, nil
|
return model.Thumbnail{}, nil
|
||||||
}
|
}
|
||||||
|
ua := d.CustomUA
|
||||||
|
if ua == "" {
|
||||||
|
ua = base.UserAgent
|
||||||
|
}
|
||||||
req := base.NoRedirectClient.R()
|
req := base.NoRedirectClient.R()
|
||||||
req.SetHeaders(map[string]string{
|
req.SetHeaders(map[string]string{
|
||||||
"Cookie": "cloudreve-session=" + d.Cookie,
|
"Cookie": "cloudreve-session=" + d.Cookie,
|
||||||
"Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
|
"Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
|
||||||
"User-Agent": d.getUA(),
|
"User-Agent": ua,
|
||||||
})
|
})
|
||||||
resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
|
resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -178,216 +172,3 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
|
|||||||
Thumbnail: resp.Header().Get("Location"),
|
Thumbnail: resp.Header().Get("Location"),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
|
||||||
var finish int64 = 0
|
|
||||||
var chunk int = 0
|
|
||||||
DEFAULT := int64(u.ChunkSize)
|
|
||||||
for finish < stream.GetSize() {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
|
|
||||||
var byteSize = DEFAULT
|
|
||||||
left := stream.GetSize() - finish
|
|
||||||
if left < DEFAULT {
|
|
||||||
byteSize = left
|
|
||||||
}
|
|
||||||
byteData := make([]byte, byteSize)
|
|
||||||
n, err := io.ReadFull(stream, byteData)
|
|
||||||
utils.Log.Debug(err, n)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
|
|
||||||
req.SetHeader("Content-Type", "application/octet-stream")
|
|
||||||
req.SetContentLength(true)
|
|
||||||
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
|
|
||||||
req.SetHeader("User-Agent", d.getUA())
|
|
||||||
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
|
||||||
}, nil)
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
finish += byteSize
|
|
||||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
|
||||||
chunk++
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
|
||||||
uploadUrl := u.UploadURLs[0]
|
|
||||||
credential := u.Credential
|
|
||||||
var finish int64 = 0
|
|
||||||
var chunk int = 0
|
|
||||||
DEFAULT := int64(u.ChunkSize)
|
|
||||||
for finish < stream.GetSize() {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
utils.Log.Debugf("[Cloudreve-Remote] upload: %d", finish)
|
|
||||||
var byteSize = DEFAULT
|
|
||||||
left := stream.GetSize() - finish
|
|
||||||
if left < DEFAULT {
|
|
||||||
byteSize = left
|
|
||||||
}
|
|
||||||
byteData := make([]byte, byteSize)
|
|
||||||
n, err := io.ReadFull(stream, byteData)
|
|
||||||
utils.Log.Debug(err, n)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("POST", uploadUrl+"?chunk="+strconv.Itoa(chunk),
|
|
||||||
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
req.ContentLength = byteSize
|
|
||||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
|
||||||
req.Header.Set("Authorization", fmt.Sprint(credential))
|
|
||||||
req.Header.Set("User-Agent", d.getUA())
|
|
||||||
finish += byteSize
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_ = res.Body.Close()
|
|
||||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
|
||||||
chunk++
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
|
||||||
uploadUrl := u.UploadURLs[0]
|
|
||||||
var finish int64 = 0
|
|
||||||
DEFAULT := int64(u.ChunkSize)
|
|
||||||
for finish < stream.GetSize() {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
utils.Log.Debugf("[Cloudreve-OneDrive] upload: %d", finish)
|
|
||||||
var byteSize = DEFAULT
|
|
||||||
left := stream.GetSize() - finish
|
|
||||||
if left < DEFAULT {
|
|
||||||
byteSize = left
|
|
||||||
}
|
|
||||||
byteData := make([]byte, byteSize)
|
|
||||||
n, err := io.ReadFull(stream, byteData)
|
|
||||||
utils.Log.Debug(err, n)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("PUT", uploadUrl, driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
req.ContentLength = byteSize
|
|
||||||
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
|
|
||||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
|
|
||||||
req.Header.Set("User-Agent", d.getUA())
|
|
||||||
finish += byteSize
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
|
|
||||||
if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
|
|
||||||
data, _ := io.ReadAll(res.Body)
|
|
||||||
_ = res.Body.Close()
|
|
||||||
return errors.New(string(data))
|
|
||||||
}
|
|
||||||
_ = res.Body.Close()
|
|
||||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
|
||||||
}
|
|
||||||
// 上传成功发送回调请求
|
|
||||||
err := d.request(http.MethodPost, "/callback/onedrive/finish/"+u.SessionID, func(req *resty.Request) {
|
|
||||||
req.SetBody("{}")
|
|
||||||
}, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
|
||||||
var finish int64 = 0
|
|
||||||
var chunk int = 0
|
|
||||||
var etags []string
|
|
||||||
DEFAULT := int64(u.ChunkSize)
|
|
||||||
for finish < stream.GetSize() {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
|
|
||||||
var byteSize = DEFAULT
|
|
||||||
left := stream.GetSize() - finish
|
|
||||||
if left < DEFAULT {
|
|
||||||
byteSize = left
|
|
||||||
}
|
|
||||||
byteData := make([]byte, byteSize)
|
|
||||||
n, err := io.ReadFull(stream, byteData)
|
|
||||||
utils.Log.Debug(err, n)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
|
|
||||||
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
req.ContentLength = byteSize
|
|
||||||
finish += byteSize
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_ = res.Body.Close()
|
|
||||||
etags = append(etags, res.Header.Get("ETag"))
|
|
||||||
up(float64(finish) * 100 / float64(stream.GetSize()))
|
|
||||||
chunk++
|
|
||||||
}
|
|
||||||
|
|
||||||
// s3LikeFinishUpload
|
|
||||||
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
|
|
||||||
bodyBuilder := &strings.Builder{}
|
|
||||||
bodyBuilder.WriteString("<CompleteMultipartUpload>")
|
|
||||||
for i, etag := range etags {
|
|
||||||
bodyBuilder.WriteString(fmt.Sprintf(
|
|
||||||
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
|
|
||||||
i+1, // PartNumber 从 1 开始
|
|
||||||
etag,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
bodyBuilder.WriteString("</CompleteMultipartUpload>")
|
|
||||||
req, err := http.NewRequest(
|
|
||||||
"POST",
|
|
||||||
u.CompleteURL,
|
|
||||||
strings.NewReader(bodyBuilder.String()),
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/xml")
|
|
||||||
req.Header.Set("User-Agent", d.getUA())
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode != http.StatusOK {
|
|
||||||
body, _ := io.ReadAll(res.Body)
|
|
||||||
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
|
|
||||||
}
|
|
||||||
|
|
||||||
// 上传成功发送回调请求
|
|
||||||
err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
@ -3,6 +3,7 @@ package crypt
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"github.com/alist-org/alist/v3/internal/stream"
|
||||||
"io"
|
"io"
|
||||||
stdpath "path"
|
stdpath "path"
|
||||||
"regexp"
|
"regexp"
|
||||||
@ -13,8 +14,6 @@ import (
|
|||||||
"github.com/alist-org/alist/v3/internal/fs"
|
"github.com/alist-org/alist/v3/internal/fs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
"github.com/alist-org/alist/v3/internal/sign"
|
|
||||||
"github.com/alist-org/alist/v3/internal/stream"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/alist-org/alist/v3/server/common"
|
"github.com/alist-org/alist/v3/server/common"
|
||||||
@ -125,9 +124,6 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
|||||||
//filter illegal files
|
//filter illegal files
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !d.ShowHidden && strings.HasPrefix(name, ".") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
objRes := model.Object{
|
objRes := model.Object{
|
||||||
Name: name,
|
Name: name,
|
||||||
Size: 0,
|
Size: 0,
|
||||||
@ -149,9 +145,6 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
|||||||
//filter illegal files
|
//filter illegal files
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !d.ShowHidden && strings.HasPrefix(name, ".") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
objRes := model.Object{
|
objRes := model.Object{
|
||||||
Name: name,
|
Name: name,
|
||||||
Size: size,
|
Size: size,
|
||||||
@ -161,11 +154,7 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
|||||||
// discarding hash as it's encrypted
|
// discarding hash as it's encrypted
|
||||||
}
|
}
|
||||||
if d.Thumbnail && thumb == "" {
|
if d.Thumbnail && thumb == "" {
|
||||||
thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp")
|
thumb = utils.EncodePath(common.GetApiUrl(nil) + stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
|
||||||
thumb = fmt.Sprintf("%s/d%s?sign=%s",
|
|
||||||
common.GetApiUrl(common.GetHttpReq(ctx)),
|
|
||||||
utils.EncodePath(thumbPath, true),
|
|
||||||
sign.Sign(thumbPath))
|
|
||||||
}
|
}
|
||||||
if !ok && !d.Thumbnail {
|
if !ok && !d.Thumbnail {
|
||||||
result = append(result, &objRes)
|
result = append(result, &objRes)
|
||||||
@ -263,13 +252,19 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
}
|
}
|
||||||
rrc := remoteLink.RangeReadCloser
|
rrc := remoteLink.RangeReadCloser
|
||||||
if len(remoteLink.URL) > 0 {
|
if len(remoteLink.URL) > 0 {
|
||||||
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, remoteLink)
|
|
||||||
|
rangedRemoteLink := &model.Link{
|
||||||
|
URL: remoteLink.URL,
|
||||||
|
Header: remoteLink.Header,
|
||||||
|
}
|
||||||
|
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rrc = converted
|
rrc = converted
|
||||||
}
|
}
|
||||||
if rrc != nil {
|
if rrc != nil {
|
||||||
|
//remoteRangeReader, err :=
|
||||||
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
|
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
|
||||||
remoteClosers.AddClosers(rrc.GetClosers())
|
remoteClosers.AddClosers(rrc.GetClosers())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -282,6 +277,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
//remoteClosers.Add(remoteLink.MFile)
|
||||||
//keep reuse same MFile and close at last.
|
//keep reuse same MFile and close at last.
|
||||||
remoteClosers.Add(remoteLink.MFile)
|
remoteClosers.Add(remoteLink.MFile)
|
||||||
return io.NopCloser(remoteLink.MFile), nil
|
return io.NopCloser(remoteLink.MFile), nil
|
||||||
@ -300,6 +296,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
|
|
||||||
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
|
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
|
||||||
resultLink := &model.Link{
|
resultLink := &model.Link{
|
||||||
|
Header: remoteLink.Header,
|
||||||
RangeReadCloser: resultRangeReadCloser,
|
RangeReadCloser: resultRangeReadCloser,
|
||||||
Expiration: remoteLink.Expiration,
|
Expiration: remoteLink.Expiration,
|
||||||
}
|
}
|
||||||
@ -386,11 +383,10 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt
|
|||||||
Modified: streamer.ModTime(),
|
Modified: streamer.ModTime(),
|
||||||
IsFolder: streamer.IsDir(),
|
IsFolder: streamer.IsDir(),
|
||||||
},
|
},
|
||||||
Reader: wrappedIn,
|
Reader: wrappedIn,
|
||||||
Mimetype: "application/octet-stream",
|
Mimetype: "application/octet-stream",
|
||||||
WebPutAsTask: streamer.NeedStore(),
|
WebPutAsTask: streamer.NeedStore(),
|
||||||
ForceStreamUpload: true,
|
Exist: streamer.GetExist(),
|
||||||
Exist: streamer.GetExist(),
|
|
||||||
}
|
}
|
||||||
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
|
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -21,8 +21,6 @@ type Addition struct {
|
|||||||
FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced user only!"`
|
FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced user only!"`
|
||||||
|
|
||||||
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
|
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
|
||||||
|
|
||||||
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var config = driver.Config{
|
var config = driver.Config{
|
||||||
|
@ -1,174 +0,0 @@
|
|||||||
package doubao
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Doubao struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Config() driver.Config {
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Init(ctx context.Context) error {
|
|
||||||
// TODO login / refresh token
|
|
||||||
//op.MustSaveDriverStorage(d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Drop(ctx context.Context) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
var files []model.Obj
|
|
||||||
var r NodeInfoResp
|
|
||||||
_, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{
|
|
||||||
"node_id": dir.GetID(),
|
|
||||||
"need_full_path": false,
|
|
||||||
})
|
|
||||||
}, &r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, child := range r.Data.Children {
|
|
||||||
files = append(files, &Object{
|
|
||||||
Object: model.Object{
|
|
||||||
ID: child.ID,
|
|
||||||
Path: child.ParentID,
|
|
||||||
Name: child.Name,
|
|
||||||
Size: child.Size,
|
|
||||||
Modified: time.Unix(child.UpdateTime, 0),
|
|
||||||
Ctime: time.Unix(child.CreateTime, 0),
|
|
||||||
IsFolder: child.NodeType == 1,
|
|
||||||
},
|
|
||||||
Key: child.Key,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
if u, ok := file.(*Object); ok {
|
|
||||||
var r GetFileUrlResp
|
|
||||||
_, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{
|
|
||||||
"uris": []string{u.Key},
|
|
||||||
"type": "file",
|
|
||||||
})
|
|
||||||
}, &r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &model.Link{
|
|
||||||
URL: r.Data.FileUrls[0].MainURL,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("can't convert obj to URL")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
|
||||||
var r UploadNodeResp
|
|
||||||
_, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{
|
|
||||||
"node_list": []base.Json{
|
|
||||||
{
|
|
||||||
"local_id": uuid.New().String(),
|
|
||||||
"name": dirName,
|
|
||||||
"parent_id": parentDir.GetID(),
|
|
||||||
"node_type": 1,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}, &r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
var r UploadNodeResp
|
|
||||||
_, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{
|
|
||||||
"node_list": []base.Json{
|
|
||||||
{"id": srcObj.GetID()},
|
|
||||||
},
|
|
||||||
"current_parent_id": srcObj.GetPath(),
|
|
||||||
"target_parent_id": dstDir.GetID(),
|
|
||||||
})
|
|
||||||
}, &r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
|
||||||
var r BaseResp
|
|
||||||
_, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{
|
|
||||||
"node_id": srcObj.GetID(),
|
|
||||||
"node_name": newName,
|
|
||||||
})
|
|
||||||
}, &r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
|
|
||||||
// TODO copy obj, optional
|
|
||||||
return nil, errs.NotImplement
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
var r BaseResp
|
|
||||||
_, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
|
|
||||||
}, &r)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put is not implemented; uploads are not supported by this driver
// (see NoUpload in the driver config).
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// TODO upload file, optional
	return nil, errs.NotImplement
}
|
|
||||||
|
|
||||||
// GetArchiveMeta is not implemented; returning errs.NotImplement makes
// alist use its internal archive tool instead.
func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}
|
|
||||||
|
|
||||||
// ListArchive is not implemented; returning errs.NotImplement makes
// alist use its internal archive tool instead.
func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}
|
|
||||||
|
|
||||||
// Extract is not implemented; returning errs.NotImplement makes alist
// use its internal archive tool instead.
func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}
|
|
||||||
|
|
||||||
// ArchiveDecompress is not implemented; returning errs.NotImplement makes
// alist use its internal archive tool instead.
func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}
|
|
||||||
|
|
||||||
//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

// Compile-time assertion that *Doubao satisfies the driver.Driver interface.
var _ driver.Driver = (*Doubao)(nil)
|
|
@ -1,34 +0,0 @@
|
|||||||
package doubao
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Addition holds the driver-specific options a user supplies when
// mounting a Doubao storage. Field order is preserved because alist
// renders these options via reflection — presumably in declaration
// order; confirm before reordering.
type Addition struct {
	// Usually one of two
	// driver.RootPath
	driver.RootID
	// define other
	// Cookie authenticates every API request to the Doubao backend.
	Cookie string `json:"cookie" type:"text"`
}
|
|
||||||
|
|
||||||
var config = driver.Config{
|
|
||||||
Name: "Doubao",
|
|
||||||
LocalSort: true,
|
|
||||||
OnlyLocal: false,
|
|
||||||
OnlyProxy: false,
|
|
||||||
NoCache: false,
|
|
||||||
NoUpload: true,
|
|
||||||
NeedMs: false,
|
|
||||||
DefaultRoot: "0",
|
|
||||||
CheckStatus: false,
|
|
||||||
Alert: "",
|
|
||||||
NoOverwriteUpload: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
op.RegisterDriver(func() driver.Driver {
|
|
||||||
return &Doubao{}
|
|
||||||
})
|
|
||||||
}
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user