Compare commits: v3.20.0...refactor/o (193 commits)
Commit SHA1s:

3b90f591b5, 5657b12b20, aba8bc0ec2, ce6e486666, 4fc0a77565, aaffaee2b5, 8ef8023c20, cdfbe6dcf2, 94d028743a, 7f7335435c,
b9e192b29c, 69a98eaef6, 1ebc96a4e5, 66e2324cac, 7600dc28df, 8ef89ad0a4, 35d672217d, 9fb9efb704, 1a283bb272, 1490da8b53,
12dfb60a66, 0380d7fff9, a008f54f4d, 0acb2d6073, ea9a3432ab, 7db3975b18, 3d7f79cba8, 9ff83a7950, e719a1a456, 40a6fcbdff,
0fd51646f6, e8958019d9, e1ef690784, 4024050dd0, eb918658f0, fb13dae136, 6b67a36d63, a64dd4885e, 0f03a747d8, 30977cdc6d,
106cf720c1, 882112ed1c, 2a6ab77295, f0981a0c8d, 57eea4db17, 234852ca61, 809105b67e, 02e8c31506, 19b39a5c04, 28e2731594,
b1a279cbcc, 352a6a741a, 109015567a, 9e0fa77ca2, 335b11c698, 8e433355e6, 3504f017b9, cd2f8077fa, d5b68a91d2, 623c7dcea5,
ecbd6d86cd, 7200344ace, b313ac4daa, f2f312b43a, 6f6d20e1ba, 3231c3d930, b604e21c69, 3c66db9845, f6ab1f7f61, 8e40465e86,
37dffd0fce, e7c0d94b44, 8102142007, 7c6dec5d47, dd10c0c5d0, 34fadecc2c, cb8867fcc1, 092ed06833, 6308f1c35d, ce10c9f120,
6c4736fc8f, b301b791c7, 19d34e2eb8, a3748af772, 9b765ef696, 8f493cccc4, 31a033dff1, 8c3337b88b, 7238243664, ba2b15ab24,
28dc8822b7, 358c5055e9, b6cd40e6d3, 7d96d8070d, d482fb5f26, 60402ce1fc, 1e3950c847, ed550594da, 3bbae29f93, 3b74f8cd9a,
e9bdb91e01, 1aa024ed6b, 13e8d36e1a, 5606c23768, 0b675d6c02, c1db3a36ad, c59dbb4f9e, df6b306fce, 9d45718e5f, b91ed7a78a,
95386d777b, 635809c376, af6bb2a6aa, a797494aa3, 353dd7f796, 1c00d64952, ff5cf3f4fa, 5b6b2f427a, 7877184bee, e9cb37122e,
a425392a2b, 75acbcc115, 30415cefbe, 1d06a0019f, 3686075a7f, 6c1c7e5cc0, c4f901b201, 4b7acb1389, 15b7169df4, 861948bcf3,
e5ffd39cf2, 8b353da0d2, 49bde82426, 3e285aaec4, 355fc576b1, a69d72aa20, e5d123c5d3, 220eb33f88, 5238850036, 81ac963567,
3c21a9a520, 1dc1dd1f07, c9ea9bce81, 9f08353d31, ce0c3626c2, 06f46206db, 579f0c06af, b12d92acc9, e700ce15e5, 7dbef7d559,
7e9cdd8b07, cee6bc6b5d, cfd23c05b4, 0c1acd72ca, e2ca06dcca, 0828fd787d, 2e23ea68d4, 4afa822bec, f2ca9b40db, 4c2535cb22,
d4ea8787c9, a4de04528a, f60aae7499, de8f9e9eee, cace9db12f, ec2fb82836, afcfbf02ea, cad04e07dd, 30f732138c, 04034bd03b,
6ec9a8d4c7, 3f7882b467, a4511c1963, 9d1f122717, 5dd73d80d8, fce872bc1b, df6c4c80c2, d2ff040cf8, a31af209cc, 3f8b3da52b,
6887f14ec6, 3e0de5eaac, 61101a60f4, 3529023bf9, d1d1a089a4, fa66358b1e, 2b533e4b91, d3530a8d80, 6052eb3512, d17f7f7cad,
8bdc67ec3d, 4fabc27366, e4c7b0f17c
.github/FUNDING.yml (vendored, 2 changes)

```diff
@@ -3,7 +3,7 @@
 github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 patreon: # Replace with a single Patreon username
 open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
+ko_fi: xhofe # Replace with a single Ko-fi username
 tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 liberapay: # Replace with a single Liberapay username
```
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 43 changes)

```diff
@@ -7,28 +7,44 @@ body:
       value: |
         Thanks for taking the time to fill out this bug report, please **confirm that your issue is not a duplicate issue and not because of your operation or version issues**
         感谢您花时间填写此错误报告,请**务必确认您的issue不是重复的且不是因为您的操作或版本问题**

   - type: checkboxes
     attributes:
       label: Please make sure of the following things
-      description: You may select more than one, even select all.
+      description: |
+        You must check all the following, otherwise your issue may be closed directly. Or you can go to the [discussions](https://github.com/alist-org/alist/discussions)
+        您必须勾选以下所有内容,否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
       options:
-        - label: I have read the [documentation](https://alist.nn.ci).
-        - label: I'm sure there are no duplicate issues or discussions.
-        - label: I'm sure it's due to `alist` and not something else(such as `Dependencies` or `Operational`).
-        - label: I'm sure I'm using the latest version
+        - label: |
+            I have read the [documentation](https://alist.nn.ci).
+            我已经阅读了[文档](https://alist.nn.ci)。
+        - label: |
+            I'm sure there are no duplicate issues or discussions.
+            我确定没有重复的issue或讨论。
+        - label: |
+            I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
+            我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
+        - label: |
+            I'm sure this issue is not fixed in the latest version.
+            我确定这个问题在最新版本中没有被修复。

   - type: input
     id: version
     attributes:
-      label: Alist Version / Alist 版本
-      description: What version of our software are you running?
-      placeholder: v2.0.0
+      label: AList Version / AList 版本
+      description: |
+        What version of our software are you running? Do not use `latest` or `master` as an answer.
+        您使用的是哪个版本的软件?请不要使用`latest`或`master`作为答案。
+      placeholder: v3.xx.xx
     validations:
       required: true
   - type: input
     id: driver
     attributes:
       label: Driver used / 使用的存储驱动
-      description: What storage driver are you using?
+      description: |
+        What storage driver are you using?
+        您使用的是哪个存储驱动?
       placeholder: "for example: Onedrive"
     validations:
       required: true
@@ -47,6 +63,15 @@ body:
         请提供能复现此问题的链接,请知悉如果不提供它你的issue可能会被直接关闭。
     validations:
       required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Config / 配置
+      description: |
+        Please provide the configuration file of your `AList` application and take a screenshot of the relevant storage configuration. (hide privacy field)
+        请提供您的`AList`应用的配置文件,并截图相关存储配置。(隐藏隐私字段)
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
```
.github/stale.yml (vendored, 2 changes)

```diff
@@ -6,6 +6,8 @@ daysUntilClose: 20
 exemptLabels:
   - accepted
   - security
+  - working
+  - pr-welcome
 # Label to use when marking an issue as stale
 staleLabel: stale
 # Comment to post when marking an issue as stale. Set to `false` to disable
```
.github/workflows/auto_lang.yml (vendored, 8 changes)

```diff
@@ -11,6 +11,10 @@ on:
       - 'cmd/lang.go'
   workflow_dispatch:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   auto_lang:
     strategy:
@@ -53,8 +57,8 @@ jobs:
         run: |
           cd alist-web
           git add .
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
          git commit -m "chore: auto update i18n file" -a 2>/dev/null || :
          cd ..
```
.github/workflows/build.yml (vendored, 4 changes)

```diff
@@ -6,6 +6,10 @@ on:
   pull_request:
     branches: [ 'main' ]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
     strategy:
```
.github/workflows/build_docker.yml (vendored, 8 changes)

```diff
@@ -4,6 +4,10 @@ on:
   push:
     branches: [ main ]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build_docker:
     name: Build docker
@@ -53,8 +57,8 @@ jobs:

       - name: Commit
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Noah Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git commit --allow-empty -m "Trigger build for ${{ github.sha }}"

       - name: Push commit
```
.github/workflows/issue_on_close.yml (vendored, new file, 17 lines)

```yaml
name: Remove working label when issue closed

on:
  issues:
    types: [closed]

jobs:
  rm-working:
    runs-on: ubuntu-latest
    steps:
      - name: Remove working label
        uses: actions-cool/issues-helper@v3
        with:
          actions: 'remove-labels'
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number }}
          labels: 'working,pr-welcome'
```
.github/workflows/issue_question.yml (vendored, 2 changes)

```diff
@@ -10,7 +10,7 @@ jobs:
     if: github.event.label.name == 'question'
     steps:
       - name: Create comment
-        uses: actions-cool/issues-helper@v3.4.0
+        uses: actions-cool/issues-helper@v3.5.2
         with:
           actions: 'create-comment'
           token: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/release.yml (vendored, 12 changes)

```diff
@@ -41,17 +41,11 @@ jobs:
         run: |
           bash build.sh release

-      - name: Release latest
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.MY_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: false
-
       - name: Upload assets
         uses: softprops/action-gh-release@v1
         with:
           files: build/compress/*
+          prerelease: false

   release_desktop:
     needs: release
@@ -68,8 +62,8 @@ jobs:

      - name: Add tag
        run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
          version=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
          git tag -a $version -m "release $version"
```
.github/workflows/release_docker.yml (vendored, 4 changes)

```diff
@@ -56,8 +56,8 @@ jobs:

      - name: Add tag
        run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
          git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}"

      - name: Push tags
```
.github/workflows/release_linux_musl.yml (vendored, new file, 34 lines)

```yaml
name: release_linux_musl

on:
  release:
    types: [ published ]

jobs:
  release_linux_musl:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.20' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:

      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release linux_musl

      - name: Upload assets
        uses: softprops/action-gh-release@v1
        with:
          files: build/compress/*
```
.github/workflows/release_linux_musl_arm.yml (vendored, new file, 34 lines)

```yaml
name: release_linux_musl_arm

on:
  release:
    types: [ published ]

jobs:
  release_linux_musl_arm:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.20' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:

      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release linux_musl_arm

      - name: Upload assets
        uses: softprops/action-gh-release@v1
        with:
          files: build/compress/*
```
.gitignore (vendored, 2 changes)

```diff
@@ -29,3 +29,5 @@ output/
 /daemon/
 /public/dist/*
 /!public/dist/README.md
+
+.VSCodeCounter
```
CONTRIBUTING.md

```diff
@@ -7,7 +7,7 @@
 Prerequisites:

 - [git](https://git-scm.com)
-- [Go 1.19+](https://golang.org/doc/install)
+- [Go 1.20+](https://golang.org/doc/install)
 - [gcc](https://gcc.gnu.org/)
 - [nodejs](https://nodejs.org/)
```
README.md (16 changes, Executable file → Normal file)

```diff
@@ -39,11 +39,11 @@

 ---

-English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

 ## Features

-- [x] Multiple storage
+- [x] Multiple storages
 - [x] Local storage
 - [x] [Aliyundrive](https://www.aliyundrive.com/)
 - [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
@@ -86,11 +86,12 @@ English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_O
 - [x] Protected routes (password protection and authentication)
 - [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
 - [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
-- [x] Cloudflare workers proxy
+- [x] Cloudflare Workers proxy
 - [x] File/Folder package download
 - [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
 - [x] Offline download
 - [x] Copy files between two storage
+- [x] Multi-thread downloading acceleration for single-thread download/stream

 ## Document

@@ -102,7 +103,7 @@ English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_O

 ## Discussion

-Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature request only.**
+Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**

 ## Sponsor

@@ -112,22 +113,21 @@ https://alist.nn.ci/guide/sponsor.html
 ### Special sponsors

 - [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
-- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.la/)
-- [KinhDown 百度云盘不限速下载!永久免费!已稳定运行3年!非常可靠!Q群 -> 786799372](https://kinhdown.com)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
 - [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

 ## Contributors

 Thanks goes to these wonderful people:

 [](https://github.com/alist-org/alist/graphs/contributors)

 ## License

 The `AList` is open-source software licensed under the AGPL-3.0 license.

 ## Disclaimer
-- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
+- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning Golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
 - This program is implemented by calling the official sdk/interface, without destroying the official interface behavior;
 - This program only does 302 redirect/traffic forwarding, and does not intercept, store, or tamper with any user data;
 - Before using this program, you should understand and bear the corresponding risks, including but not limited to account ban, download speed limit, etc., which is none of this program's business;
```
README_cn.md

```diff
@@ -39,7 +39,7 @@

 ---

-[English](./README.md) | 中文 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+[English](./README.md) | 中文 | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

 ## 功能

@@ -90,6 +90,7 @@
 - [x] 网页上传(可以允许访客上传),删除,新建文件夹,重命名,移动,复制
 - [x] 离线下载
 - [x] 跨存储复制文件
+- [x] 单线程下载/串流的多线程下载加速

 ## 文档

@@ -110,15 +111,14 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我
 ### 特别赞助

 - [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (国内API服务器赞助)
-- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.la/)
-- [KinhDown 百度云盘不限速下载!永久免费!已稳定运行3年!非常可靠!Q群 -> 786799372](https://kinhdown.com)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
 - [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

 ## 贡献者

 Thanks goes to these wonderful people:

 [](https://github.com/alist-org/alist/graphs/contributors)

 ## 许可

```
README_ja.md (new file, 138 lines)

```markdown
<div align="center">
  <a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
  <p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
  <div>
    <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
      <img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
    </a>
    <a href="https://github.com/Xhofe/alist/blob/main/LICENSE">
      <img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
    </a>
    <a href="https://github.com/Xhofe/alist/actions?query=workflow%3ABuild">
      <img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
    </a>
    <a href="https://github.com/Xhofe/alist/releases">
      <img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
    </a>
    <a title="Crowdin" target="_blank" href="https://crwd.in/alist">
      <img src="https://badges.crowdin.net/alist/localized.svg">
    </a>
  </div>
  <div>
    <a href="https://github.com/Xhofe/alist/discussions">
      <img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
    </a>
    <a href="https://discord.gg/F4ymsH4xv2">
      <img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
    </a>
    <a href="https://github.com/Xhofe/alist/releases">
      <img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
    </a>
    <a href="https://hub.docker.com/r/xhofe/alist">
      <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
    </a>
    <a href="https://alist.nn.ci/guide/sponsor.html">
      <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
    </a>
  </div>
</div>

---

[English](./README.md) | [中文](./README_cn.md) | 日本語 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

## 特徴

- [x] マルチストレージ
- [x] ローカルストレージ
- [x] [Aliyundrive](https://www.aliyundrive.com/)
- [x] OneDrive / Sharepoint ([グローバル](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
- [x] [189cloud](https://cloud.189.cn) (Personal, Family)
- [x] [GoogleDrive](https://drive.google.com/)
- [x] [123pan](https://www.123pan.com/)
- [x] FTP / SFTP
- [x] [PikPak](https://www.mypikpak.com/)
- [x] [S3](https://aws.amazon.com/s3/)
- [x] [Seafile](https://seafile.com/)
- [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
- [x] WebDav(Support OneDrive/SharePoint without API)
- [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
- [x] [Mediatrack](https://www.mediatrack.cn/)
- [x] [139yun](https://yun.139.com/) (Personal, Family)
- [x] [YandexDisk](https://disk.yandex.com/)
- [x] [BaiduNetdisk](http://pan.baidu.com/)
- [x] [Terabox](https://www.terabox.com/main)
- [x] [UC](https://drive.uc.cn)
- [x] [Quark](https://pan.quark.cn)
- [x] [Thunder](https://pan.xunlei.com)
- [x] [Lanzou](https://www.lanzou.com/)
- [x] [Aliyundrive share](https://www.aliyundrive.com/)
- [x] [Google photo](https://photos.google.com/)
- [x] [Mega.nz](https://mega.nz)
- [x] [Baidu photo](https://photo.baidu.com/)
- [x] SMB
- [x] [115](https://115.com/)
- [X] Cloudreve
- [x] [Dropbox](https://www.dropbox.com/)
- [x] デプロイが簡単で、すぐに使える
- [x] ファイルプレビュー (PDF, マークダウン, コード, プレーンテキスト, ...)
- [x] ギャラリーモードでの画像プレビュー
- [x] ビデオとオーディオのプレビュー、歌詞と字幕のサポート
- [x] Office ドキュメントのプレビュー (docx, pptx, xlsx, ...)
- [x] `README.md` のプレビューレンダリング
- [x] ファイルのパーマリンクコピーと直接ダウンロード
- [x] ダークモード
- [x] 国際化
- [x] 保護されたルート (パスワード保護と認証)
- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare ワーカープロキシ
- [x] ファイル/フォルダパッケージのダウンロード
- [x] ウェブアップロード(訪問者にアップロードを許可できる), 削除, mkdir, 名前変更, 移動, コピー
- [x] オフラインダウンロード
- [x] 二つのストレージ間でファイルをコピー
- [x] シングルスレッドのダウンロード/ストリーム向けのマルチスレッド ダウンロード アクセラレーション

## ドキュメント

<https://alist.nn.ci/>

## デモ

<https://al.nn.ci>

## ディスカッション

一般的なご質問は[ディスカッションフォーラム](https://github.com/Xhofe/alist/discussions)をご利用ください。**問題はバグレポートと機能リクエストのみです。**

## スポンサー

AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討ください!すべての愛とサポートに感謝します:
https://alist.nn.ci/guide/sponsor.html

### スペシャルスポンサー

- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

## コントリビューター

これらの素晴らしい人々に感謝します:

[](https://github.com/alist-org/alist/graphs/contributors)

## ライセンス

`AList` は AGPL-3.0 ライセンスの下でライセンスされたオープンソースソフトウェアです。

## 免責事項

- このプログラムはフリーでオープンソースのプロジェクトです。ネットワークディスク上でファイルを共有するように設計されており、golang のダウンロードや学習に便利です。利用にあたっては関連法規を遵守し、悪用しないようお願いします;
- このプログラムは、公式インターフェースの動作を破壊することなく、公式 sdk/インターフェースを呼び出すことで実装されています;
- このプログラムは、302リダイレクト/トラフィック転送のみを行い、いかなるユーザーデータも傍受、保存、改ざんしません;
- このプログラムを使用する前に、アカウントの禁止、ダウンロード速度の制限など、対応するリスクを理解し、負担する必要があります;
- もし侵害があれば、[メール](mailto:i@nn.ci)で私に連絡してください。

---

> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/Xhofe) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
```
build.sh (71 changes)

```diff
@@ -89,18 +89,31 @@ BuildDocker() {
 }

 BuildRelease() {
+  rm -rf .git/
+  mkdir -p "build"
+  BuildWinArm64 ./build/alist-windows-arm64.exe
+  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
+  # why? Because some target platforms seem to have issues with upx compression
+  upx -9 ./alist-linux-amd64
+  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
+  upx -9 ./alist-windows-amd64-upx.exe
+  mv alist-* build
+}
+
+BuildReleaseLinuxMusl() {
   rm -rf .git/
   mkdir -p "build"
   muslflags="--extldflags '-static -fpic' $ldflags"
   BASE="https://musl.nn.ci/"
-  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross arm-linux-musleabihf-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
+  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
   for i in "${FILES[@]}"; do
     url="${BASE}${i}.tgz"
     curl -L -o "${i}.tgz" "${url}"
     sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
+    rm -f "${i}.tgz"
   done
-  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-arm linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
-  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc arm-linux-musleabihf-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
+  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
+  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
   for i in "${!OS_ARCHES[@]}"; do
     os_arch=${OS_ARCHES[$i]}
     cgo_cc=${CGO_ARGS[$i]}
@@ -111,13 +124,39 @@ BuildRelease() {
     export CGO_ENABLED=1
     go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
   done
-  BuildWinArm64 ./build/alist-windows-arm64.exe
-  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
-  # why? Because some target platforms seem to have issues with upx compression
-  upx -9 ./alist-linux-amd64
-  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
-  upx -9 ./alist-windows-amd64-upx.exe
-  mv alist-* build
+}
+
+BuildReleaseLinuxMuslArm() {
+  rm -rf .git/
+  mkdir -p "build"
+  muslflags="--extldflags '-static -fpic' $ldflags"
+  BASE="https://musl.nn.ci/"
+  # FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armeb-linux-musleabi-cross armeb-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
+  FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
+  for i in "${FILES[@]}"; do
+    url="${BASE}${i}.tgz"
+    curl -L -o "${i}.tgz" "${url}"
+    sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
+    rm -f "${i}.tgz"
+  done
+  # OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armeb linux-musleabihf-armeb linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
+  # CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armeb-linux-musleabi-gcc armeb-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
+  # GOARMS=('' '' '' '' '' '' '5' '5' '6' '6' '7' '7' '7')
+  OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
+  CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
+  GOARMS=('' '' '' '' '5' '5' '6' '6' '7' '7' '7')
+  for i in "${!OS_ARCHES[@]}"; do
+    os_arch=${OS_ARCHES[$i]}
+    cgo_cc=${CGO_ARGS[$i]}
+    arm=${GOARMS[$i]}
+    echo building for ${os_arch}
+    export GOOS=linux
+    export GOARCH=arm
+    export CC=${cgo_cc}
+    export CGO_ENABLED=1
+    export GOARM=${arm}
+    go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
+  done
 }

 MakeRelease() {
@@ -139,8 +178,8 @@ MakeRelease() {
     rm -f alist.exe
   done
   cd compress
-  find . -type f -print0 | xargs -0 md5sum >md5.txt
-  cat md5.txt
+  find . -type f -print0 | xargs -0 md5sum >"$1"
+  cat "$1"
   cd ../..
 }

@@ -155,9 +194,15 @@ elif [ "$1" = "release" ]; then
   FetchWebRelease
   if [ "$2" = "docker" ]; then
     BuildDocker
+  elif [ "$2" = "linux_musl_arm" ]; then
+    BuildReleaseLinuxMuslArm
+    MakeRelease "md5-linux-musl-arm.txt"
+  elif [ "$2" = "linux_musl" ]; then
+    BuildReleaseLinuxMusl
+    MakeRelease "md5-linux-musl.txt"
   else
     BuildRelease
-    MakeRelease
+    MakeRelease "md5.txt"
   fi
 else
   echo -e "Parameter error"
```
cmd/admin.go (72 changes)

```diff
@@ -4,30 +4,90 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/spf13/cobra"
 )

-// PasswordCmd represents the password command
-var PasswordCmd = &cobra.Command{
+// AdminCmd represents the password command
+var AdminCmd = &cobra.Command{
 	Use:     "admin",
 	Aliases: []string{"password"},
-	Short:   "Show admin user's info",
+	Short:   "Show admin user's info and some operations about admin user's password",
 	Run: func(cmd *cobra.Command, args []string) {
 		Init()
+		defer Release()
 		admin, err := op.GetAdmin()
 		if err != nil {
 			utils.Log.Errorf("failed get admin user: %+v", err)
 		} else {
-			utils.Log.Infof("admin user's info: \nusername: %s\npassword: %s", admin.Username, admin.Password)
+			utils.Log.Infof("Admin user's username: %s", admin.Username)
+			utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
+			utils.Log.Infof("You can reset the password with a random string by running [alist admin random]")
+			utils.Log.Infof("You can also set a new password by running [alist admin set NEW_PASSWORD]")
 		}
 	},
 }

-func init() {
-	RootCmd.AddCommand(PasswordCmd)
+var RandomPasswordCmd = &cobra.Command{
+	Use:   "random",
+	Short: "Reset admin user's password to a random string",
+	Run: func(cmd *cobra.Command, args []string) {
+		newPwd := random.String(8)
+		setAdminPassword(newPwd)
+	},
+}
+
+var SetPasswordCmd = &cobra.Command{
+	Use:   "set",
+	Short: "Set admin user's password",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) == 0 {
+			utils.Log.Errorf("Please enter the new password")
+			return
+		}
+		setAdminPassword(args[0])
+	},
+}
+
+var ShowTokenCmd = &cobra.Command{
+	Use:   "token",
+	Short: "Show admin token",
+	Run: func(cmd *cobra.Command, args []string) {
+		Init()
+		defer Release()
+		token := setting.GetStr(conf.Token)
+		utils.Log.Infof("Admin token: %s", token)
+	},
+}
+
+func setAdminPassword(pwd string) {
+	Init()
+	defer Release()
+	admin, err := op.GetAdmin()
+	if err != nil {
+		utils.Log.Errorf("failed get admin user: %+v", err)
+		return
+	}
+	admin.SetPassword(pwd)
+	if err := op.UpdateUser(admin); err != nil {
+		utils.Log.Errorf("failed update admin user: %+v", err)
+		return
+	}
+	utils.Log.Infof("admin user has been updated:")
+	utils.Log.Infof("username: %s", admin.Username)
+	utils.Log.Infof("password: %s", pwd)
+	DelAdminCacheOnline()
+}
+
+func init() {
+	RootCmd.AddCommand(AdminCmd)
+	AdminCmd.AddCommand(RandomPasswordCmd)
+	AdminCmd.AddCommand(SetPasswordCmd)
+	AdminCmd.AddCommand(ShowTokenCmd)
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
```
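The parent/child wiring above is standard cobra usage: a command is just a `&cobra.Command{...}` value, and `AddCommand` nests it under a parent. As a quick illustration of the same pattern outside alist, here is a minimal self-contained sketch (the `demo` binary name and the command bodies are hypothetical, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// rootCmd plays the role RootCmd plays in alist.
var rootCmd = &cobra.Command{Use: "demo"}

// adminCmd mirrors AdminCmd: it runs on `demo admin`.
var adminCmd = &cobra.Command{
	Use:   "admin",
	Short: "Show admin info",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("admin info...")
	},
}

// randomCmd mirrors RandomPasswordCmd: it runs on `demo admin random`.
var randomCmd = &cobra.Command{
	Use:   "random",
	Short: "Reset the password to a random string",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("password reset")
	},
}

func main() {
	// Register the subcommand tree, then dispatch on os.Args.
	rootCmd.AddCommand(adminCmd)
	adminCmd.AddCommand(randomCmd)
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```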
cmd/cancel2fa.go

```diff
@@ -15,6 +15,7 @@ var Cancel2FACmd = &cobra.Command{
 	Short: "Delete 2FA of admin user",
 	Run: func(cmd *cobra.Command, args []string) {
 		Init()
+		defer Release()
 		admin, err := op.GetAdmin()
 		if err != nil {
 			utils.Log.Errorf("failed to get admin user: %+v", err)
@@ -24,6 +25,7 @@ var Cancel2FACmd = &cobra.Command{
 				utils.Log.Errorf("failed to cancel 2FA: %+v", err)
 			} else {
 				utils.Log.Info("2FA canceled")
+				DelAdminCacheOnline()
 			}
 		}
 	},
```
cmd/common.go

```diff
@@ -7,6 +7,7 @@ import (

 	"github.com/alist-org/alist/v3/internal/bootstrap"
 	"github.com/alist-org/alist/v3/internal/bootstrap/data"
+	"github.com/alist-org/alist/v3/internal/db"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )
@@ -19,6 +20,10 @@ func Init() {
 		bootstrap.InitIndex()
 	}
 }
+
+func Release() {
+	db.Close()
+}

 var pid = -1
 var pidFile string
```
|
|||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/cmd/flags"
|
"github.com/alist-org/alist/v3/cmd/flags"
|
||||||
|
_ "github.com/alist-org/alist/v3/drivers"
|
||||||
|
_ "github.com/alist-org/alist/v3/internal/offline_download"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
cmd/server.go

```diff
@@ -7,12 +7,12 @@ import (
 	"net/http"
 	"os"
 	"os/signal"
+	"strconv"
 	"sync"
 	"syscall"
 	"time"

 	"github.com/alist-org/alist/v3/cmd/flags"
-	_ "github.com/alist-org/alist/v3/drivers"
 	"github.com/alist-org/alist/v3/internal/bootstrap"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -34,8 +34,7 @@ the address is defined in config file`,
 			utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
 			time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
 		}
-		bootstrap.InitAria2()
-		bootstrap.InitQbittorrent()
+		bootstrap.InitOfflineDownloadTools()
 		bootstrap.LoadStorages()
 		if !flags.Debug && !flags.Dev {
 			gin.SetMode(gin.ReleaseMode)
@@ -56,7 +55,7 @@ the address is defined in config file`,
 			}()
 		}
 		if conf.Conf.Scheme.HttpsPort != -1 {
-			httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpPort)
+			httpsBase := fmt.Sprintf("%s:%d", conf.Conf.Scheme.Address, conf.Conf.Scheme.HttpsPort)
 			utils.Log.Infof("start HTTPS server @ %s", httpsBase)
 			httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
 			go func() {
@@ -74,6 +73,16 @@ the address is defined in config file`,
 			if err != nil {
 				utils.Log.Fatalf("failed to listen unix: %+v", err)
 			}
+			// set socket file permission
+			mode, err := strconv.ParseUint(conf.Conf.Scheme.UnixFilePerm, 8, 32)
+			if err != nil {
+				utils.Log.Errorf("failed to parse socket file permission: %+v", err)
+			} else {
+				err = os.Chmod(conf.Conf.Scheme.UnixFile, os.FileMode(mode))
+				if err != nil {
+					utils.Log.Errorf("failed to chmod socket file: %+v", err)
+				}
+			}
 			err = unixSrv.Serve(listener)
 			if err != nil && err != http.ErrServerClosed {
 				utils.Log.Fatalf("failed to start unix: %s", err.Error())
@@ -89,7 +98,7 @@ the address is defined in config file`,
 	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
 	<-quit
 	utils.Log.Println("Shutdown server...")
+	Release()
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
 	var wg sync.WaitGroup
```
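The new socket-permission block parses the configured mode as an octal string before chmod-ing the unix socket. A small standalone sketch of that conversion, assuming a literal "0666" in place of conf.Conf.Scheme.UnixFilePerm (the value is illustrative, not alist's default):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Parse an octal permission string such as "0666" (base 8, 32-bit),
	// exactly as the server does for the unix socket file permission.
	mode, err := strconv.ParseUint("0666", 8, 32)
	if err != nil {
		fmt.Println("bad permission string:", err)
		return
	}
	// os.FileMode(mode) would then be passed to os.Chmod on the socket path.
	fmt.Printf("parsed mode: %o\n", os.FileMode(mode))
}
```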
cmd/storage.go (153 changes)

```diff
@@ -4,8 +4,14 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+	"os"
+	"strconv"
+
 	"github.com/alist-org/alist/v3/internal/db"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/charmbracelet/bubbles/table"
+	tea "github.com/charmbracelet/bubbletea"
+	"github.com/charmbracelet/lipgloss"
 	"github.com/spf13/cobra"
 )

@@ -15,31 +21,136 @@ var storageCmd = &cobra.Command{
 	Short: "Manage storage",
 }

-func init() {
-	var mountPath string
-	var disable = &cobra.Command{
-		Use:   "disable",
-		Short: "Disable a storage",
-		Run: func(cmd *cobra.Command, args []string) {
-			Init()
-			storage, err := db.GetStorageByMountPath(mountPath)
-			if err != nil {
-				utils.Log.Errorf("failed to query storage: %+v", err)
-			} else {
-				storage.Disabled = true
-				err = db.UpdateStorage(storage)
-				if err != nil {
-					utils.Log.Errorf("failed to update storage: %+v", err)
-				} else {
-					utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
-				}
-			}
-		},
-	}
-	disable.Flags().StringVarP(&mountPath, "mount-path", "m", "", "The mountPath of storage")
-	RootCmd.AddCommand(storageCmd)
-	storageCmd.AddCommand(disable)
+var disableStorageCmd = &cobra.Command{
+	Use:   "disable",
+	Short: "Disable a storage",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) < 1 {
+			utils.Log.Errorf("mount path is required")
+			return
+		}
+		mountPath := args[0]
+		Init()
+		defer Release()
+		storage, err := db.GetStorageByMountPath(mountPath)
+		if err != nil {
+			utils.Log.Errorf("failed to query storage: %+v", err)
+		} else {
+			storage.Disabled = true
+			err = db.UpdateStorage(storage)
+			if err != nil {
+				utils.Log.Errorf("failed to update storage: %+v", err)
+			} else {
+				utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
+			}
+		}
+	},
+}
+
+var baseStyle = lipgloss.NewStyle().
+	BorderStyle(lipgloss.NormalBorder()).
+	BorderForeground(lipgloss.Color("240"))
+
+type model struct {
+	table table.Model
+}
+
+func (m model) Init() tea.Cmd { return nil }
+
+func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+	var cmd tea.Cmd
+	switch msg := msg.(type) {
+	case tea.KeyMsg:
+		switch msg.String() {
+		case "esc":
+			if m.table.Focused() {
+				m.table.Blur()
+			} else {
+				m.table.Focus()
+			}
+		case "q", "ctrl+c":
+			return m, tea.Quit
+			//case "enter":
+			//	return m, tea.Batch(
+			//		tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
+			//	)
+		}
+	}
+	m.table, cmd = m.table.Update(msg)
+	return m, cmd
+}
+
+func (m model) View() string {
+	return baseStyle.Render(m.table.View()) + "\n"
+}
+
+var storageTableHeight int
+var listStorageCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all storages",
+	Run: func(cmd *cobra.Command, args []string) {
+		Init()
+		defer Release()
+		storages, _, err := db.GetStorages(1, -1)
+		if err != nil {
+			utils.Log.Errorf("failed to query storages: %+v", err)
+		} else {
+			utils.Log.Infof("Found %d storages", len(storages))
+			columns := []table.Column{
+				{Title: "ID", Width: 4},
+				{Title: "Driver", Width: 16},
+				{Title: "Mount Path", Width: 30},
+				{Title: "Enabled", Width: 7},
+			}
+
+			var rows []table.Row
+			for i := range storages {
+				storage := storages[i]
+				enabled := "true"
+				if storage.Disabled {
+					enabled = "false"
+				}
+				rows = append(rows, table.Row{
+					strconv.Itoa(int(storage.ID)),
+					storage.Driver,
+					storage.MountPath,
+					enabled,
+				})
+			}
+			t := table.New(
+				table.WithColumns(columns),
+				table.WithRows(rows),
+				table.WithFocused(true),
+				table.WithHeight(storageTableHeight),
+			)
+
+			s := table.DefaultStyles()
+			s.Header = s.Header.
+				BorderStyle(lipgloss.NormalBorder()).
+				BorderForeground(lipgloss.Color("240")).
+				BorderBottom(true).
+				Bold(false)
+			s.Selected = s.Selected.
+				Foreground(lipgloss.Color("229")).
+				Background(lipgloss.Color("57")).
+				Bold(false)
+			t.SetStyles(s)
+
+			m := model{t}
+			if _, err := tea.NewProgram(m).Run(); err != nil {
+				utils.Log.Errorf("failed to run program: %+v", err)
+				os.Exit(1)
+			}
+		}
+	},
+}
+
+func init() {
+	RootCmd.AddCommand(storageCmd)
+	storageCmd.AddCommand(disableStorageCmd)
+	storageCmd.AddCommand(listStorageCmd)
+	storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
```
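The `list` command above drives a Bubble Tea program: the `model` type satisfies the tea.Model interface through the Init/Update/View triple, and `tea.NewProgram(m).Run()` owns the terminal until the user quits. A minimal sketch of that same loop with a toy model (the key-press counter is hypothetical, not alist code):

```go
package main

import (
	"fmt"
	"os"

	tea "github.com/charmbracelet/bubbletea"
)

// counter is the smallest useful tea.Model: the same
// Init/Update/View triple the storage `model` above implements.
type counter int

func (c counter) Init() tea.Cmd { return nil }

func (c counter) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	// Key presses arrive as tea.KeyMsg, just like in the table model.
	if key, ok := msg.(tea.KeyMsg); ok {
		switch key.String() {
		case "q", "ctrl+c":
			return c, tea.Quit
		default:
			return c + 1, nil
		}
	}
	return c, nil
}

func (c counter) View() string {
	return fmt.Sprintf("%d keys pressed (q to quit)\n", int(c))
}

func main() {
	if _, err := tea.NewProgram(counter(0)).Run(); err != nil {
		fmt.Println("error:", err)
		os.Exit(1)
	}
}
```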
52
cmd/user.go
Normal file
52
cmd/user.go
Normal file
@ -0,0 +1,52 @@
package cmd

import (
	"crypto/tls"
	"fmt"
	"time"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/setting"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
)

func DelAdminCacheOnline() {
	admin, err := op.GetAdmin()
	if err != nil {
		utils.Log.Errorf("[del_admin_cache] get admin error: %+v", err)
		return
	}
	DelUserCacheOnline(admin.Username)
}

func DelUserCacheOnline(username string) {
	client := resty.New().SetTimeout(1 * time.Second).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
	token := setting.GetStr(conf.Token)
	port := conf.Conf.Scheme.HttpPort
	u := fmt.Sprintf("http://localhost:%d/api/admin/user/del_cache", port)
	if port == -1 {
		if conf.Conf.Scheme.HttpsPort == -1 {
			utils.Log.Warnf("[del_user_cache] no open port")
			return
		}
		u = fmt.Sprintf("https://localhost:%d/api/admin/user/del_cache", conf.Conf.Scheme.HttpsPort)
	}
	res, err := client.R().SetHeader("Authorization", token).SetQueryParam("username", username).Post(u)
	if err != nil {
		utils.Log.Warnf("[del_user_cache_online] failed: %+v", err)
		return
	}
	if res.StatusCode() != 200 {
		utils.Log.Warnf("[del_user_cache_online] failed: %+v", res.String())
		return
	}
	code := utils.Json.Get(res.Body(), "code").ToInt()
	msg := utils.Json.Get(res.Body(), "message").ToString()
	if code != 200 {
		utils.Log.Errorf("[del_user_cache_online] error: %s", msg)
		return
	}
	utils.Log.Debugf("[del_user_cache_online] del user [%s] cache success", username)
}
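Note: DelUserCacheOnline reaches the running alist instance over its own localhost admin API, so offline edits take effect without a restart. A minimal sketch of a call site, assuming it lives in the same cmd package (afterPasswordChange is a hypothetical name, not part of this commit):

// afterPasswordChange is a hypothetical caller: once a user has been
// mutated offline (e.g. by a cmd subcommand), ask the running server to
// drop its cached copy of that user.
func afterPasswordChange(username string) {
	DelUserCacheOnline(username) // POSTs localhost:<port>/api/admin/user/del_cache
}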
@@ -2,19 +2,22 @@ package _115

 import (
 	"context"
-	"os"
+	"strings"

 	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/pkg/errors"
+	"golang.org/x/time/rate"
 )

 type Pan115 struct {
 	model.Storage
 	Addition
 	client  *driver115.Pan115Client
+	limiter *rate.Limiter
 }

 func (d *Pan115) Config() driver.Config {
@@ -26,29 +29,42 @@ func (d *Pan115) GetAddition() driver.Additional {
 }

 func (d *Pan115) Init(ctx context.Context) error {
+	if d.LimitRate > 0 {
+		d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
+	}
 	return d.login()
 }

+func (d *Pan115) WaitLimit(ctx context.Context) error {
+	if d.limiter != nil {
+		return d.limiter.Wait(ctx)
+	}
+	return nil
+}
+
 func (d *Pan115) Drop(ctx context.Context) error {
 	return nil
 }

 func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	if err := d.WaitLimit(ctx); err != nil {
+		return nil, err
+	}
 	files, err := d.getFiles(dir.GetID())
 	if err != nil && !errors.Is(err, driver115.ErrNotExist) {
 		return nil, err
 	}
-	return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
-		return src, nil
+	return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
+		return &src, nil
 	})
 }

 func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	if err := d.WaitLimit(ctx); err != nil {
+		return nil, err
+	}
 	downloadInfo, err := d.client.
-		SetUserAgent(driver115.UA115Browser).
-		Download(file.(driver115.File).PickCode)
-	// recover for upload
-	d.client.SetUserAgent(driver115.UA115Desktop)
+		DownloadWithUA(file.(*FileObj).PickCode, driver115.UA115Browser)
 	if err != nil {
 		return nil, err
 	}
@@ -60,6 +76,9 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }

 func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	if err := d.WaitLimit(ctx); err != nil {
+		return err
+	}
 	if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
 		return err
 	}
@@ -67,31 +86,99 @@ func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 }

 func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	if err := d.WaitLimit(ctx); err != nil {
+		return err
+	}
 	return d.client.Move(dstDir.GetID(), srcObj.GetID())
 }

 func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	if err := d.WaitLimit(ctx); err != nil {
+		return err
+	}
 	return d.client.Rename(srcObj.GetID(), newName)
 }

 func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	if err := d.WaitLimit(ctx); err != nil {
+		return err
+	}
 	return d.client.Copy(dstDir.GetID(), srcObj.GetID())
 }

 func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
+	if err := d.WaitLimit(ctx); err != nil {
+		return err
+	}
 	return d.client.Delete(obj.GetID())
 }

 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	if err := d.WaitLimit(ctx); err != nil {
+		return err
+	}
+
+	var (
+		fastInfo *driver115.UploadInitResp
+		dirID    = dstDir.GetID()
+	)
+
+	if ok, err := d.client.UploadAvailable(); err != nil || !ok {
+		return err
+	}
+	if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
+		return driver115.ErrUploadTooLarge
+	}
+	//if digest, err = d.client.GetDigestResult(stream); err != nil {
+	//	return err
+	//}
+
+	const PreHashSize int64 = 128 * utils.KB
+	hashSize := PreHashSize
+	if stream.GetSize() < PreHashSize {
+		hashSize = stream.GetSize()
+	}
+	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
 	if err != nil {
 		return err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
+	preHash, err := utils.HashReader(utils.SHA1, reader)
+	if err != nil {
+		return err
+	}
+	preHash = strings.ToUpper(preHash)
+	fullHash := stream.GetHash().GetHash(utils.SHA1)
+	if len(fullHash) <= 0 {
+		tmpF, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		fullHash, err = utils.HashFile(utils.SHA1, tmpF)
+		if err != nil {
+			return err
+		}
+	}
+	fullHash = strings.ToUpper(fullHash)
+
+	// rapid-upload
+	// note that 115 adds a timeout for rapid-upload,
+	// and a "sig invalid" err is thrown even when the hash is correct after the timeout.
+	if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
+		return err
+	}
+	if matched, err := fastInfo.Ok(); err != nil {
+		return err
+	} else if matched {
+		return nil
+	}
+
+	// rapid upload failed, fall back to a real upload
+	if stream.GetSize() <= utils.KB { // files no larger than 1KB are uploaded in normal mode
+		return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+	}
+	// multipart upload
+	return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
 }

 var _ driver.Driver = (*Pan115)(nil)
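Note: the limiter wired up in Init gates every driver call through WaitLimit. A minimal standalone sketch of the golang.org/x/time/rate semantics (the default limit_rate of 2 becomes rate.Limit(2), i.e. up to two permits per second with a burst of 1; all other names here are illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Mirrors rate.NewLimiter(rate.Limit(d.LimitRate), 1) from Init.
	limiter := rate.NewLimiter(rate.Limit(2), 1)
	start := time.Now()
	for i := 0; i < 5; i++ {
		_ = limiter.Wait(context.Background()) // what WaitLimit does before each API call
		fmt.Printf("call %d released at ~%v\n", i, time.Since(start).Round(100*time.Millisecond))
	}
}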
@@ -6,17 +6,18 @@ import (
 )

 type Addition struct {
 	Cookie      string  `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
 	QRCodeToken string  `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
 	PageSize    int64   `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
+	LimitRate   float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
 	driver.RootID
 }

 var config = driver.Config{
 	Name:              "115 Cloud",
 	DefaultRoot:       "0",
 	OnlyProxy:         true,
-	OnlyLocal:         true,
+	//OnlyLocal:         true,
 	NoOverwriteUpload: true,
 }
@@ -3,6 +3,20 @@ package _115
 import (
 	"github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"time"
 )

-var _ model.Obj = (*driver.File)(nil)
+var _ model.Obj = (*FileObj)(nil)
+
+type FileObj struct {
+	driver.File
+}
+
+func (f *FileObj) CreateTime() time.Time {
+	return f.File.CreateTime
+}
+
+func (f *FileObj) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.SHA1, f.Sha1)
+}
@@ -1,10 +1,26 @@
 package _115

 import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/aliyun/aliyun-oss-go-sdk/oss"
+	"github.com/orzogc/fake115uploader/cipher"
+	"io"
+	"net/url"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"

 	"github.com/SheltonZhu/115driver/pkg/driver"
-	"github.com/alist-org/alist/v3/drivers/base"
+	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/pkg/errors"
 )

@@ -14,9 +30,11 @@ func (d *Pan115) login() error {
 	var err error
 	opts := []driver.Option{
 		driver.UA(UserAgent),
+		func(c *driver.Pan115Client) {
+			c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
+		},
 	}
 	d.client = driver.New(opts...)
-	d.client.SetHttpClient(base.HttpClient)
 	cr := &driver.Credential{}
 	if d.Addition.QRCodeToken != "" {
 		s := &driver.QRCodeSession{
@@ -38,8 +56,8 @@ func (d *Pan115) login() error {
 	return d.client.LoginCheck()
 }

-func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
-	res := make([]driver.File, 0)
+func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
+	res := make([]FileObj, 0)
 	if d.PageSize <= 0 {
 		d.PageSize = driver.FileListLimit
 	}
@@ -48,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
 		return nil, err
 	}
 	for _, file := range *files {
-		res = append(res, file)
+		res = append(res, FileObj{file})
 	}
 	return res, nil
 }
+
+const (
+	appVer = "2.0.3.6"
+)
+
+func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
+	var (
+		ecdhCipher   *cipher.EcdhCipher
+		encrypted    []byte
+		decrypted    []byte
+		encodedToken string
+		err          error
+		target       = "U_1_" + dirID
+		bodyBytes    []byte
+		result       = driver115.UploadInitResp{}
+		fileSizeStr  = strconv.FormatInt(fileSize, 10)
+	)
+	if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
+		return nil, err
+	}
+
+	userID := strconv.FormatInt(d.client.UserID, 10)
+	form := url.Values{}
+	form.Set("appid", "0")
+	form.Set("appversion", appVer)
+	form.Set("userid", userID)
+	form.Set("filename", fileName)
+	form.Set("filesize", fileSizeStr)
+	form.Set("fileid", fileID)
+	form.Set("target", target)
+	form.Set("sig", d.client.GenerateSignature(fileID, target))
+
+	signKey, signVal := "", ""
+	for retry := true; retry; {
+		t := driver115.Now()
+
+		if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
+			return nil, err
+		}
+
+		params := map[string]string{
+			"k_ec": encodedToken,
+		}
+
+		form.Set("t", t.String())
+		form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
+		if signKey != "" && signVal != "" {
+			form.Set("sign_key", signKey)
+			form.Set("sign_val", signVal)
+		}
+		if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
+			return nil, err
+		}
+
+		req := d.client.NewRequest().
+			SetQueryParams(params).
+			SetBody(encrypted).
+			SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
+			SetDoNotParseResponse(true)
+		resp, err := req.Post(driver115.ApiUploadInit)
+		if err != nil {
+			return nil, err
+		}
+		data := resp.RawBody()
+		defer data.Close()
+		if bodyBytes, err = io.ReadAll(data); err != nil {
+			return nil, err
+		}
+		if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
+			return nil, err
+		}
+		if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
+			return nil, err
+		}
+		if result.Status == 7 {
+			// Update signKey & signVal
+			signKey = result.SignKey
+			signVal, err = UploadDigestRange(stream, result.SignCheck)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			retry = false
+		}
+		result.SHA1 = fileID
+	}
+
+	return &result, nil
+}
+
+func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
+	var start, end int64
+	if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
+		return
+	}
+
+	length := end - start + 1
+	reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
+	hashStr, err := utils.HashReader(utils.SHA1, reader)
+	if err != nil {
+		return "", err
+	}
+	result = strings.ToUpper(hashStr)
+	return
+}
+
+// UploadByMultipart uploads by multipart blocks
+func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
+	var (
+		chunks    []oss.FileChunk
+		parts     []oss.UploadPart
+		imur      oss.InitiateMultipartUploadResult
+		ossClient *oss.Client
+		bucket    *oss.Bucket
+		ossToken  *driver115.UploadOSSTokenResp
+		err       error
+	)
+
+	tmpF, err := stream.CacheFullInTempFile()
+	if err != nil {
+		return err
+	}
+
+	options := driver115.DefalutUploadMultipartOptions()
+	if len(opts) > 0 {
+		for _, f := range opts {
+			f(options)
+		}
+	}
+
+	if ossToken, err = d.client.GetOSSToken(); err != nil {
+		return err
+	}
+
+	if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
+		return err
+	}
+
+	if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
+		return err
+	}
+
+	// the ossToken expires after an hour, so refresh it every 50 minutes
+	ticker := time.NewTicker(options.TokenRefreshTime)
+	defer ticker.Stop()
+	// set the overall timeout
+	timeout := time.NewTimer(options.Timeout)
+
+	if chunks, err = SplitFile(fileSize); err != nil {
+		return err
+	}
+
+	if imur, err = bucket.InitiateMultipartUpload(params.Object,
+		oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
+		oss.UserAgentHeader(driver115.OSSUserAgent),
+	); err != nil {
+		return err
+	}
+
+	wg := sync.WaitGroup{}
+	wg.Add(len(chunks))
+
+	chunksCh := make(chan oss.FileChunk)
+	errCh := make(chan error)
+	UploadedPartsCh := make(chan oss.UploadPart)
+	quit := make(chan struct{})
+
+	// producer
+	go chunksProducer(chunksCh, chunks)
+	go func() {
+		wg.Wait()
+		quit <- struct{}{}
+	}()
+
+	// consumers
+	for i := 0; i < options.ThreadsNum; i++ {
+		go func(threadId int) {
+			defer func() {
+				if r := recover(); r != nil {
+					errCh <- fmt.Errorf("Recovered in %v", r)
+				}
+			}()
+			for chunk := range chunksCh {
+				var part oss.UploadPart // keep retrying on error, 3 attempts in total
+				for retry := 0; retry < 3; retry++ {
+					select {
+					case <-ticker.C:
+						if ossToken, err = d.client.GetOSSToken(); err != nil { // timer fired, refresh the ossToken
+							errCh <- errors.Wrap(err, "刷新token时出现错误")
+						}
+					default:
+					}
+
+					buf := make([]byte, chunk.Size)
+					if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
+						continue
+					}
+
+					b := bytes.NewBuffer(buf)
+					if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+						break
+					}
+				}
+				if err != nil {
+					errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+				}
+				UploadedPartsCh <- part
+			}
+		}(i)
+	}
+
+	go func() {
+		for part := range UploadedPartsCh {
+			parts = append(parts, part)
+			wg.Done()
+		}
+	}()
+LOOP:
+	for {
+		select {
+		case <-ticker.C:
+			// timer fired, refresh the ossToken
+			if ossToken, err = d.client.GetOSSToken(); err != nil {
+				return err
+			}
+		case <-quit:
+			break LOOP
+		case <-errCh:
+			return err
+		case <-timeout.C:
+			return fmt.Errorf("time out")
+		}
+	}

+	// the EOF error comes from xml Unmarshal; the response is actually json, so the upload in fact succeeded
+	if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
+		// when the file name contains either of the characters & or <, parsing the xml response fails, but the upload actually succeeded
+		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+			return err
+		}
+	}
+	return d.checkUploadStatus(dirID, params.SHA1)
+}
+func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
+	for _, chunk := range chunks {
+		ch <- chunk
+	}
+}
+func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
+	// verify that the upload succeeded
+	req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
+	opts := []driver115.GetFileOptions{
+		driver115.WithOrder(driver115.FileOrderByTime),
+		driver115.WithShowDirEnable(false),
+		driver115.WithAsc(false),
+		driver115.WithLimit(500),
+	}
+	fResp, err := driver115.GetFiles(req, dirID, opts...)
+	if err != nil {
+		return err
+	}
+	for _, fileInfo := range fResp.Files {
+		if fileInfo.Sha1 == sha1 {
+			return nil
+		}
+	}
+	return driver115.ErrUploadFailed
+}
+
+func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
+	for i := int64(1); i < 10; i++ {
+		if fileSize < i*utils.GB { // a file smaller than i GB is split into i*1000 parts
+			if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
+				return
+			}
+			break
+		}
+	}
+	if fileSize > 9*utils.GB { // a file larger than 9 GB is split into 10000 parts
+		if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
+			return
+		}
+	}
+	// a single part must not be smaller than 100 KB
+	if chunks[0].Size < 100*utils.KB {
+		if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// SplitFileByPartNum splits a big file into parts by the number of parts.
+// Splits the file with the specified parts count, returns the split result when error is nil.
+func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
+	if chunkNum <= 0 || chunkNum > 10000 {
+		return nil, errors.New("chunkNum invalid")
+	}
+
+	if int64(chunkNum) > fileSize {
+		return nil, errors.New("oss: chunkNum invalid")
+	}
+
+	var chunks []oss.FileChunk
+	var chunk = oss.FileChunk{}
+	var chunkN = (int64)(chunkNum)
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * (fileSize / chunkN)
+		if i == chunkN-1 {
+			chunk.Size = fileSize/chunkN + fileSize%chunkN
+		} else {
+			chunk.Size = fileSize / chunkN
+		}
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
+
+// SplitFileByPartSize splits a big file into parts by the size of parts.
+// Splits the file by the part size. Returns the FileChunk when error is nil.
+func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
+	if chunkSize <= 0 {
+		return nil, errors.New("chunkSize invalid")
+	}
+
+	var chunkN = fileSize / chunkSize
+	if chunkN >= 10000 {
+		return nil, errors.New("Too many parts, please increase part size")
+	}
+
+	var chunks []oss.FileChunk
+	var chunk = oss.FileChunk{}
+	for i := int64(0); i < chunkN; i++ {
+		chunk.Number = int(i + 1)
+		chunk.Offset = i * chunkSize
+		chunk.Size = chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	if fileSize%chunkSize > 0 {
+		chunk.Number = len(chunks) + 1
+		chunk.Offset = int64(len(chunks)) * chunkSize
+		chunk.Size = fileSize % chunkSize
+		chunks = append(chunks, chunk)
+	}
+
+	return chunks, nil
+}
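Note: to make the SplitFile thresholds concrete, a small self-contained sketch that re-derives only the part-count rule (KB/GB stand in for the utils constants; the real SplitFile additionally re-splits by a 100 KB minimum part size when parts would come out too small):

package main

import "fmt"

const (
	KB int64 = 1 << 10
	GB int64 = 1 << 30
)

// partCount mirrors the loop in SplitFile: a file under i GB (i = 1..9)
// is cut into i*1000 parts, anything over 9 GB into 10000 parts.
func partCount(fileSize int64) int {
	for i := int64(1); i < 10; i++ {
		if fileSize < i*GB {
			return int(i * 1000)
		}
	}
	return 10000
}

func main() {
	for _, size := range []int64{512 * KB, 2*GB + GB/2, 20 * GB} {
		n := partCount(size)
		// 512 KiB -> 1000 parts of ~524 B (below 100 KB, so SplitFile would
		// re-split by part size); 2.5 GiB -> 3000 parts; 20 GiB -> 10000 parts.
		fmt.Printf("%d bytes -> %d parts of ~%d bytes\n", size, n, size/int64(n))
	}
}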
@@ -1,18 +1,11 @@
 package _123

 import (
-	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/base64"
-	"encoding/binary"
 	"encoding/hex"
 	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"os"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -24,6 +17,9 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"
+	"io"
+	"net/http"
+	"net/url"
 )

 type Pan123 struct {
@@ -45,6 +41,9 @@ func (d *Pan123) Init(ctx context.Context) error {
 }

 func (d *Pan123) Drop(ctx context.Context) error {
+	_, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) {
+		req.SetBody(base.Json{})
+	}, nil)
 	return nil
 }

@@ -109,7 +108,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 	log.Debugln("res code: ", res.StatusCode())
 	if res.StatusCode() == 302 {
 		link.URL = res.Header().Get("location")
-	} else if res.StatusCode() == 200 {
+	} else if res.StatusCode() < 300 {
 		link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
 	}
 	link.Header = http.Header{
@@ -180,40 +179,22 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	const DEFAULT int64 = 10485760
-	var uploadFile io.Reader
+	// const DEFAULT int64 = 10485760
+
 	h := md5.New()
-	if d.StreamUpload && stream.GetSize() > DEFAULT {
-		// only hash the first 10 MiB
-		buf := bytes.NewBuffer(make([]byte, 0, DEFAULT))
-		if n, err := io.CopyN(io.MultiWriter(buf, h), stream, DEFAULT); err != io.EOF && n == 0 {
-			return err
-		}
-		// mix extra parameters into the hash to avoid MD5 collisions
-		h.Write([]byte(stream.GetName()))
-		num := make([]byte, 8)
-		binary.BigEndian.PutUint64(num, uint64(stream.GetSize()))
-		h.Write(num)
-		// stitch the buffered prefix back onto the stream
-		uploadFile = io.MultiReader(buf, stream)
-	} else {
-		// hash the complete file
-		tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
-		if err != nil {
-			return err
-		}
-		defer func() {
-			_ = tempFile.Close()
-			_ = os.Remove(tempFile.Name())
-		}()
-		if _, err = io.Copy(h, tempFile); err != nil {
-			return err
-		}
-		_, err = tempFile.Seek(0, io.SeekStart)
-		if err != nil {
-			return err
-		}
-		uploadFile = tempFile
-	}
+	// need to calculate md5 of the full content
+	tempFile, err := stream.CacheFullInTempFile()
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = tempFile.Close()
+	}()
+	if _, err = io.Copy(h, tempFile); err != nil {
+		return err
+	}
+	_, err = tempFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
 	etag := hex.EncodeToString(h.Sum(nil))
 	data := base.Json{
@@ -237,7 +218,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		return nil
 	}
 	if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
-		err = d.newUpload(ctx, &resp, stream, uploadFile, up)
+		err = d.newUpload(ctx, &resp, stream, tempFile, up)
 		return err
 	} else {
 		cfg := &aws.Config{
@@ -254,7 +235,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		input := &s3manager.UploadInput{
 			Bucket: &resp.Data.Bucket,
 			Key:    &resp.Data.Key,
-			Body:   uploadFile,
+			Body:   tempFile,
 		}
 		_, err = uploader.UploadWithContext(ctx, input)
 	}
@@ -11,7 +11,6 @@ type Addition struct {
 	driver.RootID
 	OrderBy        string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	StreamUpload   bool   `json:"stream_upload"`
 	AccessToken    string
 }
@@ -1,7 +1,11 @@
 package _123

 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"net/url"
+	"path"
 	"strconv"
+	"strings"
 	"time"

 	"github.com/alist-org/alist/v3/internal/model"
@@ -18,6 +22,14 @@ type File struct {
 	DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) CreateTime() time.Time {
+	return f.UpdateAt
+}
+
+func (f File) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
 	return ""
 }
@@ -42,7 +54,30 @@ func (f File) GetID() string {
 	return strconv.FormatInt(f.FileId, 10)
 }

+func (f File) Thumb() string {
+	if f.DownloadUrl == "" {
+		return ""
+	}
+	du, err := url.Parse(f.DownloadUrl)
+	if err != nil {
+		return ""
+	}
+	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
+	query := du.Query()
+	query.Set("w", "70")
+	query.Set("h", "70")
+	if !query.Has("type") {
+		query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
+	}
+	if !query.Has("trade_key") {
+		query.Set("trade_key", "123pan-thumbnail")
+	}
+	du.RawQuery = query.Encode()
+	return du.String()
+}
+
 var _ model.Obj = (*File)(nil)
+var _ model.Thumb = (*File)(nil)

 //func (f File) Thumb() string {
 //
@@ -34,6 +34,25 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star
 	return &s3PreSignedUrls, nil
 }

+func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
+	data := base.Json{
+		"StorageNode":     upReq.Data.StorageNode,
+		"bucket":          upReq.Data.Bucket,
+		"key":             upReq.Data.Key,
+		"partNumberEnd":   end,
+		"partNumberStart": start,
+		"uploadId":        upReq.Data.UploadId,
+	}
+	var s3PreSignedUrls S3PreSignedURLs
+	_, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) {
+		req.SetBody(data).SetContext(ctx)
+	}, &s3PreSignedUrls)
+	if err != nil {
+		return nil, err
+	}
+	return &s3PreSignedUrls, nil
+}
+
 func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
 	data := base.Json{
 		"StorageNode": upReq.Data.StorageNode,
@@ -51,11 +70,17 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 }

 func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
-	chunkSize := int64(1024 * 1024 * 5)
+	chunkSize := int64(1024 * 1024 * 16)
 	// fetch s3 pre signed urls
 	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
-	// upload 10 chunks each batch
-	batchSize := 10
+	// only 1 batch is allowed
+	isMultipart := chunkCount > 1
+	batchSize := 1
+	getS3UploadUrl := d.getS3Auth
+	if isMultipart {
+		batchSize = 10
+		getS3UploadUrl = d.getS3PreSignedUrls
+	}
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -65,7 +90,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		if end > chunkCount+1 {
 			end = chunkCount + 1
 		}
-		s3PreSignedUrls, err := d.getS3PreSignedUrls(ctx, upReq, start, end)
+		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
 		if err != nil {
 			return err
 		}
@@ -78,18 +103,18 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if j == chunkCount {
 				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 			}
-			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false)
+			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
 			if err != nil {
 				return err
 			}
-			up(j * 100 / chunkCount)
+			up(float64(j) * 100 / float64(chunkCount))
 		}
 	}
 	// complete s3 upload
 	return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }

-func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool) error {
+func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
 	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
 	if uploadUrl == "" {
 		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
@@ -111,13 +136,13 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
 		return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
 	}
 	// refresh s3 pre signed urls
-	newS3PreSignedUrls, err := d.getS3PreSignedUrls(ctx, upReq, cur, end)
+	newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
 	if err != nil {
 		return err
 	}
 	s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
 	// retry
-	return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true)
+	return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
 	}
 	if res.StatusCode != http.StatusOK {
 		body, err := io.ReadAll(res.Body)
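Note: a quick sketch of the chunk math in newUpload under the new 16 MiB chunk size (sizes illustrative): a 100 MiB stream needs ceil(100/16) = 7 chunks, so it takes the multipart path and fetches pre-signed URLs in batches of up to 10 via getS3PreSignedUrls, while a 10 MiB stream is a single chunk and goes through getS3Auth instead:

package main

import (
	"fmt"
	"math"
)

func main() {
	const chunkSize = int64(1024 * 1024 * 16) // as in newUpload
	for _, fileSize := range []int64{10 << 20, 100 << 20} {
		chunkCount := int(math.Ceil(float64(fileSize) / float64(chunkSize)))
		fmt.Printf("%d MiB -> %d chunk(s), multipart=%v\n",
			fileSize>>20, chunkCount, chunkCount > 1)
	}
}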
@@ -15,10 +15,12 @@ import (
 // do others not defined in the Driver interface

 const (
+	Api              = "https://www.123pan.com/api"
 	AApi             = "https://www.123pan.com/a/api"
 	BApi             = "https://www.123pan.com/b/api"
-	MainApi          = AApi
+	MainApi          = Api
 	SignIn           = MainApi + "/user/sign_in"
+	Logout           = MainApi + "/user/logout"
 	UserInfo         = MainApi + "/user/info"
 	FileList         = MainApi + "/file/list/new"
 	DownloadInfo     = MainApi + "/file/download_info"
@@ -32,6 +34,7 @@ const (
 	S3Auth           = MainApi + "/file/s3_upload_object/auth"
 	UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
 	S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
+	//AuthKeySalt      = "8-8D$sL8gPjom7bk#cY"
 )

 func (d *Pan123) login() error {
@@ -50,6 +53,14 @@ func (d *Pan123) login() error {
 		}
 	}
 	res, err := base.RestyClient.R().
+		SetHeaders(map[string]string{
+			"origin":      "https://www.123pan.com",
+			"referer":     "https://www.123pan.com/",
+			"user-agent":  "Dart/2.19(dart:io)",
+			"platform":    "android",
+			"app-version": "36",
+			//"user-agent":  base.UserAgent,
+		}).
 		SetBody(body).Post(SignIn)
 	if err != nil {
 		return err
@@ -62,14 +73,30 @@ func (d *Pan123) login() error {
 	return err
 }

+//func authKey(reqUrl string) (*string, error) {
+//	reqURL, err := url.Parse(reqUrl)
+//	if err != nil {
+//		return nil, err
+//	}
+//
+//	nowUnix := time.Now().Unix()
+//	random := rand.Intn(0x989680)
+//
+//	p4 := fmt.Sprintf("%d|%d|%s|%s|%s|%s", nowUnix, random, reqURL.Path, "web", "3", AuthKeySalt)
+//	authKey := fmt.Sprintf("%d-%d-%x", nowUnix, random, md5.Sum([]byte(p4)))
+//	return &authKey, nil
+//}
+
 func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"origin":        "https://www.123pan.com",
 		"referer":       "https://www.123pan.com/",
 		"authorization": "Bearer " + d.AccessToken,
-		"platform":      "web",
-		"app-version":   "1.2",
+		"user-agent":    "Dart/2.19(dart:io)",
+		"platform":      "android",
+		"app-version":   "36",
+		//"user-agent":    base.UserAgent,
 	})
 	if callback != nil {
 		callback(req)
@@ -77,6 +104,11 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r
 	if resp != nil {
 		req.SetResult(resp)
 	}
+	//authKey, err := authKey(url)
+	//if err != nil {
+	//	return nil, err
+	//}
+	//req.SetQueryParam("auth-key", *authKey)
 	res, err := req.Execute(method, url)
 	if err != nil {
 		return nil, err
drivers/123_link/driver.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package _123Link

import (
	"context"
	stdpath "path"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
)

type Pan123Link struct {
	model.Storage
	Addition
	root *Node
}

func (d *Pan123Link) Config() driver.Config {
	return config
}

func (d *Pan123Link) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Pan123Link) Init(ctx context.Context) error {
	node, err := BuildTree(d.OriginURLs)
	if err != nil {
		return err
	}
	node.calSize()
	d.root = node
	return nil
}

func (d *Pan123Link) Drop(ctx context.Context) error {
	return nil
}

func (d *Pan123Link) Get(ctx context.Context, path string) (model.Obj, error) {
	node := GetNodeFromRootByPath(d.root, path)
	return nodeToObj(node, path)
}

func (d *Pan123Link) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	node := GetNodeFromRootByPath(d.root, dir.GetPath())
	if node == nil {
		return nil, errs.ObjectNotFound
	}
	if node.isFile() {
		return nil, errs.NotFolder
	}
	return utils.SliceConvert(node.Children, func(node *Node) (model.Obj, error) {
		return nodeToObj(node, stdpath.Join(dir.GetPath(), node.Name))
	})
}

func (d *Pan123Link) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	node := GetNodeFromRootByPath(d.root, file.GetPath())
	if node == nil {
		return nil, errs.ObjectNotFound
	}
	if node.isFile() {
		signUrl, err := SignURL(node.Url, d.PrivateKey, d.UID, time.Duration(d.ValidDuration)*time.Minute)
		if err != nil {
			return nil, err
		}
		return &model.Link{
			URL: signUrl,
		}, nil
	}
	return nil, errs.NotFile
}

var _ driver.Driver = (*Pan123Link)(nil)
drivers/123_link/meta.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package _123Link

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	OriginURLs    string `json:"origin_urls" type:"text" required:"true" default:"https://vip.123pan.com/29/folder/file.mp3" help:"structure:FolderName:\n [FileSize:][Modified:]Url"`
	PrivateKey    string `json:"private_key"`
	UID           uint64 `json:"uid" type:"number"`
	ValidDuration int64  `json:"valid_duration" type:"number" default:"30" help:"minutes"`
}

var config = driver.Config{
	Name: "123PanLink",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Pan123Link{}
	})
}
drivers/123_link/parse.go (new file, 152 lines)
@@ -0,0 +1,152 @@
package _123Link

import (
	"fmt"
	url2 "net/url"
	stdpath "path"
	"strconv"
	"strings"
	"time"
)

// build tree from text, text structure definition:
/**
 * FolderName:
 *   [FileSize:][Modified:]Url
 */
/**
 * For example:
 * folder1:
 *   name1:url1
 *   url2
 *   folder2:
 *     url3
 *     url4
 *   url5
 * folder3:
 *   url6
 *   url7
 * url8
 */
// if there is no name, use the last segment of the url as the name
func BuildTree(text string) (*Node, error) {
	lines := strings.Split(text, "\n")
	var root = &Node{Level: -1, Name: "root"}
	stack := []*Node{root}
	for _, line := range lines {
		// calculate indent
		indent := 0
		for i := 0; i < len(line); i++ {
			if line[i] != ' ' {
				break
			}
			indent++
		}
		// if the indent is not a multiple of 2, it is an error
		if indent%2 != 0 {
			return nil, fmt.Errorf("the line '%s' is not a multiple of 2", line)
		}
		// calculate level
		level := indent / 2
		line = strings.TrimSpace(line[indent:])
		// if the line is empty, skip it
		if line == "" {
			continue
		}
		// if the level isn't greater than the level of the top of the stack,
		// it is not a child of the top of the stack
		for level <= stack[len(stack)-1].Level {
			// pop the top of the stack
			stack = stack[:len(stack)-1]
		}
		// if the line is a folder
		if isFolder(line) {
			// create a new node
			node := &Node{
				Level: level,
				Name:  strings.TrimSuffix(line, ":"),
			}
			// add the node to the top of the stack
			stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
			// push the node onto the stack
			stack = append(stack, node)
		} else {
			// the line is a file:
			// create a new node
			node, err := parseFileLine(line)
			if err != nil {
				return nil, err
			}
			node.Level = level
			// add the node to the top of the stack
			stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
		}
	}
	return root, nil
}

func isFolder(line string) bool {
	return strings.HasSuffix(line, ":")
}

// line definition:
// [FileSize:][Modified:]Url
func parseFileLine(line string) (*Node, error) {
	// if there is no url, it is an error
	if !strings.Contains(line, "http://") && !strings.Contains(line, "https://") {
		return nil, fmt.Errorf("invalid line: %s, because url is required for file", line)
	}
	index := strings.Index(line, "http://")
	if index == -1 {
		index = strings.Index(line, "https://")
	}
	url := line[index:]
	info := line[:index]
	node := &Node{
		Url: url,
	}
	name := stdpath.Base(url)
	unescape, err := url2.PathUnescape(name)
	if err == nil {
		name = unescape
	}
	node.Name = name
	if index > 0 {
		if !strings.HasSuffix(info, ":") {
			return nil, fmt.Errorf("invalid line: %s, because file info must end with ':'", line)
		}
		info = info[:len(info)-1]
		if info == "" {
			return nil, fmt.Errorf("invalid line: %s, because file name can't be empty", line)
		}
		infoParts := strings.Split(info, ":")
		size, err := strconv.ParseInt(infoParts[0], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid line: %s, because file size must be an integer", line)
		}
		node.Size = size
		if len(infoParts) > 1 {
			modified, err := strconv.ParseInt(infoParts[1], 10, 64)
			if err != nil {
				return nil, fmt.Errorf("invalid line: %s, because file modified must be an unix timestamp", line)
			}
			node.Modified = modified
		} else {
			node.Modified = time.Now().Unix()
		}
	}
	return node, nil
}

func splitPath(path string) []string {
	if path == "/" {
		return []string{"root"}
	}
	parts := strings.Split(path, "/")
	parts[0] = "root"
	return parts
}

func GetNodeFromRootByPath(root *Node, path string) *Node {
	return root.getByPath(splitPath(path))
}
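Note: a minimal usage sketch for BuildTree, written as a hypothetical in-package test (not part of this commit); the sample text follows the documented FolderName: / [FileSize:][Modified:]Url layout:

package _123Link

import "testing"

func TestBuildTreeSketch(t *testing.T) {
	text := "folder1:\n" +
		"  1024:1700000000:https://vip.123pan.com/29/file1.mp3\n" + // size + unix mtime + url
		"  https://vip.123pan.com/29/file2.mp3\n" // url only: name taken from the last segment
	root, err := BuildTree(text)
	if err != nil {
		t.Fatal(err)
	}
	root.calSize() // folder sizes become the sum of their children
	if got := len(root.Children[0].Children); got != 2 {
		t.Fatalf("want 2 files under folder1, got %d", got)
	}
}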
drivers/123_link/types.go (new file, 66 lines)
@@ -0,0 +1,66 @@
package _123Link

import (
	"time"

	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
)

// Node is a node in the folder tree
type Node struct {
	Url      string
	Name     string
	Level    int
	Modified int64
	Size     int64
	Children []*Node
}

func (node *Node) getByPath(paths []string) *Node {
	if len(paths) == 0 || node == nil {
		return nil
	}
	if node.Name != paths[0] {
		return nil
	}
	if len(paths) == 1 {
		return node
	}
	for _, child := range node.Children {
		tmp := child.getByPath(paths[1:])
		if tmp != nil {
			return tmp
		}
	}
	return nil
}

func (node *Node) isFile() bool {
	return node.Url != ""
}

func (node *Node) calSize() int64 {
	if node.isFile() {
		return node.Size
	}
	var size int64 = 0
	for _, child := range node.Children {
		size += child.calSize()
	}
	node.Size = size
	return size
}

func nodeToObj(node *Node, path string) (model.Obj, error) {
	if node == nil {
		return nil, errs.ObjectNotFound
	}
	return &model.Object{
		Name:     node.Name,
		Size:     node.Size,
		Modified: time.Unix(node.Modified, 0),
		IsFolder: !node.isFile(),
		Path:     path,
	}, nil
}
drivers/123_link/util.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package _123Link

import (
	"crypto/md5"
	"fmt"
	"math/rand"
	"net/url"
	"time"
)

func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
	if privateKey == "" {
		return originURL, nil
	}
	var (
		ts     = time.Now().Add(validDuration).Unix() // expiry timestamp
		rInt   = rand.Int()                           // random positive integer
		objURL *url.URL
	)
	objURL, err = url.Parse(originURL)
	if err != nil {
		return "", err
	}
	authKey := fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
		objURL.Path, ts, rInt, uid, privateKey))))
	v := objURL.Query()
	v.Add("auth_key", authKey)
	objURL.RawQuery = v.Encode()
	return objURL.String(), nil
}
drivers/123_share/driver.go (new file, 149 lines)
@@ -0,0 +1,149 @@
package _123Share

import (
	"context"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

type Pan123Share struct {
	model.Storage
	Addition
}

func (d *Pan123Share) Config() driver.Config {
	return config
}

func (d *Pan123Share) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Pan123Share) Init(ctx context.Context) error {
	// TODO login / refresh token
	//op.MustSaveDriverStorage(d)
	return nil
}

func (d *Pan123Share) Drop(ctx context.Context) error {
	return nil
}

func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	// TODO return the files list, required
	files, err := d.getFiles(dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return src, nil
	})
}

func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	// TODO return link of file, required
	if f, ok := file.(File); ok {
		//var resp DownResp
		var headers map[string]string
		if !utils.IsLocalIPAddr(args.IP) {
			headers = map[string]string{
				//"X-Real-IP": "1.1.1.1",
				"X-Forwarded-For": args.IP,
			}
		}
		data := base.Json{
			"shareKey":  d.ShareKey,
			"SharePwd":  d.SharePwd,
			"etag":      f.Etag,
			"fileId":    f.FileId,
			"s3keyFlag": f.S3KeyFlag,
			"size":      f.Size,
		}
		resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
			req.SetBody(data).SetHeaders(headers)
		}, nil)
		if err != nil {
			return nil, err
		}
		downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
		u, err := url.Parse(downloadUrl)
		if err != nil {
			return nil, err
		}
		nu := u.Query().Get("params")
		if nu != "" {
			du, _ := base64.StdEncoding.DecodeString(nu)
			u, err = url.Parse(string(du))
			if err != nil {
				return nil, err
			}
		}
		u_ := u.String()
		log.Debug("download url: ", u_)
		res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
		if err != nil {
			return nil, err
		}
		log.Debug(res.String())
		link := model.Link{
			URL: u_,
		}
		log.Debugln("res code: ", res.StatusCode())
		if res.StatusCode() == 302 {
			link.URL = res.Header().Get("location")
		} else if res.StatusCode() < 300 {
			link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
		}
		link.Header = http.Header{
			"Referer": []string{"https://www.123pan.com/"},
		}
		return &link, nil
	}
	return nil, fmt.Errorf("can't convert obj")
}

func (d *Pan123Share) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	// TODO create folder, optional
	return errs.NotSupport
}

func (d *Pan123Share) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO move obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	// TODO rename obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO copy obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Remove(ctx context.Context, obj model.Obj) error {
	// TODO remove obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	// TODO upload file, optional
	return errs.NotSupport
}

//func (d *Pan123Share) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Pan123Share)(nil)
34 drivers/123_share/meta.go Normal file
@@ -0,0 +1,34 @@
package _123Share

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	ShareKey string `json:"sharekey" required:"true"`
	SharePwd string `json:"sharepassword" required:"true"`
	driver.RootID
	OrderBy        string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
}

var config = driver.Config{
	Name:              "123PanShare",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          true,
	NeedMs:            false,
	DefaultRoot:       "0",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Pan123Share{}
	})
}
99 drivers/123_share/types.go Normal file
@@ -0,0 +1,99 @@
package _123Share

import (
	"github.com/alist-org/alist/v3/pkg/utils"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

type File struct {
	FileName    string    `json:"FileName"`
	Size        int64     `json:"Size"`
	UpdateAt    time.Time `json:"UpdateAt"`
	FileId      int64     `json:"FileId"`
	Type        int       `json:"Type"`
	Etag        string    `json:"Etag"`
	S3KeyFlag   string    `json:"S3KeyFlag"`
	DownloadUrl string    `json:"DownloadUrl"`
}

func (f File) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}

func (f File) GetPath() string {
	return ""
}

func (f File) GetSize() int64 {
	return f.Size
}

func (f File) GetName() string {
	return f.FileName
}

func (f File) ModTime() time.Time {
	return f.UpdateAt
}

func (f File) CreateTime() time.Time {
	return f.UpdateAt
}

func (f File) IsDir() bool {
	return f.Type == 1
}

func (f File) GetID() string {
	return strconv.FormatInt(f.FileId, 10)
}

func (f File) Thumb() string {
	if f.DownloadUrl == "" {
		return ""
	}
	du, err := url.Parse(f.DownloadUrl)
	if err != nil {
		return ""
	}
	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
	query := du.Query()
	query.Set("w", "70")
	query.Set("h", "70")
	if !query.Has("type") {
		query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
	}
	if !query.Has("trade_key") {
		query.Set("trade_key", "123pan-thumbnail")
	}
	du.RawQuery = query.Encode()
	return du.String()
}

var _ model.Obj = (*File)(nil)
var _ model.Thumb = (*File)(nil)

//func (f File) Thumb() string {
//
//}
//var _ model.Thumb = (*File)(nil)

type Files struct {
	//BaseResp
	Data struct {
		InfoList []File `json:"InfoList"`
		Next     string `json:"Next"`
	} `json:"data"`
}

//type DownResp struct {
//	//BaseResp
//	Data struct {
//		DownloadUrl string `json:"DownloadUrl"`
//	} `json:"data"`
//}
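Editor's note: the Thumb method above rewrites a 24x24 icon URL into a 70x70 one by patching the path suffix and query. A standalone sketch of that rewrite on a made-up URL:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// Hypothetical 24x24 thumbnail URL in the 123pan style.
	raw := "https://img.example.com/abc_24_24?s=1"
	du, err := url.Parse(raw)
	if err != nil {
		panic(err)
	}
	// Swap the size suffix in the path and force 70x70 dimensions in the query.
	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
	q := du.Query()
	q.Set("w", "70")
	q.Set("h", "70")
	du.RawQuery = q.Encode()
	fmt.Println(du.String()) // https://img.example.com/abc_70_70?h=70&s=1&w=70
}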
81 drivers/123_share/util.go Normal file
@@ -0,0 +1,81 @@
package _123Share

import (
	"errors"
	"net/http"
	"strconv"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	jsoniter "github.com/json-iterator/go"
)

const (
	Api          = "https://www.123pan.com/api"
	AApi         = "https://www.123pan.com/a/api"
	BApi         = "https://www.123pan.com/b/api"
	MainApi      = Api
	FileList     = MainApi + "/share/get"
	DownloadInfo = MainApi + "/share/download/info"
	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)

func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"origin":      "https://www.123pan.com",
		"referer":     "https://www.123pan.com/",
		"user-agent":  "Dart/2.19(dart:io)",
		"platform":    "android",
		"app-version": "36",
	})
	if callback != nil {
		callback(req)
	}
	if resp != nil {
		req.SetResult(resp)
	}
	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	body := res.Body()
	code := utils.Json.Get(body, "code").ToInt()
	if code != 0 {
		return nil, errors.New(jsoniter.Get(body, "message").ToString())
	}
	return body, nil
}

func (d *Pan123Share) getFiles(parentId string) ([]File, error) {
	page := 1
	res := make([]File, 0)
	for {
		var resp Files
		query := map[string]string{
			"limit":          "100",
			"next":           "0",
			"orderBy":        d.OrderBy,
			"orderDirection": d.OrderDirection,
			"parentFileId":   parentId,
			"Page":           strconv.Itoa(page),
			"shareKey":       d.ShareKey,
			"SharePwd":       d.SharePwd,
		}
		_, err := d.request(FileList, http.MethodGet, func(req *resty.Request) {
			req.SetQueryParams(query)
		}, &resp)
		if err != nil {
			return nil, err
		}
		page++
		res = append(res, resp.Data.InfoList...)
		if len(resp.Data.InfoList) == 0 || resp.Data.Next == "-1" {
			break
		}
	}
	return res, nil
}

// do others that are not defined in the Driver interface
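Editor's note: the request helper above is the usual resty pattern for JSON APIs that wrap errors in a code/message envelope. A minimal self-contained variant; the endpoint URL is hypothetical.

package main

import (
	"errors"
	"fmt"

	"github.com/go-resty/resty/v2"
	jsoniter "github.com/json-iterator/go"
)

// apiGet fetches url and unwraps a {"code":0,"message":...} style envelope.
func apiGet(client *resty.Client, url string) ([]byte, error) {
	res, err := client.R().Get(url)
	if err != nil {
		return nil, err
	}
	body := res.Body()
	if code := jsoniter.Get(body, "code").ToInt(); code != 0 {
		return nil, errors.New(jsoniter.Get(body, "message").ToString())
	}
	return body, nil
}

func main() {
	body, err := apiGet(resty.New(), "https://api.example.com/v1/ping") // hypothetical endpoint
	if err != nil {
		fmt.Println("api error:", err)
		return
	}
	fmt.Println(string(body))
}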
@@ -103,9 +103,9 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 	return err
 }
 
-func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	if d.isFamily() {
-		return errs.NotImplement
+		return nil, errs.NotImplement
 	}
 	var contentInfoList []string
 	var catalogInfoList []string
@@ -131,7 +131,10 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 	}
 	pathname := "/orchestration/personalCloud/batchOprTask/v1.0/createBatchOprTask"
 	_, err := d.post(pathname, data, nil)
-	return err
+	if err != nil {
+		return nil, err
+	}
+	return srcObj, nil
 }
 
 func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@@ -300,6 +303,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 
 	var partSize = getPartSize(stream.GetSize())
 	part := (stream.GetSize() + partSize - 1) / partSize
+	if part == 0 {
+		part = 1
+	}
 	for i := int64(0); i < part; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -331,13 +337,11 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		if err != nil {
 			return err
 		}
+		_ = res.Body.Close()
 		log.Debugf("%+v", res)
 
 		if res.StatusCode != http.StatusOK {
 			return fmt.Errorf("unexpected status code: %d", res.StatusCode)
 		}
-
-		res.Body.Close()
 	}
 
 	return nil
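Editor's note: the added part == 0 guard covers zero-size streams, where ceiling division yields zero parts but at least one (empty) part must still be sent. A tiny sketch of the arithmetic:

package main

import "fmt"

// partCount splits size bytes into chunks of partSize, always at least one part.
func partCount(size, partSize int64) int64 {
	part := (size + partSize - 1) / partSize // ceiling division
	if part == 0 {
		part = 1 // a zero-size upload still needs one empty part
	}
	return part
}

func main() {
	fmt.Println(partCount(0, 10*1024*1024))            // 1
	fmt.Println(partCount(25*1024*1024, 10*1024*1024)) // 3
}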
@@ -10,7 +10,7 @@ type Catalog struct {
 	CatalogID   string `json:"catalogID"`
 	CatalogName string `json:"catalogName"`
 	//CatalogType int `json:"catalogType"`
-	//CreateTime string `json:"createTime"`
+	CreateTime string `json:"createTime"`
 	UpdateTime string `json:"updateTime"`
 	//IsShared bool `json:"isShared"`
 	//CatalogLevel int `json:"catalogLevel"`
@@ -63,7 +63,7 @@ type Content struct {
 	//ParentCatalogID string `json:"parentCatalogId"`
 	//Channel string `json:"channel"`
 	//GeoLocFlag string `json:"geoLocFlag"`
-	//Digest string `json:"digest"`
+	Digest string `json:"digest"`
 	//Version string `json:"version"`
 	//FileEtag string `json:"fileEtag"`
 	//FileVersion string `json:"fileVersion"`
@@ -141,7 +141,7 @@ type CloudContent struct {
 	//ContentSuffix string `json:"contentSuffix"`
 	ContentSize int64 `json:"contentSize"`
 	//ContentDesc string `json:"contentDesc"`
-	//CreateTime string `json:"createTime"`
+	CreateTime string `json:"createTime"`
 	//Shottime interface{} `json:"shottime"`
 	LastUpdateTime string `json:"lastUpdateTime"`
 	ThumbnailURL   string `json:"thumbnailURL"`
@@ -165,7 +165,7 @@ type CloudCatalog struct {
 	CatalogID   string `json:"catalogID"`
 	CatalogName string `json:"catalogName"`
 	//CloudID string `json:"cloudID"`
-	//CreateTime string `json:"createTime"`
+	CreateTime string `json:"createTime"`
 	LastUpdateTime string `json:"lastUpdateTime"`
 	//Creator string `json:"creator"`
 	//CreatorNickname string `json:"creatorNickname"`
@@ -42,13 +42,13 @@ func calSign(body, ts, randStr string) string {
 	sort.Strings(strs)
 	body = strings.Join(strs, "")
 	body = base64.StdEncoding.EncodeToString([]byte(body))
-	res := utils.GetMD5Encode(body) + utils.GetMD5Encode(ts+":"+randStr)
-	res = strings.ToUpper(utils.GetMD5Encode(res))
+	res := utils.GetMD5EncodeStr(body) + utils.GetMD5EncodeStr(ts+":"+randStr)
+	res = strings.ToUpper(utils.GetMD5EncodeStr(res))
 	return res
 }
 
 func getTime(t string) time.Time {
-	stamp, _ := time.ParseInLocation("20060102150405", t, time.Local)
+	stamp, _ := time.ParseInLocation("20060102150405", t, utils.CNLoc)
 	return stamp
 }
 
@@ -139,6 +139,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
 			Name:     catalog.CatalogName,
 			Size:     0,
 			Modified: getTime(catalog.UpdateTime),
+			Ctime:    getTime(catalog.CreateTime),
 			IsFolder: true,
 		}
 		files = append(files, &f)
@@ -150,6 +151,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
 				Name:     content.ContentName,
 				Size:     content.ContentSize,
 				Modified: getTime(content.UpdateTime),
+				HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
 			},
 			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
 			//Thumbnail: content.BigthumbnailURL,
@@ -202,6 +204,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
 			Size:     0,
 			IsFolder: true,
 			Modified: getTime(catalog.LastUpdateTime),
+			Ctime:    getTime(catalog.CreateTime),
 		}
 		files = append(files, &f)
 	}
@@ -212,6 +215,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
 				Name:     content.ContentName,
 				Size:     content.ContentSize,
 				Modified: getTime(content.LastUpdateTime),
+				Ctime:    getTime(content.CreateTime),
 			},
 			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
 			//Thumbnail: content.BigthumbnailURL,
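Editor's note: the getTime change pins the "20060102150405" parse to a fixed CST location instead of time.Local, so results no longer depend on the host's timezone. A minimal sketch of the same idea; utils.CNLoc is alist's helper, and a FixedZone stands in for it here.

package main

import (
	"fmt"
	"time"
)

func main() {
	// CST (UTC+8), standing in for alist's utils.CNLoc.
	cnLoc := time.FixedZone("CST", 8*60*60)
	stamp, err := time.ParseInLocation("20060102150405", "20230801123000", cnLoc)
	if err != nil {
		panic(err)
	}
	fmt.Println(stamp) // 2023-08-01 12:30:00 +0800 CST, regardless of host TZ
}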
@@ -380,12 +380,12 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 		if err != nil {
 			return err
 		}
-		up(int(i * 100 / count))
+		up(float64(i) * 100 / float64(count))
 	}
 	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
 	sliceMd5 := fileMd5
 	if file.GetSize() > DEFAULT {
-		sliceMd5 = utils.GetMD5Encode(strings.Join(md5s, "\n"))
+		sliceMd5 = utils.GetMD5EncodeStr(strings.Join(md5s, "\n"))
 	}
 	res, err = d.uploadRequest("/person/commitMultiUploadFile", map[string]string{
 		"uploadFileId": uploadFileId,
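Editor's note: the progress callback now takes a float64 percentage. With the old int math, i*100/count truncated, so consecutive parts could report the same integer and the bar would stall. A quick illustration:

package main

import "fmt"

func main() {
	count := 7
	for i := 1; i <= count; i++ {
		asInt := int(i * 100 / count)                // old: truncated steps 14, 28, 42, ...
		asFloat := float64(i) * 100 / float64(count) // new: 14.29, 28.57, 42.86, ...
		fmt.Printf("part %d: int=%d%% float=%.2f%%\n", i, asInt, asFloat)
	}
}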
@@ -3,10 +3,13 @@ package _189pc
 import (
 	"context"
 	"net/http"
+	"strconv"
 	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
@@ -22,10 +25,17 @@ type Cloud189PC struct {
 
 	loginParam *LoginParam
 	tokenInfo  *AppSessionResp
+
+	uploadThread int
+
+	storageConfig driver.Config
 }
 
 func (y *Cloud189PC) Config() driver.Config {
-	return config
+	if y.storageConfig.Name == "" {
+		y.storageConfig = config
+	}
+	return y.storageConfig
 }
 
 func (y *Cloud189PC) GetAddition() driver.Additional {
@@ -33,6 +43,9 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
 }
 
 func (y *Cloud189PC) Init(ctx context.Context) (err error) {
+	// Compatibility with the old upload API
+	y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
+
 	// Handle personal-cloud and family-cloud parameters
 	if y.isFamily() && y.RootFolderID == "-11" {
 		y.RootFolderID = ""
@@ -42,6 +55,12 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
 		y.FamilyID = ""
 	}
 
+	// Clamp the upload thread count
+	y.uploadThread, _ = strconv.Atoi(y.UploadThread)
+	if y.uploadThread < 1 || y.uploadThread > 32 {
+		y.uploadThread, y.UploadThread = 3, "3"
+	}
+
 	// Initialize the request client
 	if y.client == nil {
 		y.client = base.NewRestyClient().SetHeaders(map[string]string{
@@ -51,7 +70,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
 	}
 
 	// Avoid repeated logins
-	identity := utils.GetMD5Encode(y.Username + y.Password)
+	identity := utils.GetMD5EncodeStr(y.Username + y.Password)
 	if !y.isLogin() || y.identity != identity {
 		y.identity = identity
 		if err = y.login(); err != nil {
@@ -107,10 +126,11 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
 
 	// Follow the redirect to get the real link
 	downloadUrl.URL = strings.Replace(strings.ReplaceAll(downloadUrl.URL, "&amp;", "&"), "http://", "https://", 1)
-	res, err := base.NoRedirectClient.R().SetContext(ctx).Get(downloadUrl.URL)
+	res, err := base.NoRedirectClient.R().SetContext(ctx).SetDoNotParseResponse(true).Get(downloadUrl.URL)
 	if err != nil {
 		return nil, err
 	}
+	defer res.RawBody().Close()
 	if res.StatusCode() == 302 {
 		downloadUrl.URL = res.Header().Get("location")
 	}
@@ -135,13 +155,14 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
 	return like, nil
 }
 
-func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
 	fullUrl := API_URL
 	if y.isFamily() {
 		fullUrl += "/family/file"
 	}
 	fullUrl += "/createFolder.action"
 
+	var newFolder Cloud189Folder
 	_, err := y.post(fullUrl, func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetQueryParams(map[string]string{
@@ -158,11 +179,15 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
 				"parentFolderId": parentDir.GetID(),
 			})
 		}
-	}, nil)
-	return err
+	}, &newFolder)
+	if err != nil {
+		return nil, err
+	}
+	return &newFolder, nil
 }
 
-func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	var resp CreateBatchTaskResp
 	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetFormData(map[string]string{
@@ -182,11 +207,17 @@ func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 				"familyId": y.FamilyID,
 			})
 		}
-	}, nil)
-	return err
+	}, &resp)
+	if err != nil {
+		return nil, err
+	}
+	if err = y.WaitBatchTask("MOVE", resp.TaskID, time.Millisecond*400); err != nil {
+		return nil, err
+	}
+	return srcObj, nil
 }
 
-func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
 	queryParam := make(map[string]string)
 	fullUrl := API_URL
 	method := http.MethodPost
@@ -195,23 +226,34 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
 		method = http.MethodGet
 		queryParam["familyId"] = y.FamilyID
 	}
-	if srcObj.IsDir() {
-		fullUrl += "/renameFolder.action"
-		queryParam["folderId"] = srcObj.GetID()
-		queryParam["destFolderName"] = newName
-	} else {
+
+	var newObj model.Obj
+	switch f := srcObj.(type) {
+	case *Cloud189File:
 		fullUrl += "/renameFile.action"
 		queryParam["fileId"] = srcObj.GetID()
 		queryParam["destFileName"] = newName
+		newObj = &Cloud189File{Icon: f.Icon} // reuse the preview icon
+	case *Cloud189Folder:
+		fullUrl += "/renameFolder.action"
+		queryParam["folderId"] = srcObj.GetID()
+		queryParam["destFolderName"] = newName
+		newObj = &Cloud189Folder{}
+	default:
+		return nil, errs.NotSupport
 	}
 
 	_, err := y.request(fullUrl, method, func(req *resty.Request) {
-		req.SetContext(ctx)
-		req.SetQueryParams(queryParam)
-	}, nil, nil)
-	return err
+		req.SetContext(ctx).SetQueryParams(queryParam)
+	}, nil, newObj)
+	if err != nil {
+		return nil, err
+	}
+	return newObj, nil
 }
 
 func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	var resp CreateBatchTaskResp
 	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetFormData(map[string]string{
@@ -232,11 +274,15 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 				"familyId": y.FamilyID,
 			})
 		}
-	}, nil)
-	return err
+	}, &resp)
+	if err != nil {
+		return err
+	}
+	return y.WaitBatchTask("COPY", resp.TaskID, time.Second)
 }
 
 func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
+	var resp CreateBatchTaskResp
 	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetFormData(map[string]string{
@@ -256,19 +302,33 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
 				"familyId": y.FamilyID,
 			})
 		}
-	}, nil)
-	return err
+	}, &resp)
+	if err != nil {
+		return err
+	}
+	// Batch tasks are rate-limited; issuing them too fast makes deletion fail
+	return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
 }
 
-func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	// Slow to respond, so only enabled on demand
+	if y.Addition.RapidUpload {
+		if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
+			return newObj, nil
+		}
+	}
+
 	switch y.UploadMethod {
-	case "stream":
-		return y.CommonUpload(ctx, dstDir, stream, up)
 	case "old":
 		return y.OldUpload(ctx, dstDir, stream, up)
 	case "rapid":
 		return y.FastUpload(ctx, dstDir, stream, up)
+	case "stream":
+		if stream.GetSize() == 0 {
+			return y.FastUpload(ctx, dstDir, stream, up)
+		}
+		fallthrough
 	default:
-		return y.CommonUpload(ctx, dstDir, stream, up)
+		return y.StreamUpload(ctx, dstDir, stream, up)
 	}
 }
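Editor's note: WaitBatchTask itself is not shown in this diff; the shape implied by BatchTaskStateResp (taskStatus 4 = done) is a simple poll loop. A hedged sketch under that assumption, with pollBatchTask and check as hypothetical stand-ins for the driver's actual request:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollBatchTask polls until the task reports taskStatus 4 (done, per
// BatchTaskStateResp's comment); check is a stand-in for the real status call.
func pollBatchTask(ctx context.Context, check func() (int, error), interval time.Duration) error {
	for {
		status, err := check()
		if err != nil {
			return err
		}
		switch status {
		case 4: // finished
			return nil
		case 2: // conflict
			return errors.New("batch task conflict")
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
		}
	}
}

func main() {
	i := 0
	err := pollBatchTask(context.Background(), func() (int, error) {
		i++
		if i >= 3 {
			return 4, nil // pretend the task finishes on the third poll
		}
		return 3, nil // still running
	}, 10*time.Millisecond)
	fmt.Println("done:", err)
}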
@@ -10,6 +10,7 @@ import (
 	"crypto/x509"
 	"encoding/hex"
 	"encoding/pem"
+	"encoding/xml"
 	"fmt"
 	"math"
 	"net/http"
@@ -83,6 +84,55 @@ func MustParseTime(str string) *time.Time {
 	return &lastOpTime
 }
 
+type Time time.Time
+
+func (t *Time) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
+func (t *Time) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
+	b, err := e.Token()
+	if err != nil {
+		return err
+	}
+	if b, ok := b.(xml.CharData); ok {
+		if err = t.Unmarshal(b); err != nil {
+			return err
+		}
+	}
+	return e.Skip()
+}
+func (t *Time) Unmarshal(b []byte) error {
+	bs := strings.Trim(string(b), "\"")
+	var v time.Time
+	var err error
+	for _, f := range []string{"2006-01-02 15:04:05 -07", "Jan 2, 2006 15:04:05 PM -07"} {
+		v, err = time.ParseInLocation(f, bs+" +08", time.Local)
+		if err == nil {
+			break
+		}
+	}
+	*t = Time(v)
+	return err
+}
+
+type String string
+
+func (t *String) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
+func (t *String) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
+	b, err := e.Token()
+	if err != nil {
+		return err
+	}
+	if b, ok := b.(xml.CharData); ok {
+		if err = t.Unmarshal(b); err != nil {
+			return err
+		}
+	}
+	return e.Skip()
+}
+func (s *String) Unmarshal(b []byte) error {
+	*s = String(bytes.Trim(b, "\""))
+	return nil
+}
+
 func toFamilyOrderBy(o string) string {
 	switch o {
 	case "filename":
@@ -110,9 +160,8 @@ func toDesc(o string) string {
 func ParseHttpHeader(str string) map[string]string {
 	header := make(map[string]string)
 	for _, value := range strings.Split(str, "&") {
-		i := strings.Index(value, "=")
-		if i > 0 {
-			header[strings.TrimSpace(value[0:i])] = strings.TrimSpace(value[i+1:])
+		if k, v, found := strings.Cut(value, "="); found {
+			header[k] = v
 		}
 	}
 	return header
@@ -122,10 +171,6 @@ func MustString(str string, err error) string {
 	return str
 }
 
-func MustToBytes(b []byte, err error) []byte {
-	return b
-}
-
 func BoolToNumber(b bool) int {
 	if b {
 		return 1
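Editor's note: the Time wrapper above lets one field type absorb both of the API's date formats, for JSON and XML alike. A self-contained sketch of the same dual-format trick; the formats are copied from the diff, while the struct and payload are made up.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

type Time time.Time

func (t *Time) UnmarshalJSON(b []byte) error {
	bs := strings.Trim(string(b), "\"")
	var v time.Time
	var err error
	// Try both formats the 189 API is known to emit, pinned to UTC+8.
	for _, f := range []string{"2006-01-02 15:04:05 -07", "Jan 2, 2006 15:04:05 PM -07"} {
		v, err = time.ParseInLocation(f, bs+" +08", time.Local)
		if err == nil {
			break
		}
	}
	*t = Time(v)
	return err
}

func main() {
	var out struct {
		CreateDate Time `json:"createDate"`
	}
	payload := `{"createDate":"2023-08-01 12:30:00"}` // hypothetical response body
	if err := json.Unmarshal([]byte(payload), &out); err != nil {
		panic(err)
	}
	fmt.Println(time.Time(out.CreateDate))
}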
@@ -15,6 +15,8 @@ type Addition struct {
 	Type         string `json:"type" type:"select" options:"personal,family" default:"personal"`
 	FamilyID     string `json:"family_id"`
 	UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
+	RapidUpload  bool   `json:"rapid_upload"`
 	NoUseOcr     bool   `json:"no_use_ocr"`
 }
 
@@ -3,6 +3,7 @@ package _189pc
 import (
 	"encoding/xml"
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"sort"
 	"strings"
 	"time"
@@ -151,8 +152,13 @@ type FamilyInfoResp struct {
 /* File section */
 // File
 type Cloud189File struct {
-	CreateDate string `json:"createDate"`
-	FileCata   int64  `json:"fileCata"`
+	ID   String `json:"id"`
+	Name string `json:"name"`
+	Size int64  `json:"size"`
+	Md5  string `json:"md5"`
+
+	LastOpTime Time `json:"lastOpTime"`
+	CreateDate Time `json:"createDate"`
 	Icon       struct {
 		//iconOption 5
 		SmallUrl string `json:"smallUrl"`
@@ -162,62 +168,60 @@ type Cloud189File struct {
 		Max600    string `json:"max600"`
 		MediumURL string `json:"mediumUrl"`
 	} `json:"icon"`
-	ID          int64  `json:"id"`
-	LastOpTime  string `json:"lastOpTime"`
-	Md5         string `json:"md5"`
-	MediaType   int    `json:"mediaType"`
-	Name        string `json:"name"`
-	Orientation int64  `json:"orientation"`
-	Rev         string `json:"rev"`
-	Size        int64  `json:"size"`
-	StarLabel   int64  `json:"starLabel"`
-
-	parseTime *time.Time
+
+	// Orientation int64  `json:"orientation"`
+	// FileCata    int64  `json:"fileCata"`
+	// MediaType   int    `json:"mediaType"`
+	// Rev         string `json:"rev"`
+	// StarLabel   int64  `json:"starLabel"`
 }
 
-func (c *Cloud189File) GetSize() int64  { return c.Size }
-func (c *Cloud189File) GetName() string { return c.Name }
-func (c *Cloud189File) ModTime() time.Time {
-	if c.parseTime == nil {
-		c.parseTime = MustParseTime(c.LastOpTime)
-	}
-	return *c.parseTime
+func (c *Cloud189File) CreateTime() time.Time {
+	return time.Time(c.CreateDate)
 }
-func (c *Cloud189File) IsDir() bool     { return false }
-func (c *Cloud189File) GetID() string   { return fmt.Sprint(c.ID) }
-func (c *Cloud189File) GetPath() string { return "" }
-func (c *Cloud189File) Thumb() string   { return c.Icon.SmallUrl }
+
+func (c *Cloud189File) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.MD5, c.Md5)
+}
+
+func (c *Cloud189File) GetSize() int64     { return c.Size }
+func (c *Cloud189File) GetName() string    { return c.Name }
+func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
+func (c *Cloud189File) IsDir() bool        { return false }
+func (c *Cloud189File) GetID() string      { return string(c.ID) }
+func (c *Cloud189File) GetPath() string    { return "" }
+func (c *Cloud189File) Thumb() string      { return c.Icon.SmallUrl }
 
 // Folder
 type Cloud189Folder struct {
-	ID       int64  `json:"id"`
+	ID       String `json:"id"`
 	ParentID int64  `json:"parentId"`
 	Name     string `json:"name"`
 
-	FileCata  int64 `json:"fileCata"`
-	FileCount int64 `json:"fileCount"`
-
-	LastOpTime string `json:"lastOpTime"`
-	CreateDate string `json:"createDate"`
+	LastOpTime Time `json:"lastOpTime"`
+	CreateDate Time `json:"createDate"`
 
-	FileListSize int64  `json:"fileListSize"`
-	Rev          string `json:"rev"`
-	StarLabel    int64  `json:"starLabel"`
-
-	parseTime *time.Time
+	// FileListSize int64  `json:"fileListSize"`
+	// FileCount    int64  `json:"fileCount"`
+	// FileCata     int64  `json:"fileCata"`
+	// Rev          string `json:"rev"`
+	// StarLabel    int64  `json:"starLabel"`
}
 
-func (c *Cloud189Folder) GetSize() int64  { return 0 }
-func (c *Cloud189Folder) GetName() string { return c.Name }
-func (c *Cloud189Folder) ModTime() time.Time {
-	if c.parseTime == nil {
-		c.parseTime = MustParseTime(c.LastOpTime)
-	}
-	return *c.parseTime
+func (c *Cloud189Folder) CreateTime() time.Time {
+	return time.Time(c.CreateDate)
 }
-func (c *Cloud189Folder) IsDir() bool     { return true }
-func (c *Cloud189Folder) GetID() string   { return fmt.Sprint(c.ID) }
-func (c *Cloud189Folder) GetPath() string { return "" }
+
+func (c *Cloud189Folder) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
+func (c *Cloud189Folder) GetSize() int64     { return 0 }
+func (c *Cloud189Folder) GetName() string    { return c.Name }
+func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }
+func (c *Cloud189Folder) IsDir() bool        { return true }
+func (c *Cloud189Folder) GetID() string      { return string(c.ID) }
+func (c *Cloud189Folder) GetPath() string    { return "" }
 
 type Cloud189FilesResp struct {
 	//ResCode int `json:"res_code"`
@@ -252,14 +256,25 @@ type InitMultiUploadResp struct {
 	} `json:"data"`
 }
 type UploadUrlsResp struct {
 	Code string `json:"code"`
-	UploadUrls map[string]Part `json:"uploadUrls"`
+	Data map[string]UploadUrlsData `json:"uploadUrls"`
 }
-type Part struct {
+type UploadUrlsData struct {
 	RequestURL    string `json:"requestURL"`
 	RequestHeader string `json:"requestHeader"`
 }
+
+type UploadUrlInfo struct {
+	PartNumber int
+	Headers    map[string]string
+	UploadUrlsData
+}
+
+type UploadProgress struct {
+	UploadInfo  InitMultiUploadResp
+	UploadParts []string
+}
 
 /* Second upload method */
 type CreateUploadFileResp struct {
 	// Upload file request ID
@@ -284,15 +299,60 @@ func (r *GetUploadFileStatusResp) GetSize() int64 {
 	return r.DataSize + r.Size
 }
 
-type CommitUploadFileResp struct {
+type CommitMultiUploadFileResp struct {
+	File struct {
+		UserFileID String `json:"userFileId"`
+		FileName   string `json:"fileName"`
+		FileSize   int64  `json:"fileSize"`
+		FileMd5    string `json:"fileMd5"`
+		CreateDate Time   `json:"createDate"`
+	} `json:"file"`
+}
+
+func (f *CommitMultiUploadFileResp) toFile() *Cloud189File {
+	return &Cloud189File{
+		ID:         f.File.UserFileID,
+		Name:       f.File.FileName,
+		Size:       f.File.FileSize,
+		Md5:        f.File.FileMd5,
+		LastOpTime: f.File.CreateDate,
+		CreateDate: f.File.CreateDate,
+	}
+}
+
+type OldCommitUploadFileResp struct {
 	XMLName    xml.Name `xml:"file"`
-	Id         string   `xml:"id"`
+	ID         String   `xml:"id"`
 	Name       string   `xml:"name"`
-	Size       string   `xml:"size"`
+	Size       int64    `xml:"size"`
 	Md5        string   `xml:"md5"`
-	CreateDate string   `xml:"createDate"`
-	Rev        string   `xml:"rev"`
-	UserId     string   `xml:"userId"`
+	CreateDate Time     `xml:"createDate"`
+}
+
+func (f *OldCommitUploadFileResp) toFile() *Cloud189File {
+	return &Cloud189File{
+		ID:         f.ID,
+		Name:       f.Name,
+		Size:       f.Size,
+		Md5:        f.Md5,
+		CreateDate: f.CreateDate,
+		LastOpTime: f.CreateDate,
+	}
+}
+
+type CreateBatchTaskResp struct {
+	TaskID string `json:"taskId"`
+}
+
+type BatchTaskStateResp struct {
+	FailedCount         int     `json:"failedCount"`
+	Process             int     `json:"process"`
+	SkipCount           int     `json:"skipCount"`
+	SubTaskCount        int     `json:"subTaskCount"`
+	SuccessedCount      int     `json:"successedCount"`
+	SuccessedFileIDList []int64 `json:"successedFileIdList"`
+	TaskID              string  `json:"taskId"`
+	TaskStatus          int     `json:"taskStatus"` // 1 initializing, 2 conflict, 3 running, 4 done
 }
 
 /* Encrypted query parameters */
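Editor's note: the upload rewrite in the hunks below fans part uploads out over y.uploadThread workers via alist's own pkg/errgroup. A minimal sketch of the same bounded-concurrency idea using golang.org/x/sync/errgroup instead; the part-upload body is a placeholder, not the driver's real PUT.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(3) // cap concurrency, like the driver's uploadThread

	for i := 1; i <= 10; i++ {
		part := i
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err() // another worker already failed
			default:
			}
			// Placeholder for the real PUT of one upload part.
			fmt.Println("uploading part", part)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}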
@ -13,8 +13,9 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/http/cookiejar"
|
"net/http/cookiejar"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -24,6 +25,7 @@ import (
|
|||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
"github.com/alist-org/alist/v3/internal/setting"
|
"github.com/alist-org/alist/v3/internal/setting"
|
||||||
|
"github.com/alist-org/alist/v3/pkg/errgroup"
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
|
|
||||||
"github.com/avast/retry-go"
|
"github.com/avast/retry-go"
|
||||||
@ -268,7 +270,7 @@ func (y *Cloud189PC) login() (err error) {
|
|||||||
"validateCode": y.VCode,
|
"validateCode": y.VCode,
|
||||||
"captchaToken": param.CaptchaToken,
|
"captchaToken": param.CaptchaToken,
|
||||||
"returnUrl": RETURN_URL,
|
"returnUrl": RETURN_URL,
|
||||||
"mailSuffix": "@189.cn",
|
// "mailSuffix": "@189.cn",
|
||||||
"dynamicCheck": "FALSE",
|
"dynamicCheck": "FALSE",
|
||||||
"clientType": CLIENT_TYPE,
|
"clientType": CLIENT_TYPE,
|
||||||
"cb_SaveName": "1",
|
"cb_SaveName": "1",
|
||||||
@ -434,15 +436,20 @@ func (y *Cloud189PC) refreshSession() (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 普通上传
|
// 普通上传
|
||||||
func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
|
// 无法上传大小为0的文件
|
||||||
var DEFAULT = partSize(file.GetSize())
|
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
var count = int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
|
var sliceSize = partSize(file.GetSize())
|
||||||
|
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
|
||||||
|
lastPartSize := file.GetSize() % sliceSize
|
||||||
|
if file.GetSize() > 0 && lastPartSize == 0 {
|
||||||
|
lastPartSize = sliceSize
|
||||||
|
}
|
||||||
|
|
||||||
params := Params{
|
params := Params{
|
||||||
"parentFolderId": dstDir.GetID(),
|
"parentFolderId": dstDir.GetID(),
|
||||||
"fileName": url.QueryEscape(file.GetName()),
|
"fileName": url.QueryEscape(file.GetName()),
|
||||||
"fileSize": fmt.Sprint(file.GetSize()),
|
"fileSize": fmt.Sprint(file.GetSize()),
|
||||||
"sliceSize": fmt.Sprint(DEFAULT),
|
"sliceSize": fmt.Sprint(sliceSize),
|
||||||
"lazyCheck": "1",
|
"lazyCheck": "1",
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -457,72 +464,71 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
|
|
||||||
// 初始化上传
|
// 初始化上传
|
||||||
var initMultiUpload InitMultiUploadResp
|
var initMultiUpload InitMultiUploadResp
|
||||||
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
}, params, &initMultiUpload)
|
}, params, &initMultiUpload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
|
||||||
|
retry.Attempts(3),
|
||||||
|
retry.Delay(time.Second),
|
||||||
|
retry.DelayType(retry.BackOffDelay))
|
||||||
|
|
||||||
fileMd5 := md5.New()
|
fileMd5 := md5.New()
|
||||||
silceMd5 := md5.New()
|
silceMd5 := md5.New()
|
||||||
silceMd5Hexs := make([]string, 0, count)
|
silceMd5Hexs := make([]string, 0, count)
|
||||||
byteData := bytes.NewBuffer(make([]byte, DEFAULT))
|
|
||||||
for i := 1; i <= count; i++ {
|
for i := 1; i <= count; i++ {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(upCtx) {
|
||||||
return ctx.Err()
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
byteData := make([]byte, sliceSize)
|
||||||
|
if i == count {
|
||||||
|
byteData = byteData[:lastPartSize]
|
||||||
}
|
}
|
||||||
|
|
||||||
// 读取块
|
// 读取块
|
||||||
byteData.Reset()
|
|
||||||
silceMd5.Reset()
|
silceMd5.Reset()
|
||||||
_, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5, byteData), file, DEFAULT)
|
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
|
||||||
if err != io.EOF && err != io.ErrUnexpectedEOF && err != nil {
|
return nil, err
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 计算块md5并进行hex和base64编码
|
// 计算块md5并进行hex和base64编码
|
||||||
md5Bytes := silceMd5.Sum(nil)
|
md5Bytes := silceMd5.Sum(nil)
|
||||||
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
|
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
|
||||||
silceMd5Base64 := base64.StdEncoding.EncodeToString(md5Bytes)
|
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
|
||||||
|
|
||||||
// 获取上传链接
|
threadG.Go(func(ctx context.Context) error {
|
||||||
var uploadUrl UploadUrlsResp
|
uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
|
||||||
_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
|
if err != nil {
|
||||||
func(req *resty.Request) {
|
return err
|
||||||
req.SetContext(ctx)
|
}
|
||||||
}, Params{
|
|
||||||
"partInfo": fmt.Sprintf("%d-%s", i, silceMd5Base64),
|
|
||||||
"uploadFileId": initMultiUpload.Data.UploadFileID,
|
|
||||||
}, &uploadUrl)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// 开始上传
|
// step.4 上传切片
|
||||||
uploadData := uploadUrl.UploadUrls[fmt.Sprint("partNumber_", i)]
|
uploadUrl := uploadUrls[0]
|
||||||
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
|
||||||
err = retry.Do(func() error {
|
if err != nil {
|
||||||
_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(byteData.Bytes()))
|
return err
|
||||||
return err
|
}
|
||||||
},
|
up(float64(threadG.Success()) * 100 / float64(count))
|
||||||
retry.Context(ctx),
|
return nil
|
||||||
retry.Attempts(3),
|
})
|
||||||
retry.Delay(time.Second),
|
}
|
||||||
retry.MaxDelay(5*time.Second))
|
if err = threadG.Wait(); err != nil {
|
||||||
if err != nil {
|
return nil, err
|
||||||
return err
|
|
||||||
}
|
|
||||||
up(int(i * 100 / count))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
||||||
sliceMd5Hex := fileMd5Hex
|
sliceMd5Hex := fileMd5Hex
|
||||||
if file.GetSize() > DEFAULT {
|
if file.GetSize() > sliceSize {
|
||||||
sliceMd5Hex = strings.ToUpper(utils.GetMD5Encode(strings.Join(silceMd5Hexs, "\n")))
|
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
|
||||||
}
|
}
|
||||||
|
|
||||||
// 提交上传
|
// 提交上传
|
||||||
|
var resp CommitMultiUploadFileResp
|
||||||
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
|
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
|
||||||
func(req *resty.Request) {
|
func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
@ -533,199 +539,240 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
"lazyCheck": "1",
|
"lazyCheck": "1",
|
||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
"opertype": "3",
|
"opertype": "3",
|
||||||
}, nil)
|
}, &resp)
|
||||||
return err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.toFile(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
|
||||||
|
fileMd5 := stream.GetHash().GetHash(utils.MD5)
|
||||||
|
if len(fileMd5) < utils.MD5.Width {
|
||||||
|
return nil, errors.New("invalid hash")
|
||||||
|
}
|
||||||
|
|
||||||
|
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if uploadInfo.FileDataExists != 1 {
|
||||||
|
return nil, errors.New("rapid upload fail")
|
||||||
|
}
|
||||||
|
|
||||||
|
return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fast upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	tempFile, err := file.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}

	var sliceSize = partSize(file.GetSize())
	count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
	lastSliceSize := file.GetSize() % sliceSize
	if file.GetSize() > 0 && lastSliceSize == 0 {
		lastSliceSize = sliceSize
	}

	// step.1 compute the full-file md5 and per-slice md5 first
	byteSize := sliceSize
	fileMd5 := md5.New()
	silceMd5 := md5.New()
	silceMd5Hexs := make([]string, 0, count)
	partInfos := make([]string, 0, count)
	for i := 1; i <= count; i++ {
		if utils.IsCanceled(ctx) {
			return nil, ctx.Err()
		}

		if i == count {
			byteSize = lastSliceSize
		}

		silceMd5.Reset()
		if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
			return nil, err
		}
		md5Byte := silceMd5.Sum(nil)
		silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
		partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
	}

	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
	sliceMd5Hex := fileMd5Hex
	if file.GetSize() > sliceSize {
		sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
	}

	fullUrl := UPLOAD_URL
	if y.isFamily() {
		fullUrl += "/family"
	} else {
		//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
		fullUrl += "/person"
	}

	// try to restore previously saved progress
	uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
	if !ok {
		// step.2 pre-upload
		params := Params{
			"parentFolderId": dstDir.GetID(),
			"fileName":       url.QueryEscape(file.GetName()),
			"fileSize":       fmt.Sprint(file.GetSize()),
			"fileMd5":        fileMd5Hex,
			"sliceSize":      fmt.Sprint(sliceSize),
			"sliceMd5":       sliceMd5Hex,
		}
		if y.isFamily() {
			params.Set("familyId", y.FamilyID)
		}
		var uploadInfo InitMultiUploadResp
		_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
			req.SetContext(ctx)
		}, params, &uploadInfo)
		if err != nil {
			return nil, err
		}
		uploadProgress = &UploadProgress{
			UploadInfo:  uploadInfo,
			UploadParts: partInfos,
		}
	}

	uploadInfo := uploadProgress.UploadInfo.Data
	// the file does not exist on the drive yet; start uploading
	if uploadInfo.FileDataExists != 1 {
		threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
			retry.Attempts(3),
			retry.Delay(time.Second),
			retry.DelayType(retry.BackOffDelay))
		for i, uploadPart := range uploadProgress.UploadParts {
			if utils.IsCanceled(upCtx) {
				break
			}

			i, uploadPart := i, uploadPart
			threadG.Go(func(ctx context.Context) error {
				// step.3 get the upload URL for this slice
				uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
				if err != nil {
					return err
				}
				uploadUrl := uploadUrls[0]

				byteSize, offset := sliceSize, int64(uploadUrl.PartNumber-1)*sliceSize
				if uploadUrl.PartNumber == count {
					byteSize = lastSliceSize
				}

				// step.4 upload the slice
				_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
				if err != nil {
					return err
				}

				up(float64(threadG.Success()) * 100 / float64(len(uploadUrls)))
				uploadProgress.UploadParts[i] = ""
				return nil
			})
		}
		if err = threadG.Wait(); err != nil {
			if errors.Is(err, context.Canceled) {
				uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
				base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
			}
			return nil, err
		}
	}

	// step.5 commit
	var resp CommitMultiUploadFileResp
	_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
		func(req *resty.Request) {
			req.SetContext(ctx)
		}, Params{
			"uploadFileId": uploadInfo.UploadFileID,
			"isLog":        "0",
			"opertype":     "3",
		}, &resp)
	if err != nil {
		return nil, err
	}
	return resp.toFile(), nil
}
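
FastUpload persists its remaining part list keyed by session and file md5, so an interrupted transfer can resume. The exact shape of base.GetUploadProgress and base.SaveUploadProgress is outside this diff; the following is a minimal, self-contained sketch of the pattern as used above, with the cache shape and key scheme assumed rather than taken from the repository.

package main

import (
	"fmt"
	"strings"
	"sync"
)

var (
	mu    sync.Mutex
	cache = map[string]any{} // illustrative in-memory store; the real helpers may persist to disk
)

func key(parts ...string) string { return strings.Join(parts, "-") }

// SaveUploadProgress stores an arbitrary progress value under the joined keys.
func SaveUploadProgress(p any, keys ...string) {
	mu.Lock()
	defer mu.Unlock()
	cache[key(keys...)] = p
}

// GetUploadProgress retrieves a previously saved value, reporting whether it existed.
func GetUploadProgress[T any](keys ...string) (T, bool) {
	mu.Lock()
	defer mu.Unlock()
	v, ok := cache[key(keys...)].(T)
	return v, ok
}

func main() {
	type UploadProgress struct{ UploadParts []string }
	SaveUploadProgress(&UploadProgress{UploadParts: []string{"1-abc"}}, "session", "md5hex")
	if p, ok := GetUploadProgress[*UploadProgress]("session", "md5hex"); ok {
		fmt.Println(p.UploadParts) // [1-abc]
	}
}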

// Get the upload URLs for a batch of slices.
// The HTTP body has a size limit; requesting too many part infos at once fails.
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
	fullUrl := UPLOAD_URL
	if y.isFamily() {
		fullUrl += "/family"
	} else {
		fullUrl += "/person"
	}

	var uploadUrlsResp UploadUrlsResp
	_, err := y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
		func(req *resty.Request) {
			req.SetContext(ctx)
		}, Params{
			"uploadFileId": uploadFileId,
			"partInfo":     strings.Join(partInfo, ","),
		}, &uploadUrlsResp)
	if err != nil {
		return nil, err
	}
	uploadUrls := uploadUrlsResp.Data

	if len(uploadUrls) != len(partInfo) {
		return nil, fmt.Errorf("uploadUrls get error, due to get length %d, real length %d", len(partInfo), len(uploadUrls))
	}

	uploadUrlInfos := make([]UploadUrlInfo, 0, len(uploadUrls))
	for k, uploadUrl := range uploadUrls {
		partNumber, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
		if err != nil {
			return nil, err
		}
		uploadUrlInfos = append(uploadUrlInfos, UploadUrlInfo{
			PartNumber:     partNumber,
			Headers:        ParseHttpHeader(uploadUrl.RequestHeader),
			UploadUrlsData: uploadUrl,
		})
	}
	sort.Slice(uploadUrlInfos, func(i, j int) bool {
		return uploadUrlInfos[i].PartNumber < uploadUrlInfos[j].PartNumber
	})
	return uploadUrlInfos, nil
}
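
The getMultiUploadUrls endpoint returns a map keyed by "partNumber_N", which the function above converts into a slice sorted by part number. A self-contained illustration of just that transformation, with the URL value type simplified:

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

func main() {
	// illustrative response shape; the real value type is UploadUrlsData
	uploadUrls := map[string]string{
		"partNumber_2": "https://example.com/part2",
		"partNumber_1": "https://example.com/part1",
	}
	type info struct {
		PartNumber int
		URL        string
	}
	infos := make([]info, 0, len(uploadUrls))
	for k, v := range uploadUrls {
		n, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
		if err != nil {
			panic(err)
		}
		infos = append(infos, info{PartNumber: n, URL: v})
	}
	// map iteration order is random, so sort by part number before use
	sort.Slice(infos, func(i, j int) bool { return infos[i].PartNumber < infos[j].PartNumber })
	fmt.Println(infos) // [{1 https://example.com/part1} {2 https://example.com/part2}]
}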

// Legacy upload; the family cloud does not support overwrite
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	tempFile, err := file.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}
	fileMd5, err := utils.HashFile(utils.MD5, tempFile)
	if err != nil {
		return nil, err
	}

	// create the upload session
	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
	if err != nil {
		return nil, err
	}

	// the file does not exist on the drive yet; start uploading
	status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
	for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
		if utils.IsCanceled(ctx) {
			return nil, ctx.Err()
		}

		header := map[string]string{
@ -742,7 +789,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
		_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
		if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
			return nil, err
		}

		// fetch the resume (breakpoint) status
@ -760,35 +807,80 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
		}, &status)
		if err != nil {
			return nil, err
		}

		if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
			return nil, err
		}
		up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
	}

	return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
}
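
One behavioral fix in OldUpload is worth calling out: the old callback up(int(status.Size / file.GetSize())) used integer division, so reported progress stayed at 0 until the upload finished. A tiny self-contained demonstration of the difference:

package main

import "fmt"

func main() {
	var uploaded, total int64 = 52428800, 104857600 // 50 MiB of 100 MiB

	// old computation: int64 division truncates to 0 for any partial upload
	fmt.Println(int(uploaded / total)) // 0

	// new computation: convert first, then scale to a percentage
	fmt.Println(float64(uploaded) / float64(total) * 100) // 50
}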

// Create the upload session
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
	var uploadInfo CreateUploadFileResp

	fullUrl := API_URL + "/createUploadFile.action"
	if y.isFamily() {
		fullUrl = API_URL + "/family/file/createFamilyFile.action"
	}
	_, err := y.post(fullUrl, func(req *resty.Request) {
		req.SetContext(ctx)
		if y.isFamily() {
			req.SetQueryParams(map[string]string{
				"familyId":     y.FamilyID,
				"parentId":     parentID,
				"fileMd5":      fileMd5,
				"fileName":     fileName,
				"fileSize":     fileSize,
				"resumePolicy": "1",
			})
		} else {
			req.SetFormData(map[string]string{
				"parentFolderId": parentID,
				"fileName":       fileName,
				"size":           fileSize,
				"md5":            fileMd5,
				"opertype":       "3",
				"flag":           "1",
				"resumePolicy":   "1",
				"isLog":          "0",
			})
		}
	}, &uploadInfo)

	if err != nil {
		return nil, err
	}
	return &uploadInfo, nil
}

// Commit the uploaded file
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
	var resp OldCommitUploadFileResp
	_, err := y.post(fileCommitUrl, func(req *resty.Request) {
		req.SetContext(ctx)
		if y.isFamily() {
			req.SetHeaders(map[string]string{
				"ResumePolicy": "1",
				"UploadFileId": fmt.Sprint(uploadFileID),
				"FamilyId":     fmt.Sprint(y.FamilyID),
			})
		} else {
			req.SetFormData(map[string]string{
				"opertype":     "3",
				"resumePolicy": "1",
				"uploadFileId": fmt.Sprint(uploadFileID),
				"isLog":        "0",
			})
		}
	}, &resp)
	if err != nil {
		return nil, err
	}
	return resp.toFile(), nil
}

func (y *Cloud189PC) isFamily() bool {
@ -829,3 +921,33 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
	}
	return fmt.Sprint(infos[0].FamilyID), nil
}

func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
	var resp BatchTaskStateResp
	_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"type":   aType,
			"taskId": taskID,
		})
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}

func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
	for {
		state, err := y.CheckBatchTask(aType, taskID)
		if err != nil {
			return err
		}
		switch state.TaskStatus {
		case 2:
			return errors.New("there is a conflict with the target object")
		case 4:
			return nil
		}
		time.Sleep(t)
	}
}
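
WaitBatchTask is a plain poll-until-terminal loop; status 2 means a conflict and 4 means completion, as handled above. The same shape in a self-contained sketch:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitTask polls check at the given interval until a terminal status arrives.
func waitTask(check func() (int, error), interval time.Duration) error {
	for {
		status, err := check()
		if err != nil {
			return err
		}
		switch status {
		case 2:
			return errors.New("there is a conflict with the target object")
		case 4:
			return nil
		}
		time.Sleep(interval)
	}
}

func main() {
	polls := 0
	err := waitTask(func() (int, error) {
		polls++
		if polls < 3 {
			return 1, nil // still running
		}
		return 4, nil // done
	}, 10*time.Millisecond)
	fmt.Println(polls, err) // 3 <nil>
}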

@ -3,6 +3,7 @@ package alist_v3
import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path"
	"strconv"
@ -93,8 +94,10 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
		Object: model.Object{
			Name:     f.Name,
			Modified: f.Modified,
			Ctime:    f.Created,
			Size:     f.Size,
			IsFolder: f.IsDir,
			HashInfo: utils.FromString(f.HashInfo),
		},
		Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
	}
@ -176,7 +179,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
		SetHeader("Password", d.MetaPassword).
		SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
		SetContentLength(true).
		SetBody(io.ReadCloser(stream))
	})
	return err
}

@ -18,9 +18,11 @@ type ObjResp struct {
	Size     int64     `json:"size"`
	IsDir    bool      `json:"is_dir"`
	Modified time.Time `json:"modified"`
	Created  time.Time `json:"created"`
	Sign     string    `json:"sign"`
	Thumb    string    `json:"thumb"`
	Type     int       `json:"type"`
	HashInfo string    `json:"hashinfo"`
}

type FsListResp struct {
@ -14,6 +14,8 @@ import (
	"os"
	"time"

	"github.com/alist-org/alist/v3/internal/stream"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/driver"
@ -67,7 +69,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
		return nil
	}
	// init deviceID
	deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
	// init privateKey
	privateKey, _ := NewPrivateKeyFromHex(deviceID)
	state := State{
@ -163,14 +165,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
	return err
}

func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
	file := stream.FileStream{
		Obj:      streamer,
		Reader:   streamer,
		Mimetype: streamer.GetMimetype(),
	}
	const DEFAULT int64 = 10485760
	var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))

	partInfoList := make([]base.Json, 0, count)
	for i := 1; i <= count; i++ {
@ -187,25 +189,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	}

	var localFile *os.File
	if fileStream, ok := file.Reader.(*stream.FileStream); ok {
		localFile, _ = fileStream.Reader.(*os.File)
	}
	if d.RapidUpload {
		buf := bytes.NewBuffer(make([]byte, 0, 1024))
		io.CopyN(buf, file, 1024)
		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
		if localFile != nil {
			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
				return err
			}
		} else {
			// splice the consumed head bytes back onto the stream
			file.Reader = struct {
				io.Reader
				io.Closer
			}{
				Reader: io.MultiReader(buf, file),
				Closer: &file,
			}
		}
	} else {
@ -259,7 +261,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
		(t.file.slice(o.toNumber(), Math.min(o.plus(8).toNumber(), t.file.size)))
	*/
	buf := make([]byte, 8)
	r, _ := new(big.Int).SetString(utils.GetMD5EncodeStr(d.AccessToken)[:16], 16)
	i := new(big.Int).SetInt64(file.GetSize())
	o := new(big.Int).SetInt64(0)
	if file.GetSize() > 0 {
@ -281,7 +283,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	if _, err = localFile.Seek(0, io.SeekStart); err != nil {
		return err
	}
	file.Reader = localFile
}

for i, partInfo := range resp.PartInfoList {
@ -303,7 +305,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	}
	res.Body.Close()
	if count > 0 {
		up(float64(i) * 100 / float64(count))
	}
}
var resp2 base.Json
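
When the source is not a seekable local file, the rapid-upload path above consumes the first 1024 bytes to compute pre_hash and then splices them back in front of the stream with io.MultiReader. A minimal, self-contained demonstration of that trick:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := io.NopCloser(strings.NewReader("hello, world"))

	head := bytes.NewBuffer(nil)
	io.CopyN(head, src, 5) // consume "hello", e.g. to hash it

	// stitch the consumed head back in front of the remaining stream
	rest, _ := io.ReadAll(io.MultiReader(head, src))
	fmt.Println(string(rest)) // hello, world
}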

@ -2,11 +2,12 @@ package aliyundrive_open

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/Xhofe/rateg"
	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
@ -35,13 +36,25 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}

func (d *AliyundriveOpen) Init(ctx context.Context) error {
	if d.LIVPDownloadFormat == "" {
		d.LIVPDownloadFormat = "jpeg"
	}
	if d.DriveType == "" {
		d.DriveType = "default"
	}
	res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
	if err != nil {
		return err
	}
	d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
	d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
		Limit:  4,
		Bucket: 1,
	})
	d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
		Limit:  1,
		Bucket: 1,
	})
	return nil
}

@ -50,6 +63,9 @@ func (d *AliyundriveOpen) Drop(ctx context.Context) error {
}

func (d *AliyundriveOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if d.limitList == nil {
		return nil, fmt.Errorf("driver not init")
	}
	files, err := d.getFiles(ctx, dir.GetID())
	if err != nil {
		return nil, err
@ -71,6 +87,12 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
		return nil, err
	}
	url := utils.Json.Get(res, "url").ToString()
	if url == "" {
		if utils.Ext(file.GetName()) != "livp" {
			return nil, errors.New("get download url failed: " + string(res))
		}
		url = utils.Json.Get(res, "streamsUrl", d.LIVPDownloadFormat).ToString()
	}
	exp := time.Hour
	return &model.Link{
		URL: url,
@ -79,10 +101,15 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
}

func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if d.limitLink == nil {
		return nil, fmt.Errorf("driver not init")
	}
	return d.limitLink(ctx, file)
}
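
rateg.LimitFnCtx wraps a function so that calls are rate-limited according to the given Limit and Bucket. Its internals are not part of this diff; the sketch below reproduces the same wrapper shape using the standard golang.org/x/time/rate package instead of rateg, so the helper name and semantics here are assumptions, not the library's actual implementation.

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

// limitFnCtx returns fn wrapped so each call first waits on a token-bucket limiter.
func limitFnCtx[I, O any](fn func(context.Context, I) (O, error), limit rate.Limit, bucket int) func(context.Context, I) (O, error) {
	l := rate.NewLimiter(limit, bucket)
	return func(ctx context.Context, in I) (O, error) {
		if err := l.Wait(ctx); err != nil {
			var zero O
			return zero, err
		}
		return fn(ctx, in)
	}
}

func main() {
	list := func(ctx context.Context, dir string) ([]string, error) { return []string{dir + "/a"}, nil }
	limited := limitFnCtx(list, 4, 1) // at most 4 calls per second, burst of 1
	res, err := limited(context.Background(), "root")
	fmt.Println(res, err) // [root/a] <nil>
}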

func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	nowTime, _ := getNowTime()
	newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
	_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
@ -90,12 +117,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
			"name":            dirName,
			"type":            "folder",
			"check_name_mode": "refuse",
		}).SetResult(&newDir)
	})
	if err != nil {
		return nil, err
	}
	return fileToObj(newDir), nil
}

func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var resp MoveOrCopyResp
	_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
@ -103,20 +134,36 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) er
			"to_parent_file_id": dstDir.GetID(),
			"check_name_mode":   "refuse", // optional:ignore,auto_rename,refuse
			//"new_name": "newName", // The new name to use when a file of the same name exists
		}).SetResult(&resp)
	})
	if err != nil {
		return nil, err
	}
	if resp.Exist {
		return nil, errors.New("existence of files with the same name")
	}

	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
		srcObj.ID = resp.FileID
		srcObj.Modified = time.Now()
		return srcObj, nil
	}
	return nil, nil
}

func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var newFile File
	_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  srcObj.GetID(),
			"name":     newName,
		}).SetResult(&newFile)
	})
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}
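
The rewrites above lean on resty's SetResult, which decodes the JSON response into a struct as part of the same call that sends the request. A self-contained demonstration against a throwaway test server:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/go-resty/resty/v2"
)

type moveResp struct {
	Exist  bool   `json:"exist"`
	FileID string `json:"file_id"`
}

func main() {
	// throwaway server standing in for the remote API
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"exist":false,"file_id":"123"}`)
	}))
	defer srv.Close()

	var resp moveResp
	// on a 2xx JSON response, resty unmarshals the body into resp automatically
	_, err := resty.New().R().SetResult(&resp).Post(srv.URL)
	fmt.Println(resp.FileID, resp.Exist, err) // 123 false <nil>
}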

func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -145,75 +192,8 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
	return err
}

func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	return d.upload(ctx, dstDir, stream, up)
}

func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@ -241,3 +221,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
}

var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil)
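
The trailing var _ lines are compile-time interface assertions: the build breaks as soon as *AliyundriveOpen stops satisfying one of the optional result interfaces. The idiom in isolation, with an illustrative interface:

package main

import "fmt"

type MkdirResult interface{ MakeDir(name string) (string, error) }

type driverImpl struct{}

func (driverImpl) MakeDir(name string) (string, error) { return name, nil }

// Fails to compile if driverImpl ever loses the MakeDir method;
// the blank identifier means no value is kept at runtime.
var _ MkdirResult = (*driverImpl)(nil)

func main() { fmt.Println("compiles: assertion satisfied") }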

@ -6,16 +6,19 @@ import (
)

type Addition struct {
	DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
	driver.RootID
	RefreshToken   string `json:"refresh_token" required:"true"`
	OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
	OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
	OauthTokenURL  string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
	ClientID       string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
	ClientSecret   string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
	RemoveWay      string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
	RapidUpload    bool   `json:"rapid_upload" help:"If you enable this option, the file will be uploaded to the server first, so the progress will be incorrect"`
	InternalUpload bool   `json:"internal_upload" help:"If you are using Aliyun ECS located in Beijing, you can turn it on to boost the upload speed"`
	LIVPDownloadFormat string `json:"livp_download_format" type:"select" options:"jpeg,mov" default:"jpeg"`
	AccessToken string
}

var config = driver.Config{

@ -1,6 +1,7 @@
package aliyundrive_open

import (
	"github.com/alist-org/alist/v3/pkg/utils"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
@ -17,22 +18,28 @@ type Files struct {
}

type File struct {
	DriveId       string    `json:"drive_id"`
	FileId        string    `json:"file_id"`
	ParentFileId  string    `json:"parent_file_id"`
	Name          string    `json:"name"`
	Size          int64     `json:"size"`
	FileExtension string    `json:"file_extension"`
	ContentHash   string    `json:"content_hash"`
	Category      string    `json:"category"`
	Type          string    `json:"type"`
	Thumbnail     string    `json:"thumbnail"`
	Url           string    `json:"url"`
	CreatedAt     time.Time `json:"created_at"`
	UpdatedAt     time.Time `json:"updated_at"`

	// create only
	FileName string `json:"file_name"`
}

func fileToObj(f File) *model.ObjThumb {
	if f.Name == "" {
		f.Name = f.FileName
	}
	return &model.ObjThumb{
		Object: model.Object{
			ID: f.FileId,
@ -40,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
			Size:     f.Size,
			Modified: f.UpdatedAt,
			IsFolder: f.Type == "folder",
			Ctime:    f.CreatedAt,
			HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
		},
		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
	}
@ -67,3 +76,9 @@ type CreateResp struct {
	RapidUpload  bool       `json:"rapid_upload"`
	PartInfoList []PartInfo `json:"part_info_list"`
}

type MoveOrCopyResp struct {
	Exist   bool   `json:"exist"`
	DriveID string `json:"drive_id"`
	FileID  string `json:"file_id"`
}
drivers/aliyundrive_open/upload.go (new file, +270 lines)
@ -0,0 +1,270 @@
package aliyundrive_open

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"math"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/avast/retry-go"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

func makePartInfos(size int) []base.Json {
	partInfoList := make([]base.Json, size)
	for i := 0; i < size; i++ {
		partInfoList[i] = base.Json{"part_number": 1 + i}
	}
	return partInfoList
}

func calPartSize(fileSize int64) int64 {
	var partSize int64 = 20 * utils.MB
	if fileSize > partSize {
		if fileSize > 1*utils.TB { // file size over 1TB
			partSize = 5 * utils.GB // file part size 5GB
		} else if fileSize > 768*utils.GB { // over 768GB
			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 parts
		} else if fileSize > 512*utils.GB { // over 512GB
			partSize = 82463373 // ≈ 78.6432MB
		} else if fileSize > 384*utils.GB { // over 384GB
			partSize = 54975582 // ≈ 52.4288MB
		} else if fileSize > 256*utils.GB { // over 256GB
			partSize = 41231687 // ≈ 39.3216MB
		} else if fileSize > 128*utils.GB { // over 128GB
			partSize = 27487791 // ≈ 26.2144MB
		}
	}
	return partSize
}

func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
	partInfoList := makePartInfos(count)
	var resp CreateResp
	_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id":       d.DriveId,
			"file_id":        fileId,
			"part_info_list": partInfoList,
			"upload_id":      uploadId,
		}).SetResult(&resp)
	})
	return resp.PartInfoList, err
}

func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
	uploadUrl := partInfo.UploadUrl
	if d.InternalUpload {
		uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
	}
	req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
	if err != nil {
		return err
	}
	res, err := base.HttpClient.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
		return fmt.Errorf("upload status: %d", res.StatusCode)
	}
	return nil
}

func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
	// 3. complete
	var newFile File
	_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id":  d.DriveId,
			"file_id":   fileId,
			"upload_id": uploadId,
		}).SetResult(&newFile)
	})
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}

type ProofRange struct {
	Start int64
	End   int64
}

func getProofRange(input string, size int64) (*ProofRange, error) {
	if size == 0 {
		return &ProofRange{}, nil
	}
	tmpStr := utils.GetMD5EncodeStr(input)[0:16]
	tmpInt, err := strconv.ParseUint(tmpStr, 16, 64)
	if err != nil {
		return nil, err
	}
	index := tmpInt % uint64(size)
	pr := &ProofRange{
		Start: int64(index),
		End:   int64(index) + 8,
	}
	if pr.End >= size {
		pr.End = size
	}
	return pr, nil
}

func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
	proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
	if err != nil {
		return "", err
	}
	length := proofRange.End - proofRange.Start
	buf := bytes.NewBuffer(make([]byte, 0, length))
	reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
	if err != nil {
		return "", err
	}
	_, err = io.CopyN(buf, reader, length)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}

func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// 1. create
	// Part Size Unit: Bytes, Default: 20MB,
	// Maximum number of slices 10,000, ≈195.3125GB
	var partSize = calPartSize(stream.GetSize())
	const dateFormat = "2006-01-02T15:04:05.000Z"
	mtimeStr := stream.ModTime().UTC().Format(dateFormat)
	ctimeStr := stream.CreateTime().UTC().Format(dateFormat)

	createData := base.Json{
		"drive_id":          d.DriveId,
		"parent_file_id":    dstDir.GetID(),
		"name":              stream.GetName(),
		"type":              "file",
		"check_name_mode":   "ignore",
		"local_modified_at": mtimeStr,
		"local_created_at":  ctimeStr,
	}
	count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
	createData["part_info_list"] = makePartInfos(count)
	// rapid upload
	rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
	if rapidUpload {
		log.Debugf("[aliyundrive_open] start cal pre_hash")
		// read 1024 bytes to calculate pre hash
		reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
		if err != nil {
			return nil, err
		}
		hash, err := utils.HashReader(utils.SHA1, reader)
		if err != nil {
			return nil, err
		}
		createData["size"] = stream.GetSize()
		createData["pre_hash"] = hash
	}
	var createResp CreateResp
	_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(createData).SetResult(&createResp)
	})
	var tmpF model.File
	if err != nil {
		if e.Code != "PreHashMatched" || !rapidUpload {
			return nil, err
		}
		log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")

		hi := stream.GetHash()
		hash := hi.GetHash(utils.SHA1)
		if len(hash) <= 0 {
			tmpF, err = stream.CacheFullInTempFile()
			if err != nil {
				return nil, err
			}
			hash, err = utils.HashFile(utils.SHA1, tmpF)
			if err != nil {
				return nil, err
			}
		}

		delete(createData, "pre_hash")
		createData["proof_version"] = "v1"
		createData["content_hash_name"] = "sha1"
		createData["content_hash"] = hash
		createData["proof_code"], err = d.calProofCode(stream)
		if err != nil {
			return nil, fmt.Errorf("cal proof code error: %s", err.Error())
		}
		_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
			req.SetBody(createData).SetResult(&createResp)
		})
		if err != nil {
			return nil, err
		}
	}

	if !createResp.RapidUpload {
		// 2. normal upload
		log.Debugf("[aliyundive_open] normal upload")

		preTime := time.Now()
		var offset, length int64 = 0, partSize
		for i := 0; i < len(createResp.PartInfoList); i++ {
			if utils.IsCanceled(ctx) {
				return nil, ctx.Err()
			}
			// refresh upload url if 50 minutes passed
			if time.Since(preTime) > 50*time.Minute {
				createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
				if err != nil {
					return nil, err
				}
				preTime = time.Now()
			}
			if remain := stream.GetSize() - offset; length > remain {
				length = remain
			}
			rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
			if err != nil {
				return nil, err
			}
			err = retry.Do(func() error {
				return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
			},
				retry.Attempts(3),
				retry.DelayType(retry.BackOffDelay),
				retry.Delay(time.Second))
			if err != nil {
				return nil, err
			}
			offset += partSize
			up(float64(i*100) / float64(count))
		}
	} else {
		log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
	}

	log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
	// 3. complete
	return d.completeUpload(createResp.FileId, createResp.UploadId)
}
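
How the rapid-upload proof code picks its window: the first 16 hex characters of md5(access_token) are parsed as a uint64 and taken modulo the file size, giving a deterministic 8-byte range the client must prove it can read. A self-contained version of that arithmetic (the token value below is illustrative):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strconv"
)

// proofRange mirrors getProofRange above using only the standard library.
func proofRange(accessToken string, size int64) (start, end int64, err error) {
	if size == 0 {
		return 0, 0, nil
	}
	sum := md5.Sum([]byte(accessToken))
	// first 16 hex chars of the md5 digest, read as a base-16 uint64
	tmpInt, err := strconv.ParseUint(hex.EncodeToString(sum[:])[:16], 16, 64)
	if err != nil {
		return 0, 0, err
	}
	start = int64(tmpInt % uint64(size))
	end = start + 8
	if end >= size {
		end = size
	}
	return start, end, nil
}

func main() {
	start, end, _ := proofRange("example-token", 1<<20) // hypothetical token, 1 MiB file
	fmt.Println(start, end)                             // deterministic 8-byte window
}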

@ -2,52 +2,102 @@ package aliyundrive_open

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

// do others that are not defined in the Driver interface

func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
	url := d.base + "/oauth/access_token"
	if d.OauthTokenURL != "" && d.ClientID == "" {
		url = d.OauthTokenURL
	}
	var e ErrResp
	res, err := base.RestyClient.R().
		SetBody(base.Json{
			"client_id":     d.ClientID,
			"client_secret": d.ClientSecret,
			"grant_type":    "refresh_token",
			"refresh_token": d.RefreshToken,
		}).
		SetError(&e).
		Post(url)
	if err != nil {
		return "", "", err
	}
	log.Debugf("[ali_open] refresh token response: %s", res.String())
	if e.Code != "" {
		return "", "", fmt.Errorf("failed to refresh token: %s", e.Message)
	}
	refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
	if refresh == "" {
		return "", "", fmt.Errorf("failed to refresh token: refresh token is empty, resp: %s", res.String())
	}
	curSub, err := getSub(d.RefreshToken)
	if err != nil {
		return "", "", err
	}
	newSub, err := getSub(refresh)
	if err != nil {
		return "", "", err
	}
	if curSub != newSub {
		return "", "", errors.New("failed to refresh token: sub not match")
	}
	return refresh, access, nil
}

func getSub(token string) (string, error) {
	segments := strings.Split(token, ".")
	if len(segments) != 3 {
		return "", errors.New("not a jwt token because of invalid segments")
	}
	bs, err := base64.RawStdEncoding.DecodeString(segments[1])
	if err != nil {
		return "", errors.New("failed to decode jwt token")
	}
	return utils.Json.Get(bs, "sub").ToString(), nil
}

func (d *AliyundriveOpen) refreshToken() error {
	refresh, access, err := d._refreshToken()
	for i := 0; i < 3; i++ {
		if err == nil {
			break
		} else {
			log.Errorf("[ali_open] failed to refresh token: %s", err)
		}
		refresh, access, err = d._refreshToken()
	}
	if err != nil {
		return err
	}
	log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
	d.RefreshToken, d.AccessToken = refresh, access
	op.MustSaveDriverStorage(d)
	return nil
}

func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
	b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
	return b, err
}

func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
	req := base.RestyClient.R()
	// TODO check whether access_token is expired
	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
@ -61,20 +111,23 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback,
	req.SetError(&e)
	res, err := req.Execute(method, d.base+uri)
	if err != nil {
		if res != nil {
			log.Errorf("[aliyundrive_open] request error: %s", res.String())
		}
		return nil, err, nil
	}
	isRetry := len(retry) > 0 && retry[0]
	if e.Code != "" {
		if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") {
			err = d.refreshToken()
			if err != nil {
				return nil, err, nil
			}
			return d.requestReturnErrResp(uri, method, callback, true)
		}
		return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
	}
	return res.Body(), nil, nil
}
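
getSub above reads the sub claim straight out of the JWT payload without verifying the signature; it is only used to check that a refreshed token still belongs to the same account. A self-contained equivalent using only the standard library (note it mirrors the diff's RawStdEncoding, although the JWT spec itself uses the URL-safe base64 alphabet):

package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

func getSub(token string) (string, error) {
	segments := strings.Split(token, ".")
	if len(segments) != 3 {
		return "", errors.New("not a jwt token because of invalid segments")
	}
	payload, err := base64.RawStdEncoding.DecodeString(segments[1])
	if err != nil {
		return "", errors.New("failed to decode jwt token")
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}

func main() {
	// illustrative, unsigned token: only the payload segment matters here
	payload := base64.RawStdEncoding.EncodeToString([]byte(`{"sub":"user-1"}`))
	sub, err := getSub("header." + payload + ".sig")
	fmt.Println(sub, err) // user-1 <nil>
}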
|
||||||
|
|
||||||
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
|
func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
|
||||||
@ -118,58 +171,8 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
|
|||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makePartInfos(size int) []base.Json {
|
func getNowTime() (time.Time, string) {
|
||||||
partInfoList := make([]base.Json, size)
|
nowTime := time.Now()
|
||||||
for i := 0; i < size; i++ {
|
nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
|
||||||
partInfoList[i] = base.Json{"part_number": 1 + i}
|
return nowTime, nowTimeStr
|
||||||
}
|
|
||||||
return partInfoList
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
|
|
||||||
partInfoList := makePartInfos(count)
|
|
||||||
var resp CreateResp
|
|
||||||
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetBody(base.Json{
|
|
||||||
"drive_id": d.DriveId,
|
|
||||||
"file_id": fileId,
|
|
||||||
"part_info_list": partInfoList,
|
|
||||||
"upload_id": uploadId,
|
|
||||||
}).SetResult(&resp)
|
|
||||||
})
|
|
||||||
return resp.PartInfoList, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *AliyundriveOpen) uploadPart(ctx context.Context, i, count int, reader *utils.MultiReadable, resp *CreateResp, retry bool) error {
|
|
||||||
partInfo := resp.PartInfoList[i-1]
|
|
||||||
uploadUrl := partInfo.UploadUrl
|
|
||||||
if d.InternalUpload {
|
|
||||||
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
|
|
||||||
}
|
|
||||||
req, err := http.NewRequest("PUT", uploadUrl, reader)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req = req.WithContext(ctx)
|
|
||||||
res, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
if retry {
|
|
||||||
reader.Reset()
|
|
||||||
return d.uploadPart(ctx, i, count, reader, resp, false)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res.Body.Close()
|
|
||||||
if retry && res.StatusCode == http.StatusForbidden {
|
|
||||||
resp.PartInfoList, err = d.getUploadUrl(count, resp.FileId, resp.UploadId)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
reader.Reset()
|
|
||||||
return d.uploadPart(ctx, i, count, reader, resp, false)
|
|
||||||
}
|
|
||||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
|
|
||||||
return fmt.Errorf("upload status: %d", res.StatusCode)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
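Note: the refreshToken rewrite above stops treating a single failed token exchange as fatal; it retries the low-level _refreshToken up to three more times, logging each failure, and only swaps in and persists the new token pair once an attempt succeeds. A minimal self-contained sketch of the same retry shape (exchangeToken is a hypothetical stand-in for the driver's _refreshToken, made to fail twice so the loop is exercised):

package main

import (
	"errors"
	"fmt"
	"log"
)

var calls int

// exchangeToken is a hypothetical stand-in for d._refreshToken():
// it fails twice and then succeeds, to exercise the retry loop.
func exchangeToken() (refresh, access string, err error) {
	calls++
	if calls < 3 {
		return "", "", errors.New("transient failure")
	}
	return "new-refresh", "new-access", nil
}

// refreshWithRetry retries the exchange up to three extra times,
// logging each failure, and returns the last error if all fail.
func refreshWithRetry() (string, string, error) {
	refresh, access, err := exchangeToken()
	for i := 0; i < 3; i++ {
		if err == nil {
			break
		}
		log.Printf("failed to refresh token: %s", err)
		refresh, access, err = exchangeToken()
	}
	if err != nil {
		return "", "", fmt.Errorf("refresh token: %w", err)
	}
	return refresh, access, nil
}

func main() {
	refresh, access, err := refreshWithRetry()
	fmt.Println(refresh, access, err)
}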
@@ -2,9 +2,11 @@ package aliyundrive_share
 
 import (
 	"context"
+	"fmt"
 	"net/http"
 	"time"
 
+	"github.com/Xhofe/rateg"
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -51,8 +53,14 @@ func (d *AliyundriveShare) Init(ctx context.Context) error {
 			log.Errorf("%+v", err)
 		}
 	})
-	d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
-	d.limitLink = utils.LimitRateCtx(d.link, time.Second)
+	d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
+		Limit:  4,
+		Bucket: 1,
+	})
+	d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
+		Limit:  1,
+		Bucket: 1,
+	})
 	return nil
 }
 
@@ -65,6 +73,9 @@ func (d *AliyundriveShare) Drop(ctx context.Context) error {
 }
 
 func (d *AliyundriveShare) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	if d.limitList == nil {
+		return nil, fmt.Errorf("driver not init")
+	}
 	return d.limitList(ctx, dir)
 }
 
@@ -79,6 +90,9 @@ func (d *AliyundriveShare) list(ctx context.Context, dir model.Obj) ([]model.Obj
 }
 
 func (d *AliyundriveShare) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	if d.limitLink == nil {
+		return nil, fmt.Errorf("driver not init")
+	}
 	return d.limitLink(ctx, file)
 }
 
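Note: Init now builds d.limitList and d.limitLink with rateg.LimitFnCtx (roughly 4 list calls per second, 1 link call per second), and List/Link guard against a nil wrapper when the driver was never initialized. A rough equivalent of such a wrapper using golang.org/x/time/rate; rateg's actual option semantics may differ, mapping Limit/Bucket to rate and burst here is an assumption:

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

// limitFnCtx wraps fn so each call first waits on a token bucket;
// a sketch of what rateg.LimitFnCtx provides.
func limitFnCtx[I, O any](fn func(context.Context, I) (O, error), rps float64, burst int) func(context.Context, I) (O, error) {
	l := rate.NewLimiter(rate.Limit(rps), burst)
	return func(ctx context.Context, in I) (O, error) {
		if err := l.Wait(ctx); err != nil { // blocks until a token is free
			var zero O
			return zero, err
		}
		return fn(ctx, in)
	}
}

func main() {
	list := func(ctx context.Context, dir string) ([]string, error) { return []string{dir}, nil }
	limited := limitFnCtx(list, 4, 1) // at most ~4 calls per second
	fmt.Println(limited(context.Background(), "/"))
}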
@@ -44,6 +44,7 @@ func fileToObj(f File) *model.ObjThumb {
 			Name:     f.Name,
 			Size:     f.Size,
 			Modified: f.UpdatedAt,
+			Ctime:    f.CreatedAt,
 			IsFolder: f.Type == "folder",
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
@@ -3,6 +3,8 @@ package drivers
 import (
 	_ "github.com/alist-org/alist/v3/drivers/115"
 	_ "github.com/alist-org/alist/v3/drivers/123"
+	_ "github.com/alist-org/alist/v3/drivers/123_link"
+	_ "github.com/alist-org/alist/v3/drivers/123_share"
 	_ "github.com/alist-org/alist/v3/drivers/139"
 	_ "github.com/alist-org/alist/v3/drivers/189"
 	_ "github.com/alist-org/alist/v3/drivers/189pc"
@@ -16,6 +18,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
+	_ "github.com/alist-org/alist/v3/drivers/crypt"
 	_ "github.com/alist-org/alist/v3/drivers/dropbox"
 	_ "github.com/alist-org/alist/v3/drivers/ftp"
 	_ "github.com/alist-org/alist/v3/drivers/google_drive"
@@ -43,6 +46,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/uss"
 	_ "github.com/alist-org/alist/v3/drivers/virtual"
 	_ "github.com/alist-org/alist/v3/drivers/webdav"
+	_ "github.com/alist-org/alist/v3/drivers/weiyun"
 	_ "github.com/alist-org/alist/v3/drivers/wopan"
 	_ "github.com/alist-org/alist/v3/drivers/yandex_disk"
 )
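Note: the new drivers (123_link, 123_share, crypt, weiyun) hook into alist purely through these blank imports; each package registers a factory for itself from an init function. A generic sketch of that self-registration pattern (the registry below is illustrative, not alist's actual op API):

package main

import "fmt"

// Driver and registry play the role alist's op package plays.
type Driver interface{ Name() string }

var registry = map[string]func() Driver{}

func Register(name string, factory func() Driver) { registry[name] = factory }

// In a real layout this type would live in its own package and be
// pulled in by a blank import such as: _ "example.com/drivers/crypt"
type crypt struct{}

func (crypt) Name() string { return "Crypt" }

// init runs as a side effect of the import, wiring the driver in.
func init() { Register("crypt", func() Driver { return crypt{} }) }

func main() {
	for name := range registry {
		fmt.Println("registered driver:", name)
	}
}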
@@ -1,28 +1,33 @@
 package baidu_netdisk
 
 import (
-	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/hex"
-	"fmt"
+	"errors"
 	"io"
 	"math"
-	"os"
+	"net/url"
 	stdpath "path"
 	"strconv"
-	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	log "github.com/sirupsen/logrus"
 )
 
 type BaiduNetdisk struct {
 	model.Storage
 	Addition
+
+	uploadThread int
+	vipType      int // membership type: 0 regular user (4G/4M), 1 member (10G/16M), 2 super member (20G/32M)
 }
 
 func (d *BaiduNetdisk) Config() driver.Config {
@@ -34,11 +39,24 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
 }
 
 func (d *BaiduNetdisk) Init(ctx context.Context) error {
+	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
+	if d.uploadThread < 1 || d.uploadThread > 32 {
+		d.uploadThread, d.UploadThread = 3, "3"
+	}
+
+	if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
+		d.UploadAPI = "https://d.pcs.baidu.com"
+	}
+
 	res, err := d.get("/xpan/nas", map[string]string{
 		"method": "uinfo",
 	}, nil)
 	log.Debugf("[baidu] get uinfo: %s", string(res))
-	return err
+	if err != nil {
+		return err
+	}
+	d.vipType = utils.Json.Get(res, "vip_type").ToInt()
+	return nil
 }
 
 func (d *BaiduNetdisk) Drop(ctx context.Context) error {
@@ -62,12 +80,16 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
 	return d.linkOfficial(file, args)
 }
 
-func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
-	return err
+func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	var newDir File
+	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newDir), nil
 }
 
-func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	data := []base.Json{
 		{
 			"path": srcObj.GetPath(),
@@ -76,10 +98,18 @@ func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error
 		},
 	}
 	_, err := d.manage("move", data)
-	return err
+	if err != nil {
+		return nil, err
+	}
+	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
+		srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
+		srcObj.Modified = time.Now()
+		return srcObj, nil
+	}
+	return nil, nil
 }
 
-func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
 	data := []base.Json{
 		{
 			"path": srcObj.GetPath(),
@@ -87,7 +117,17 @@ func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName str
 		},
 	}
 	_, err := d.manage("rename", data)
-	return err
+	if err != nil {
+		return nil, err
+	}
+
+	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
+		srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
+		srcObj.Name = newName
+		srcObj.Modified = time.Now()
+		return srcObj, nil
+	}
+	return nil, nil
 }
 
 func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -108,126 +148,175 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }
 
-func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	var Default int64 = 4 * 1024 * 1024
-	defaultByteData := make([]byte, Default)
-	count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
-	var SliceSize int64 = 256 * 1024
-	// cal md5
-	h1 := md5.New()
-	h2 := md5.New()
-	block_list := make([]string, 0)
-	content_md5 := ""
-	slice_md5 := ""
-	left := stream.GetSize()
-	for i := 0; i < count; i++ {
-		byteSize := Default
-		var byteData []byte
-		if left < Default {
-			byteSize = left
-			byteData = make([]byte, byteSize)
-		} else {
-			byteData = defaultByteData
-		}
-		left -= byteSize
-		_, err = io.ReadFull(tempFile, byteData)
-		if err != nil {
-			return err
-		}
-		h1.Write(byteData)
-		h2.Write(byteData)
-		block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
-		h2.Reset()
-	}
-	content_md5 = hex.EncodeToString(h1.Sum(nil))
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	if stream.GetSize() <= SliceSize {
-		slice_md5 = content_md5
-	} else {
-		sliceData := make([]byte, SliceSize)
-		_, err = io.ReadFull(tempFile, sliceData)
-		if err != nil {
-			return err
-		}
-		h2.Write(sliceData)
-		slice_md5 = hex.EncodeToString(h2.Sum(nil))
-		_, err = tempFile.Seek(0, io.SeekStart)
-		if err != nil {
-			return err
-		}
-	}
-	rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
-	path := encodeURIComponent(rawPath)
-	block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ","))
-	data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s&content-md5=%s&slice-md5=%s",
-		path, stream.GetSize(),
-		block_list_str,
-		content_md5, slice_md5)
-	params := map[string]string{
-		"method": "precreate",
-	}
-	var precreateResp PrecreateResp
-	_, err = d.post("/xpan/file", params, data, &precreateResp)
-	if err != nil {
-		return err
-	}
-	log.Debugf("%+v", precreateResp)
-	if precreateResp.ReturnType == 2 {
-		return nil
-	}
-	params = map[string]string{
-		"method":       "upload",
-		"access_token": d.AccessToken,
-		"type":         "tmpfile",
-		"path":         path,
-		"uploadid":     precreateResp.Uploadid,
-	}
-	left = stream.GetSize()
-	for i, partseq := range precreateResp.BlockList {
-		if utils.IsCanceled(ctx) {
-			return ctx.Err()
-		}
-		byteSize := Default
-		var byteData []byte
-		if left < Default {
-			byteSize = left
-			byteData = make([]byte, byteSize)
-		} else {
-			byteData = defaultByteData
-		}
-		left -= byteSize
-		_, err = io.ReadFull(tempFile, byteData)
-		if err != nil {
-			return err
-		}
-		u := "https://d.pcs.baidu.com/rest/2.0/pcs/superfile2"
-		params["partseq"] = strconv.Itoa(partseq)
-		res, err := base.RestyClient.R().
-			SetContext(ctx).
-			SetQueryParams(params).
-			SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
-			Post(u)
-		if err != nil {
-			return err
-		}
-		log.Debugln(res.String())
-		if len(precreateResp.BlockList) > 0 {
-			up(i * 100 / len(precreateResp.BlockList))
-		}
-	}
-	_, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)
-	return err
+func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
+	contentMd5 := stream.GetHash().GetHash(utils.MD5)
+	if len(contentMd5) < utils.MD5.Width {
+		return nil, errors.New("invalid hash")
+	}
+
+	streamSize := stream.GetSize()
+	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
+	mtime := stream.ModTime().Unix()
+	ctime := stream.CreateTime().Unix()
+	blockList, _ := utils.Json.MarshalToString([]string{contentMd5})
+
+	var newFile File
+	_, err := d.create(path, streamSize, 0, "", blockList, &newFile, mtime, ctime)
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newFile), nil
+}
+
+func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	// rapid upload
+	if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
+		return newObj, nil
+	}
+
+	tempFile, err := stream.CacheFullInTempFile()
+	if err != nil {
+		return nil, err
+	}
+
+	streamSize := stream.GetSize()
+	sliceSize := d.getSliceSize()
+	count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
+	lastBlockSize := streamSize % sliceSize
+	if streamSize > 0 && lastBlockSize == 0 {
+		lastBlockSize = sliceSize
+	}
+
+	//cal md5 for first 256k data
+	const SliceSize int64 = 256 * 1024
+	// cal md5
+	blockList := make([]string, 0, count)
+	byteSize := sliceSize
+	fileMd5H := md5.New()
+	sliceMd5H := md5.New()
+	sliceMd5H2 := md5.New()
+	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
+	for i := 1; i <= count; i++ {
+		if utils.IsCanceled(ctx) {
+			return nil, ctx.Err()
+		}
+		if i == count {
+			byteSize = lastBlockSize
+		}
+		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		if err != nil && err != io.EOF {
+			return nil, err
+		}
+		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
+		sliceMd5H.Reset()
+	}
+	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
+	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
+	blockListStr, _ := utils.Json.MarshalToString(blockList)
+	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
+	mtime := stream.ModTime().Unix()
+	ctime := stream.CreateTime().Unix()
+
+	// step.1 precreate
+	// try to resume previously saved progress
+	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
+	if !ok {
+		params := map[string]string{
+			"method": "precreate",
+		}
+		form := map[string]string{
+			"path":        path,
+			"size":        strconv.FormatInt(streamSize, 10),
+			"isdir":       "0",
+			"autoinit":    "1",
+			"rtype":       "3",
+			"block_list":  blockListStr,
+			"content-md5": contentMd5,
+			"slice-md5":   sliceMd5,
+		}
+		joinTime(form, ctime, mtime)
+
+		log.Debugf("[baidu_netdisk] precreate data: %s", form)
+		_, err = d.postForm("/xpan/file", params, form, &precreateResp)
+		if err != nil {
+			return nil, err
+		}
+		log.Debugf("%+v", precreateResp)
+		if precreateResp.ReturnType == 2 {
+			//rapid upload, since got md5 match from baidu server
+			if err != nil {
+				return nil, err
+			}
+			return fileToObj(precreateResp.File), nil
+		}
+	}
+	// step.2 upload the slices
+	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+		retry.Attempts(3),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
+	for i, partseq := range precreateResp.BlockList {
+		if utils.IsCanceled(upCtx) {
+			break
+		}
+
+		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
+		if partseq+1 == count {
+			byteSize = lastBlockSize
+		}
+		threadG.Go(func(ctx context.Context) error {
+			params := map[string]string{
+				"method":       "upload",
+				"access_token": d.AccessToken,
+				"type":         "tmpfile",
+				"path":         path,
+				"uploadid":     precreateResp.Uploadid,
+				"partseq":      strconv.Itoa(partseq),
+			}
+			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+			if err != nil {
+				return err
+			}
+			up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
+			precreateResp.BlockList[i] = -1
+			return nil
+		})
+	}
+	if err = threadG.Wait(); err != nil {
+		// if the user canceled the upload, save the progress for resuming
+		if errors.Is(err, context.Canceled) {
+			precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
+			base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+		}
+		return nil, err
+	}
+
+	// step.3 create the file
+	var newFile File
+	_, err = d.create(path, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newFile), nil
+}
+
+func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
+	res, err := base.RestyClient.R().
+		SetContext(ctx).
+		SetQueryParams(params).
+		SetFileReader("file", fileName, file).
+		Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
+	if err != nil {
+		return err
+	}
+	log.Debugln(res.RawResponse.Status + res.String())
+	errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
+	errNo := utils.Json.Get(res.Body(), "errno").ToInt()
+	if errCode != 0 || errNo != 0 {
+		return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
+	}
+	return nil
 }
 
 var _ driver.Driver = (*BaiduNetdisk)(nil)
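Note: the rewritten Put hashes the cached file in a single pass: each io.CopyN feeds an io.MultiWriter whose branches are the whole-file MD5, a per-slice MD5 that is reset after every block, and a third MD5 capped at the first 256 KB (Baidu's slice-md5). A self-contained sketch with only the standard library (limitWriter stands in for alist's utils.LimitWriter):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
)

// limitWriter mimics utils.LimitWriter: it silently discards
// everything after the first n bytes.
type limitWriter struct {
	w io.Writer
	n int64
}

func (l *limitWriter) Write(p []byte) (int, error) {
	if l.n > 0 {
		k := int64(len(p))
		if k > l.n {
			k = l.n
		}
		if _, err := l.w.Write(p[:k]); err != nil {
			return 0, err
		}
		l.n -= k
	}
	return len(p), nil // report the full length so MultiWriter keeps going
}

func main() {
	data := bytes.Repeat([]byte("x"), 10<<20) // 10 MiB of sample data
	r := bytes.NewReader(data)

	const sliceSize = 4 << 20  // per-slice block size
	const headSize = 256 << 10 // slice-md5 covers only the first 256 KB

	fileH, sliceH, headH := md5.New(), md5.New(), md5.New()
	head := &limitWriter{w: headH, n: headSize}

	var blockList []string
	for {
		// one CopyN feeds all three digests at once
		n, err := io.CopyN(io.MultiWriter(fileH, sliceH, head), r, sliceSize)
		if n > 0 {
			blockList = append(blockList, hex.EncodeToString(sliceH.Sum(nil)))
			sliceH.Reset()
		}
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
	}
	fmt.Println("content-md5:", hex.EncodeToString(fileH.Sum(nil)))
	fmt.Println("slice-md5:  ", hex.EncodeToString(headH.Sum(nil)))
	fmt.Println("blocks:     ", len(blockList))
}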
@@ -15,6 +15,8 @@ type Addition struct {
 	ClientSecret  string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
 	CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
 	AccessToken   string
+	UploadThread  string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
+	UploadAPI     string `json:"upload_api" default:"https://d.pcs.baidu.com"`
 }
 
 var config = driver.Config{
@@ -1,6 +1,7 @@
 package baidu_netdisk
 
 import (
+	"path"
 	"strconv"
 	"time"
 
@@ -17,10 +18,8 @@ type File struct {
 	//OwnerType    int    `json:"owner_type"`
 	//Category     int    `json:"category"`
 	//RealCategory string `json:"real_category"`
 	FsId int64 `json:"fs_id"`
-	ServerMtime int64 `json:"server_mtime"`
 	//OperId int `json:"oper_id"`
-	//ServerCtime int `json:"server_ctime"`
 	Thumbs struct {
 		//Icon string `json:"icon"`
 		Url3 string `json:"url3"`
@@ -28,29 +27,52 @@ type File struct {
 		//Url1 string `json:"url1"`
 	} `json:"thumbs"`
 	//Wpfile int `json:"wpfile"`
-	//LocalMtime int `json:"local_mtime"`
 	Size int64 `json:"size"`
 	//ExtentTinyint7 int `json:"extent_tinyint7"`
 	Path string `json:"path"`
 	//Share int `json:"share"`
-	//ServerAtime int `json:"server_atime"`
 	//Pl int `json:"pl"`
-	//LocalCtime int `json:"local_ctime"`
 	ServerFilename string `json:"server_filename"`
-	//Md5 string `json:"md5"`
+	Md5            string `json:"md5"`
 	//OwnerId int `json:"owner_id"`
 	//Unlist int `json:"unlist"`
 	Isdir int `json:"isdir"`
+
+	// list resp
+	ServerCtime int64 `json:"server_ctime"`
+	ServerMtime int64 `json:"server_mtime"`
+	LocalMtime  int64 `json:"local_mtime"`
+	LocalCtime  int64 `json:"local_ctime"`
+	//ServerAtime int64 `json:"server_atime"`
+
+	// only create and precreate resp
+	Ctime int64 `json:"ctime"`
+	Mtime int64 `json:"mtime"`
 }
 
 func fileToObj(f File) *model.ObjThumb {
+	if f.ServerFilename == "" {
+		f.ServerFilename = path.Base(f.Path)
+	}
+	if f.LocalCtime == 0 {
+		f.LocalCtime = f.Ctime
+	}
+	if f.LocalMtime == 0 {
+		f.LocalMtime = f.Mtime
+	}
 	return &model.ObjThumb{
 		Object: model.Object{
 			ID:       strconv.FormatInt(f.FsId, 10),
+			Path:     f.Path,
 			Name:     f.ServerFilename,
 			Size:     f.Size,
-			Modified: time.Unix(f.ServerMtime, 0),
+			Modified: time.Unix(f.LocalMtime, 0),
+			Ctime:    time.Unix(f.LocalCtime, 0),
 			IsFolder: f.Isdir == 1,
+
+			// the MD5 fetched directly here is wrong
+			// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
 	}
@@ -154,10 +176,15 @@ type DownloadResp2 struct {
 }
 
 type PrecreateResp struct {
-	Path       string `json:"path"`
-	Uploadid   string `json:"uploadid"`
+	Errno      int   `json:"errno"`
+	RequestId  int64 `json:"request_id"`
 	ReturnType int   `json:"return_type"`
-	BlockList  []int `json:"block_list"`
-	Errno      int   `json:"errno"`
-	RequestId  int64 `json:"request_id"`
+
+	// return_type=1
+	Path      string `json:"path"`
+	Uploadid  string `json:"uploadid"`
+	BlockList []int  `json:"block_list"`
+
+	// return_type=2
+	File File `json:"info"`
 }
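Note: the regrouped PrecreateResp reflects the two shapes the precreate call can return: with return_type=1 the server expects the slices listed in BlockList to be uploaded under Uploadid, while return_type=2 signals an MD5 match, so info already describes the finished file. A sketch of dispatching on that field (local types and the sample payload are illustrative, not the API's full schema):

package main

import (
	"encoding/json"
	"fmt"
)

type precreateResp struct {
	Errno      int    `json:"errno"`
	ReturnType int    `json:"return_type"`
	Uploadid   string `json:"uploadid"`
	BlockList  []int  `json:"block_list"`
}

func main() {
	// a response asking us to upload two slices (illustrative payload)
	raw := []byte(`{"errno":0,"return_type":1,"uploadid":"N1-abc","block_list":[0,1]}`)
	var resp precreateResp
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	switch resp.ReturnType {
	case 1:
		fmt.Printf("upload slices %v with uploadid %s\n", resp.BlockList, resp.Uploadid)
	case 2:
		fmt.Println("rapid upload: the server already has this content")
	}
}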
@@ -1,25 +1,27 @@
 package baidu_netdisk
 
 import (
+	"errors"
 	"fmt"
 	"net/http"
-	"net/url"
 	"strconv"
-	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
 )
 
 // do others that not defined in Driver interface
 
 func (d *BaiduNetdisk) refreshToken() error {
 	err := d._refreshToken()
-	if err != nil && err == errs.EmptyToken {
+	if err != nil && errors.Is(err, errs.EmptyToken) {
 		err = d._refreshToken()
 	}
 	return err
@@ -50,30 +52,40 @@ func (d *BaiduNetdisk) _refreshToken() error {
 }
 
 func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
-	req := base.RestyClient.R()
-	req.SetQueryParam("access_token", d.AccessToken)
-	if callback != nil {
-		callback(req)
-	}
-	if resp != nil {
-		req.SetResult(resp)
-	}
-	res, err := req.Execute(method, furl)
-	if err != nil {
-		return nil, err
-	}
-	errno := utils.Json.Get(res.Body(), "errno").ToInt()
-	if errno != 0 {
-		if errno == -6 {
-			err = d.refreshToken()
-			if err != nil {
-				return nil, err
-			}
-			return d.request(furl, method, callback, resp)
-		}
-		return nil, fmt.Errorf("errno: %d, refer to https://pan.baidu.com/union/doc/", errno)
-	}
-	return res.Body(), nil
+	var result []byte
+	err := retry.Do(func() error {
+		req := base.RestyClient.R()
+		req.SetQueryParam("access_token", d.AccessToken)
+		if callback != nil {
+			callback(req)
+		}
+		if resp != nil {
+			req.SetResult(resp)
+		}
+		res, err := req.Execute(method, furl)
+		if err != nil {
+			return err
+		}
+		log.Debugf("[baidu_netdisk] req: %s, resp: %s", furl, res.String())
+		errno := utils.Json.Get(res.Body(), "errno").ToInt()
+		if errno != 0 {
+			if utils.SliceContains([]int{111, -6}, errno) {
+				log.Info("refreshing baidu_netdisk token.")
+				err2 := d.refreshToken()
+				if err2 != nil {
+					return retry.Unrecoverable(err2)
+				}
+			}
+			return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
+		}
+		result = res.Body()
+		return nil
+	},
+		retry.LastErrorOnly(true),
+		retry.Attempts(3),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
+	return result, err
 }
 
 func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) {
@@ -82,10 +94,10 @@ func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp inter
 	}, resp)
 }
 
-func (d *BaiduNetdisk) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) {
+func (d *BaiduNetdisk) postForm(pathname string, params map[string]string, form map[string]string, resp interface{}) ([]byte, error) {
 	return d.request("https://pan.baidu.com/rest/2.0"+pathname, http.MethodPost, func(req *resty.Request) {
 		req.SetQueryParams(params)
-		req.SetBody(data)
+		req.SetFormData(form)
 	}, resp)
 }
 
@@ -140,6 +152,9 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
 	//if res.StatusCode() == 302 {
 	u = res.Header().Get("location")
 	//}
+
+	updateObjMd5(file, "pan.baidu.com", u)
+
 	return &model.Link{
 		URL: u,
 		Header: http.Header{
@@ -162,6 +177,9 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 	if err != nil {
 		return nil, err
 	}
+
+	updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
+
 	return &model.Link{
 		URL: resp.Info[0].Dlink,
 		Header: http.Header{
@@ -170,32 +188,79 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 	}, nil
 }
 
-func (d *BaiduNetdisk) manage(opera string, filelist interface{}) ([]byte, error) {
+func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
 	params := map[string]string{
 		"method": "filemanager",
 		"opera":  opera,
 	}
-	marshal, err := utils.Json.Marshal(filelist)
-	if err != nil {
-		return nil, err
-	}
-	data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
-	return d.post("/xpan/file", params, data, nil)
+	marshal, _ := utils.Json.MarshalToString(filelist)
+	return d.postForm("/xpan/file", params, map[string]string{
+		"async":    "0",
+		"filelist": marshal,
+		"ondup":    "fail",
+	}, nil)
 }
 
-func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
+func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
 	params := map[string]string{
 		"method": "create",
 	}
-	data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
-	if uploadid != "" {
-		data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
-	}
-	return d.post("/xpan/file", params, data, nil)
+	form := map[string]string{
+		"path":  path,
+		"size":  strconv.FormatInt(size, 10),
+		"isdir": strconv.Itoa(isdir),
+		"rtype": "3",
+	}
+	if mtime != 0 && ctime != 0 {
+		joinTime(form, ctime, mtime)
+	}
+
+	if uploadid != "" {
+		form["uploadid"] = uploadid
+	}
+	if block_list != "" {
+		form["block_list"] = block_list
+	}
+	return d.postForm("/xpan/file", params, form, resp)
 }
 
-func encodeURIComponent(str string) string {
-	r := url.QueryEscape(str)
-	r = strings.ReplaceAll(r, "+", "%20")
-	return r
+func joinTime(form map[string]string, ctime, mtime int64) {
+	form["local_mtime"] = strconv.FormatInt(mtime, 10)
+	form["local_ctime"] = strconv.FormatInt(ctime, 10)
+}
+
+func updateObjMd5(obj model.Obj, userAgent, u string) {
+	object := model.GetRawObject(obj)
+	if object != nil {
+		req, _ := http.NewRequest(http.MethodHead, u, nil)
+		req.Header.Add("User-Agent", userAgent)
+		resp, _ := base.HttpClient.Do(req)
+		if resp != nil {
+			contentMd5 := resp.Header.Get("Content-Md5")
+			object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
+		}
+	}
+}
+
+const (
+	DefaultSliceSize int64 = 4 * utils.MB
+	VipSliceSize           = 16 * utils.MB
+	SVipSliceSize          = 32 * utils.MB
+)
+
+func (d *BaiduNetdisk) getSliceSize() int64 {
+	switch d.vipType {
+	case 1:
+		return VipSliceSize
+	case 2:
+		return SVipSliceSize
+	default:
+		return DefaultSliceSize
+	}
+}
+
+// func encodeURIComponent(str string) string {
+// 	r := url.QueryEscape(str)
+// 	r = strings.ReplaceAll(r, "+", "%20")
+// 	return r
+// }
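Note: request now runs the whole HTTP round-trip inside retry.Do with exponential backoff, and a failed token refresh is wrapped in retry.Unrecoverable so retrying stops immediately instead of burning the remaining attempts. A minimal sketch of that pattern with github.com/avast/retry-go (callAPI is a hypothetical flaky call standing in for the driver's request body):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/avast/retry-go"
)

var errAuth = errors.New("token refresh failed")

// callAPI fails twice with a transient error, then succeeds.
func callAPI(attempt *int) error {
	*attempt++
	if *attempt < 3 {
		return fmt.Errorf("transient errno on attempt %d", *attempt)
	}
	return nil
}

func main() {
	attempt := 0
	err := retry.Do(
		func() error {
			if err := callAPI(&attempt); err != nil {
				// an unrecoverable error (e.g. re-auth failed) aborts retrying
				if errors.Is(err, errAuth) {
					return retry.Unrecoverable(err)
				}
				return err
			}
			return nil
		},
		retry.LastErrorOnly(true),
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay),
	)
	fmt.Println("attempts:", attempt, "err:", err)
}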
@@ -4,18 +4,22 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"math"
-	"os"
 	"regexp"
 	"strconv"
 	"strings"
+	"time"
 
+	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
 )
 
@@ -26,6 +30,8 @@ type BaiduPhoto struct {
 	AccessToken string
 	Uk          int64
 	root        model.Obj
+
+	uploadThread int
 }
 
 func (d *BaiduPhoto) Config() driver.Config {
@@ -37,6 +43,11 @@ func (d *BaiduPhoto) GetAddition() driver.Additional {
 }
 
 func (d *BaiduPhoto) Init(ctx context.Context) error {
+	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
+	if d.uploadThread < 1 || d.uploadThread > 32 {
+		d.uploadThread, d.UploadThread = 3, "3"
+	}
+
 	if err := d.refreshToken(); err != nil {
 		return err
 	}
@@ -126,7 +137,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
 	case *File:
 		return d.linkFile(ctx, file, args)
 	case *AlbumFile:
-		return d.linkAlbum(ctx, file, args)
+		f, err := d.CopyAlbumFile(ctx, file)
+		if err != nil {
+			return nil, err
+		}
+		return d.linkFile(ctx, f, args)
+		// getting a link this way fails occasionally
+		//return d.linkAlbum(ctx, file, args)
 	}
 	return nil, errs.NotFile
 }
@@ -169,9 +186,9 @@ func (d *BaiduPhoto) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.
 }
 
 func (d *BaiduPhoto) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
-	// only moving between albums is supported
 	if file, ok := srcObj.(*AlbumFile); ok {
-		if _, ok := dstDir.(*Album); ok {
+		switch dstDir.(type) {
+		case *Album, *Root: // albumfile -> root -> album or albumfile -> root
 			newObj, err := d.Copy(ctx, srcObj, dstDir)
 			if err != nil {
 				return nil, err
@@ -205,45 +222,57 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	// zero-size files are not supported
+	if stream.GetSize() == 0 {
+		return nil, fmt.Errorf("file size cannot be zero")
+	}
+
+	// TODO:
+	// no rapid-upload method has been found yet
+
 	// the full file md5 is needed, so io.Seek must be supported
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
+
+	const DEFAULT int64 = 1 << 22
+	const SliceSize int64 = 1 << 18
 
 	// compute the required data
-	const DEFAULT = 1 << 22
-	const SliceSize = 1 << 18
-	count := int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
+	streamSize := stream.GetSize()
+	count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
+	lastBlockSize := streamSize % DEFAULT
+	if lastBlockSize == 0 {
+		lastBlockSize = DEFAULT
+	}
 
+	// step.1 compute MD5s
 	sliceMD5List := make([]string, 0, count)
-	fileMd5 := md5.New()
-	sliceMd5 := md5.New()
-	sliceMd52 := md5.New()
-	slicemd52Write := utils.LimitWriter(sliceMd52, SliceSize)
+	byteSize := int64(DEFAULT)
+	fileMd5H := md5.New()
+	sliceMd5H := md5.New()
+	sliceMd5H2 := md5.New()
+	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(ctx) {
 			return nil, ctx.Err()
 		}
-		_, err := io.CopyN(io.MultiWriter(fileMd5, sliceMd5, slicemd52Write), tempFile, DEFAULT)
-		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		if i == count {
+			byteSize = lastBlockSize
+		}
+		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		if err != nil && err != io.EOF {
 			return nil, err
 		}
-		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5.Sum(nil)))
-		sliceMd5.Reset()
+		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
+		sliceMd5H.Reset()
 	}
-	if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
-		return nil, err
-	}
-	content_md5 := hex.EncodeToString(fileMd5.Sum(nil))
-	slice_md5 := hex.EncodeToString(sliceMd52.Sum(nil))
+	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
+	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
+	blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)
 
-	// start the upload
+	// step.2 precreate
 	params := map[string]string{
 		"autoinit": "1",
 		"isdir":    "0",
@@ -251,46 +280,69 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		"ctype":       "11",
 		"path":        fmt.Sprintf("/%s", stream.GetName()),
 		"size":        fmt.Sprint(stream.GetSize()),
-		"slice-md5":   slice_md5,
-		"content-md5": content_md5,
-		"block_list":  MustString(utils.Json.MarshalToString(sliceMD5List)),
+		"slice-md5":   sliceMd5,
+		"content-md5": contentMd5,
+		"block_list":  blockListStr,
 	}
 
-	// precreate
-	var precreateResp PrecreateResp
-	_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
-		r.SetContext(ctx)
-		r.SetFormData(params)
-	}, &precreateResp)
-	if err != nil {
-		return nil, err
+	// try to resume previously saved progress
+	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
+	if !ok {
+		_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
+			r.SetContext(ctx)
+			r.SetFormData(params)
+		}, &precreateResp)
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	switch precreateResp.ReturnType {
-	case 1: // upload the file
-		uploadParams := map[string]string{
-			"method":   "upload",
-			"path":     params["path"],
-			"uploadid": precreateResp.UploadID,
-		}
+	case 1: //step.3 upload the file slices
		threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
			retry.Attempts(3),
			retry.Delay(time.Second),
			retry.DelayType(retry.BackOffDelay))
		for i, partseq := range precreateResp.BlockList {
			if utils.IsCanceled(upCtx) {
				break
			}
 
-		for i := 0; i < count; i++ {
-			if utils.IsCanceled(ctx) {
-				return nil, ctx.Err()
+			i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
+			if partseq+1 == count {
+				byteSize = lastBlockSize
 			}
-			uploadParams["partseq"] = fmt.Sprint(i)
-			_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
-				r.SetContext(ctx)
-				r.SetQueryParams(uploadParams)
-				r.SetFileReader("file", stream.GetName(), io.LimitReader(tempFile, DEFAULT))
-			}, nil)
-			if err != nil {
-				return nil, err
+			threadG.Go(func(ctx context.Context) error {
+				uploadParams := map[string]string{
+					"method":   "upload",
+					"path":     params["path"],
+					"partseq":  fmt.Sprint(partseq),
+					"uploadid": precreateResp.UploadID,
+				}
+
+				_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
+					r.SetContext(ctx)
+					r.SetQueryParams(uploadParams)
+					r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+				}, nil)
+				if err != nil {
+					return err
+				}
+				up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
+				precreateResp.BlockList[i] = -1
+				return nil
+			})
+		}
+		if err = threadG.Wait(); err != nil {
+			if errors.Is(err, context.Canceled) {
+				precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
+				base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
 			}
-			up(i * 100 / count)
+			return nil, err
 		}
 		fallthrough
-	case 2: // create the file
+	case 2: //step.4 create the file
 		params["uploadid"] = precreateResp.UploadID
 		_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
 			r.SetContext(ctx)
@@ -300,7 +352,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		return nil, err
 	}
 	fallthrough
-	case 3: // add to the album
+	case 3: //step.5 add to the album
 		rootfile := precreateResp.Data.toFile()
 		if album, ok := dstDir.(*Album); ok {
 			return d.AddAlbumFile(ctx, album, rootfile)
@@ -61,11 +61,18 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile {
 
 func renameAlbum(album *Album, newName string) *Album {
 	return &Album{
 		AlbumID:      album.AlbumID,
 		Tid:          album.Tid,
 		JoinTime:     album.JoinTime,
-		CreateTime:   album.CreateTime,
+		CreationTime: album.CreationTime,
 		Title:        newName,
 		Mtime:        time.Now().Unix(),
 	}
 }
+
+func BoolToIntStr(b bool) string {
+	if b {
+		return "1"
+	}
+	return "0"
+}
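Note: both Baidu drivers now push slices concurrently through alist's pkg/errgroup, with each worker reading its own io.NewSectionReader window into the temp file so goroutines never share a file offset. A rough equivalent built on golang.org/x/sync/errgroup; the retry options of alist's wrapper are omitted, and SetLimit plays the role of d.uploadThread:

package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"golang.org/x/sync/errgroup"
)

func main() {
	f, err := os.CreateTemp("", "slices")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	if _, err := f.WriteString("0123456789abcdef"); err != nil {
		panic(err)
	}

	const sliceSize = int64(4)
	total := int64(16)
	count := int(total / sliceSize)

	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(3) // cap concurrent uploads, like d.uploadThread
	for part := 0; part < count; part++ {
		offset := int64(part) * sliceSize
		g.Go(func() error {
			// each goroutine gets an independent window into the file
			r := io.NewSectionReader(f, offset, sliceSize)
			buf, err := io.ReadAll(r)
			if err != nil {
				return err
			}
			select {
			case <-ctx.Done(): // stop early if a sibling failed
				return ctx.Err()
			default:
			}
			fmt.Printf("uploaded part %d: %s\n", offset/sliceSize, buf)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}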
@@ -10,8 +10,10 @@ type Addition struct {
 	ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
 	AlbumID  string `json:"album_id"`
 	//AlbumPassword string `json:"album_password"`
+	DeleteOrigin bool   `json:"delete_origin"`
 	ClientID     string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
 	ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 }
 
 var config = driver.Config{
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/alist-org/alist/v3/pkg/utils"
+
 	"github.com/alist-org/alist/v3/internal/model"
 )
 
@@ -51,22 +53,17 @@ type (
 		Ctime    int64    `json:"ctime"` // creation time, in seconds
 		Mtime    int64    `json:"mtime"` // modification time, in seconds
 		Thumburl []string `json:"thumburl"`
-
-		parseTime *time.Time
+		Md5      string   `json:"md5"`
 	}
 )
 
 func (c *File) GetSize() int64  { return c.Size }
 func (c *File) GetName() string { return getFileName(c.Path) }
-func (c *File) ModTime() time.Time {
-	if c.parseTime == nil {
-		c.parseTime = toTime(c.Mtime)
-	}
-	return *c.parseTime
-}
-func (c *File) IsDir() bool     { return false }
-func (c *File) GetID() string   { return "" }
-func (c *File) GetPath() string { return "" }
+func (c *File) CreateTime() time.Time { return time.Unix(c.Ctime, 0) }
+func (c *File) ModTime() time.Time    { return time.Unix(c.Mtime, 0) }
+func (c *File) IsDir() bool           { return false }
+func (c *File) GetID() string         { return "" }
+func (c *File) GetPath() string       { return "" }
 
 func (c *File) Thumb() string {
 	if len(c.Thumburl) > 0 {
 		return c.Thumburl[0]
@@ -74,6 +71,10 @@ func (c *File) Thumb() string {
 	return ""
 }
 
+func (c *File) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.MD5, c.Md5)
+}
+
 /* album section */
 type (
 	AlbumListResp struct {
@@ -84,12 +85,12 @@ type (
 	}
 
 	Album struct {
 		AlbumID      string `json:"album_id"`
 		Tid          int64  `json:"tid"`
 		Title        string `json:"title"`
 		JoinTime     int64  `json:"join_time"`
-		CreateTime   int64  `json:"create_time"`
+		CreationTime int64  `json:"create_time"`
 		Mtime        int64  `json:"mtime"`
 
 		parseTime *time.Time
 	}
@@ -109,17 +110,17 @@ type (
 	}
 )
 
-func (a *Album) GetSize() int64  { return 0 }
-func (a *Album) GetName() string { return a.Title }
-func (a *Album) ModTime() time.Time {
-	if a.parseTime == nil {
-		a.parseTime = toTime(a.Mtime)
-	}
-	return *a.parseTime
+func (a *Album) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
 }
-func (a *Album) IsDir() bool     { return true }
-func (a *Album) GetID() string   { return "" }
-func (a *Album) GetPath() string { return "" }
+
+func (a *Album) GetSize() int64        { return 0 }
+func (a *Album) GetName() string       { return a.Title }
+func (a *Album) CreateTime() time.Time { return time.Unix(a.CreationTime, 0) }
+func (a *Album) ModTime() time.Time    { return time.Unix(a.Mtime, 0) }
+func (a *Album) IsDir() bool           { return true }
+func (a *Album) GetID() string         { return "" }
+func (a *Album) GetPath() string       { return "" }
 
 type (
 	CopyFileResp struct {
@@ -160,9 +161,9 @@ type (
 		CreateFileResp
 
 		// returned when the file does not exist
 		Path      string `json:"path"`
 		UploadID  string `json:"uploadid"`
-		Blocklist []int64 `json:"block_list"`
+		BlockList []int  `json:"block_list"`
 	}
 )
 
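Note: exposing the listing's md5 through File.GetHash is what lets a caller such as baidu_netdisk's PutRapid (above) attempt a create-by-hash before falling back to a byte upload. A sketch of that idea with local stand-in types (hashInfo here simplifies alist's utils.HashInfo):

package main

import (
	"errors"
	"fmt"
)

// hashInfo is a simplified stand-in for utils.HashInfo.
type hashInfo struct{ md5 string }

type obj interface{ GetHash() hashInfo }

type file struct{ Md5 string }

func (f file) GetHash() hashInfo { return hashInfo{md5: f.Md5} }

// putRapid succeeds only when a known MD5 lets the server
// link existing content instead of receiving the bytes again.
func putRapid(o obj) error {
	h := o.GetHash()
	if len(h.md5) != 32 { // an MD5 hex digest is 32 characters
		return errors.New("invalid hash")
	}
	fmt.Println("create-by-hash with md5", h.md5)
	return nil
}

func main() {
	if err := putRapid(file{Md5: "9e107d9d372bb6826bd81d3542a419d6"}); err != nil {
		fmt.Println("fall back to full upload:", err)
	}
}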
@@ -21,7 +21,7 @@ const (
 	FILE_API_URL_V2 = API_URL + "/file/v2"
 )

-func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
 	req := base.RestyClient.R().
 		SetQueryParam("access_token", d.AccessToken)
 	if callback != nil {
@@ -52,9 +52,17 @@ func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallba
 	default:
 		return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
 	}
-	return res.Body(), nil
+	return res, nil
 }

+//func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+//	res, err := d.request(furl, method, callback, resp)
+//	if err != nil {
+//		return nil, err
+//	}
+//	return res.Body(), nil
+//}
+
 func (d *BaiduPhoto) refreshToken() error {
 	u := "https://openapi.baidu.com/oauth/2.0/token"
 	var resp base.TokenResp
@@ -79,11 +87,11 @@ func (d *BaiduPhoto) refreshToken() error {
 	return nil
 }

-func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
 	return d.Request(furl, http.MethodGet, callback, resp)
 }

-func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
 	return d.Request(furl, http.MethodPost, callback, resp)
 }

@@ -223,7 +231,7 @@ func (d *BaiduPhoto) DeleteAlbum(ctx context.Context, album *Album) error {
 		r.SetFormData(map[string]string{
 			"album_id": album.AlbumID,
 			"tid":      fmt.Sprint(album.Tid),
-			"delete_origin_image": "0", // whether to delete the original image: 0 keep, 1 delete
+			"delete_origin_image": BoolToIntStr(d.DeleteOrigin), // whether to delete the original image: 0 keep, 1 delete
 		})
 	}, nil)
 	return err
@@ -237,7 +245,7 @@ func (d *BaiduPhoto) DeleteAlbumFile(ctx context.Context, file *AlbumFile) error
 			"album_id": fmt.Sprint(file.AlbumID),
 			"tid":      fmt.Sprint(file.Tid),
 			"list":     fmt.Sprintf(`[{"fsid":%d,"uk":%d}]`, file.Fsid, file.Uk),
-			"del_origin": "0", // whether to delete the original image: 0 keep, 1 delete
+			"del_origin": BoolToIntStr(d.DeleteOrigin), // whether to delete the original image: 0 keep, 1 delete
 		})
 	}, nil)
 	return err
@@ -391,6 +399,49 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
 	return link, nil
 }

+/*func (d *BaiduPhoto) linkStreamAlbum(ctx context.Context, file *AlbumFile) (*model.Link, error) {
+	return &model.Link{
+		Header: http.Header{},
+		Writer: func(w io.Writer) error {
+			res, err := d.Get(ALBUM_API_URL+"/streaming", func(r *resty.Request) {
+				r.SetContext(ctx)
+				r.SetQueryParams(map[string]string{
+					"fsid":     fmt.Sprint(file.Fsid),
+					"album_id": file.AlbumID,
+					"tid":      fmt.Sprint(file.Tid),
+					"uk":       fmt.Sprint(file.Uk),
+				}).SetDoNotParseResponse(true)
+			}, nil)
+			if err != nil {
+				return err
+			}
+			defer res.RawBody().Close()
+			_, err = io.Copy(w, res.RawBody())
+			return err
+		},
+	}, nil
+}*/
+
+/*func (d *BaiduPhoto) linkStream(ctx context.Context, file *File) (*model.Link, error) {
+	return &model.Link{
+		Header: http.Header{},
+		Writer: func(w io.Writer) error {
+			res, err := d.Get(FILE_API_URL_V1+"/streaming", func(r *resty.Request) {
+				r.SetContext(ctx)
+				r.SetQueryParams(map[string]string{
+					"fsid": fmt.Sprint(file.Fsid),
+				}).SetDoNotParseResponse(true)
+			}, nil)
+			if err != nil {
+				return err
+			}
+			defer res.RawBody().Close()
+			_, err = io.Copy(w, res.RawBody())
+			return err
+		},
+	}, nil
+}*/
+
 // fetch uk
 func (d *BaiduPhoto) uInfo() (*UInfo, error) {
 	var info UInfo
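
The two delete calls above now honor the driver's `DeleteOrigin` option instead of hard-coding `"0"`. `BoolToIntStr` itself is not shown in this section; a minimal sketch of such a helper, assuming it only maps a Go bool onto the `"0"`/`"1"` strings these form fields expect:

```go
package main

import "fmt"

// BoolToIntStr maps a bool onto the "0"/"1" strings used by the form
// fields above (a hypothetical reimplementation for illustration; the
// real helper is defined elsewhere in this changeset).
func BoolToIntStr(b bool) string {
	if b {
		return "1"
	}
	return "0"
}

func main() {
	fmt.Println(BoolToIntStr(true), BoolToIntStr(false)) // 1 0
}
```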
31 drivers/base/upload.go Normal file
@@ -0,0 +1,31 @@
+package base
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Xhofe/go-cache"
+	"github.com/alist-org/alist/v3/internal/driver"
+)
+
+// storage upload progress, for upload recovery
+var UploadStateCache = cache.NewMemCache(cache.WithShards[any](32))
+
+// SaveUploadProgress keeps the upload progress for 20 minutes
+func SaveUploadProgress(driver driver.Driver, state any, keys ...string) bool {
+	return UploadStateCache.Set(
+		fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")),
+		state,
+		cache.WithEx[any](time.Minute*20))
+}
+
+// GetUploadProgress fetches and deletes the stored state: an upload can
+// only be resumed by one process at a time.
+func GetUploadProgress[T any](driver driver.Driver, keys ...string) (state T, ok bool) {
+	v, ok := UploadStateCache.GetDel(fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")))
+	if ok {
+		state, ok = v.(T)
+	}
+	return
+}
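
The new `drivers/base/upload.go` gives drivers a shared place to stash in-flight upload state. A standalone sketch of the save/consume pattern the helpers wrap, using the same `go-cache` calls that appear in the file (the key and state type here are made up for the example):

```go
package main

import (
	"fmt"
	"time"

	"github.com/Xhofe/go-cache"
)

// State is stored under a composite key with a 20-minute TTL; reading it
// back with GetDel also removes it, so only one process can resume a
// given upload.
var uploadStateCache = cache.NewMemCache(cache.WithShards[any](32))

type uploadState struct {
	UploadID string
	Offset   int64
}

func main() {
	key := "ExampleDriver-upload-somefile"
	uploadStateCache.Set(key, uploadState{UploadID: "abc", Offset: 4 << 20},
		cache.WithEx[any](time.Minute*20))

	if v, ok := uploadStateCache.GetDel(key); ok {
		state := v.(uploadState)
		fmt.Printf("resuming upload %s at offset %d\n", state.UploadID, state.Offset)
	}
	if _, ok := uploadStateCache.GetDel(key); !ok {
		fmt.Println("second reader sees nothing: the state was consumed")
	}
}
```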
@@ -1,30 +1 @@
 package base
-
-import (
-	"io"
-	"net/http"
-	"strconv"
-
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/pkg/http_range"
-	"github.com/alist-org/alist/v3/pkg/utils"
-)
-
-func HandleRange(link *model.Link, file io.ReadSeekCloser, header http.Header, size int64) {
-	if header.Get("Range") != "" {
-		r, err := http_range.ParseRange(header.Get("Range"), size)
-		if err == nil && len(r) > 0 {
-			_, err := file.Seek(r[0].Start, io.SeekStart)
-			if err == nil {
-				link.Data = utils.NewLimitReadCloser(file, func() error {
-					return file.Close()
-				}, r[0].Length)
-				link.Status = http.StatusPartialContent
-				link.Header = http.Header{
-					"Content-Range":  []string{r[0].ContentRange(size)},
-					"Content-Length": []string{strconv.FormatInt(r[0].Length, 10)},
-				}
-			}
-		}
-	}
-}
@@ -49,7 +49,19 @@ func (d *Cloudreve) List(ctx context.Context, dir model.Obj, args model.ListArgs
 	}

 	return utils.SliceConvert(r.Objects, func(src Object) (model.Obj, error) {
-		return objectToObj(src), nil
+		thumb, err := d.GetThumb(src)
+		if err != nil {
+			return nil, err
+		}
+		if src.Type == "dir" && d.EnableThumbAndFolderSize {
+			var dprop DirectoryProp
+			err = d.request(http.MethodGet, "/object/property/"+src.Id+"?is_folder=true", nil, &dprop)
+			if err != nil {
+				return nil, err
+			}
+			src.Size = dprop.Size
+		}
+		return objectToObj(src, thumb), nil
 	})
 }

@@ -115,7 +127,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	if stream.GetReadCloser() == http.NoBody {
+	if io.ReadCloser(stream) == http.NoBody {
 		return d.create(ctx, dstDir, stream)
 	}
 	var r DirectoryResp
@@ -9,10 +9,12 @@ type Addition struct {
 	// Usually one of two
 	driver.RootPath
 	// define other
 	Address  string `json:"address" required:"true"`
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Cookie   string `json:"cookie"`
+	CustomUA                 string `json:"custom_ua"`
+	EnableThumbAndFolderSize bool   `json:"enable_thumb_and_folder_size"`
 }

 var config = driver.Config{
@@ -44,13 +44,20 @@ type Object struct {
 	SourceEnabled bool   `json:"source_enabled"`
 }

-func objectToObj(f Object) *model.Object {
-	return &model.Object{
-		ID:       f.Id,
-		Name:     f.Name,
-		Size:     int64(f.Size),
-		Modified: f.Date,
-		IsFolder: f.Type == "dir",
+type DirectoryProp struct {
+	Size int `json:"size"`
+}
+
+func objectToObj(f Object, t model.Thumbnail) *model.ObjThumb {
+	return &model.ObjThumb{
+		Object: model.Object{
+			ID:       f.Id,
+			Name:     f.Name,
+			Size:     int64(f.Size),
+			Modified: f.Date,
+			IsFolder: f.Type == "dir",
+		},
+		Thumbnail: t,
 	}
 }
@@ -22,15 +22,18 @@ const loginPath = "/user/session"

 func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
 	u := d.Address + "/api/v3" + path
+	ua := d.CustomUA
+	if ua == "" {
+		ua = base.UserAgent
+	}
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie":     "cloudreve-session=" + d.Cookie,
 		"Accept":     "application/json, text/plain, */*",
-		"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
+		"User-Agent": ua,
 	})

 	var r Resp

 	req.SetResult(&r)

 	if callback != nil {
@@ -146,3 +149,26 @@ func convertSrc(obj model.Obj) map[string]interface{} {
 	m["items"] = items
 	return m
 }
+
+func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
+	if !d.Addition.EnableThumbAndFolderSize {
+		return model.Thumbnail{}, nil
+	}
+	ua := d.CustomUA
+	if ua == "" {
+		ua = base.UserAgent
+	}
+	req := base.NoRedirectClient.R()
+	req.SetHeaders(map[string]string{
+		"Cookie":     "cloudreve-session=" + d.Cookie,
+		"Accept":     "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
+		"User-Agent": ua,
+	})
+	resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
+	if err != nil {
+		return model.Thumbnail{}, err
+	}
+	return model.Thumbnail{
+		Thumbnail: resp.Header().Get("Location"),
+	}, nil
+}
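
`GetThumb` deliberately goes through `base.NoRedirectClient`: Cloudreve answers the thumb endpoint with a redirect, so the thumbnail URL can be lifted from the `Location` header without downloading the image. A minimal sketch of that trick with plain `net/http` (the URL below is illustrative, not a real Cloudreve endpoint):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// A client that stops at the first response instead of following 30x.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	resp, err := client.Get("http://example.com/api/v3/file/thumb/123")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// For a redirect response this is the real thumbnail URL.
	fmt.Println("thumbnail lives at:", resp.Header.Get("Location"))
}
```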
402 drivers/crypt/driver.go Normal file
@@ -0,0 +1,402 @@
+package crypt
+
+import (
+	"context"
+	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
+	"io"
+	stdpath "path"
+	"regexp"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/fs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/alist-org/alist/v3/server/common"
+	rcCrypt "github.com/rclone/rclone/backend/crypt"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/obscure"
+	log "github.com/sirupsen/logrus"
+)
+
+type Crypt struct {
+	model.Storage
+	Addition
+	cipher        *rcCrypt.Cipher
+	remoteStorage driver.Driver
+}
+
+const obfuscatedPrefix = "___Obfuscated___"
+
+func (d *Crypt) Config() driver.Config {
+	return config
+}
+
+func (d *Crypt) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *Crypt) Init(ctx context.Context) error {
+	//obfuscate credentials if it's updated or just created
+	err := d.updateObfusParm(&d.Password)
+	if err != nil {
+		return fmt.Errorf("failed to obfuscate password: %w", err)
+	}
+	err = d.updateObfusParm(&d.Salt)
+	if err != nil {
+		return fmt.Errorf("failed to obfuscate salt: %w", err)
+	}
+
+	isCryptExt := regexp.MustCompile(`^[.][A-Za-z0-9-_]{2,}$`).MatchString
+	if !isCryptExt(d.EncryptedSuffix) {
+		return fmt.Errorf("EncryptedSuffix is illegal")
+	}
+	d.FileNameEncoding = utils.GetNoneEmpty(d.FileNameEncoding, "base64")
+	d.EncryptedSuffix = utils.GetNoneEmpty(d.EncryptedSuffix, ".bin")
+
+	op.MustSaveDriverStorage(d)
+
+	//need remote storage exist
+	storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
+	if err != nil {
+		return fmt.Errorf("can't find remote storage: %w", err)
+	}
+	d.remoteStorage = storage
+
+	p, _ := strings.CutPrefix(d.Password, obfuscatedPrefix)
+	p2, _ := strings.CutPrefix(d.Salt, obfuscatedPrefix)
+	config := configmap.Simple{
+		"password":                  p,
+		"password2":                 p2,
+		"filename_encryption":       d.FileNameEnc,
+		"directory_name_encryption": d.DirNameEnc,
+		"filename_encoding":         d.FileNameEncoding,
+		"suffix":                    d.EncryptedSuffix,
+		"pass_bad_blocks":           "",
+	}
+	c, err := rcCrypt.NewCipher(config)
+	if err != nil {
+		return fmt.Errorf("failed to create Cipher: %w", err)
+	}
+	d.cipher = c
+
+	return nil
+}
+
+func (d *Crypt) updateObfusParm(str *string) error {
+	temp := *str
+	if !strings.HasPrefix(temp, obfuscatedPrefix) {
+		temp, err := obscure.Obscure(temp)
+		if err != nil {
+			return err
+		}
+		temp = obfuscatedPrefix + temp
+		*str = temp
+	}
+	return nil
+}
+
+func (d *Crypt) Drop(ctx context.Context) error {
+	return nil
+}
+
+func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	path := dir.GetPath()
+	//return d.list(ctx, d.RemotePath, path)
+	//remoteFull
+
+	objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
+	// the obj must implement the model.SetPath interface
+	// return objs, err
+	if err != nil {
+		return nil, err
+	}
+
+	var result []model.Obj
+	for _, obj := range objs {
+		if obj.IsDir() {
+			name, err := d.cipher.DecryptDirName(obj.GetName())
+			if err != nil {
+				//filter illegal files
+				continue
+			}
+			objRes := model.Object{
+				Name:     name,
+				Size:     0,
+				Modified: obj.ModTime(),
+				IsFolder: obj.IsDir(),
+				Ctime:    obj.CreateTime(),
+				// discarding hash as it's encrypted
+			}
+			result = append(result, &objRes)
+		} else {
+			thumb, ok := model.GetThumb(obj)
+			size, err := d.cipher.DecryptedSize(obj.GetSize())
+			if err != nil {
+				//filter illegal files
+				continue
+			}
+			name, err := d.cipher.DecryptFileName(obj.GetName())
+			if err != nil {
+				//filter illegal files
+				continue
+			}
+			objRes := model.Object{
+				Name:     name,
+				Size:     size,
+				Modified: obj.ModTime(),
+				IsFolder: obj.IsDir(),
+				Ctime:    obj.CreateTime(),
+				// discarding hash as it's encrypted
+			}
+			if d.Thumbnail && thumb == "" {
+				thumb = utils.EncodePath(common.GetApiUrl(nil)+stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
+			}
+			if !ok && !d.Thumbnail {
+				result = append(result, &objRes)
+			} else {
+				objWithThumb := model.ObjThumb{
+					Object: objRes,
+					Thumbnail: model.Thumbnail{
+						Thumbnail: thumb,
+					},
+				}
+				result = append(result, &objWithThumb)
+			}
+		}
+	}
+
+	return result, nil
+}
+
+func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
+	if utils.PathEqual(path, "/") {
+		return &model.Object{
+			Name:     "Root",
+			IsFolder: true,
+			Path:     "/",
+		}, nil
+	}
+	remoteFullPath := ""
+	var remoteObj model.Obj
+	var err, err2 error
+	firstTryIsFolder, secondTry := guessPath(path)
+	remoteFullPath = d.getPathForRemote(path, firstTryIsFolder)
+	remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
+	if err != nil {
+		if errs.IsObjectNotFound(err) && secondTry {
+			//try the opposite
+			remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
+			remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
+			if err2 != nil {
+				return nil, err2
+			}
+		} else {
+			return nil, err
+		}
+	}
+	var size int64 = 0
+	name := ""
+	if !remoteObj.IsDir() {
+		size, err = d.cipher.DecryptedSize(remoteObj.GetSize())
+		if err != nil {
+			log.Warnf("DecryptedSize failed for %s, will use original size, err: %s", path, err)
+			size = remoteObj.GetSize()
+		}
+		name, err = d.cipher.DecryptFileName(remoteObj.GetName())
+		if err != nil {
+			log.Warnf("DecryptFileName failed for %s, will use original name, err: %s", path, err)
+			name = remoteObj.GetName()
+		}
+	} else {
+		name, err = d.cipher.DecryptDirName(remoteObj.GetName())
+		if err != nil {
+			log.Warnf("DecryptDirName failed for %s, will use original name, err: %s", path, err)
+			name = remoteObj.GetName()
+		}
+	}
+	obj := &model.Object{
+		Path:     path,
+		Name:     name,
+		Size:     size,
+		Modified: remoteObj.ModTime(),
+		IsFolder: remoteObj.IsDir(),
+	}
+	return obj, nil
+	//return nil, errs.ObjectNotFound
+}
+
+func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	remoteLink, remoteFile, err := op.Link(ctx, d.remoteStorage, dstDirActualPath, args)
+	if err != nil {
+		return nil, err
+	}
+
+	if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
+		return nil, fmt.Errorf("the remote storage driver needs to be enhanced to support encryption")
+	}
+	remoteFileSize := remoteFile.GetSize()
+	remoteClosers := utils.EmptyClosers()
+	rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
+		length := underlyingLength
+		if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
+			length = -1
+		}
+		rrc := remoteLink.RangeReadCloser
+		if len(remoteLink.URL) > 0 {
+			rangedRemoteLink := &model.Link{
+				URL:    remoteLink.URL,
+				Header: remoteLink.Header,
+			}
+			var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+			if err != nil {
+				return nil, err
+			}
+			rrc = converted
+		}
+		if rrc != nil {
+			//remoteRangeReader, err :=
+			remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
+			remoteClosers.AddClosers(rrc.GetClosers())
+			if err != nil {
+				return nil, err
+			}
+			return remoteReader, nil
+		}
+		if remoteLink.MFile != nil {
+			_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
+			if err != nil {
+				return nil, err
+			}
+			//remoteClosers.Add(remoteLink.MFile)
+			//keep reuse same MFile and close at last.
+			remoteClosers.Add(remoteLink.MFile)
+			return io.NopCloser(remoteLink.MFile), nil
+		}
+
+		return nil, errs.NotSupport
+	}
+	resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+		readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
+		if err != nil {
+			return nil, err
+		}
+		return readSeeker, nil
+	}
+
+	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
+	resultLink := &model.Link{
+		Header:          remoteLink.Header,
+		RangeReadCloser: resultRangeReadCloser,
+		Expiration:      remoteLink.Expiration,
+	}
+
+	return resultLink, nil
+}
+
+func (d *Crypt) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	dstDirActualPath, err := d.getActualPathForRemote(parentDir.GetPath(), true)
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	dir := d.cipher.EncryptDirName(dirName)
+	return op.MakeDir(ctx, d.remoteStorage, stdpath.Join(dstDirActualPath, dir))
+}
+
+func (d *Crypt) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	return op.Move(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
+}
+
+func (d *Crypt) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	remoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	var newEncryptedName string
+	if srcObj.IsDir() {
+		newEncryptedName = d.cipher.EncryptDirName(newName)
+	} else {
+		newEncryptedName = d.cipher.EncryptFileName(newName)
+	}
+	return op.Rename(ctx, d.remoteStorage, remoteActualPath, newEncryptedName)
+}
+
+func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
+}
+
+func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
+	remoteActualPath, err := d.getActualPathForRemote(obj.GetPath(), obj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	return op.Remove(ctx, d.remoteStorage, remoteActualPath)
+}
+
+func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
+	dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+
+	// Encrypt the data into wrappedIn
+	wrappedIn, err := d.cipher.EncryptData(streamer)
+	if err != nil {
+		return fmt.Errorf("failed to EncryptData: %w", err)
+	}
+
+	// doesn't support seekableStream, since rapid-upload is not working for encrypted data
+	streamOut := &stream.FileStream{
+		Obj: &model.Object{
+			ID:       streamer.GetID(),
+			Path:     streamer.GetPath(),
+			Name:     d.cipher.EncryptFileName(streamer.GetName()),
+			Size:     d.cipher.EncryptedSize(streamer.GetSize()),
+			Modified: streamer.ModTime(),
+			IsFolder: streamer.IsDir(),
+		},
+		Reader:       wrappedIn,
+		Mimetype:     "application/octet-stream",
+		WebPutAsTask: streamer.NeedStore(),
+		Exist:        streamer.GetExist(),
+	}
+	err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
+//	return nil, errs.NotSupport
+//}
+
+var _ driver.Driver = (*Crypt)(nil)
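
`Init` obfuscates the stored password and salt with rclone's `obscure` package and marks processed values with a prefix so the step stays idempotent across restarts. A small sketch of that round-trip, under the assumption that `obscure` is only reversible encoding, not hashing:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/rclone/rclone/fs/config/obscure"
)

const obfuscatedPrefix = "___Obfuscated___"

func main() {
	plain := "hunter2"

	enc, err := obscure.Obscure(plain) // reversible; protects against casual reads only
	if err != nil {
		panic(err)
	}
	stored := obfuscatedPrefix + enc // the prefix marks "already obfuscated"

	raw, _ := strings.CutPrefix(stored, obfuscatedPrefix)
	dec, err := obscure.Reveal(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec == plain) // true
}
```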
44 drivers/crypt/meta.go Normal file
@@ -0,0 +1,44 @@
+package crypt
+
+import (
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+	// Usually one of two
+	//driver.RootPath
+	//driver.RootID
+	// define other
+
+	FileNameEnc string `json:"filename_encryption" type:"select" required:"true" options:"off,standard,obfuscate" default:"off"`
+	DirNameEnc  string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
+	RemotePath  string `json:"remote_path" required:"true" help:"This is where the encrypted data is stored"`
+
+	Password         string `json:"password" required:"true" confidential:"true" help:"the main password"`
+	Salt             string `json:"salt" confidential:"true" help:"If you don't know what salt is, treat it as a second password. Optional but recommended"`
+	EncryptedSuffix  string `json:"encrypted_suffix" required:"true" default:".bin" help:"for advanced users only! encrypted files will have this suffix"`
+	FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced users only!"`
+
+	Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnails pre-generated under the .thumbnails folder"`
+}
+
+var config = driver.Config{
+	Name:              "Crypt",
+	LocalSort:         true,
+	OnlyLocal:         false,
+	OnlyProxy:         true,
+	NoCache:           true,
+	NoUpload:          false,
+	NeedMs:            false,
+	DefaultRoot:       "/",
+	CheckStatus:       false,
+	Alert:             "",
+	NoOverwriteUpload: false,
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &Crypt{}
+	})
+}
1 drivers/crypt/types.go Normal file
@@ -0,0 +1 @@
+package crypt
44 drivers/crypt/util.go Normal file
@@ -0,0 +1,44 @@
+package crypt
+
+import (
+	stdpath "path"
+	"path/filepath"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/op"
+)
+
+// gives the best guess based on the path string alone
+func guessPath(path string) (isFolder, secondTry bool) {
+	if strings.HasSuffix(path, "/") {
+		//confirmed a folder
+		return true, false
+	}
+	lastSlash := strings.LastIndex(path, "/")
+	if strings.Index(path[lastSlash:], ".") < 0 {
+		//no dot, try folder then try file
+		return true, true
+	}
+	return false, true
+}
+
+func (d *Crypt) getPathForRemote(path string, isFolder bool) (remoteFullPath string) {
+	if isFolder && !strings.HasSuffix(path, "/") {
+		path = path + "/"
+	}
+	dir, fileName := filepath.Split(path)
+
+	remoteDir := d.cipher.EncryptDirName(dir)
+	remoteFileName := ""
+	if len(strings.TrimSpace(fileName)) > 0 {
+		remoteFileName = d.cipher.EncryptFileName(fileName)
+	}
+	return stdpath.Join(d.RemotePath, remoteDir, remoteFileName)
+}
+
+// actual path is used internally only; any link for the user should come from remoteFullPath
+func (d *Crypt) getActualPathForRemote(path string, isFolder bool) (string, error) {
+	_, remoteActualPath, err := op.GetStorageAndActualPath(d.getPathForRemote(path, isFolder))
+	return remoteActualPath, err
+}
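
Since an encrypted remote hides whether a name is a file or a directory, `guessPath` works from the path string alone. A quick demonstration of the heuristic (the body is restated from the diff above so it can run standalone):

```go
package main

import (
	"fmt"
	"strings"
)

// Same heuristic as guessPath above, inlined for experimentation.
func guessPath(path string) (isFolder, secondTry bool) {
	if strings.HasSuffix(path, "/") {
		return true, false // trailing slash: certainly a folder
	}
	lastSlash := strings.LastIndex(path, "/")
	if !strings.Contains(path[lastSlash:], ".") {
		return true, true // no dot in the last segment: try folder, then file
	}
	return false, true // looks like a file: try file, then folder
}

func main() {
	for _, p := range []string{"/photos/", "/photos/2023", "/photos/a.jpg"} {
		isFolder, secondTry := guessPath(p)
		fmt.Printf("%-14s folder-first=%-5v second-try=%v\n", p, isFolder, secondTry)
	}
}
```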
@@ -203,7 +203,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		_ = res.Body.Close()

 		if count > 0 {
-			up((i + 1) * 100 / count)
+			up(float64(i+1) * 100 / float64(count))
 		}

 		offset += byteSize
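
The one-line Dropbox change fixes truncation: the old expression did integer arithmetic, so progress callbacks only ever saw whole, rounded-down percentages, while `driver.UpdateProgress` takes a `float64` after this refactor. A two-line illustration:

```go
package main

import "fmt"

func main() {
	i, count := 0, 3
	fmt.Println((i + 1) * 100 / count)               // 33 (integer division truncates)
	fmt.Println(float64(i+1) * 100 / float64(count)) // 33.333333333333336
}
```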
@@ -4,7 +4,6 @@ import (
 	"context"
 	stdpath "path"

-	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -65,11 +64,10 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 		return nil, err
 	}

-	r := NewFTPFileReader(d.conn, file.GetPath())
+	r := NewFileReader(d.conn, file.GetPath(), file.GetSize())
 	link := &model.Link{
-		Data: r,
+		MFile: r,
 	}
-	base.HandleRange(link, r, args.Header, file.GetSize())
 	return link, nil
 }
@@ -4,6 +4,7 @@ import (
 	"io"
 	"os"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/jlaffaye/ftp"
@@ -30,43 +31,59 @@ func (d *FTP) login() error {
 	return nil
 }

-// An FTP file reader that implements io.ReadSeekCloser for seeking.
-type FTPFileReader struct {
-	conn   *ftp.ServerConn
-	resp   *ftp.Response
-	offset int64
-	mu     sync.Mutex
-	path   string
+// FileReader An FTP file reader that implements io.MFile for seeking.
+type FileReader struct {
+	conn         *ftp.ServerConn
+	resp         *ftp.Response
+	offset       atomic.Int64
+	readAtOffset int64
+	mu           sync.Mutex
+	path         string
+	size         int64
 }

-func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
-	return &FTPFileReader{
+func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
+	return &FileReader{
 		conn: conn,
 		path: path,
+		size: size,
 	}
 }

-func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
+func (r *FileReader) Read(buf []byte) (n int, err error) {
+	n, err = r.ReadAt(buf, r.offset.Load())
+	r.offset.Add(int64(n))
+	return
+}
+
+func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return -1, os.ErrInvalid
+	}
 	r.mu.Lock()
 	defer r.mu.Unlock()

+	if off != r.readAtOffset {
+		//have to restart the connection to correct the offset
+		_ = r.resp.Close()
+		r.resp = nil
+	}
+
 	if r.resp == nil {
-		r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
+		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
+		r.readAtOffset = off
 		if err != nil {
 			return 0, err
 		}
 	}

 	n, err = r.resp.Read(buf)
-	r.offset += int64(n)
+	r.readAtOffset += int64(n)
 	return
 }

-func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-
-	oldOffset := r.offset
+func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
+	oldOffset := r.offset.Load()
 	var newOffset int64
 	switch whence {
 	case io.SeekStart:
@@ -74,11 +91,7 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
 	case io.SeekCurrent:
 		newOffset = oldOffset + offset
 	case io.SeekEnd:
-		size, err := r.conn.FileSize(r.path)
-		if err != nil {
-			return oldOffset, err
-		}
-		newOffset = offset + int64(size)
+		return r.size, nil
 	default:
 		return -1, os.ErrInvalid
 	}
@@ -91,17 +104,11 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
 		// offset not changed, so return directly
 		return oldOffset, nil
 	}
-	r.offset = newOffset
-
-	if r.resp != nil {
-		// close the existing ftp data connection, otherwise the next read will be blocked
-		_ = r.resp.Close() // we do not care about whether it returns an error
-		r.resp = nil
-	}
+	r.offset.Store(newOffset)
 	return newOffset, nil
 }

-func (r *FTPFileReader) Close() error {
+func (r *FileReader) Close() error {
 	if r.resp != nil {
 		return r.resp.Close()
 	}
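
Moving the FTP reader from a seek-based `Read` to `ReadAt` plus an atomic offset makes it usable as an `io.ReaderAt`, so independent range reads can share one `FileReader` while the mutex serializes the single data connection underneath (reopened only when a read lands off the current position). A sketch of consuming any `io.ReaderAt` that way:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for the FTP FileReader; strings.Reader implements io.ReaderAt.
	var r io.ReaderAt = strings.NewReader("0123456789abcdef")

	// Each SectionReader keeps its own offset, the way independent range
	// requests would against one shared reader.
	a := io.NewSectionReader(r, 0, 4)
	b := io.NewSectionReader(r, 10, 6)

	bufA, _ := io.ReadAll(a)
	bufB, _ := io.ReadAll(b)
	fmt.Println(string(bufA), string(bufB)) // 0123 abcdef
}
```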
@@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	obj := stream.GetOld()
+	obj := stream.GetExist()
 	var (
 		e   Error
 		url string
@@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)
@@ -5,6 +5,7 @@ import (
 	"time"

 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )

@@ -23,12 +24,17 @@ type File struct {
 	Name          string    `json:"name"`
 	MimeType      string    `json:"mimeType"`
 	ModifiedTime  time.Time `json:"modifiedTime"`
+	CreatedTime   time.Time `json:"createdTime"`
 	Size          string    `json:"size"`
 	ThumbnailLink string    `json:"thumbnailLink"`
 	ShortcutDetails struct {
 		TargetId       string `json:"targetId"`
 		TargetMimeType string `json:"targetMimeType"`
 	} `json:"shortcutDetails"`
+
+	MD5Checksum    string `json:"md5Checksum"`
+	SHA1Checksum   string `json:"sha1Checksum"`
+	SHA256Checksum string `json:"sha256Checksum"`
 }

 func fileToObj(f File) *model.ObjThumb {
@@ -39,10 +45,18 @@ func fileToObj(f File) *model.ObjThumb {
 		ID:       f.Id,
 		Name:     f.Name,
 		Size:     size,
+		Ctime:    f.CreatedTime,
 		Modified: f.ModifiedTime,
 		IsFolder: f.MimeType == "application/vnd.google-apps.folder",
+		HashInfo: utils.NewHashInfoByMap(map[*utils.HashType]string{
+			utils.MD5:    f.MD5Checksum,
+			utils.SHA1:   f.SHA1Checksum,
+			utils.SHA256: f.SHA256Checksum,
+		}),
+		},
+		Thumbnail: model.Thumbnail{
+			Thumbnail: f.ThumbnailLink,
 		},
-		Thumbnail: model.Thumbnail{},
 	}
 	if f.MimeType == "application/vnd.google-apps.shortcut" {
 		obj.ID = f.ShortcutDetails.TargetId
@@ -5,7 +5,6 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"net/http"
 	"os"
@@ -13,6 +12,8 @@ import (
 	"strconv"
 	"time"

+	"github.com/alist-org/alist/v3/pkg/http_range"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -195,7 +196,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 		}
 		query := map[string]string{
 			"orderBy":  orderBy,
-			"fields":   "files(id,name,mimeType,size,modifiedTime,thumbnailLink,shortcutDetails),nextPageToken",
+			"fields":   "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
 			"pageSize": "1000",
 			"q":        fmt.Sprintf("'%s' in parents and trashed = false", id),
 			//"includeItemsFromAllDrives": "true",
@@ -216,25 +217,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {

 func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
-	var finish int64 = 0
-	for finish < stream.GetSize() {
+	var offset int64 = 0
+	for offset < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		chunkSize := stream.GetSize() - finish
+		chunkSize := stream.GetSize() - offset
 		if chunkSize > defaultChunkSize {
 			chunkSize = defaultChunkSize
 		}
-		_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
+		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+		if err != nil {
+			return err
+		}
+		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
 			req.SetHeaders(map[string]string{
 				"Content-Length": strconv.FormatInt(chunkSize, 10),
-				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
-			}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
+				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
+			}).SetBody(reader).SetContext(ctx)
 		}, nil)
 		if err != nil {
 			return err
 		}
-		finish += chunkSize
+		offset += chunkSize
 	}
 	return nil
 }
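
`chunkUpload` now pulls each slice with `RangeRead` instead of draining one shared reader, but the `Content-Range` bookkeeping is unchanged: fixed-size chunks, with the final chunk taking whatever remains. A standalone sketch of the header math:

```go
package main

import "fmt"

func main() {
	const chunk = int64(5 << 20) // 5 MiB chunks, as with ChunkSize = 5
	total := int64(12 << 20)     // a 12 MiB file

	for offset := int64(0); offset < total; {
		size := total - offset
		if size > chunk {
			size = chunk
		}
		// Matches the header format built in chunkUpload above.
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", offset, offset+size-1, total)
		offset += size
	}
}
```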
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	}

 	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(stream.GetReadCloser()).SetContext(ctx)
+		req.SetBody(stream).SetContext(ctx)
 	}, nil, postHeaders)

 	if err != nil {
@@ -2,9 +2,7 @@ package lanzou

 import (
 	"context"
-	"fmt"
 	"net/http"
-	"regexp"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -19,6 +17,8 @@ type LanZou struct {
 	model.Storage
 	uid string
 	vei string
+
+	flag int32
 }

 func (d *LanZou) Config() driver.Config {
@@ -30,16 +30,18 @@ func (d *LanZou) GetAddition() driver.Additional {
 }

 func (d *LanZou) Init(ctx context.Context) (err error) {
-	if d.IsCookie() {
+	switch d.Type {
+	case "account":
+		_, err := d.Login()
+		if err != nil {
+			return err
+		}
+		fallthrough
+	case "cookie":
 		if d.RootFolderID == "" {
 			d.RootFolderID = "-1"
 		}
-		ylogin := regexp.MustCompile("ylogin=(.*?);").FindStringSubmatch(d.Cookie)
-		if len(ylogin) < 2 {
-			return fmt.Errorf("cookie does not contain ylogin")
-		}
-		d.uid = ylogin[1]
-		d.vei, err = d.getVei()
+		d.vei, d.uid, err = d.getVeiAndUid()
 	}
 	return
 }
@@ -51,7 +53,7 @@ func (d *LanZou) Drop(ctx context.Context) error {

 // the size and time obtained are not accurate
 func (d *LanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		return d.GetAllFiles(dir.GetID())
 	} else {
 		return d.GetFileOrFolderByShareUrl(dir.GetID(), d.SharePassword)
@@ -119,7 +121,7 @@ func (d *LanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }

 func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		data, err := d.doupload(func(req *resty.Request) {
 			req.SetContext(ctx)
 			req.SetFormData(map[string]string{
@@ -137,11 +139,11 @@ func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 			FolID: utils.Json.Get(data, "text").ToString(),
 		}, nil
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }

 func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		if !srcObj.IsDir() {
 			_, err := d.doupload(func(req *resty.Request) {
 				req.SetContext(ctx)
@@ -157,11 +159,11 @@ func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
 			return srcObj, nil
 		}
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }

 func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		if !srcObj.IsDir() {
 			_, err := d.doupload(func(req *resty.Request) {
 				req.SetContext(ctx)
@@ -179,11 +181,11 @@ func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (
 			return srcObj, nil
 		}
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }

 func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		_, err := d.doupload(func(req *resty.Request) {
 			req.SetContext(ctx)
 			if obj.IsDir() {
@@ -200,13 +202,13 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
 		}, nil)
 		return err
 	}
-	return errs.NotImplement
+	return errs.NotSupport
 }

 func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
-		_, err := d._post(d.BaseUrl+"/fileup.php", func(req *resty.Request) {
+		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
 			req.SetFormData(map[string]string{
 				"task": "1",
 				"vie":  "2",
@@ -221,5 +223,5 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		}
 		return &resp.Text[0], nil
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }
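
The rewritten `Init` leans on Go's explicit `fallthrough`: the `account` case performs the login and then falls into the `cookie` case so both share the root-folder and `vei`/`uid` setup, while the share-URL type matches neither case and skips setup entirely. A minimal illustration of that control flow:

```go
package main

import "fmt"

func initFor(typ string) {
	switch typ {
	case "account":
		fmt.Println("login with account/password")
		fallthrough // deliberately continue into the shared setup
	case "cookie":
		fmt.Println("resolve uid and vei")
	}
	// "url" matches no case: share-link mode needs no session setup.
}

func main() {
	initFor("account") // login + setup
	initFor("cookie")  // setup only
	initFor("url")     // nothing
}
```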
|
@ -3,6 +3,7 @@ package lanzou
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/http"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -117,13 +118,102 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // 拆分kv
|
|||||||
|
|
||||||
// 根据key查询js变量
|
// 根据key查询js变量
|
||||||
func findJSVarFunc(key, data string) string {
|
func findJSVarFunc(key, data string) string {
|
||||||
values := regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
|
var values []string
|
||||||
|
if key != "sasign" {
|
||||||
|
values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
|
||||||
|
} else {
|
||||||
|
matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1)
|
||||||
|
if len(matches) == 3 {
|
||||||
|
values = matches[1]
|
||||||
|
} else {
|
||||||
|
if len(matches) > 0 {
|
||||||
|
values = matches[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
if len(values) == 0 {
|
if len(values) == 0 {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return values[1]
|
return values[1]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var findFunction = regexp.MustCompile(`(?ims)^function[^{]+`)
|
||||||
|
var findFunctionAll = regexp.MustCompile(`(?is)function[^{]+`)
|
||||||
|
|
||||||
|
// 查找所有方法位置
|
||||||
|
func findJSFunctionIndex(data string, all bool) [][2]int {
|
||||||
|
findFunction := findFunction
|
||||||
|
if all {
|
||||||
|
findFunction = findFunctionAll
|
||||||
|
}
|
||||||
|
|
||||||
|
indexs := findFunction.FindAllStringIndex(data, -1)
|
||||||
|
fIndexs := make([][2]int, 0, len(indexs))
|
||||||
|
|
||||||
|
for _, index := range indexs {
|
||||||
|
if len(index) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
count, data := 0, data[index[1]:]
|
||||||
|
for ii, v := range data {
|
||||||
|
if v == ' ' && count == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if v == '{' {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
|
||||||
|
if v == '}' {
|
||||||
|
count--
|
||||||
|
}
|
||||||
|
if count == 0 {
|
||||||
|
fIndexs = append(fIndexs, [2]int{index[0], index[1] + ii + 1})
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fIndexs
|
||||||
|
}
|
||||||
|
|
||||||
|
// 删除JS全局方法
|
||||||
|
func removeJSGlobalFunction(html string) string {
|
||||||
|
indexs := findJSFunctionIndex(html, false)
|
||||||
|
block := make([]string, len(indexs))
|
||||||
|
for i, next := len(indexs)-1, len(html); i >= 0; i-- {
|
||||||
|
index := indexs[i]
|
||||||
|
block[i] = html[index[1]:next]
|
||||||
|
next = index[0]
|
||||||
|
}
|
||||||
|
return strings.Join(block, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// 根据名称获取方法
|
||||||
|
func getJSFunctionByName(html string, name string) (string, error) {
|
||||||
|
indexs := findJSFunctionIndex(html, true)
|
||||||
|
for _, index := range indexs {
|
||||||
|
data := html[index[0]:index[1]]
|
||||||
|
if regexp.MustCompile(`function\s+` + name + `[()\s]+{`).MatchString(data) {
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", fmt.Errorf("not find %s function", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 解析html中的JSON,选择最长的数据
|
||||||
|
func htmlJsonToMap2(html string) (map[string]string, error) {
|
||||||
|
datas := findDataReg.FindAllStringSubmatch(html, -1)
|
||||||
|
var sData string
|
||||||
|
for _, data := range datas {
|
||||||
|
if len(datas) > 0 && len(data[1]) > len(sData) {
|
||||||
|
sData = data[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if sData == "" {
|
||||||
|
return nil, fmt.Errorf("not find data")
|
||||||
|
}
|
||||||
|
return jsonToMap(sData, html), nil
|
||||||
|
}
|
||||||
|
|
||||||
// 解析html中的JSON
|
// 解析html中的JSON
|
||||||
func htmlJsonToMap(html string) (map[string]string, error) {
|
func htmlJsonToMap(html string) (map[string]string, error) {
|
||||||
datas := findDataReg.FindStringSubmatch(html)
|
datas := findDataReg.FindStringSubmatch(html)
|
||||||
@ -190,3 +280,14 @@ func GetExpirationTime(url string) (etime time.Duration) {
|
|||||||
etime = time.Duration(timestamp-time.Now().Unix()) * time.Second
|
etime = time.Duration(timestamp-time.Now().Unix()) * time.Second
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func CookieToString(cookies []*http.Cookie) string {
|
||||||
|
if cookies == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
cookieStrings := make([]string, len(cookies))
|
||||||
|
for i, cookie := range cookies {
|
||||||
|
cookieStrings[i] = cookie.Name + "=" + cookie.Value
|
||||||
|
}
|
||||||
|
return strings.Join(cookieStrings, ";")
|
||||||
|
}
|
||||||
|
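
`findJSFunctionIndex` pairs each `function` header with its closing brace by counting `{`/`}` from the header onward; `getJSFunctionByName` then filters those spans by name. A compact standalone version of the same brace-counting idea (the JS snippet and function name are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

// extractFunction finds `function <name>(...)` and scans forward until the
// braces balance, returning the whole function text.
func extractFunction(js, name string) string {
	loc := regexp.MustCompile(`function\s+` + name + `\s*\([^)]*\)\s*`).FindStringIndex(js)
	if loc == nil {
		return ""
	}
	depth := 0
	for i := loc[1]; i < len(js); i++ {
		switch js[i] {
		case '{':
			depth++
		case '}':
			depth--
			if depth == 0 {
				return js[loc[0] : i+1]
			}
		}
	}
	return ""
}

func main() {
	js := `var a = 1; function down_p(){ if (a) { go(); } } function other(){}`
	fmt.Println(extractFunction(js, "down_p"))
}
```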
@@ -6,8 +6,13 @@ import (
 )

 type Addition struct {
-	Type   string `json:"type" type:"select" options:"cookie,url" default:"cookie"`
-	Cookie string `json:"cookie" required:"true" help:"about 15 days valid, ignore if shareUrl is used"`
+	Type string `json:"type" type:"select" options:"account,cookie,url" default:"cookie"`
+
+	Account  string `json:"account"`
+	Password string `json:"password"`
+
+	Cookie string `json:"cookie" help:"about 15 days valid, ignore if shareUrl is used"`
+
 	driver.RootID
 	SharePassword string `json:"share_password"`
 	BaseUrl       string `json:"baseUrl" required:"true" default:"https://pc.woozooo.com" help:"basic URL for file operation"`
@@ -19,6 +24,10 @@ func (a *Addition) IsCookie() bool {
 	return a.Type == "cookie"
 }

+func (a *Addition) IsAccount() bool {
+	return a.Type == "account"
+}
+
 var config = driver.Config{
 	Name:      "Lanzou",
 	LocalSort: true,
@@ -3,11 +3,14 @@ package lanzou
 import (
     "errors"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
     "time"
 )
 
 var ErrFileShareCancel = errors.New("file sharing cancellation")
 var ErrFileNotExist = errors.New("file does not exist")
+var ErrCookieExpiration = errors.New("cookie expiration")
 
 type RespText[T any] struct {
     Text T `json:"text"`
@@ -17,6 +20,9 @@ type RespInfo[T any] struct {
     Info T `json:"info"`
 }
+
+var _ model.Obj = (*FileOrFolder)(nil)
+var _ model.Obj = (*FileOrFolderByShareUrl)(nil)
 
 type FileOrFolder struct {
     Name string `json:"name"`
     //Onof string `json:"onof"` // whether an extraction code is set
@@ -48,6 +54,14 @@ type FileOrFolder struct {
     shareInfo *FileShare `json:"-"`
 }
+
+func (f *FileOrFolder) CreateTime() time.Time {
+    return f.ModTime()
+}
+
+func (f *FileOrFolder) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
 
 func (f *FileOrFolder) GetID() string {
     if f.IsDir() {
         return f.FolID
@@ -129,6 +143,14 @@ type FileOrFolderByShareUrl struct {
     repairFlag bool `json:"-"`
 }
+
+func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
+    return f.ModTime()
+}
+
+func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
 
 func (f *FileOrFolderByShareUrl) GetID() string   { return f.ID }
 func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
 func (f *FileOrFolderByShareUrl) GetPath() string { return "" }
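The two "var _ model.Obj = (...)(nil)" lines added above are compile-time interface assertions: they cost nothing at runtime but break the build if either type ever stops satisfying model.Obj. The idiom in isolation (all names illustrative):

package demo

// Obj stands in for the interface being asserted against.
type Obj interface{ GetName() string }

type file struct{ name string }

func (f *file) GetName() string { return f.name }

// Blank-identifier assignment: fails to compile if *file
// ever stops implementing Obj.
var _ Obj = (*file)(nil)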
@@ -5,13 +5,16 @@ import (
     "fmt"
     "net/http"
     "regexp"
+    "runtime"
     "strconv"
     "strings"
     "sync"
+    "sync/atomic"
     "time"
 
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/op"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/go-resty/resty/v2"
     log "github.com/sirupsen/logrus"
@@ -37,7 +40,24 @@ func (d *LanZou) get(url string, callback base.ReqCallback) ([]byte, error) {
 }
 
 func (d *LanZou) post(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
-    return d._post(url, callback, resp, false)
+    data, err := d._post(url, callback, resp, false)
+    if err == ErrCookieExpiration && d.IsAccount() {
+        if atomic.CompareAndSwapInt32(&d.flag, 0, 1) {
+            _, err2 := d.Login()
+            atomic.SwapInt32(&d.flag, 0)
+            if err2 != nil {
+                err = errors.Join(err, err2)
+                d.Status = err.Error()
+                op.MustSaveDriverStorage(d)
+                return data, err
+            }
+        }
+        for atomic.LoadInt32(&d.flag) != 0 {
+            runtime.Gosched()
+        }
+        return d._post(url, callback, resp, false)
+    }
+    return data, err
 }
 
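The rewritten post retries once after a cookie-expiration error, and the atomic flag acts as a single-flight guard: the goroutine that wins the CompareAndSwap performs the re-login while the others spin-wait, then retry the request. A standalone sketch of that pattern (flag and refresh are illustrative names, not the driver's actual fields):

package main

import (
    "fmt"
    "runtime"
    "sync"
    "sync/atomic"
)

var flag int32 // 0 = idle, 1 = a refresh is in flight (illustrative global)

// refreshOnce lets the caller that wins the CompareAndSwap run refresh;
// every other caller spins, yielding the scheduler, until the flag clears.
func refreshOnce(refresh func()) {
    if atomic.CompareAndSwapInt32(&flag, 0, 1) {
        defer atomic.SwapInt32(&flag, 0)
        refresh()
        return
    }
    for atomic.LoadInt32(&flag) != 0 {
        runtime.Gosched()
    }
}

func main() {
    var calls int32
    var wg sync.WaitGroup
    for i := 0; i < 8; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            refreshOnce(func() { atomic.AddInt32(&calls, 1) })
        }()
    }
    wg.Wait()
    // Refreshes never overlap; usually 1 here, though a goroutine arriving
    // after the flag clears would run its own refresh.
    fmt.Println("refresh ran", atomic.LoadInt32(&calls), "time(s)")
}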
 func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{}, up bool) ([]byte, error) {
@@ -49,10 +69,12 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
             }
             return false
         })
-        callback(req)
+        if callback != nil {
+            callback(req)
+        }
     }, up)
     if err != nil {
-        return nil, err
+        return data, err
     }
     switch utils.Json.Get(data, "zt").ToInt() {
     case 1, 2, 4:
@@ -61,12 +83,14 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
             utils.Json.Unmarshal(data, resp)
         }
         return data, nil
+    case 9: // login expired
+        return data, ErrCookieExpiration
     default:
         info := utils.Json.Get(data, "inf").ToString()
         if info == "" {
             info = utils.Json.Get(data, "info").ToString()
         }
-        return nil, fmt.Errorf(info)
+        return data, fmt.Errorf(info)
     }
 }
 
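_post maps the service's numeric "zt" status onto driver behavior: 1/2/4 mean success, 9 now maps to ErrCookieExpiration, and anything else is surfaced as an error message. A minimal stand-alone decode of that convention (the sample body is made up):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    body := []byte(`{"zt":9,"info":"login expired"}`)
    var r struct {
        Zt   int    `json:"zt"`
        Info string `json:"info"`
    }
    if err := json.Unmarshal(body, &r); err != nil {
        panic(err)
    }
    switch r.Zt {
    case 1, 2, 4:
        fmt.Println("ok")
    case 9:
        fmt.Println("cookie expired:", r.Info) // triggers the re-login path
    default:
        fmt.Println("error:", r.Info)
    }
}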
@@ -101,6 +125,28 @@ func (d *LanZou) request(url string, method string, callback base.ReqCallback, u
     return res.Body(), err
 }
+
+func (d *LanZou) Login() ([]*http.Cookie, error) {
+    resp, err := base.NewRestyClient().SetRedirectPolicy(resty.NoRedirectPolicy()).
+        R().SetFormData(map[string]string{
+        "task":         "3",
+        "uid":          d.Account,
+        "pwd":          d.Password,
+        "setSessionId": "",
+        "setSig":       "",
+        "setScene":     "",
+        "setTocen":     "",
+        "formhash":     "",
+    }).Post("https://up.woozooo.com/mlogin.php")
+    if err != nil {
+        return nil, err
+    }
+    if utils.Json.Get(resp.Body(), "zt").ToInt() != 1 {
+        return nil, fmt.Errorf("login err: %s", resp.Body())
+    }
+    d.Cookie = CookieToString(resp.Cookies())
+    return resp.Cookies(), nil
+}
+
 /*
 Fetch data via the cookie
 */
@@ -212,7 +258,7 @@ var sizeFindReg = regexp.MustCompile(`(?i)大小\W*([0-9.]+\s*[bkm]+)`)
 var timeFindReg = regexp.MustCompile(`\d+\s*[秒天分小][钟时]?前|[昨前]天|\d{4}-\d{2}-\d{2}`)
 
 // Find the IDs and names of subfolders within a shared folder
-var findSubFolaerReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
+var findSubFolderReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
 
 // Get the download page link
 var findDownPageParamReg = regexp.MustCompile(`<iframe.*?src="(.+?)"`)
@@ -300,7 +346,11 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
 
     // A password is required
     if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
-        param, err := htmlFormToMap(sharePageData)
+        sharePageData, err := getJSFunctionByName(sharePageData, "down_p")
+        if err != nil {
+            return nil, err
+        }
+        param, err := htmlJsonToMap(sharePageData)
         if err != nil {
             return nil, err
         }
@@ -325,7 +375,6 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
         return nil, err
     }
     nextPageData := RemoveNotes(string(data))
-
     param, err = htmlJsonToMap(nextPageData)
     if err != nil {
         return nil, err
@@ -406,7 +455,7 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
 
     files := make([]FileOrFolderByShareUrl, 0)
     // vip: fetch subfolders
-    floders := findSubFolaerReg.FindAllStringSubmatch(sharePageData, -1)
+    floders := findSubFolderReg.FindAllStringSubmatch(sharePageData, -1)
     for _, floder := range floders {
         if len(floder) == 3 {
             files = append(files, FileOrFolderByShareUrl{
@@ -427,10 +476,10 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
         if err != nil {
             return nil, err
         }
-        /*// files inside the folder are not encrypted either
+        // files inside the folder are encrypted
         for i := 0; i < len(resp.Text); i++ {
             resp.Text[i].Pwd = pwd
-        }*/
+        }
         if len(resp.Text) == 0 {
             break
         }
@@ -451,21 +500,32 @@ func (d *LanZou) getFileRealInfo(downURL string) (*int64, *time.Time) {
     return &size, &time
 }
 
-func (d *LanZou) getVei() (string, error) {
-    resp, err := d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
+func (d *LanZou) getVeiAndUid() (vei string, uid string, err error) {
+    var resp []byte
+    resp, err = d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
         req.SetQueryParams(map[string]string{
             "item":   "files",
             "action": "index",
-            "u":      d.uid,
         })
     })
     if err != nil {
-        return "", err
+        return
     }
+    // uid
+    uids := regexp.MustCompile(`uid=([^'"&;]+)`).FindStringSubmatch(string(resp))
+    if len(uids) < 2 {
+        err = fmt.Errorf("uid variable not find")
+        return
+    }
+    uid = uids[1]
+
+    // vei
     html := RemoveNotes(string(resp))
     data, err := htmlJsonToMap(html)
     if err != nil {
-        return "", err
+        return
    }
-    return data["vei"], nil
+    vei = data["vei"]
+
+    return
 }
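getVeiAndUid relies on FindStringSubmatch returning the whole match at index 0 and the capture group at index 1, which is why it guards with len(uids) < 2. For example (the HTML fragment is made up):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    html := `<a href="mydisk.php?item=files&uid=12345">`
    m := regexp.MustCompile(`uid=([^'"&;]+)`).FindStringSubmatch(html)
    // m[0] is the full match, m[1] the first capture group;
    // a nil or short slice means the pattern did not match.
    if len(m) < 2 {
        fmt.Println("uid not found")
        return
    }
    fmt.Println(m[0]) // uid=12345
    fmt.Println(m[1]) // 12345
}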
@@ -1,16 +1,18 @@
 package local
 
 import (
+    "bytes"
     "context"
     "errors"
     "fmt"
-    "io"
+    "io/fs"
     "net/http"
     "os"
     stdpath "path"
     "path/filepath"
     "strconv"
     "strings"
+    "time"
 
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/internal/driver"
@@ -19,6 +21,8 @@ import (
     "github.com/alist-org/alist/v3/internal/sign"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/alist-org/alist/v3/server/common"
+    "github.com/djherbis/times"
+    log "github.com/sirupsen/logrus"
     _ "golang.org/x/image/webp"
 )
 
|
|||||||
if !d.ShowHidden && strings.HasPrefix(f.Name(), ".") {
|
if !d.ShowHidden && strings.HasPrefix(f.Name(), ".") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
thumb := ""
|
file := d.FileInfoToObj(f, args.ReqPath, fullPath)
|
||||||
if d.Thumbnail {
|
files = append(files, file)
|
||||||
typeName := utils.GetFileType(f.Name())
|
|
||||||
if typeName == conf.IMAGE || typeName == conf.VIDEO {
|
|
||||||
thumb = common.GetApiUrl(nil) + stdpath.Join("/d", args.ReqPath, f.Name())
|
|
||||||
thumb = utils.EncodePath(thumb, true)
|
|
||||||
thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(args.ReqPath, f.Name()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
|
|
||||||
var size int64
|
|
||||||
if !isFolder {
|
|
||||||
size = f.Size()
|
|
||||||
}
|
|
||||||
file := model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
Path: filepath.Join(dir.GetPath(), f.Name()),
|
|
||||||
Name: f.Name(),
|
|
||||||
Modified: f.ModTime(),
|
|
||||||
Size: size,
|
|
||||||
IsFolder: isFolder,
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{
|
|
||||||
Thumbnail: thumb,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
files = append(files, &file)
|
|
||||||
}
|
}
|
||||||
return files, nil
|
return files, nil
|
||||||
}
|
}
|
||||||
|
func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) model.Obj {
|
||||||
|
thumb := ""
|
||||||
|
if d.Thumbnail {
|
||||||
|
typeName := utils.GetFileType(f.Name())
|
||||||
|
if typeName == conf.IMAGE || typeName == conf.VIDEO {
|
||||||
|
thumb = common.GetApiUrl(nil) + stdpath.Join("/d", reqPath, f.Name())
|
||||||
|
thumb = utils.EncodePath(thumb, true)
|
||||||
|
thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
|
||||||
|
var size int64
|
||||||
|
if !isFolder {
|
||||||
|
size = f.Size()
|
||||||
|
}
|
||||||
|
var ctime time.Time
|
||||||
|
t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
|
||||||
|
if err == nil {
|
||||||
|
if t.HasBirthTime() {
|
||||||
|
ctime = t.BirthTime()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
file := model.ObjThumb{
|
||||||
|
Object: model.Object{
|
||||||
|
Path: filepath.Join(fullPath, f.Name()),
|
||||||
|
Name: f.Name(),
|
||||||
|
Modified: f.ModTime(),
|
||||||
|
Size: size,
|
||||||
|
IsFolder: isFolder,
|
||||||
|
Ctime: ctime,
|
||||||
|
},
|
||||||
|
Thumbnail: model.Thumbnail{
|
||||||
|
Thumbnail: thumb,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return &file
|
||||||
|
|
||||||
|
}
|
||||||
|
func (d *Local) GetMeta(ctx context.Context, path string) (model.Obj, error) {
|
||||||
|
f, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
file := d.FileInfoToObj(f, path, path)
|
||||||
|
//h := "123123"
|
||||||
|
//if s, ok := f.(model.SetHash); ok && file.GetHash() == ("","") {
|
||||||
|
// s.SetHash(h,"SHA1")
|
||||||
|
//}
|
||||||
|
return file, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
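FileInfoToObj now populates Ctime via github.com/djherbis/times, guarded by HasBirthTime because a creation (birth) time is not available on every platform or filesystem. In isolation (the path is a placeholder):

package main

import (
    "fmt"
    "log"

    "github.com/djherbis/times"
)

func main() {
    t, err := times.Stat("example.txt") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("mtime:", t.ModTime())
    // Guard as the driver does: birth time may be unsupported here.
    if t.HasBirthTime() {
        fmt.Println("btime:", t.BirthTime())
    } else {
        fmt.Println("birth time not supported on this platform/filesystem")
    }
}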
 func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
     path = filepath.Join(d.GetRootPath(), path)
@@ -125,10 +156,18 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
     if isFolder {
         size = 0
     }
+    var ctime time.Time
+    t, err := times.Stat(path)
+    if err == nil {
+        if t.HasBirthTime() {
+            ctime = t.BirthTime()
+        }
+    }
     file := model.Object{
         Path:     path,
         Name:     f.Name(),
         Modified: f.ModTime(),
+        Ctime:    ctime,
         Size:     size,
         IsFolder: isFolder,
     }
@@ -147,13 +186,21 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
             "Content-Type": []string{"image/png"},
         }
         if thumbPath != nil {
-            link.FilePath = thumbPath
+            open, err := os.Open(*thumbPath)
+            if err != nil {
+                return nil, err
+            }
+            link.MFile = open
         } else {
-            link.Data = io.NopCloser(buf)
-            link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
+            link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
+            //link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
         }
     } else {
-        link.FilePath = &fullPath
+        open, err := os.Open(fullPath)
+        if err != nil {
+            return nil, err
+        }
+        link.MFile = open
     }
     return &link, nil
 }
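The Link changes drop FilePath/Data in favor of MFile, i.e. something the HTTP layer can both read and seek, which serves range requests for on-disk files and the in-memory thumbnail alike. A sketch of what a NewNopMFile-style adapter amounts to, assuming model.MFile is essentially an io.ReadSeekCloser (this is an assumption, not alist's actual source):

package sketch

import "io"

// nopMFile turns any io.ReadSeeker (e.g. *bytes.Reader) into a
// read-seek-closer whose Close is a no-op, so an in-memory buffer
// can flow through the same code path as an *os.File.
type nopMFile struct{ io.ReadSeeker }

func (nopMFile) Close() error { return nil }

func NewNopMFile(r io.ReadSeeker) io.ReadSeekCloser { return nopMFile{r} }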
@@ -237,6 +284,10 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
     if err != nil {
         return err
     }
+    err = os.Chtimes(fullPath, stream.ModTime(), stream.ModTime())
+    if err != nil {
+        log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
+    }
     return nil
 }
 
@@ -64,7 +64,7 @@ func readDir(dirname string) ([]fs.FileInfo, error) {
 func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
     fullPath := file.GetPath()
     thumbPrefix := "alist_thumb_"
-    thumbName := thumbPrefix + utils.GetMD5Encode(fullPath) + ".png"
+    thumbName := thumbPrefix + utils.GetMD5EncodeStr(fullPath) + ".png"
     if d.ThumbCacheFolder != "" {
         // skip if the file is a thumbnail
         if strings.HasPrefix(file.GetName(), thumbPrefix) {
@@ -91,7 +91,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
         srcBuf = imgBuf
     }
 
-    image, err := imaging.Decode(srcBuf)
+    image, err := imaging.Decode(srcBuf, imaging.AutoOrientation(true))
     if err != nil {
         return nil, nil, err
     }
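imaging.AutoOrientation(true) is an option of github.com/disintegration/imaging: it applies the EXIF Orientation tag while decoding, so thumbnails of phone photos no longer come out rotated. Minimal usage (file names are placeholders):

package main

import (
    "log"
    "os"

    "github.com/disintegration/imaging"
)

func main() {
    f, err := os.Open("photo.jpg") // placeholder input
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Decode honoring the EXIF Orientation tag.
    img, err := imaging.Decode(f, imaging.AutoOrientation(true))
    if err != nil {
        log.Fatal(err)
    }
    if err := imaging.Save(img, "thumb.png"); err != nil {
        log.Fatal(err)
    }
}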
@@ -7,7 +7,6 @@ import (
     "fmt"
     "io"
     "net/http"
-    "os"
     "strconv"
     "time"
 
@@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
     if err != nil {
         return err
     }
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+    tempFile, err := stream.CacheFullInTempFile()
     if err != nil {
         return err
     }
     defer func() {
         _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
     }()
     uploader := s3manager.NewUploader(s)
     input := &s3manager.UploadInput{
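stream.CacheFullInTempFile() replaces the utils.CreateTempFile call, and the manual os.Remove disappears because the stream now owns the temp file's lifecycle. Presumably it does something along these lines (a sketch under that assumption, not alist's implementation):

package sketch

import (
    "io"
    "os"
)

// cacheToTempFile drains r into a temp file and rewinds it, so upload
// code can Seek and re-read (as s3manager does for multipart parts).
func cacheToTempFile(r io.Reader) (*os.File, error) {
    f, err := os.CreateTemp("", "upload-*")
    if err != nil {
        return nil, err
    }
    if _, err = io.Copy(f, r); err == nil {
        _, err = f.Seek(0, io.SeekStart)
    }
    if err != nil {
        _ = f.Close()
        _ = os.Remove(f.Name())
        return nil, err
    }
    return f, nil
}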
@@ -5,6 +5,10 @@ import (
     "errors"
     "fmt"
     "io"
+    "time"
+
+    "github.com/alist-org/alist/v3/pkg/http_range"
+    "github.com/rclone/rclone/lib/readers"
 
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"
@@ -39,7 +43,7 @@ func (d *Mega) Drop(ctx context.Context) error {
 
 func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
     if node, ok := dir.(*MegaNode); ok {
-        nodes, err := d.c.FS.GetChildren(node.Node)
+        nodes, err := d.c.FS.GetChildren(node.n)
         if err != nil {
             return nil, err
         }
@@ -53,7 +57,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
         return res, nil
     }
     log.Errorf("can't convert: %+v", dir)
-    return nil, fmt.Errorf("unable to convert dir to mega node")
+    return nil, fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
@@ -64,77 +68,67 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
 
 func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
     if node, ok := file.(*MegaNode); ok {
-        //link, err := d.c.Link(node.Node, true)
-        //if err != nil {
-        //  return nil, err
-        //}
-        //return &model.Link{URL: link}, nil
-        down, err := d.c.NewDownload(node.Node)
-        if err != nil {
-            return nil, err
-        }
-        //u := down.GetResourceUrl()
-        //u = strings.Replace(u, "http", "https", 1)
-        //return &model.Link{URL: u}, nil
-        r, w := io.Pipe()
-        go func() {
-            defer func() {
-                _ = recover()
-            }()
-            log.Debugf("chunk size: %d", down.Chunks())
-            var (
-                chunk []byte
-                err   error
-            )
-            for id := 0; id < down.Chunks(); id++ {
-                chunk, err = down.DownloadChunk(id)
-                if err != nil {
-                    log.Errorf("mega down: %+v", err)
-                    break
-                }
-                log.Debugf("id: %d,len: %d", id, len(chunk))
-                //_, _, err = down.ChunkLocation(id)
-                //if err != nil {
-                //  log.Errorf("mega down: %+v", err)
-                //  return
-                //}
-                //_, err = c.Write(chunk)
-                if _, err = w.Write(chunk); err != nil {
-                    break
-                }
-            }
-            err = w.CloseWithError(err)
-            if err != nil {
-                log.Errorf("mega down: %+v", err)
-            }
-        }()
-        return &model.Link{Data: r}, nil
+        //down, err := d.c.NewDownload(n.Node)
+        //if err != nil {
+        //  return nil, fmt.Errorf("open download file failed: %w", err)
+        //}
+        size := file.GetSize()
+        var finalClosers utils.Closers
+        resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+            length := httpRange.Length
+            if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
+                length = -1
+            }
+            var down *mega.Download
+            err := utils.Retry(3, time.Second, func() (err error) {
+                down, err = d.c.NewDownload(node.n)
+                return err
+            })
+            if err != nil {
+                return nil, fmt.Errorf("open download file failed: %w", err)
+            }
+            oo := &openObject{
+                ctx:  ctx,
+                d:    down,
+                skip: httpRange.Start,
+            }
+            finalClosers.Add(oo)
+
+            return readers.NewLimitedReadCloser(oo, length), nil
+        }
+        resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
+        resultLink := &model.Link{
+            RangeReadCloser: resultRangeReadCloser,
+        }
+        return resultLink, nil
     }
-    return nil, fmt.Errorf("unable to convert dir to mega node")
+    return nil, fmt.Errorf("unable to convert dir to mega n")
 }
 
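The new Link no longer pipes the whole file through a goroutine; it hands the HTTP layer a RangeReadCloser that opens a fresh MEGA download per requested range, skips to the start offset, and limits the length (length = -1 meaning "to EOF"). The underlying idea, shown on a seekable source (illustrative, not the driver's code):

package main

import (
    "fmt"
    "io"
    "strings"
)

// rangeReader positions src at start and yields at most length bytes;
// length < 0 means "read to EOF", mirroring the driver's convention.
func rangeReader(src io.ReadSeeker, start, length int64) (io.Reader, error) {
    if _, err := src.Seek(start, io.SeekStart); err != nil {
        return nil, err
    }
    if length < 0 {
        return src, nil
    }
    return io.LimitReader(src, length), nil
}

func main() {
    src := strings.NewReader("0123456789")
    r, err := rangeReader(src, 2, 5)
    if err != nil {
        panic(err)
    }
    b, _ := io.ReadAll(r)
    fmt.Println(string(b)) // "23456"
}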
 func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
     if parentNode, ok := parentDir.(*MegaNode); ok {
-        _, err := d.c.CreateDir(dirName, parentNode.Node)
+        _, err := d.c.CreateDir(dirName, parentNode.n)
         return err
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
     if srcNode, ok := srcObj.(*MegaNode); ok {
         if dstNode, ok := dstDir.(*MegaNode); ok {
-            return d.c.Move(srcNode.Node, dstNode.Node)
+            return d.c.Move(srcNode.n, dstNode.n)
         }
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
     if srcNode, ok := srcObj.(*MegaNode); ok {
-        return d.c.Rename(srcNode.Node, newName)
+        return d.c.Rename(srcNode.n, newName)
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -143,14 +137,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 
 func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
     if node, ok := obj.(*MegaNode); ok {
-        return d.c.Delete(node.Node, false)
+        return d.c.Delete(node.n, false)
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
     if dstNode, ok := dstDir.(*MegaNode); ok {
-        u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
+        u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
         if err != nil {
             return err
         }
@@ -176,13 +170,13 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
             if err != nil {
                 return err
             }
-            up(id * 100 / u.Chunks())
+            up(float64(id) * 100 / float64(u.Chunks()))
         }
 
         _, err = u.Finish()
         return err
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 //func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
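The Put fix converts the progress expression to floating point: with integer operands Go truncates, so a small id against many chunks reports 0 for a long time. For example:

package main

import "fmt"

func main() {
    id, chunks := 3, 400
    fmt.Println(id * 100 / chunks)                   // 0    - integer division truncates
    fmt.Println(float64(id) * 100 / float64(chunks)) // 0.75 - smooth percentage
}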
@@ -1,6 +1,7 @@
 package mega
 
 import (
+    "github.com/alist-org/alist/v3/pkg/utils"
     "time"
 
     "github.com/alist-org/alist/v3/internal/model"
@@ -8,29 +9,36 @@ import (
 )
 
 type MegaNode struct {
-    *mega.Node
+    n *mega.Node
 }
 
-//func (m *MegaNode) GetSize() int64 {
-//  //TODO implement me
-//  panic("implement me")
-//}
-//
-//func (m *MegaNode) GetName() string {
-//  //TODO implement me
-//  panic("implement me")
-//}
+func (m *MegaNode) GetSize() int64 {
+    return m.n.GetSize()
+}
+
+func (m *MegaNode) GetName() string {
+    return m.n.GetName()
+}
+
+func (m *MegaNode) CreateTime() time.Time {
+    return m.n.GetTimeStamp()
+}
+
+func (m *MegaNode) GetHash() utils.HashInfo {
+    // Mega uses md5, but the original file hash can't be retrieved since files are encrypted in the cloud
+    return utils.HashInfo{}
+}
 
 func (m *MegaNode) ModTime() time.Time {
-    return m.GetTimeStamp()
+    return m.n.GetTimeStamp()
 }
 
 func (m *MegaNode) IsDir() bool {
-    return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
+    return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
 }
 
 func (m *MegaNode) GetID() string {
-    return m.GetHash()
+    return m.n.GetHash()
 }
 
 func (m *MegaNode) GetPath() string {
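Replacing the embedded *mega.Node with the named field n turns off Go's method promotion: the old MegaNode silently inherited mega.Node's GetHash, GetName, GetSize and so on, which collided with the model.Obj contract; now every method delegates explicitly. The difference in miniature:

package sketch

type inner struct{}

func (inner) Name() string { return "inner" }

// Embedding promotes inner's methods: embedded.Name() compiles and
// returns "inner", whether or not that matches the interface you meant.
type embedded struct{ inner }

// A named field promotes nothing; the wrapper must delegate explicitly
// and can adjust the behavior while doing so.
type wrapped struct{ n inner }

func (w wrapped) Name() string { return "wrapped:" + w.n.Name() }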
@@ -1,3 +1,92 @@
 package mega
+
+import (
+    "context"
+    "fmt"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "github.com/t3rm1n4l/go-mega"
+    "io"
+    "sync"
+    "time"
+)
 
 // do others that not defined in Driver interface
+
+// openObject represents a download in progress
+type openObject struct {
+    ctx    context.Context
+    mu     sync.Mutex
+    d      *mega.Download
+    id     int
+    skip   int64
+    chunk  []byte
+    closed bool
+}
+
+// get the next chunk
+func (oo *openObject) getChunk(ctx context.Context) (err error) {
+    if oo.id >= oo.d.Chunks() {
+        return io.EOF
+    }
+    var chunk []byte
+    err = utils.Retry(3, time.Second, func() (err error) {
+        chunk, err = oo.d.DownloadChunk(oo.id)
+        return err
+    })
+    if err != nil {
+        return err
+    }
+    oo.id++
+    oo.chunk = chunk
+    return nil
+}
+
+// Read reads up to len(p) bytes into p.
+func (oo *openObject) Read(p []byte) (n int, err error) {
+    oo.mu.Lock()
+    defer oo.mu.Unlock()
+    if oo.closed {
+        return 0, fmt.Errorf("read on closed file")
+    }
+    // Skip data at the start if requested
+    for oo.skip > 0 {
+        _, size, err := oo.d.ChunkLocation(oo.id)
+        if err != nil {
+            return 0, err
+        }
+        if oo.skip < int64(size) {
+            break
+        }
+        oo.id++
+        oo.skip -= int64(size)
+    }
+    if len(oo.chunk) == 0 {
+        err = oo.getChunk(oo.ctx)
+        if err != nil {
+            return 0, err
+        }
+        if oo.skip > 0 {
+            oo.chunk = oo.chunk[oo.skip:]
+            oo.skip = 0
+        }
+    }
+    n = copy(p, oo.chunk)
+    oo.chunk = oo.chunk[n:]
+    return n, nil
+}
+
+// Close closes the file - MAC errors are reported here
+func (oo *openObject) Close() (err error) {
+    oo.mu.Lock()
+    defer oo.mu.Unlock()
+    if oo.closed {
+        return nil
+    }
+    err = utils.Retry(3, 500*time.Millisecond, func() (err error) {
+        return oo.d.Finish()
+    })
+    if err != nil {
+        return fmt.Errorf("failed to finish download: %w", err)
+    }
+    oo.closed = true
+    return nil
+}
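utils.Retry is called above as Retry(attempts, sleep, fn). A minimal helper compatible with those call sites (alist's real implementation may differ):

package sketch

import "time"

// Retry runs f up to attempts times, sleeping between failed tries,
// and returns the last error if every attempt fails.
func Retry(attempts int, sleep time.Duration, f func() error) (err error) {
    for i := 0; i < attempts; i++ {
        if err = f(); err == nil {
            return nil
        }
        if i < attempts-1 {
            time.Sleep(sleep)
        }
    }
    return err
}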
Some files were not shown because too many files have changed in this diff.