Compare commits

103 commits:

8c3337b88b 7238243664 ba2b15ab24 28dc8822b7 358c5055e9 b6cd40e6d3 7d96d8070d
d482fb5f26 60402ce1fc 1e3950c847 ed550594da 3bbae29f93 3b74f8cd9a e9bdb91e01
1aa024ed6b 13e8d36e1a 5606c23768 0b675d6c02 c1db3a36ad c59dbb4f9e df6b306fce
9d45718e5f b91ed7a78a 95386d777b 635809c376 af6bb2a6aa a797494aa3 353dd7f796
1c00d64952 ff5cf3f4fa 5b6b2f427a 7877184bee e9cb37122e a425392a2b 75acbcc115
30415cefbe 1d06a0019f 3686075a7f 6c1c7e5cc0 c4f901b201 4b7acb1389 15b7169df4
861948bcf3 e5ffd39cf2 8b353da0d2 49bde82426 3e285aaec4 355fc576b1 a69d72aa20
e5d123c5d3 220eb33f88 5238850036 81ac963567 3c21a9a520 1dc1dd1f07 c9ea9bce81
9f08353d31 ce0c3626c2 06f46206db 579f0c06af b12d92acc9 e700ce15e5 7dbef7d559
7e9cdd8b07 cee6bc6b5d cfd23c05b4 0c1acd72ca e2ca06dcca 0828fd787d 2e23ea68d4
4afa822bec f2ca9b40db 4c2535cb22 d4ea8787c9 a4de04528a f60aae7499 de8f9e9eee
cace9db12f ec2fb82836 afcfbf02ea cad04e07dd 30f732138c 04034bd03b 6ec9a8d4c7
3f7882b467 a4511c1963 9d1f122717 5dd73d80d8 fce872bc1b df6c4c80c2 d2ff040cf8
a31af209cc 3f8b3da52b 6887f14ec6 3e0de5eaac 61101a60f4 3529023bf9 d1d1a089a4
fa66358b1e 2b533e4b91 d3530a8d80 6052eb3512 d17f7f7cad
.github/FUNDING.yml (vendored): 2 changes

@@ -3,7 +3,7 @@
 github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 patreon: # Replace with a single Patreon username
 open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
+ko_fi: xhofe # Replace with a single Ko-fi username
 tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 liberapay: # Replace with a single Liberapay username
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 43 changes

@@ -7,28 +7,44 @@ body:
       value: |
         Thanks for taking the time to fill out this bug report, please **confirm that your issue is not a duplicate issue and not because of your operation or version issues**
         感谢您花时间填写此错误报告,请**务必确认您的issue不是重复的且不是因为您的操作或版本问题**

   - type: checkboxes
     attributes:
       label: Please make sure of the following things
-      description: You may select more than one, even select all.
+      description: |
+        You must check all the following, otherwise your issue may be closed directly. Or you can go to the [discussions](https://github.com/alist-org/alist/discussions)
+        您必须勾选以下所有内容,否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
       options:
-        - label: I have read the [documentation](https://alist.nn.ci).
-        - label: I'm sure there are no duplicate issues or discussions.
-        - label: I'm sure it's due to `alist` and not something else(such as `Dependencies` or `Operational`).
-        - label: I'm sure I'm using the latest version
+        - label: |
+            I have read the [documentation](https://alist.nn.ci).
+            我已经阅读了[文档](https://alist.nn.ci)。
+        - label: |
+            I'm sure there are no duplicate issues or discussions.
+            我确定没有重复的issue或讨论。
+        - label: |
+            I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
+            我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
+        - label: |
+            I'm sure this issue is not fixed in the latest version.
+            我确定这个问题在最新版本中没有被修复。

   - type: input
     id: version
     attributes:
-      label: Alist Version / Alist 版本
-      description: What version of our software are you running?
-      placeholder: v2.0.0
+      label: AList Version / AList 版本
+      description: |
+        What version of our software are you running? Do not use `latest` or `master` as an answer.
+        您使用的是哪个版本的软件?请不要使用`latest`或`master`作为答案。
+      placeholder: v3.xx.xx
     validations:
       required: true
   - type: input
     id: driver
     attributes:
       label: Driver used / 使用的存储驱动
-      description: What storage driver are you using?
+      description: |
+        What storage driver are you using?
+        您使用的是哪个存储驱动?
       placeholder: "for example: Onedrive"
     validations:
       required: true

@@ -47,6 +63,15 @@ body:
       请提供能复现此问题的链接,请知悉如果不提供它你的issue可能会被直接关闭。
     validations:
       required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Config / 配置
+      description: |
+        Please provide the configuration file of your `AList` application and take a screenshot of the relevant storage configuration. (hide privacy field)
+        请提供您的`AList`应用的配置文件,并截图相关存储配置。(隐藏隐私字段)
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
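Note on the template syntax: the reworked fields rely on YAML literal block scalars (`description: |`, `- label: |`), which let a single string span the English line and the Chinese line below it; this is standard GitHub issue-forms YAML rather than anything AList-specific.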
.github/workflows/auto_lang.yml (vendored): 4 changes

@@ -53,8 +53,8 @@ jobs:
         run: |
           cd alist-web
           git add .
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git commit -m "chore: auto update i18n file" -a 2>/dev/null || :
           cd ..
.github/workflows/build_docker.yml (vendored): 4 changes

@@ -53,8 +53,8 @@ jobs:

       - name: Commit
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Noah Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git commit --allow-empty -m "Trigger build for ${{ github.sha }}"

       - name: Push commit
.github/workflows/issue_question.yml (vendored): 2 changes

@@ -10,7 +10,7 @@ jobs:
     if: github.event.label.name == 'question'
     steps:
       - name: Create comment
-        uses: actions-cool/issues-helper@v3.4.0
+        uses: actions-cool/issues-helper@v3.5.2
         with:
           actions: 'create-comment'
           token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/issue_rm_working.yml (vendored, new file): 17 changes

@@ -0,0 +1,17 @@
+name: Remove working label when issue closed
+
+on:
+  issues:
+    types: [closed]
+
+jobs:
+  rm-working:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Remove working label
+        uses: actions-cool/issues-helper@v3
+        with:
+          actions: 'remove-labels'
+          token: ${{ secrets.GITHUB_TOKEN }}
+          issue-number: ${{ github.event.issue.number }}
+          labels: 'working'
.github/workflows/release.yml (vendored): 12 changes

@@ -41,17 +41,11 @@ jobs:
         run: |
           bash build.sh release

-      - name: Release latest
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.MY_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: false
-
       - name: Upload assets
         uses: softprops/action-gh-release@v1
         with:
           files: build/compress/*
+          prerelease: false

   release_desktop:
     needs: release
@@ -68,8 +62,8 @@ jobs:

       - name: Add tag
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           version=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
           git tag -a $version -m "release $version"
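The net effect of the first hunk: the separate irongut/EditRelease step is gone, and softprops/action-gh-release now clears the prerelease flag itself through its `prerelease` input, so the release is marked final in the same step that uploads the assets.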
.github/workflows/release_docker.yml (vendored): 4 changes

@@ -56,8 +56,8 @@ jobs:

       - name: Add tag
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}"

       - name: Push tags
.github/workflows/release_linux_musl_arm.yml (vendored, new file): 34 changes

@@ -0,0 +1,34 @@
+name: release_linux_musl_arm
+
+on:
+  release:
+    types: [ published ]
+
+jobs:
+  release_arm:
+    strategy:
+      matrix:
+        platform: [ ubuntu-latest ]
+        go-version: [ '1.20' ]
+    name: Release
+    runs-on: ${{ matrix.platform }}
+    steps:
+
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ matrix.go-version }}
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Build
+        run: |
+          bash build.sh release linux_musl_arm
+
+      - name: Upload assets
+        uses: softprops/action-gh-release@v1
+        with:
+          files: build/compress/*
.gitignore (vendored): 2 changes

@@ -29,3 +29,5 @@ output/
 /daemon/
 /public/dist/*
 /!public/dist/README.md
+
+.VSCodeCounter
@@ -7,7 +7,7 @@
 Prerequisites:

 - [git](https://git-scm.com)
-- [Go 1.19+](https://golang.org/doc/install)
+- [Go 1.20+](https://golang.org/doc/install)
 - [gcc](https://gcc.gnu.org/)
 - [nodejs](https://nodejs.org/)
@@ -39,7 +39,7 @@

 ---

-English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

 ## Features

@@ -91,6 +91,7 @@ English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_O
 - [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
 - [x] Offline download
 - [x] Copy files between two storage
+- [x] Multi-thread downloading acceleration for single-thread download/stream

 ## Document

@@ -112,8 +113,7 @@ https://alist.nn.ci/guide/sponsor.html
 ### Special sponsors

 - [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
-- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.la/)
-- [KinhDown 百度云盘不限速下载!永久免费!已稳定运行3年!非常可靠!Q群 -> 786799372](https://kinhdown.com)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
 - [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

 ## Contributors
@@ -39,7 +39,7 @@

 ---

-[English](./README.md) | 中文 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+[English](./README.md) | 中文 | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

 ## 功能

@@ -90,6 +90,7 @@
 - [x] 网页上传(可以允许访客上传),删除,新建文件夹,重命名,移动,复制
 - [x] 离线下载
 - [x] 跨存储复制文件
+- [x] 单线程下载/串流的多线程下载加速

 ## 文档

@@ -110,8 +111,7 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我
 ### 特别赞助

 - [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (国内API服务器赞助)
-- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.la/)
-- [KinhDown 百度云盘不限速下载!永久免费!已稳定运行3年!非常可靠!Q群 -> 786799372](https://kinhdown.com)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
 - [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

 ## 贡献者
README_ja.md (new file): 138 changes

@@ -0,0 +1,138 @@
+<div align="center">
+  <a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
+  <p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
+  <div>
+    <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
+      <img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/blob/main/LICENSE">
+      <img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/actions?query=workflow%3ABuild">
+      <img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/releases">
+      <img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
+    </a>
+    <a title="Crowdin" target="_blank" href="https://crwd.in/alist">
+      <img src="https://badges.crowdin.net/alist/localized.svg">
+    </a>
+  </div>
+  <div>
+    <a href="https://github.com/Xhofe/alist/discussions">
+      <img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
+    </a>
+    <a href="https://discord.gg/F4ymsH4xv2">
+      <img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/releases">
+      <img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
+    </a>
+    <a href="https://hub.docker.com/r/xhofe/alist">
+      <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
+    </a>
+    <a href="https://alist.nn.ci/guide/sponsor.html">
+      <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
+    </a>
+  </div>
+</div>
+
+---
+
+[English](./README.md) | [中文](./README_cn.md) | 日本語 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+
+## 特徴
+
+- [x] マルチストレージ
+  - [x] ローカルストレージ
+  - [x] [Aliyundrive](https://www.aliyundrive.com/)
+  - [x] OneDrive / Sharepoint ([グローバル](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
+  - [x] [189cloud](https://cloud.189.cn) (Personal, Family)
+  - [x] [GoogleDrive](https://drive.google.com/)
+  - [x] [123pan](https://www.123pan.com/)
+  - [x] FTP / SFTP
+  - [x] [PikPak](https://www.mypikpak.com/)
+  - [x] [S3](https://aws.amazon.com/s3/)
+  - [x] [Seafile](https://seafile.com/)
+  - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
+  - [x] WebDav(Support OneDrive/SharePoint without API)
+  - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
+  - [x] [Mediatrack](https://www.mediatrack.cn/)
+  - [x] [139yun](https://yun.139.com/) (Personal, Family)
+  - [x] [YandexDisk](https://disk.yandex.com/)
+  - [x] [BaiduNetdisk](http://pan.baidu.com/)
+  - [x] [Terabox](https://www.terabox.com/main)
+  - [x] [UC](https://drive.uc.cn)
+  - [x] [Quark](https://pan.quark.cn)
+  - [x] [Thunder](https://pan.xunlei.com)
+  - [x] [Lanzou](https://www.lanzou.com/)
+  - [x] [Aliyundrive share](https://www.aliyundrive.com/)
+  - [x] [Google photo](https://photos.google.com/)
+  - [x] [Mega.nz](https://mega.nz)
+  - [x] [Baidu photo](https://photo.baidu.com/)
+  - [x] SMB
+  - [x] [115](https://115.com/)
+  - [X] Cloudreve
+  - [x] [Dropbox](https://www.dropbox.com/)
+- [x] デプロイが簡単で、すぐに使える
+- [x] ファイルプレビュー (PDF, マークダウン, コード, プレーンテキスト, ...)
+- [x] ギャラリーモードでの画像プレビュー
+- [x] ビデオとオーディオのプレビュー、歌詞と字幕のサポート
+- [x] Office ドキュメントのプレビュー (docx, pptx, xlsx, ...)
+- [x] `README.md` のプレビューレンダリング
+- [x] ファイルのパーマリンクコピーと直接ダウンロード
+- [x] ダークモード
+- [x] 国際化
+- [x] 保護されたルート (パスワード保護と認証)
+- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
+- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
+- [x] Cloudflare ワーカープロキシ
+- [x] ファイル/フォルダパッケージのダウンロード
+- [x] ウェブアップロード(訪問者にアップロードを許可できる), 削除, mkdir, 名前変更, 移動, コピー
+- [x] オフラインダウンロード
+- [x] 二つのストレージ間でファイルをコピー
+- [x] シングルスレッドのダウンロード/ストリーム向けのマルチスレッド ダウンロード アクセラレーション
+
+## ドキュメント
+
+<https://alist.nn.ci/>
+
+## デモ
+
+<https://al.nn.ci>
+
+## ディスカッション
+
+一般的なご質問は[ディスカッションフォーラム](https://github.com/Xhofe/alist/discussions)をご利用ください。**問題はバグレポートと機能リクエストのみです。**
+
+## スポンサー
+
+AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討ください!すべての愛とサポートに感謝します:
+https://alist.nn.ci/guide/sponsor.html
+
+### スペシャルスポンサー
+
+- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
+- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
+
+## コントリビューター
+
+これらの素晴らしい人々に感謝します:
+
+[](https://github.com/alist-org/alist/graphs/contributors)
+
+## ライセンス
+
+`AList` は AGPL-3.0 ライセンスの下でライセンスされたオープンソースソフトウェアです。
+
+## 免責事項
+
+- このプログラムはフリーでオープンソースのプロジェクトです。ネットワークディスク上でファイルを共有するように設計されており、golang のダウンロードや学習に便利です。利用にあたっては関連法規を遵守し、悪用しないようお願いします;
+- このプログラムは、公式インターフェースの動作を破壊することなく、公式 sdk/インターフェースを呼び出すことで実装されています;
+- このプログラムは、302リダイレクト/トラフィック転送のみを行い、いかなるユーザーデータも傍受、保存、改ざんしません;
+- このプログラムを使用する前に、アカウントの禁止、ダウンロード速度の制限など、対応するリスクを理解し、負担する必要があります;
+- もし侵害があれば、[メール](mailto:i@nn.ci)で私に連絡してください。
+
+---
+
+> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/Xhofe) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
build.sh: 49 changes

@@ -93,14 +93,15 @@ BuildRelease() {
   mkdir -p "build"
   muslflags="--extldflags '-static -fpic' $ldflags"
   BASE="https://musl.nn.ci/"
-  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross arm-linux-musleabihf-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
+  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
   for i in "${FILES[@]}"; do
     url="${BASE}${i}.tgz"
     curl -L -o "${i}.tgz" "${url}"
     sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
+    rm -f "${i}.tgz"
   done
-  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-arm linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
-  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc arm-linux-musleabihf-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
+  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
+  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
   for i in "${!OS_ARCHES[@]}"; do
     os_arch=${OS_ARCHES[$i]}
    cgo_cc=${CGO_ARGS[$i]}
@@ -120,6 +121,39 @@ BuildRelease() {
   mv alist-* build
 }

+BuildReleaseLinuxMuslArm() {
+  rm -rf .git/
+  mkdir -p "build"
+  muslflags="--extldflags '-static -fpic' $ldflags"
+  BASE="https://musl.nn.ci/"
+  # FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armeb-linux-musleabi-cross armeb-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
+  FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
+  for i in "${FILES[@]}"; do
+    url="${BASE}${i}.tgz"
+    curl -L -o "${i}.tgz" "${url}"
+    sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
+    rm -f "${i}.tgz"
+  done
+  # OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armeb linux-musleabihf-armeb linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
+  # CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armeb-linux-musleabi-gcc armeb-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
+  # GOARMS=('' '' '' '' '' '' '5' '5' '6' '6' '7' '7' '7')
+  OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
+  CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
+  GOARMS=('' '' '' '' '5' '5' '6' '6' '7' '7' '7')
+  for i in "${!OS_ARCHES[@]}"; do
+    os_arch=${OS_ARCHES[$i]}
+    cgo_cc=${CGO_ARGS[$i]}
+    arm=${GOARMS[$i]}
+    echo building for ${os_arch}
+    export GOOS=linux
+    export GOARCH=arm
+    export CC=${cgo_cc}
+    export CGO_ENABLED=1
+    export GOARM=${arm}
+    go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
+  done
+}
+
 MakeRelease() {
   cd build
   mkdir compress
@@ -139,8 +173,8 @@ MakeRelease() {
     rm -f alist.exe
   done
   cd compress
-  find . -type f -print0 | xargs -0 md5sum >md5.txt
-  cat md5.txt
+  find . -type f -print0 | xargs -0 md5sum >"$1"
+  cat "$1"
   cd ../..
 }

@@ -155,9 +189,12 @@ elif [ "$1" = "release" ]; then
   FetchWebRelease
   if [ "$2" = "docker" ]; then
     BuildDocker
+  elif [ "$2" = "linux_musl_arm" ]; then
+    BuildReleaseLinuxMuslArm
+    MakeRelease "md5-linux-musl-arm.txt"
   else
     BuildRelease
-    MakeRelease
+    MakeRelease "md5.txt"
   fi
 else
   echo -e "Parameter error"
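For orientation on the new ARM matrix: the `musleabi` toolchains target the soft-float ARM ABI and `musleabihf` the hard-float one, and the parallel `GOARMS` array feeds Go's `GOARM` variable ('5', '6' or '7', empty for the generic arm/armel targets), pairing each cross compiler with the matching instruction-set level. `MakeRelease` now takes the checksum file name as `$1`, so the default build writes `md5.txt` while the ARM-only build writes `md5-linux-musl-arm.txt` without clobbering it.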
cmd/admin.go: 69 changes

@@ -4,30 +4,87 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/spf13/cobra"
 )

-// PasswordCmd represents the password command
-var PasswordCmd = &cobra.Command{
+// AdminCmd represents the password command
+var AdminCmd = &cobra.Command{
 	Use:     "admin",
 	Aliases: []string{"password"},
-	Short:   "Show admin user's info",
+	Short:   "Show admin user's info and some operations about admin user's password",
 	Run: func(cmd *cobra.Command, args []string) {
 		Init()
 		admin, err := op.GetAdmin()
 		if err != nil {
 			utils.Log.Errorf("failed get admin user: %+v", err)
 		} else {
-			utils.Log.Infof("admin user's info: \nusername: %s\npassword: %s", admin.Username, admin.Password)
+			utils.Log.Infof("Admin user's username: %s", admin.Username)
+			utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
+			utils.Log.Infof("You can reset the password with a random string by running [alist admin random]")
+			utils.Log.Infof("You can also set a new password by running [alist admin set NEW_PASSWORD]")
 		}
 	},
 }

-func init() {
-	RootCmd.AddCommand(PasswordCmd)
+var RandomPasswordCmd = &cobra.Command{
+	Use:   "random",
+	Short: "Reset admin user's password to a random string",
+	Run: func(cmd *cobra.Command, args []string) {
+		newPwd := random.String(8)
+		setAdminPassword(newPwd)
+	},
+}
+
+var SetPasswordCmd = &cobra.Command{
+	Use:   "set",
+	Short: "Set admin user's password",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) == 0 {
+			utils.Log.Errorf("Please enter the new password")
+			return
+		}
+		setAdminPassword(args[0])
+	},
+}
+
+var ShowTokenCmd = &cobra.Command{
+	Use:   "token",
+	Short: "Show admin token",
+	Run: func(cmd *cobra.Command, args []string) {
+		Init()
+		token := setting.GetStr(conf.Token)
+		utils.Log.Infof("Admin token: %s", token)
+	},
+}
+
+func setAdminPassword(pwd string) {
+	Init()
+	admin, err := op.GetAdmin()
+	if err != nil {
+		utils.Log.Errorf("failed get admin user: %+v", err)
+		return
+	}
+	admin.SetPassword(pwd)
+	if err := op.UpdateUser(admin); err != nil {
+		utils.Log.Errorf("failed update admin user: %+v", err)
+		return
+	}
+	utils.Log.Infof("admin user has been updated:")
+	utils.Log.Infof("username: %s", admin.Username)
+	utils.Log.Infof("password: %s", pwd)
+	DelAdminCacheOnline()
+}
+
+func init() {
+	RootCmd.AddCommand(AdminCmd)
+	AdminCmd.AddCommand(RandomPasswordCmd)
+	AdminCmd.AddCommand(SetPasswordCmd)
+	AdminCmd.AddCommand(ShowTokenCmd)
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
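As a rough, self-contained sketch of the command tree this produces (`alist admin`, `alist admin random`, `alist admin set`, `alist admin token`), with the real password and token logic replaced by stubs; only the command names, aliases and descriptions are taken from the diff:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        root := &cobra.Command{Use: "alist"}
        admin := &cobra.Command{
            Use:     "admin",
            Aliases: []string{"password"},
            Short:   "Show admin user's info and some operations about admin user's password",
            Run:     func(cmd *cobra.Command, args []string) { fmt.Println("admin info...") }, // stub
        }
        randomCmd := &cobra.Command{
            Use:   "random",
            Short: "Reset admin user's password to a random string",
            Run:   func(cmd *cobra.Command, args []string) { fmt.Println("reset to a random password") }, // stub
        }
        setCmd := &cobra.Command{
            Use:   "set",
            Short: "Set admin user's password",
            Run: func(cmd *cobra.Command, args []string) {
                if len(args) == 0 {
                    fmt.Println("Please enter the new password")
                    return
                }
                fmt.Println("set password to", args[0]) // stub
            },
        }
        tokenCmd := &cobra.Command{
            Use:   "token",
            Short: "Show admin token",
            Run:   func(cmd *cobra.Command, args []string) { fmt.Println("admin token...") }, // stub
        }
        admin.AddCommand(randomCmd, setCmd, tokenCmd)
        root.AddCommand(admin)
        _ = root.Execute()
    }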
@@ -24,6 +24,7 @@ var Cancel2FACmd = &cobra.Command{
 			utils.Log.Errorf("failed to cancel 2FA: %+v", err)
 		} else {
 			utils.Log.Info("2FA canceled")
+			DelAdminCacheOnline()
 		}
 	}
 	},
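`DelAdminCacheOnline` is not defined in this file; it is added in the new `cmd/user.go` further down, which asks a running AList instance to drop its cached admin user so the 2FA (or password) change takes effect immediately.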
@@ -7,6 +7,7 @@ import (
 	"net/http"
 	"os"
 	"os/signal"
+	"strconv"
 	"sync"
 	"syscall"
 	"time"
@@ -74,6 +75,16 @@ the address is defined in config file`,
 			if err != nil {
 				utils.Log.Fatalf("failed to listen unix: %+v", err)
 			}
+			// set socket file permission
+			mode, err := strconv.ParseUint(conf.Conf.Scheme.UnixFilePerm, 8, 32)
+			if err != nil {
+				utils.Log.Errorf("failed to parse socket file permission: %+v", err)
+			} else {
+				err = os.Chmod(conf.Conf.Scheme.UnixFile, os.FileMode(mode))
+				if err != nil {
+					utils.Log.Errorf("failed to chmod socket file: %+v", err)
+				}
+			}
 			err = unixSrv.Serve(listener)
 			if err != nil && err != http.ErrServerClosed {
 				utils.Log.Fatalf("failed to start unix: %s", err.Error())
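A stand-alone sketch of the added permission handling, assuming a hypothetical socket path and an octal string like the `UnixFilePerm` config value (parsed base 8, then applied with `os.Chmod`):

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func main() {
        perm := "0777"            // hypothetical config value, as in conf.Conf.Scheme.UnixFilePerm
        sock := "/tmp/alist.sock" // hypothetical socket path

        // the permission arrives as text, so parse it as an octal number first
        mode, err := strconv.ParseUint(perm, 8, 32)
        if err != nil {
            fmt.Printf("failed to parse socket file permission: %v\n", err)
            return
        }
        if err := os.Chmod(sock, os.FileMode(mode)); err != nil {
            fmt.Printf("failed to chmod socket file: %v\n", err)
        }
    }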
cmd/storage.go: 151 changes

@@ -4,8 +4,14 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+	"os"
+	"strconv"
+
 	"github.com/alist-org/alist/v3/internal/db"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/charmbracelet/bubbles/table"
+	tea "github.com/charmbracelet/bubbletea"
+	"github.com/charmbracelet/lipgloss"
 	"github.com/spf13/cobra"
 )

@@ -15,31 +21,134 @@ var storageCmd = &cobra.Command{
 	Short: "Manage storage",
 }

-func init() {
-	var mountPath string
-	var disable = &cobra.Command{
-		Use:   "disable",
-		Short: "Disable a storage",
-		Run: func(cmd *cobra.Command, args []string) {
-			Init()
-			storage, err := db.GetStorageByMountPath(mountPath)
-			if err != nil {
-				utils.Log.Errorf("failed to query storage: %+v", err)
-			} else {
-				storage.Disabled = true
-				err = db.UpdateStorage(storage)
-				if err != nil {
-					utils.Log.Errorf("failed to update storage: %+v", err)
-				} else {
-					utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
-				}
-			}
-		},
-	}
-	disable.Flags().StringVarP(&mountPath, "mount-path", "m", "", "The mountPath of storage")
-	RootCmd.AddCommand(storageCmd)
-	storageCmd.AddCommand(disable)
+var disableStorageCmd = &cobra.Command{
+	Use:   "disable",
+	Short: "Disable a storage",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) < 1 {
+			utils.Log.Errorf("mount path is required")
+			return
+		}
+		mountPath := args[0]
+		Init()
+		storage, err := db.GetStorageByMountPath(mountPath)
+		if err != nil {
+			utils.Log.Errorf("failed to query storage: %+v", err)
+		} else {
+			storage.Disabled = true
+			err = db.UpdateStorage(storage)
+			if err != nil {
+				utils.Log.Errorf("failed to update storage: %+v", err)
+			} else {
+				utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
+			}
+		}
+	},
+}
+
+var baseStyle = lipgloss.NewStyle().
+	BorderStyle(lipgloss.NormalBorder()).
+	BorderForeground(lipgloss.Color("240"))
+
+type model struct {
+	table table.Model
+}
+
+func (m model) Init() tea.Cmd { return nil }
+
+func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+	var cmd tea.Cmd
+	switch msg := msg.(type) {
+	case tea.KeyMsg:
+		switch msg.String() {
+		case "esc":
+			if m.table.Focused() {
+				m.table.Blur()
+			} else {
+				m.table.Focus()
+			}
+		case "q", "ctrl+c":
+			return m, tea.Quit
+			//case "enter":
+			//	return m, tea.Batch(
+			//		tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
+			//	)
+		}
+	}
+	m.table, cmd = m.table.Update(msg)
+	return m, cmd
+}
+
+func (m model) View() string {
+	return baseStyle.Render(m.table.View()) + "\n"
+}
+
+var storageTableHeight int
+var listStorageCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all storages",
+	Run: func(cmd *cobra.Command, args []string) {
+		Init()
+		storages, _, err := db.GetStorages(1, -1)
+		if err != nil {
+			utils.Log.Errorf("failed to query storages: %+v", err)
+		} else {
+			utils.Log.Infof("Found %d storages", len(storages))
+			columns := []table.Column{
+				{Title: "ID", Width: 4},
+				{Title: "Driver", Width: 16},
+				{Title: "Mount Path", Width: 30},
+				{Title: "Enabled", Width: 7},
+			}
+
+			var rows []table.Row
+			for i := range storages {
+				storage := storages[i]
+				enabled := "true"
+				if storage.Disabled {
+					enabled = "false"
+				}
+				rows = append(rows, table.Row{
+					strconv.Itoa(int(storage.ID)),
+					storage.Driver,
+					storage.MountPath,
+					enabled,
+				})
+			}
+			t := table.New(
+				table.WithColumns(columns),
+				table.WithRows(rows),
+				table.WithFocused(true),
+				table.WithHeight(storageTableHeight),
+			)
+
+			s := table.DefaultStyles()
+			s.Header = s.Header.
+				BorderStyle(lipgloss.NormalBorder()).
+				BorderForeground(lipgloss.Color("240")).
+				BorderBottom(true).
+				Bold(false)
+			s.Selected = s.Selected.
+				Foreground(lipgloss.Color("229")).
+				Background(lipgloss.Color("57")).
+				Bold(false)
+			t.SetStyles(s)
+
+			m := model{t}
+			if _, err := tea.NewProgram(m).Run(); err != nil {
+				utils.Log.Errorf("failed to run program: %+v", err)
+				os.Exit(1)
+			}
+		}
+	},
+}
+
+func init() {
+	RootCmd.AddCommand(storageCmd)
+	storageCmd.AddCommand(disableStorageCmd)
+	storageCmd.AddCommand(listStorageCmd)
+	storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
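Usage notes for the reworked subcommands: `disable` now takes the mount path as a positional argument (`alist storage disable /my-mount`) instead of the removed `--mount-path`/`-m` flag, and the new `list` subcommand renders the storages in an interactive Bubble Tea table (quit with `q` or `ctrl+c`) whose height comes from the persistent `--height`/`-H` flag, default 10.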
cmd/user.go (new file): 52 changes

@@ -0,0 +1,52 @@
+package cmd
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+)
+
+func DelAdminCacheOnline() {
+	admin, err := op.GetAdmin()
+	if err != nil {
+		utils.Log.Errorf("[del_admin_cache] get admin error: %+v", err)
+		return
+	}
+	DelUserCacheOnline(admin.Username)
+}
+
+func DelUserCacheOnline(username string) {
+	client := resty.New().SetTimeout(1 * time.Second).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
+	token := setting.GetStr(conf.Token)
+	port := conf.Conf.Scheme.HttpPort
+	u := fmt.Sprintf("http://localhost:%d/api/admin/user/del_cache", port)
+	if port == -1 {
+		if conf.Conf.Scheme.HttpsPort == -1 {
+			utils.Log.Warnf("[del_user_cache] no open port")
+			return
+		}
+		u = fmt.Sprintf("https://localhost:%d/api/admin/user/del_cache", conf.Conf.Scheme.HttpsPort)
+	}
+	res, err := client.R().SetHeader("Authorization", token).SetQueryParam("username", username).Post(u)
+	if err != nil {
+		utils.Log.Warnf("[del_user_cache_online] failed: %+v", err)
+		return
+	}
+	if res.StatusCode() != 200 {
+		utils.Log.Warnf("[del_user_cache_online] failed: %+v", res.String())
+		return
+	}
+	code := utils.Json.Get(res.Body(), "code").ToInt()
+	msg := utils.Json.Get(res.Body(), "message").ToString()
+	if code != 200 {
+		utils.Log.Errorf("[del_user_cache_online] error: %s", msg)
+		return
+	}
+	utils.Log.Debugf("[del_user_cache_online] del user [%s] cache success", username)
+}
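A stand-alone sketch of the endpoint selection inside `DelUserCacheOnline`: prefer the HTTP port, fall back to HTTPS when HTTP is disabled (-1), and give up when neither is open (port values here are hypothetical):

    package main

    import "fmt"

    // cacheURL mirrors the port fallback above; it returns false when no port is open.
    func cacheURL(httpPort, httpsPort int) (string, bool) {
        if httpPort != -1 {
            return fmt.Sprintf("http://localhost:%d/api/admin/user/del_cache", httpPort), true
        }
        if httpsPort != -1 {
            return fmt.Sprintf("https://localhost:%d/api/admin/user/del_cache", httpsPort), true
        }
        return "", false
    }

    func main() {
        if u, ok := cacheURL(5244, -1); ok { // hypothetical ports
            fmt.Println(u)
        }
    }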
@@ -83,7 +83,7 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return err
 	}
@@ -1,10 +1,11 @@
 package _115

 import (
+	"crypto/tls"
 	"fmt"

 	"github.com/SheltonZhu/115driver/pkg/driver"
-	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/pkg/errors"
 )

@@ -14,9 +15,11 @@ func (d *Pan115) login() error {
 	var err error
 	opts := []driver.Option{
 		driver.UA(UserAgent),
+		func(c *driver.Pan115Client) {
+			c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
+		},
 	}
 	d.client = driver.New(opts...)
-	d.client.SetHttpClient(base.HttpClient)
 	cr := &driver.Credential{}
 	if d.Addition.QRCodeToken != "" {
 		s := &driver.QRCodeSession{
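A stand-alone sketch of the functional-option pattern the diff uses to inject a TLS config into the client; `Client`, `Option` and `New` here are simplified stand-ins, not the real 115driver API:

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
    )

    type Client struct{ hc *http.Client }

    type Option func(*Client)

    // New builds a client and then lets each option mutate it, which is how the
    // diff slips a TLS config in without changing the constructor's signature.
    func New(opts ...Option) *Client {
        c := &Client{hc: &http.Client{}}
        for _, o := range opts {
            o(c)
        }
        return c
    }

    func main() {
        insecure := true // stands in for conf.Conf.TlsInsecureSkipVerify
        c := New(func(c *Client) {
            c.hc.Transport = &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure}}
        })
        fmt.Println("transport configured:", c.hc.Transport != nil)
    }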
@@ -1,11 +1,9 @@
 package _123

 import (
-	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/base64"
-	"encoding/binary"
 	"encoding/hex"
 	"fmt"
 	"io"
@@ -45,6 +43,9 @@ func (d *Pan123) Init(ctx context.Context) error {
 }

 func (d *Pan123) Drop(ctx context.Context) error {
+	_, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) {
+		req.SetBody(base.Json{})
+	}, nil)
 	return nil
 }

@@ -109,7 +110,7 @@ func (d *Pan123) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 	log.Debugln("res code: ", res.StatusCode())
 	if res.StatusCode() == 302 {
 		link.URL = res.Header().Get("location")
-	} else if res.StatusCode() == 200 {
+	} else if res.StatusCode() < 300 {
 		link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
 	}
 	link.Header = http.Header{
@@ -180,40 +181,23 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	const DEFAULT int64 = 10485760
-	var uploadFile io.Reader
+	// const DEFAULT int64 = 10485760
 	h := md5.New()
-	if d.StreamUpload && stream.GetSize() > DEFAULT {
-		// 只计算前10MIB
-		buf := bytes.NewBuffer(make([]byte, 0, DEFAULT))
-		if n, err := io.CopyN(io.MultiWriter(buf, h), stream, DEFAULT); err != io.EOF && n == 0 {
-			return err
-		}
-		// 增加额外参数防止MD5碰撞
-		h.Write([]byte(stream.GetName()))
-		num := make([]byte, 8)
-		binary.BigEndian.PutUint64(num, uint64(stream.GetSize()))
-		h.Write(num)
-		// 拼装
-		uploadFile = io.MultiReader(buf, stream)
-	} else {
-		// 计算完整文件MD5
-		tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
-		if err != nil {
-			return err
-		}
-		defer func() {
-			_ = tempFile.Close()
-			_ = os.Remove(tempFile.Name())
-		}()
-		if _, err = io.Copy(h, tempFile); err != nil {
-			return err
-		}
-		_, err = tempFile.Seek(0, io.SeekStart)
-		if err != nil {
-			return err
-		}
-		uploadFile = tempFile
+	// need to calculate md5 of the full content
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = tempFile.Close()
+		_ = os.Remove(tempFile.Name())
+	}()
+	if _, err = io.Copy(h, tempFile); err != nil {
+		return err
+	}
+	_, err = tempFile.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
 	}
 	etag := hex.EncodeToString(h.Sum(nil))
 	data := base.Json{
@@ -237,7 +221,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		return nil
 	}
 	if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
-		err = d.newUpload(ctx, &resp, stream, uploadFile, up)
+		err = d.newUpload(ctx, &resp, stream, tempFile, up)
 		return err
 	} else {
 		cfg := &aws.Config{
@@ -254,7 +238,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		input := &s3manager.UploadInput{
 			Bucket: &resp.Data.Bucket,
 			Key:    &resp.Data.Key,
-			Body:   uploadFile,
+			Body:   tempFile,
 		}
 		_, err = uploader.UploadWithContext(ctx, input)
 	}
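A stand-alone sketch of the new hashing flow in `Put`: spool the stream to a temp file, hash the full content, and rewind so the same handle can be handed to the uploader (`os.CreateTemp` stands in for `utils.CreateTempFile`, and a string reader stands in for the upload stream):

    package main

    import (
        "crypto/md5"
        "encoding/hex"
        "fmt"
        "io"
        "os"
        "strings"
    )

    func main() {
        src := strings.NewReader("hypothetical file body") // stands in for stream.GetReadCloser()

        tempFile, err := os.CreateTemp("", "upload-*")
        if err != nil {
            panic(err)
        }
        defer func() {
            _ = tempFile.Close()
            _ = os.Remove(tempFile.Name())
        }()

        // spool the stream to disk, then rewind
        if _, err := io.Copy(tempFile, src); err != nil {
            panic(err)
        }
        if _, err := tempFile.Seek(0, io.SeekStart); err != nil {
            panic(err)
        }

        // hash the full content (the removed fast path hashed only the first 10 MiB)
        h := md5.New()
        if _, err := io.Copy(h, tempFile); err != nil {
            panic(err)
        }
        etag := hex.EncodeToString(h.Sum(nil))

        // rewind again before the upload would read tempFile
        if _, err := tempFile.Seek(0, io.SeekStart); err != nil {
            panic(err)
        }
        fmt.Println("etag:", etag)
    }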
@@ -11,7 +11,6 @@ type Addition struct {
 	driver.RootID
 	OrderBy        string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	StreamUpload   bool   `json:"stream_upload"`
 	AccessToken    string
 }
@@ -1,7 +1,10 @@
 package _123

 import (
+	"net/url"
+	"path"
 	"strconv"
+	"strings"
 	"time"

 	"github.com/alist-org/alist/v3/internal/model"
@@ -42,7 +45,30 @@ func (f File) GetID() string {
 	return strconv.FormatInt(f.FileId, 10)
 }

+func (f File) Thumb() string {
+	if f.DownloadUrl == "" {
+		return ""
+	}
+	du, err := url.Parse(f.DownloadUrl)
+	if err != nil {
+		return ""
+	}
+	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
+	query := du.Query()
+	query.Set("w", "70")
+	query.Set("h", "70")
+	if !query.Has("type") {
+		query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
+	}
+	if !query.Has("trade_key") {
+		query.Set("trade_key", "123pan-thumbnail")
+	}
+	du.RawQuery = query.Encode()
+	return du.String()
+}
+
 var _ model.Obj = (*File)(nil)
+var _ model.Thumb = (*File)(nil)

 //func (f File) Thumb() string {
 //
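A stand-alone sketch of the same thumbnail URL rewrite, run against a hypothetical `DownloadUrl` (real values come from the 123pan API): the `_24_24` size suffix is swapped for `_70_70` and the query is forced to request a 70x70 thumbnail.

    package main

    import (
        "fmt"
        "net/url"
        "path"
        "strings"
    )

    func thumb(downloadUrl, fileName string) string {
        if downloadUrl == "" {
            return ""
        }
        du, err := url.Parse(downloadUrl)
        if err != nil {
            return ""
        }
        du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
        q := du.Query()
        q.Set("w", "70")
        q.Set("h", "70")
        if !q.Has("type") {
            q.Set("type", strings.TrimPrefix(path.Base(fileName), "."))
        }
        if !q.Has("trade_key") {
            q.Set("trade_key", "123pan-thumbnail")
        }
        du.RawQuery = q.Encode()
        return du.String()
    }

    func main() {
        // hypothetical input
        fmt.Println(thumb("https://example.com/t/abc_24_24?s=1", "photo.jpg"))
    }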
@ -34,6 +34,25 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star
|
|||||||
return &s3PreSignedUrls, nil
|
return &s3PreSignedUrls, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
|
||||||
|
data := base.Json{
|
||||||
|
"StorageNode": upReq.Data.StorageNode,
|
||||||
|
"bucket": upReq.Data.Bucket,
|
||||||
|
"key": upReq.Data.Key,
|
||||||
|
"partNumberEnd": end,
|
||||||
|
"partNumberStart": start,
|
||||||
|
"uploadId": upReq.Data.UploadId,
|
||||||
|
}
|
||||||
|
var s3PreSignedUrls S3PreSignedURLs
|
||||||
|
_, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(data).SetContext(ctx)
|
||||||
|
}, &s3PreSignedUrls)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &s3PreSignedUrls, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
|
func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
|
||||||
data := base.Json{
|
data := base.Json{
 			"StorageNode":  upReq.Data.StorageNode,
@@ -51,11 +70,17 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 }
 
 func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
-	chunkSize := int64(1024 * 1024 * 5)
+	chunkSize := int64(1024 * 1024 * 16)
 	// fetch s3 pre signed urls
 	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
-	// upload 10 chunks each batch
-	batchSize := 10
+	// only 1 batch is allowed
+	isMultipart := chunkCount > 1
+	batchSize := 1
+	getS3UploadUrl := d.getS3Auth
+	if isMultipart {
+		batchSize = 10
+		getS3UploadUrl = d.getS3PreSignedUrls
+	}
 	for i := 1; i <= chunkCount; i += batchSize {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -65,7 +90,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		if end > chunkCount+1 {
 			end = chunkCount + 1
 		}
-		s3PreSignedUrls, err := d.getS3PreSignedUrls(ctx, upReq, start, end)
+		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
 		if err != nil {
 			return err
 		}
@@ -78,7 +103,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 			if j == chunkCount {
 				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 			}
-			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false)
+			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
 			if err != nil {
 				return err
 			}
@@ -89,7 +114,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 	return d.completeS3(ctx, upReq, file, chunkCount > 1)
 }
 
-func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool) error {
+func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
 	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
 	if uploadUrl == "" {
 		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
@@ -111,13 +136,13 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
 			return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
 		}
 		// refresh s3 pre signed urls
-		newS3PreSignedUrls, err := d.getS3PreSignedUrls(ctx, upReq, cur, end)
+		newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
 		if err != nil {
 			return err
 		}
 		s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
 		// retry
-		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true)
+		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
 	}
 	if res.StatusCode != http.StatusOK {
 		body, err := io.ReadAll(res.Body)
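Note: the change above threads the URL-fetching step through newUpload and uploadS3Chunk as a function value (d.getS3Auth for single-part uploads, d.getS3PreSignedUrls for multipart), so the retry path inside uploadS3Chunk refreshes URLs from the same endpoint that issued them. A minimal, self-contained sketch of that pattern; the names below are illustrative, not the driver's real API:

    package main

    import (
        "context"
        "fmt"
    )

    // fetchUrls abstracts over the two endpoints; callers pick one once
    // and every retry path reuses the same choice.
    type fetchUrls func(ctx context.Context, start, end int) ([]string, error)

    func singleURL(ctx context.Context, start, end int) ([]string, error) {
        return []string{"https://example.com/auth-upload"}, nil // hypothetical endpoint
    }

    func presignedURLs(ctx context.Context, start, end int) ([]string, error) {
        urls := make([]string, 0, end-start)
        for i := start; i < end; i++ {
            urls = append(urls, fmt.Sprintf("https://example.com/part/%d", i))
        }
        return urls, nil
    }

    func upload(ctx context.Context, chunkCount int) error {
        get := singleURL // single-part: one URL, one batch
        if chunkCount > 1 {
            get = presignedURLs // multipart: pre-signed URL per chunk
        }
        urls, err := get(ctx, 1, chunkCount+1)
        if err != nil {
            return err
        }
        fmt.Println(urls)
        return nil
    }

    func main() { _ = upload(context.Background(), 3) }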
@@ -15,10 +15,12 @@ import (
 // do others that not defined in Driver interface
 
 const (
+	Api          = "https://www.123pan.com/api"
 	AApi         = "https://www.123pan.com/a/api"
 	BApi         = "https://www.123pan.com/b/api"
-	MainApi      = AApi
+	MainApi      = Api
 	SignIn       = MainApi + "/user/sign_in"
+	Logout       = MainApi + "/user/logout"
 	UserInfo     = MainApi + "/user/info"
 	FileList     = MainApi + "/file/list/new"
 	DownloadInfo = MainApi + "/file/download_info"
@@ -32,6 +34,7 @@ const (
 	S3Auth           = MainApi + "/file/s3_upload_object/auth"
 	UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
 	S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
+	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
 )
 
 func (d *Pan123) login() error {
@@ -50,6 +53,14 @@ func (d *Pan123) login() error {
 		}
 	}
 	res, err := base.RestyClient.R().
+		SetHeaders(map[string]string{
+			"origin":      "https://www.123pan.com",
+			"referer":     "https://www.123pan.com/",
+			"user-agent":  "Dart/2.19(dart:io)",
+			"platform":    "android",
+			"app-version": "36",
+			//"user-agent": base.UserAgent,
+		}).
 		SetBody(body).Post(SignIn)
 	if err != nil {
 		return err
@@ -62,14 +73,30 @@ func (d *Pan123) login() error {
 	return err
 }
 
+//func authKey(reqUrl string) (*string, error) {
+//	reqURL, err := url.Parse(reqUrl)
+//	if err != nil {
+//		return nil, err
+//	}
+//
+//	nowUnix := time.Now().Unix()
+//	random := rand.Intn(0x989680)
+//
+//	p4 := fmt.Sprintf("%d|%d|%s|%s|%s|%s", nowUnix, random, reqURL.Path, "web", "3", AuthKeySalt)
+//	authKey := fmt.Sprintf("%d-%d-%x", nowUnix, random, md5.Sum([]byte(p4)))
+//	return &authKey, nil
+//}
 
 func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"origin":        "https://www.123pan.com",
 		"referer":       "https://www.123pan.com/",
 		"authorization": "Bearer " + d.AccessToken,
-		"platform":      "web",
-		"app-version":   "1.2",
+		"user-agent":    "Dart/2.19(dart:io)",
+		"platform":      "android",
+		"app-version":   "36",
+		//"user-agent":  base.UserAgent,
 	})
 	if callback != nil {
 		callback(req)
@@ -77,6 +104,11 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r
 	if resp != nil {
 		req.SetResult(resp)
 	}
+	//authKey, err := authKey(url)
+	//if err != nil {
+	//	return nil, err
+	//}
+	//req.SetQueryParam("auth-key", *authKey)
 	res, err := req.Execute(method, url)
 	if err != nil {
 		return nil, err
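Note: the legacy auth-key derivation is kept only as a comment in the diff above. For reference, a runnable transcription of that commented-out code, assuming the AuthKeySalt constant shown above; whether the endpoint still accepts this key is not established by this diff:

    package main

    import (
        "crypto/md5"
        "fmt"
        "math/rand"
        "net/url"
        "time"
    )

    const AuthKeySalt = "8-8D$sL8gPjom7bk#cY" // kept commented out in the driver

    // authKey mirrors the commented-out derivation: md5 over
    // "unix|random|path|web|3|salt", rendered as "unix-random-md5hex".
    func authKey(reqUrl string) (string, error) {
        reqURL, err := url.Parse(reqUrl)
        if err != nil {
            return "", err
        }
        nowUnix := time.Now().Unix()
        random := rand.Intn(0x989680)
        p4 := fmt.Sprintf("%d|%d|%s|%s|%s|%s", nowUnix, random, reqURL.Path, "web", "3", AuthKeySalt)
        return fmt.Sprintf("%d-%d-%x", nowUnix, random, md5.Sum([]byte(p4))), nil
    }

    func main() {
        k, _ := authKey("https://www.123pan.com/api/user/sign_in")
        fmt.Println(k)
    }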
149 drivers/123_share/driver.go (new file)
@@ -0,0 +1,149 @@
+package _123Share
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
+)
+
+type Pan123Share struct {
+	model.Storage
+	Addition
+}
+
+func (d *Pan123Share) Config() driver.Config {
+	return config
+}
+
+func (d *Pan123Share) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *Pan123Share) Init(ctx context.Context) error {
+	// TODO login / refresh token
+	//op.MustSaveDriverStorage(d)
+	return nil
+}
+
+func (d *Pan123Share) Drop(ctx context.Context) error {
+	return nil
+}
+
+func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	// TODO return the files list, required
+	files, err := d.getFiles(dir.GetID())
+	if err != nil {
+		return nil, err
+	}
+	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
+		return src, nil
+	})
+}
+
+func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	// TODO return link of file, required
+	if f, ok := file.(File); ok {
+		//var resp DownResp
+		var headers map[string]string
+		if !utils.IsLocalIPAddr(args.IP) {
+			headers = map[string]string{
+				//"X-Real-IP":       "1.1.1.1",
+				"X-Forwarded-For": args.IP,
+			}
+		}
+		data := base.Json{
+			"shareKey":  d.ShareKey,
+			"SharePwd":  d.SharePwd,
+			"etag":      f.Etag,
+			"fileId":    f.FileId,
+			"s3keyFlag": f.S3KeyFlag,
+			"size":      f.Size,
+		}
+		resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
+			req.SetBody(data).SetHeaders(headers)
+		}, nil)
+		if err != nil {
+			return nil, err
+		}
+		downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
+		u, err := url.Parse(downloadUrl)
+		if err != nil {
+			return nil, err
+		}
+		nu := u.Query().Get("params")
+		if nu != "" {
+			du, _ := base64.StdEncoding.DecodeString(nu)
+			u, err = url.Parse(string(du))
+			if err != nil {
+				return nil, err
+			}
+		}
+		u_ := u.String()
+		log.Debug("download url: ", u_)
+		res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
+		if err != nil {
+			return nil, err
+		}
+		log.Debug(res.String())
+		link := model.Link{
+			URL: u_,
+		}
+		log.Debugln("res code: ", res.StatusCode())
+		if res.StatusCode() == 302 {
+			link.URL = res.Header().Get("location")
+		} else if res.StatusCode() < 300 {
+			link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
+		}
+		link.Header = http.Header{
+			"Referer": []string{"https://www.123pan.com/"},
+		}
+		return &link, nil
+	}
+	return nil, fmt.Errorf("can't convert obj")
+}
+
+func (d *Pan123Share) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	// TODO create folder, optional
+	return errs.NotSupport
+}
+
+func (d *Pan123Share) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	// TODO move obj, optional
+	return errs.NotSupport
+}
+
+func (d *Pan123Share) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	// TODO rename obj, optional
+	return errs.NotSupport
+}
+
+func (d *Pan123Share) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	// TODO copy obj, optional
+	return errs.NotSupport
+}
+
+func (d *Pan123Share) Remove(ctx context.Context, obj model.Obj) error {
+	// TODO remove obj, optional
+	return errs.NotSupport
+}
+
+func (d *Pan123Share) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+	// TODO upload file, optional
+	return errs.NotSupport
+}
+
+//func (d *Pan123Share) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
+//	return nil, errs.NotSupport
+//}
+
+var _ driver.Driver = (*Pan123Share)(nil)
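Note: Link() above handles an indirection in the share download URL: the real target may arrive base64-encoded in a `params` query value. A standalone sketch of just that decode step; the sample URL is made up:

    package main

    import (
        "encoding/base64"
        "fmt"
        "net/url"
    )

    // resolveDownloadURL applies the same trick as Link: if the URL carries
    // a base64-encoded "params" query value, that value is the real URL.
    func resolveDownloadURL(raw string) (string, error) {
        u, err := url.Parse(raw)
        if err != nil {
            return "", err
        }
        if nu := u.Query().Get("params"); nu != "" {
            du, err := base64.StdEncoding.DecodeString(nu)
            if err != nil {
                return "", err
            }
            if inner, err := url.Parse(string(du)); err == nil {
                return inner.String(), nil
            }
        }
        return u.String(), nil
    }

    func main() {
        inner := base64.StdEncoding.EncodeToString([]byte("https://cdn.example.com/file.bin"))
        out, _ := resolveDownloadURL("https://www.123pan.com/dl?params=" + inner)
        fmt.Println(out) // https://cdn.example.com/file.bin
    }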
34 drivers/123_share/meta.go (new file)
@@ -0,0 +1,34 @@
+package _123Share
+
+import (
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+	ShareKey string `json:"sharekey" required:"true"`
+	SharePwd string `json:"sharepassword" required:"true"`
+	driver.RootID
+	OrderBy        string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
+	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
+}
+
+var config = driver.Config{
+	Name:              "123PanShare",
+	LocalSort:         true,
+	OnlyLocal:         false,
+	OnlyProxy:         false,
+	NoCache:           false,
+	NoUpload:          true,
+	NeedMs:            false,
+	DefaultRoot:       "0",
+	CheckStatus:       false,
+	Alert:             "",
+	NoOverwriteUpload: false,
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &Pan123Share{}
+	})
+}
91 drivers/123_share/types.go (new file)
@@ -0,0 +1,91 @@
+package _123Share
+
+import (
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/internal/model"
+)
+
+type File struct {
+	FileName    string    `json:"FileName"`
+	Size        int64     `json:"Size"`
+	UpdateAt    time.Time `json:"UpdateAt"`
+	FileId      int64     `json:"FileId"`
+	Type        int       `json:"Type"`
+	Etag        string    `json:"Etag"`
+	S3KeyFlag   string    `json:"S3KeyFlag"`
+	DownloadUrl string    `json:"DownloadUrl"`
+}
+
+func (f File) GetPath() string {
+	return ""
+}
+
+func (f File) GetSize() int64 {
+	return f.Size
+}
+
+func (f File) GetName() string {
+	return f.FileName
+}
+
+func (f File) ModTime() time.Time {
+	return f.UpdateAt
+}
+
+func (f File) IsDir() bool {
+	return f.Type == 1
+}
+
+func (f File) GetID() string {
+	return strconv.FormatInt(f.FileId, 10)
+}
+
+func (f File) Thumb() string {
+	if f.DownloadUrl == "" {
+		return ""
+	}
+	du, err := url.Parse(f.DownloadUrl)
+	if err != nil {
+		return ""
+	}
+	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
+	query := du.Query()
+	query.Set("w", "70")
+	query.Set("h", "70")
+	if !query.Has("type") {
+		query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
+	}
+	if !query.Has("trade_key") {
+		query.Set("trade_key", "123pan-thumbnail")
+	}
+	du.RawQuery = query.Encode()
+	return du.String()
+}
+
+var _ model.Obj = (*File)(nil)
+var _ model.Thumb = (*File)(nil)
+
+//func (f File) Thumb() string {
+//
+//}
+//var _ model.Thumb = (*File)(nil)
+
+type Files struct {
+	//BaseResp
+	Data struct {
+		InfoList []File `json:"InfoList"`
+		Next     string `json:"Next"`
+	} `json:"data"`
+}
+
+//type DownResp struct {
+//	//BaseResp
+//	Data struct {
+//		DownloadUrl string `json:"DownloadUrl"`
+//	} `json:"data"`
+//}
81 drivers/123_share/util.go (new file)
@@ -0,0 +1,81 @@
+package _123Share
+
+import (
+	"errors"
+	"net/http"
+	"strconv"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+	jsoniter "github.com/json-iterator/go"
+)
+
+const (
+	Api          = "https://www.123pan.com/api"
+	AApi         = "https://www.123pan.com/a/api"
+	BApi         = "https://www.123pan.com/b/api"
+	MainApi      = Api
+	FileList     = MainApi + "/share/get"
+	DownloadInfo = MainApi + "/share/download/info"
+	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
+)
+
+func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+	req := base.RestyClient.R()
+	req.SetHeaders(map[string]string{
+		"origin":      "https://www.123pan.com",
+		"referer":     "https://www.123pan.com/",
+		"user-agent":  "Dart/2.19(dart:io)",
+		"platform":    "android",
+		"app-version": "36",
+	})
+	if callback != nil {
+		callback(req)
+	}
+	if resp != nil {
+		req.SetResult(resp)
+	}
+	res, err := req.Execute(method, url)
+	if err != nil {
+		return nil, err
+	}
+	body := res.Body()
+	code := utils.Json.Get(body, "code").ToInt()
+	if code != 0 {
+		return nil, errors.New(jsoniter.Get(body, "message").ToString())
+	}
+	return body, nil
+}
+
+func (d *Pan123Share) getFiles(parentId string) ([]File, error) {
+	page := 1
+	res := make([]File, 0)
+	for {
+		var resp Files
+		query := map[string]string{
+			"limit":          "100",
+			"next":           "0",
+			"orderBy":        d.OrderBy,
+			"orderDirection": d.OrderDirection,
+			"parentFileId":   parentId,
+			"Page":           strconv.Itoa(page),
+			"shareKey":       d.ShareKey,
+			"SharePwd":       d.SharePwd,
+		}
+		_, err := d.request(FileList, http.MethodGet, func(req *resty.Request) {
+			req.SetQueryParams(query)
+		}, &resp)
+		if err != nil {
+			return nil, err
+		}
+		page++
+		res = append(res, resp.Data.InfoList...)
+		if len(resp.Data.InfoList) == 0 || resp.Data.Next == "-1" {
+			break
+		}
+	}
+	return res, nil
+}
+
+// do others that not defined in Driver interface
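Note: getFiles above pages with a 1-based `Page` counter while `next` stays pinned to "0", stopping on an empty page or `Next == "-1"`. The shape of that loop, reduced to a self-contained sketch over a hypothetical paged fetcher:

    package main

    import "fmt"

    // sharePage is a stand-in for one Files response page.
    type sharePage struct {
        items []string
        next  string
    }

    // fetchAll drains a paged listing the way getFiles does: bump the
    // 1-based page counter, append, stop on an empty page or next == "-1".
    func fetchAll(fetch func(n int) (sharePage, error)) ([]string, error) {
        var res []string
        for n := 1; ; n++ {
            pg, err := fetch(n)
            if err != nil {
                return nil, err
            }
            res = append(res, pg.items...)
            if len(pg.items) == 0 || pg.next == "-1" {
                break
            }
        }
        return res, nil
    }

    func main() {
        pages := []sharePage{{[]string{"a", "b"}, "2"}, {[]string{"c"}, "-1"}}
        all, _ := fetchAll(func(n int) (sharePage, error) { return pages[n-1], nil })
        fmt.Println(all) // [a b c]
    }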
@@ -300,6 +300,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 
 	var partSize = getPartSize(stream.GetSize())
 	part := (stream.GetSize() + partSize - 1) / partSize
+	if part == 0 {
+		part = 1
+	}
 	for i := int64(0); i < part; i++ {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
@@ -331,13 +334,11 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		if err != nil {
 			return err
 		}
+		_ = res.Body.Close()
 		log.Debugf("%+v", res)
-
 		if res.StatusCode != http.StatusOK {
 			return fmt.Errorf("unexpected status code: %d", res.StatusCode)
 		}
-
-		res.Body.Close()
 	}
 
 	return nil
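Note: the `part == 0` guard above fixes zero-byte uploads. With part = (size + partSize - 1) / partSize, a 0-byte stream gives part = 0, so the loop body never ran and no request was ever sent; forcing part = 1 uploads a single empty chunk. The res.Body.Close() move also closes the response before the status check instead of leaking it on the error path.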
@@ -42,8 +42,8 @@ func calSign(body, ts, randStr string) string {
 	sort.Strings(strs)
 	body = strings.Join(strs, "")
 	body = base64.StdEncoding.EncodeToString([]byte(body))
-	res := utils.GetMD5Encode(body) + utils.GetMD5Encode(ts+":"+randStr)
-	res = strings.ToUpper(utils.GetMD5Encode(res))
+	res := utils.GetMD5EncodeStr(body) + utils.GetMD5EncodeStr(ts+":"+randStr)
+	res = strings.ToUpper(utils.GetMD5EncodeStr(res))
 	return res
 }
 
@@ -385,7 +385,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
 	sliceMd5 := fileMd5
 	if file.GetSize() > DEFAULT {
-		sliceMd5 = utils.GetMD5Encode(strings.Join(md5s, "\n"))
+		sliceMd5 = utils.GetMD5EncodeStr(strings.Join(md5s, "\n"))
 	}
 	res, err = d.uploadRequest("/person/commitMultiUploadFile", map[string]string{
 		"uploadFileId": uploadFileId,
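Note: only the helper name changes here (GetMD5Encode → GetMD5EncodeStr), but the signing scheme is easier to audit inline. A self-contained rendering of the visible tail of calSign; how `strs` is built lies outside the hunk, so it is taken as given:

    package main

    import (
        "crypto/md5"
        "encoding/base64"
        "encoding/hex"
        "fmt"
        "strings"
    )

    func md5Hex(s string) string {
        h := md5.Sum([]byte(s))
        return hex.EncodeToString(h[:])
    }

    // signTail reproduces the visible tail of calSign: base64 the
    // (already sorted and joined) body, concatenate md5(body) with
    // md5(ts:randStr), md5 the result again, and uppercase it.
    func signTail(body, ts, randStr string) string {
        body = base64.StdEncoding.EncodeToString([]byte(body))
        res := md5Hex(body) + md5Hex(ts+":"+randStr)
        return strings.ToUpper(md5Hex(res))
    }

    func main() {
        fmt.Println(signTail("sortedbody", "1690000000", "abc123"))
    }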
@@ -3,10 +3,13 @@ package _189pc
 import (
 	"context"
 	"net/http"
+	"strconv"
 	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
@@ -22,6 +25,8 @@ type Cloud189PC struct {
 
 	loginParam *LoginParam
 	tokenInfo  *AppSessionResp
+
+	uploadThread int
 }
 
 func (y *Cloud189PC) Config() driver.Config {
@@ -42,6 +47,12 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
 		y.FamilyID = ""
 	}
 
+	// 限制上传线程数 (cap the upload thread count)
+	y.uploadThread, _ = strconv.Atoi(y.UploadThread)
+	if y.uploadThread < 1 || y.uploadThread > 32 {
+		y.uploadThread, y.UploadThread = 3, "3"
+	}
+
 	// 初始化请求客户端 (initialize the request client)
 	if y.client == nil {
 		y.client = base.NewRestyClient().SetHeaders(map[string]string{
@@ -51,7 +62,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
 	}
 
 	// 避免重复登陆 (avoid logging in twice)
-	identity := utils.GetMD5Encode(y.Username + y.Password)
+	identity := utils.GetMD5EncodeStr(y.Username + y.Password)
 	if !y.isLogin() || y.identity != identity {
 		y.identity = identity
 		if err = y.login(); err != nil {
@@ -135,13 +146,14 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
 	return like, nil
 }
 
-func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
 	fullUrl := API_URL
 	if y.isFamily() {
 		fullUrl += "/family/file"
 	}
 	fullUrl += "/createFolder.action"
+
+	var newFolder Cloud189Folder
 	_, err := y.post(fullUrl, func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetQueryParams(map[string]string{
@@ -158,11 +170,15 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
 				"parentFolderId": parentDir.GetID(),
 			})
 		}
-	}, nil)
-	return err
+	}, &newFolder)
+	if err != nil {
+		return nil, err
+	}
+	return &newFolder, nil
 }
 
-func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	var resp CreateBatchTaskResp
 	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetFormData(map[string]string{
@@ -182,11 +198,17 @@ func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.
 				"familyId": y.FamilyID,
 			})
 		}
-	}, nil)
-	return err
+	}, &resp)
+	if err != nil {
+		return nil, err
+	}
+	if err = y.WaitBatchTask("MOVE", resp.TaskID, time.Millisecond*400); err != nil {
+		return nil, err
+	}
+	return srcObj, nil
 }
 
-func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
 	queryParam := make(map[string]string)
 	fullUrl := API_URL
 	method := http.MethodPost
@@ -195,23 +217,34 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
 		method = http.MethodGet
 		queryParam["familyId"] = y.FamilyID
 	}
-	if srcObj.IsDir() {
-		fullUrl += "/renameFolder.action"
-		queryParam["folderId"] = srcObj.GetID()
-		queryParam["destFolderName"] = newName
-	} else {
+
+	var newObj model.Obj
+	switch f := srcObj.(type) {
+	case *Cloud189File:
 		fullUrl += "/renameFile.action"
 		queryParam["fileId"] = srcObj.GetID()
 		queryParam["destFileName"] = newName
+		newObj = &Cloud189File{Icon: f.Icon} // 复用预览 (reuse the preview icon)
+	case *Cloud189Folder:
+		fullUrl += "/renameFolder.action"
+		queryParam["folderId"] = srcObj.GetID()
+		queryParam["destFolderName"] = newName
+		newObj = &Cloud189Folder{}
+	default:
+		return nil, errs.NotSupport
 	}
+
 	_, err := y.request(fullUrl, method, func(req *resty.Request) {
-		req.SetContext(ctx)
-		req.SetQueryParams(queryParam)
-	}, nil, nil)
-	return err
+		req.SetContext(ctx).SetQueryParams(queryParam)
+	}, nil, newObj)
+	if err != nil {
+		return nil, err
+	}
+	return newObj, nil
 }
 
 func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	var resp CreateBatchTaskResp
 	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetFormData(map[string]string{
@@ -232,11 +265,15 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 				"familyId": y.FamilyID,
 			})
 		}
-	}, nil)
-	return err
+	}, &resp)
+	if err != nil {
+		return err
+	}
+	return y.WaitBatchTask("COPY", resp.TaskID, time.Second)
 }
 
 func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
+	var resp CreateBatchTaskResp
 	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
 		req.SetContext(ctx)
 		req.SetFormData(map[string]string{
@@ -256,19 +293,26 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
 				"familyId": y.FamilyID,
 			})
 		}
-	}, nil)
-	return err
+	}, &resp)
+	if err != nil {
+		return err
+	}
+	// 批量任务数量限制,过快会导致无法删除 (batch tasks are rate-limited; deleting too fast makes deletion fail)
+	return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
 }
 
-func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	switch y.UploadMethod {
-	case "stream":
-		return y.CommonUpload(ctx, dstDir, stream, up)
 	case "old":
 		return y.OldUpload(ctx, dstDir, stream, up)
 	case "rapid":
		return y.FastUpload(ctx, dstDir, stream, up)
+	case "stream":
+		if stream.GetSize() == 0 {
+			return y.FastUpload(ctx, dstDir, stream, up)
+		}
+		fallthrough
 	default:
-		return y.CommonUpload(ctx, dstDir, stream, up)
+		return y.StreamUpload(ctx, dstDir, stream, up)
 	}
 }
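Note: Move/Copy/Remove now block on y.WaitBatchTask with per-operation poll intervals (400 ms for MOVE, 1 s for COPY, 200 ms for DELETE; the comment warns that deleting too fast fails). WaitBatchTask itself is not part of this diff; the sketch below is a hypothetical shape consistent with the BatchTaskStateResp added in types.go, treating status 4 as done and 2 as conflict:

    package main

    import (
        "fmt"
        "time"
    )

    // BatchTaskStateResp mirrors the struct added in types.go.
    type BatchTaskStateResp struct {
        TaskID     string `json:"taskId"`
        TaskStatus int    `json:"taskStatus"` // 1 init, 2 conflict, 3 running, 4 done
    }

    // waitBatchTask is a hypothetical stand-in for the WaitBatchTask the
    // diff calls but does not define: poll until status 4, erroring on 2.
    func waitBatchTask(poll func() (*BatchTaskStateResp, error), every time.Duration) error {
        for {
            state, err := poll()
            if err != nil {
                return err
            }
            switch state.TaskStatus {
            case 2:
                return fmt.Errorf("batch task %s: conflict", state.TaskID)
            case 4:
                return nil
            }
            time.Sleep(every)
        }
    }

    func main() {
        n := 0
        err := waitBatchTask(func() (*BatchTaskStateResp, error) {
            n++
            s := 3
            if n >= 3 {
                s = 4 // pretend the task finishes on the third poll
            }
            return &BatchTaskStateResp{TaskID: "t1", TaskStatus: s}, nil
        }, 10*time.Millisecond)
        fmt.Println(err) // <nil>
    }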
@@ -10,6 +10,7 @@ import (
 	"crypto/x509"
 	"encoding/hex"
 	"encoding/pem"
+	"encoding/xml"
 	"fmt"
 	"math"
 	"net/http"
@@ -83,6 +84,55 @@ func MustParseTime(str string) *time.Time {
 	return &lastOpTime
 }
 
+type Time time.Time
+
+func (t *Time) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
+func (t *Time) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
+	b, err := e.Token()
+	if err != nil {
+		return err
+	}
+	if b, ok := b.(xml.CharData); ok {
+		if err = t.Unmarshal(b); err != nil {
+			return err
+		}
+	}
+	return e.Skip()
+}
+func (t *Time) Unmarshal(b []byte) error {
+	bs := strings.Trim(string(b), "\"")
+	var v time.Time
+	var err error
+	for _, f := range []string{"2006-01-02 15:04:05 -07", "Jan 2, 2006 15:04:05 PM -07"} {
+		v, err = time.ParseInLocation(f, bs+" +08", time.Local)
+		if err == nil {
+			break
+		}
+	}
+	*t = Time(v)
+	return err
+}
+
+type String string
+
+func (t *String) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
+func (t *String) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
+	b, err := e.Token()
+	if err != nil {
+		return err
+	}
+	if b, ok := b.(xml.CharData); ok {
+		if err = t.Unmarshal(b); err != nil {
+			return err
+		}
+	}
+	return e.Skip()
+}
+func (s *String) Unmarshal(b []byte) error {
+	*s = String(bytes.Trim(b, "\""))
+	return nil
+}
+
 func toFamilyOrderBy(o string) string {
 	switch o {
 	case "filename":
@@ -110,9 +160,8 @@ func toDesc(o string) string {
 func ParseHttpHeader(str string) map[string]string {
 	header := make(map[string]string)
 	for _, value := range strings.Split(str, "&") {
-		i := strings.Index(value, "=")
-		if i > 0 {
-			header[strings.TrimSpace(value[0:i])] = strings.TrimSpace(value[i+1:])
+		if k, v, found := strings.Cut(value, "="); found {
+			header[k] = v
 		}
 	}
 	return header
@@ -122,10 +171,6 @@ func MustString(str string, err error) string {
 	return str
 }
 
-func MustToBytes(b []byte, err error) []byte {
-	return b
-}
-
 func BoolToNumber(b bool) int {
 	if b {
 		return 1
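Note: the new Time and String wrappers let one struct decode both the JSON and XML payloads of the 189 API, where ids arrive sometimes quoted and sometimes bare, and timestamps come in two formats pinned implicitly to UTC+8. A trimmed, JSON-only demonstration of the same idea:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "strings"
        "time"
    )

    // Same idea as the driver's wrappers, reduced to JSON for brevity.
    type String string

    func (s *String) UnmarshalJSON(b []byte) error {
        *s = String(bytes.Trim(b, "\"")) // accepts both 123 and "123"
        return nil
    }

    type Time time.Time

    func (t *Time) UnmarshalJSON(b []byte) error {
        bs := strings.Trim(string(b), "\"")
        v, err := time.ParseInLocation("2006-01-02 15:04:05 -07", bs+" +08", time.Local)
        *t = Time(v)
        return err
    }

    type file struct {
        ID         String `json:"id"`
        LastOpTime Time   `json:"lastOpTime"`
    }

    func main() {
        // The id decodes whether or not the API quotes it.
        var f file
        _ = json.Unmarshal([]byte(`{"id":123,"lastOpTime":"2023-07-01 10:00:00"}`), &f)
        fmt.Println(f.ID, time.Time(f.LastOpTime).Format(time.RFC3339))
    }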
@@ -15,6 +15,7 @@ type Addition struct {
 	Type         string `json:"type" type:"select" options:"personal,family" default:"personal"`
 	FamilyID     string `json:"family_id"`
 	UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 	NoUseOcr     bool   `json:"no_use_ocr"`
 }
@@ -151,8 +151,13 @@ type FamilyInfoResp struct {
 /*文件部分*/ (file section)
 // 文件 (file)
 type Cloud189File struct {
-	CreateDate string `json:"createDate"`
-	FileCata   int64  `json:"fileCata"`
+	ID   String `json:"id"`
+	Name string `json:"name"`
+	Size int64  `json:"size"`
+	Md5  string `json:"md5"`
+
+	LastOpTime Time `json:"lastOpTime"`
+	CreateDate Time `json:"createDate"`
 	Icon       struct {
 		//iconOption 5
 		SmallUrl string `json:"smallUrl"`
@@ -162,62 +167,44 @@ type Cloud189File struct {
 		Max600    string `json:"max600"`
 		MediumURL string `json:"mediumUrl"`
 	} `json:"icon"`
-	ID          int64  `json:"id"`
-	LastOpTime  string `json:"lastOpTime"`
-	Md5         string `json:"md5"`
-	MediaType   int    `json:"mediaType"`
-	Name        string `json:"name"`
-	Orientation int64  `json:"orientation"`
-	Rev         string `json:"rev"`
-	Size        int64  `json:"size"`
-	StarLabel   int64  `json:"starLabel"`
 
-	parseTime *time.Time
+	// Orientation int64  `json:"orientation"`
+	// FileCata    int64  `json:"fileCata"`
+	// MediaType   int    `json:"mediaType"`
+	// Rev         string `json:"rev"`
+	// StarLabel   int64  `json:"starLabel"`
 }
 
 func (c *Cloud189File) GetSize() int64  { return c.Size }
 func (c *Cloud189File) GetName() string { return c.Name }
-func (c *Cloud189File) ModTime() time.Time {
-	if c.parseTime == nil {
-		c.parseTime = MustParseTime(c.LastOpTime)
-	}
-	return *c.parseTime
-}
-func (c *Cloud189File) IsDir() bool     { return false }
-func (c *Cloud189File) GetID() string   { return fmt.Sprint(c.ID) }
-func (c *Cloud189File) GetPath() string { return "" }
-func (c *Cloud189File) Thumb() string   { return c.Icon.SmallUrl }
+func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
+func (c *Cloud189File) IsDir() bool        { return false }
+func (c *Cloud189File) GetID() string      { return string(c.ID) }
+func (c *Cloud189File) GetPath() string    { return "" }
+func (c *Cloud189File) Thumb() string      { return c.Icon.SmallUrl }
 
 // 文件夹 (folder)
 type Cloud189Folder struct {
-	ID       int64  `json:"id"`
+	ID       String `json:"id"`
 	ParentID int64  `json:"parentId"`
 	Name     string `json:"name"`
 
-	FileCata  int64 `json:"fileCata"`
-	FileCount int64 `json:"fileCount"`
-
-	LastOpTime string `json:"lastOpTime"`
-	CreateDate string `json:"createDate"`
-
-	FileListSize int64  `json:"fileListSize"`
-	Rev          string `json:"rev"`
-	StarLabel    int64  `json:"starLabel"`
-
-	parseTime *time.Time
+	LastOpTime Time `json:"lastOpTime"`
+	CreateDate Time `json:"createDate"`
+
+	// FileListSize int64  `json:"fileListSize"`
+	// FileCount    int64  `json:"fileCount"`
+	// FileCata     int64  `json:"fileCata"`
+	// Rev          string `json:"rev"`
+	// StarLabel    int64  `json:"starLabel"`
 }
 
 func (c *Cloud189Folder) GetSize() int64  { return 0 }
 func (c *Cloud189Folder) GetName() string { return c.Name }
-func (c *Cloud189Folder) ModTime() time.Time {
-	if c.parseTime == nil {
-		c.parseTime = MustParseTime(c.LastOpTime)
-	}
-	return *c.parseTime
-}
-func (c *Cloud189Folder) IsDir() bool     { return true }
-func (c *Cloud189Folder) GetID() string   { return fmt.Sprint(c.ID) }
-func (c *Cloud189Folder) GetPath() string { return "" }
+func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }
+func (c *Cloud189Folder) IsDir() bool        { return true }
+func (c *Cloud189Folder) GetID() string      { return string(c.ID) }
+func (c *Cloud189Folder) GetPath() string    { return "" }
 
 type Cloud189FilesResp struct {
 	//ResCode int `json:"res_code"`
@@ -252,14 +239,25 @@ type InitMultiUploadResp struct {
 	} `json:"data"`
 }
 type UploadUrlsResp struct {
 	Code string `json:"code"`
-	UploadUrls map[string]Part `json:"uploadUrls"`
+	Data map[string]UploadUrlsData `json:"uploadUrls"`
 }
-type Part struct {
+type UploadUrlsData struct {
 	RequestURL    string `json:"requestURL"`
 	RequestHeader string `json:"requestHeader"`
 }
+
+type UploadUrlInfo struct {
+	PartNumber int
+	Headers    map[string]string
+	UploadUrlsData
+}
+
+type UploadProgress struct {
+	UploadInfo  InitMultiUploadResp
+	UploadParts []string
+}
 
 /* 第二种上传方式 */ (second upload method)
 type CreateUploadFileResp struct {
 	// 上传文件请求ID (upload-file request ID)
@@ -284,15 +282,60 @@ func (r *GetUploadFileStatusResp) GetSize() int64 {
 	return r.DataSize + r.Size
 }
 
-type CommitUploadFileResp struct {
+type CommitMultiUploadFileResp struct {
+	File struct {
+		UserFileID String `json:"userFileId"`
+		FileName   string `json:"fileName"`
+		FileSize   int64  `json:"fileSize"`
+		FileMd5    string `json:"fileMd5"`
+		CreateDate Time   `json:"createDate"`
+	} `json:"file"`
+}
+
+func (f *CommitMultiUploadFileResp) toFile() *Cloud189File {
+	return &Cloud189File{
+		ID:         f.File.UserFileID,
+		Name:       f.File.FileName,
+		Size:       f.File.FileSize,
+		Md5:        f.File.FileMd5,
+		LastOpTime: f.File.CreateDate,
+		CreateDate: f.File.CreateDate,
+	}
+}
+
+type OldCommitUploadFileResp struct {
 	XMLName    xml.Name `xml:"file"`
-	Id         string   `xml:"id"`
+	ID         String   `xml:"id"`
 	Name       string   `xml:"name"`
-	Size       string   `xml:"size"`
+	Size       int64    `xml:"size"`
 	Md5        string   `xml:"md5"`
-	CreateDate string   `xml:"createDate"`
-	Rev        string   `xml:"rev"`
-	UserId     string   `xml:"userId"`
+	CreateDate Time     `xml:"createDate"`
+}
+
+func (f *OldCommitUploadFileResp) toFile() *Cloud189File {
+	return &Cloud189File{
+		ID:         f.ID,
+		Name:       f.Name,
+		Size:       f.Size,
+		Md5:        f.Md5,
+		CreateDate: f.CreateDate,
+		LastOpTime: f.CreateDate,
+	}
+}
+
+type CreateBatchTaskResp struct {
+	TaskID string `json:"taskId"`
+}
+
+type BatchTaskStateResp struct {
+	FailedCount         int     `json:"failedCount"`
+	Process             int     `json:"process"`
+	SkipCount           int     `json:"skipCount"`
+	SubTaskCount        int     `json:"subTaskCount"`
+	SuccessedCount      int     `json:"successedCount"`
+	SuccessedFileIDList []int64 `json:"successedFileIdList"`
+	TaskID              string  `json:"taskId"`
+	TaskStatus          int     `json:"taskStatus"` //1 初始化 2 存在冲突 3 执行中 4 完成 (1 init, 2 conflict, 3 running, 4 done)
 }
 
 /* query 加密参数*/ (encrypted query parameters)
@@ -15,6 +15,8 @@ import (
 	"net/url"
 	"os"
 	"regexp"
+	"sort"
+	"strconv"
 	"strings"
 	"time"
 
@@ -24,6 +26,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/internal/setting"
+	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
 
 	"github.com/avast/retry-go"
@@ -268,7 +271,7 @@ func (y *Cloud189PC) login() (err error) {
 			"validateCode": y.VCode,
 			"captchaToken": param.CaptchaToken,
 			"returnUrl":    RETURN_URL,
-			"mailSuffix":   "@189.cn",
+			// "mailSuffix":   "@189.cn",
 			"dynamicCheck": "FALSE",
 			"clientType":   CLIENT_TYPE,
 			"cb_SaveName":  "1",
|
|||||||
}
|
}
|
||||||
|
|
||||||
// 普通上传
|
// 普通上传
|
||||||
func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
|
// 无法上传大小为0的文件
|
||||||
var DEFAULT = partSize(file.GetSize())
|
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
var count = int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
|
var sliceSize = partSize(file.GetSize())
|
||||||
|
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
|
||||||
|
lastPartSize := file.GetSize() % sliceSize
|
||||||
|
if file.GetSize() > 0 && lastPartSize == 0 {
|
||||||
|
lastPartSize = sliceSize
|
||||||
|
}
|
||||||
|
|
||||||
params := Params{
|
params := Params{
|
||||||
"parentFolderId": dstDir.GetID(),
|
"parentFolderId": dstDir.GetID(),
|
||||||
"fileName": url.QueryEscape(file.GetName()),
|
"fileName": url.QueryEscape(file.GetName()),
|
||||||
"fileSize": fmt.Sprint(file.GetSize()),
|
"fileSize": fmt.Sprint(file.GetSize()),
|
||||||
"sliceSize": fmt.Sprint(DEFAULT),
|
"sliceSize": fmt.Sprint(sliceSize),
|
||||||
"lazyCheck": "1",
|
"lazyCheck": "1",
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -457,72 +465,71 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
|
|
||||||
// 初始化上传
|
// 初始化上传
|
||||||
var initMultiUpload InitMultiUploadResp
|
var initMultiUpload InitMultiUploadResp
|
||||||
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
}, params, &initMultiUpload)
|
}, params, &initMultiUpload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
|
||||||
|
retry.Attempts(3),
|
||||||
|
retry.Delay(time.Second),
|
||||||
|
retry.DelayType(retry.BackOffDelay))
|
||||||
|
|
||||||
fileMd5 := md5.New()
|
fileMd5 := md5.New()
|
||||||
silceMd5 := md5.New()
|
silceMd5 := md5.New()
|
||||||
silceMd5Hexs := make([]string, 0, count)
|
silceMd5Hexs := make([]string, 0, count)
|
||||||
byteData := bytes.NewBuffer(make([]byte, DEFAULT))
|
|
||||||
for i := 1; i <= count; i++ {
|
for i := 1; i <= count; i++ {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(upCtx) {
|
||||||
return ctx.Err()
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
byteData := make([]byte, sliceSize)
|
||||||
|
if i == count {
|
||||||
|
byteData = byteData[:lastPartSize]
|
||||||
}
|
}
|
||||||
|
|
||||||
// 读取块
|
// 读取块
|
||||||
byteData.Reset()
|
|
||||||
silceMd5.Reset()
|
silceMd5.Reset()
|
||||||
_, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5, byteData), file, DEFAULT)
|
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
|
||||||
if err != io.EOF && err != io.ErrUnexpectedEOF && err != nil {
|
return nil, err
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// 计算块md5并进行hex和base64编码
|
// 计算块md5并进行hex和base64编码
|
||||||
md5Bytes := silceMd5.Sum(nil)
|
md5Bytes := silceMd5.Sum(nil)
|
||||||
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
|
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
|
||||||
silceMd5Base64 := base64.StdEncoding.EncodeToString(md5Bytes)
|
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
|
||||||
|
|
||||||
// 获取上传链接
|
threadG.Go(func(ctx context.Context) error {
|
||||||
var uploadUrl UploadUrlsResp
|
uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
|
||||||
_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
|
if err != nil {
|
||||||
func(req *resty.Request) {
|
return err
|
||||||
req.SetContext(ctx)
|
}
|
||||||
}, Params{
|
|
||||||
"partInfo": fmt.Sprintf("%d-%s", i, silceMd5Base64),
|
|
||||||
"uploadFileId": initMultiUpload.Data.UploadFileID,
|
|
||||||
}, &uploadUrl)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// 开始上传
|
// step.4 上传切片
|
||||||
uploadData := uploadUrl.UploadUrls[fmt.Sprint("partNumber_", i)]
|
uploadUrl := uploadUrls[0]
|
||||||
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
|
||||||
err = retry.Do(func() error {
|
if err != nil {
|
||||||
_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(byteData.Bytes()))
|
return err
|
||||||
return err
|
}
|
||||||
},
|
up(int(threadG.Success()) * 100 / count)
|
||||||
retry.Context(ctx),
|
return nil
|
||||||
retry.Attempts(3),
|
})
|
||||||
retry.Delay(time.Second),
|
}
|
||||||
retry.MaxDelay(5*time.Second))
|
if err = threadG.Wait(); err != nil {
|
||||||
if err != nil {
|
return nil, err
|
||||||
return err
|
|
||||||
}
|
|
||||||
up(int(i * 100 / count))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
||||||
sliceMd5Hex := fileMd5Hex
|
sliceMd5Hex := fileMd5Hex
|
||||||
if file.GetSize() > DEFAULT {
|
if file.GetSize() > sliceSize {
|
||||||
sliceMd5Hex = strings.ToUpper(utils.GetMD5Encode(strings.Join(silceMd5Hexs, "\n")))
|
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
|
||||||
}
|
}
|
||||||
|
|
||||||
// 提交上传
|
// 提交上传
|
||||||
|
var resp CommitMultiUploadFileResp
|
||||||
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
|
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
|
||||||
func(req *resty.Request) {
|
func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
@ -533,138 +540,211 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
|
|||||||
"lazyCheck": "1",
|
"lazyCheck": "1",
|
||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
"opertype": "3",
|
"opertype": "3",
|
||||||
}, nil)
|
}, &resp)
|
||||||
return err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.toFile(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// 快传
|
// 快传
|
||||||
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
|
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
// 需要获取完整文件md5,必须支持 io.Seek
|
// 需要获取完整文件md5,必须支持 io.Seek
|
||||||
tempFile, err := utils.CreateTempFile(file.GetReadCloser())
|
tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = tempFile.Close()
|
_ = tempFile.Close()
|
||||||
_ = os.Remove(tempFile.Name())
|
_ = os.Remove(tempFile.Name())
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var DEFAULT = partSize(file.GetSize())
|
var sliceSize = partSize(file.GetSize())
|
||||||
count := int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
|
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
|
||||||
|
lastSliceSize := file.GetSize() % sliceSize
|
||||||
|
if file.GetSize() > 0 && lastSliceSize == 0 {
|
||||||
|
lastSliceSize = sliceSize
|
||||||
|
}
|
||||||
|
|
||||||
// 优先计算所需信息
|
//step.1 优先计算所需信息
|
||||||
|
byteSize := sliceSize
|
||||||
fileMd5 := md5.New()
|
fileMd5 := md5.New()
|
||||||
silceMd5 := md5.New()
|
silceMd5 := md5.New()
|
||||||
silceMd5Hexs := make([]string, 0, count)
|
silceMd5Hexs := make([]string, 0, count)
|
||||||
silceMd5Base64s := make([]string, 0, count)
|
partInfos := make([]string, 0, count)
|
||||||
for i := 1; i <= count; i++ {
|
for i := 1; i <= count; i++ {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return nil, ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
if i == count {
|
||||||
|
byteSize = lastSliceSize
|
||||||
}
|
}
|
||||||
|
|
||||||
silceMd5.Reset()
|
silceMd5.Reset()
|
||||||
if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, DEFAULT); err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
md5Byte := silceMd5.Sum(nil)
|
md5Byte := silceMd5.Sum(nil)
|
||||||
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
|
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
|
||||||
silceMd5Base64s = append(silceMd5Base64s, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
|
partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
|
||||||
}
|
|
||||||
if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
||||||
sliceMd5Hex := fileMd5Hex
|
sliceMd5Hex := fileMd5Hex
|
||||||
if file.GetSize() > DEFAULT {
|
if file.GetSize() > sliceSize {
|
||||||
sliceMd5Hex = strings.ToUpper(utils.GetMD5Encode(strings.Join(silceMd5Hexs, "\n")))
|
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
|
||||||
}
|
|
||||||
|
|
||||||
// 检测是否支持快传
|
|
||||||
params := Params{
|
|
||||||
"parentFolderId": dstDir.GetID(),
|
|
||||||
"fileName": url.QueryEscape(file.GetName()),
|
|
||||||
"fileSize": fmt.Sprint(file.GetSize()),
|
|
||||||
"fileMd5": fileMd5Hex,
|
|
||||||
"sliceSize": fmt.Sprint(DEFAULT),
|
|
||||||
"sliceMd5": sliceMd5Hex,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fullUrl := UPLOAD_URL
|
fullUrl := UPLOAD_URL
|
||||||
if y.isFamily() {
|
if y.isFamily() {
|
||||||
params.Set("familyId", y.FamilyID)
|
|
||||||
fullUrl += "/family"
|
fullUrl += "/family"
|
||||||
} else {
|
} else {
|
||||||
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
|
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
|
||||||
fullUrl += "/person"
|
fullUrl += "/person"
|
||||||
}
|
}
|
||||||
|
|
||||||
var uploadInfo InitMultiUploadResp
|
// 尝试恢复进度
|
||||||
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
|
||||||
req.SetContext(ctx)
|
if !ok {
|
||||||
}, params, &uploadInfo)
|
//step.2 预上传
|
||||||
if err != nil {
|
params := Params{
|
||||||
return err
|
"parentFolderId": dstDir.GetID(),
|
||||||
}
|
"fileName": url.QueryEscape(file.GetName()),
|
||||||
|
"fileSize": fmt.Sprint(file.GetSize()),
|
||||||
// 网盘中不存在该文件,开始上传
|
"fileMd5": fileMd5Hex,
|
||||||
if uploadInfo.Data.FileDataExists != 1 {
|
"sliceSize": fmt.Sprint(sliceSize),
|
||||||
var uploadUrls UploadUrlsResp
|
"sliceMd5": sliceMd5Hex,
|
||||||
_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
|
}
|
||||||
func(req *resty.Request) {
|
if y.isFamily() {
|
||||||
req.SetContext(ctx)
|
params.Set("familyId", y.FamilyID)
|
||||||
}, Params{
|
}
|
||||||
"uploadFileId": uploadInfo.Data.UploadFileID,
|
var uploadInfo InitMultiUploadResp
|
||||||
"partInfo": strings.Join(silceMd5Base64s, ","),
|
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
|
||||||
}, &uploadUrls)
|
req.SetContext(ctx)
|
||||||
|
}, params, &uploadInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
uploadProgress = &UploadProgress{
|
||||||
buf := make([]byte, DEFAULT)
|
UploadInfo: uploadInfo,
|
||||||
for i := 1; i <= count; i++ {
|
UploadParts: partInfos,
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := io.ReadFull(tempFile, buf)
|
|
||||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
uploadData := uploadUrls.UploadUrls[fmt.Sprint("partNumber_", i)]
|
|
||||||
err = retry.Do(func() error {
|
|
||||||
_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(buf[:n]))
|
|
||||||
return err
|
|
||||||
},
|
|
||||||
retry.Context(ctx),
|
|
||||||
retry.Attempts(3),
|
|
||||||
retry.Delay(time.Second),
|
|
||||||
retry.MaxDelay(5*time.Second))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
up(int(i * 100 / count))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 提交
|
uploadInfo := uploadProgress.UploadInfo.Data
|
||||||
|
// 网盘中不存在该文件,开始上传
|
||||||
|
if uploadInfo.FileDataExists != 1 {
|
||||||
|
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
|
||||||
|
retry.Attempts(3),
|
||||||
|
retry.Delay(time.Second),
|
||||||
|
retry.DelayType(retry.BackOffDelay))
|
||||||
|
for i, uploadPart := range uploadProgress.UploadParts {
|
||||||
|
if utils.IsCanceled(upCtx) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
i, uploadPart := i, uploadPart
|
||||||
|
threadG.Go(func(ctx context.Context) error {
|
||||||
|
// step.3 获取上传链接
|
||||||
|
uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
uploadUrl := uploadUrls[0]
|
||||||
|
|
||||||
|
byteSize, offset := sliceSize, int64(uploadUrl.PartNumber-1)*sliceSize
|
||||||
|
if uploadUrl.PartNumber == count {
|
||||||
|
byteSize = lastSliceSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// step.4 上传切片
|
||||||
|
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
up(int(threadG.Success()) * 100 / len(uploadUrls))
|
||||||
|
uploadProgress.UploadParts[i] = ""
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err = threadG.Wait(); err != nil {
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
|
||||||
|
base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// step.5 提交
|
||||||
|
var resp CommitMultiUploadFileResp
|
||||||
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
|
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
|
||||||
func(req *resty.Request) {
|
func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
}, Params{
|
}, Params{
|
||||||
"uploadFileId": uploadInfo.Data.UploadFileID,
|
"uploadFileId": uploadInfo.UploadFileID,
|
||||||
"isLog": "0",
|
"isLog": "0",
|
||||||
"opertype": "3",
|
"opertype": "3",
|
||||||
}, nil)
|
}, &resp)
|
||||||
return err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.toFile(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
|
// 获取上传切片信息
|
||||||
// 需要获取完整文件md5,必须支持 io.Seek
|
// 对http body有大小限制,分片信息太多会出错
|
||||||
tempFile, err := utils.CreateTempFile(file.GetReadCloser())
|
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
|
||||||
|
fullUrl := UPLOAD_URL
|
||||||
|
if y.isFamily() {
|
||||||
|
fullUrl += "/family"
|
||||||
|
} else {
|
||||||
|
fullUrl += "/person"
|
||||||
|
}
|
||||||
|
|
||||||
|
var uploadUrlsResp UploadUrlsResp
|
||||||
|
_, err := y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
|
||||||
|
func(req *resty.Request) {
|
||||||
|
req.SetContext(ctx)
|
||||||
|
}, Params{
|
||||||
|
"uploadFileId": uploadFileId,
|
||||||
|
"partInfo": strings.Join(partInfo, ","),
|
||||||
|
}, &uploadUrlsResp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
|
}
|
||||||
|
uploadUrls := uploadUrlsResp.Data
|
||||||
|
|
||||||
|
if len(uploadUrls) != len(partInfo) {
|
||||||
|
return nil, fmt.Errorf("uploadUrls get error, due to get length %d, real length %d", len(partInfo), len(uploadUrls))
|
||||||
|
}
|
||||||
|
|
||||||
|
uploadUrlInfos := make([]UploadUrlInfo, 0, len(uploadUrls))
|
||||||
|
for k, uploadUrl := range uploadUrls {
|
||||||
|
partNumber, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
uploadUrlInfos = append(uploadUrlInfos, UploadUrlInfo{
|
||||||
|
PartNumber: partNumber,
|
||||||
|
Headers: ParseHttpHeader(uploadUrl.RequestHeader),
|
||||||
|
UploadUrlsData: uploadUrl,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
sort.Slice(uploadUrlInfos, func(i, j int) bool {
|
||||||
|
return uploadUrlInfos[i].PartNumber < uploadUrlInfos[j].PartNumber
|
||||||
|
})
|
||||||
|
return uploadUrlInfos, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// 旧版本上传,家庭云不支持覆盖
|
||||||
|
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
|
// 需要获取完整文件md5,必须支持 io.Seek
|
||||||
|
tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = tempFile.Close()
|
_ = tempFile.Close()
|
||||||
@ -674,10 +754,10 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
// 计算md5
|
// 计算md5
|
||||||
fileMd5 := md5.New()
|
fileMd5 := md5.New()
|
||||||
if _, err := io.Copy(fileMd5, tempFile); err != nil {
|
if _, err := io.Copy(fileMd5, tempFile); err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
|
if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
|
||||||
|
|
||||||
@ -718,14 +798,14 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
}, &uploadInfo)
|
}, &uploadInfo)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// 网盘中不存在该文件,开始上传
|
// 网盘中不存在该文件,开始上传
|
||||||
status := GetUploadFileStatusResp{CreateUploadFileResp: uploadInfo}
|
status := GetUploadFileStatusResp{CreateUploadFileResp: uploadInfo}
|
||||||
for status.Size < file.GetSize() && status.FileDataExists != 1 {
|
for status.Size < file.GetSize() && status.FileDataExists != 1 {
|
||||||
if utils.IsCanceled(ctx) {
|
if utils.IsCanceled(ctx) {
|
||||||
return ctx.Err()
|
return nil, ctx.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
header := map[string]string{
|
header := map[string]string{
|
||||||
@ -742,7 +822,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
|
|
||||||
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
|
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
|
||||||
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// 获取断点状态
|
// 获取断点状态
|
||||||
@ -760,17 +840,17 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
}
|
}
|
||||||
}, &status)
|
}, &status)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
|
if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
|
||||||
return err
|
return nil, err
|
||||||
}
|
}
|
||||||
up(int(status.Size / file.GetSize()))
|
up(int(status.Size / file.GetSize()))
|
||||||
}
|
}
|
||||||
|
|
||||||
// 提交
|
// 提交
|
||||||
var resp CommitUploadFileResp
|
var resp OldCommitUploadFileResp
|
||||||
_, err = y.post(status.FileCommitUrl, func(req *resty.Request) {
|
_, err = y.post(status.FileCommitUrl, func(req *resty.Request) {
|
||||||
req.SetContext(ctx)
|
req.SetContext(ctx)
|
||||||
if y.isFamily() {
|
if y.isFamily() {
|
||||||
@ -788,7 +868,10 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}, &resp)
|
}, &resp)
|
||||||
return err
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return resp.toFile(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (y *Cloud189PC) isFamily() bool {
|
func (y *Cloud189PC) isFamily() bool {
|
||||||
@ -829,3 +912,33 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
|
|||||||
}
|
}
|
||||||
return fmt.Sprint(infos[0].FamilyID), nil
|
return fmt.Sprint(infos[0].FamilyID), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
|
||||||
|
var resp BatchTaskStateResp
|
||||||
|
_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
|
||||||
|
req.SetFormData(map[string]string{
|
||||||
|
"type": aType,
|
||||||
|
"taskId": taskID,
|
||||||
|
})
|
||||||
|
}, &resp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
|
||||||
|
for {
|
||||||
|
state, err := y.CheckBatchTask(aType, taskID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch state.TaskStatus {
|
||||||
|
case 2:
|
||||||
|
return errors.New("there is a conflict with the target object")
|
||||||
|
case 4:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
time.Sleep(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
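The rewrite above keys saved progress on the session key plus the file md5, blanks out finished parts in UploadParts, and persists the remainder only when the context was canceled. A minimal sketch of that save/restore bookkeeping, assuming an in-memory store (the real base.GetUploadProgress/base.SaveUploadProgress helpers live in drivers/base and may serialize and persist differently):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// progressCache stands in for whatever store drivers/base uses (assumption).
var progressCache = map[string]string{}

func keyOf(keys ...string) string { return strings.Join(keys, "-") }

// SaveUploadProgress serializes a progress snapshot under the given keys.
func SaveUploadProgress[T any](p T, keys ...string) bool {
	b, err := json.Marshal(p)
	if err != nil {
		return false
	}
	progressCache[keyOf(keys...)] = string(b)
	return true
}

// GetUploadProgress restores a snapshot saved by a canceled upload, if any.
func GetUploadProgress[T any](keys ...string) (T, bool) {
	var p T
	s, ok := progressCache[keyOf(keys...)]
	if !ok {
		return p, false
	}
	if err := json.Unmarshal([]byte(s), &p); err != nil {
		return p, false
	}
	return p, true
}

// UploadProgress mirrors the shape the 189pc driver saves: the init response
// identifiers plus descriptors of the parts that still need uploading.
type UploadProgress struct {
	UploadFileID string
	UploadParts  []string
}

func main() {
	// First attempt: canceled with two parts left.
	SaveUploadProgress(&UploadProgress{
		UploadFileID: "file-1",
		UploadParts:  []string{"3-md5c", "4-md5d"},
	}, "sessionKey", "fileMd5Hex")

	// Second attempt: resume instead of calling initMultiUpload again.
	if p, ok := GetUploadProgress[*UploadProgress]("sessionKey", "fileMd5Hex"); ok {
		fmt.Println("resuming", p.UploadFileID, "remaining:", p.UploadParts)
	}
}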
@@ -67,7 +67,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
 		return nil
 	}
 	// init deviceID
-	deviceID := utils.GetSHA256Encode(d.UserID)
+	deviceID := utils.GetSHA256Encode([]byte(d.UserID))
 	// init privateKey
 	privateKey, _ := NewPrivateKeyFromHex(deviceID)
 	state := State{
@@ -193,7 +193,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	if d.RapidUpload {
 		buf := bytes.NewBuffer(make([]byte, 0, 1024))
 		io.CopyN(buf, file, 1024)
-		reqBody["pre_hash"] = utils.GetSHA1Encode(buf.String())
+		reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
 		if localFile != nil {
 			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
 				return err
@@ -259,7 +259,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	(t.file.slice(o.toNumber(), Math.min(o.plus(8).toNumber(), t.file.size)))
 	*/
 	buf := make([]byte, 8)
-	r, _ := new(big.Int).SetString(utils.GetMD5Encode(d.AccessToken)[:16], 16)
+	r, _ := new(big.Int).SetString(utils.GetMD5EncodeStr(d.AccessToken)[:16], 16)
 	i := new(big.Int).SetInt64(file.GetSize())
 	o := new(big.Int).SetInt64(0)
 	if file.GetSize() > 0 {
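These one-line changes track a signature change in the hash helpers: the byte-oriented variants now take []byte, with *Str variants for strings. A sketch of the assumed helper pair (the exact utils implementation in alist may differ):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// GetSHA256Encode hashes raw bytes, avoiding a string copy at call sites
// that already hold []byte.
func GetSHA256Encode(data []byte) string {
	h := sha256.Sum256(data)
	return hex.EncodeToString(h[:])
}

// GetSHA256EncodeStr is the convenience wrapper for string inputs.
func GetSHA256EncodeStr(data string) string { return GetSHA256Encode([]byte(data)) }

func main() {
	// Both forms produce the same digest.
	fmt.Println(GetSHA256EncodeStr("user-id"))
	fmt.Println(GetSHA256Encode([]byte("user-id")))
}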
@@ -2,12 +2,12 @@ package aliyundrive_open
 
 import (
 	"context"
+	"errors"
 	"fmt"
-	"io"
-	"math"
 	"net/http"
 	"time"
 
+	"github.com/Xhofe/rateg"
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -36,13 +36,25 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
 }
 
 func (d *AliyundriveOpen) Init(ctx context.Context) error {
+	if d.LIVPDownloadFormat == "" {
+		d.LIVPDownloadFormat = "jpeg"
+	}
+	if d.DriveType == "" {
+		d.DriveType = "default"
+	}
 	res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
 	if err != nil {
 		return err
 	}
-	d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
-	d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
-	d.limitLink = utils.LimitRateCtx(d.link, time.Second)
+	d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
+	d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
+		Limit:  4,
+		Bucket: 1,
+	})
+	d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
+		Limit:  1,
+		Bucket: 1,
+	})
 	return nil
 }
 
@@ -75,6 +87,12 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
 		return nil, err
 	}
 	url := utils.Json.Get(res, "url").ToString()
+	if url == "" {
+		if utils.Ext(file.GetName()) != "livp" {
+			return nil, errors.New("get download url failed: " + string(res))
+		}
+		url = utils.Json.Get(res, "streamsUrl", d.LIVPDownloadFormat).ToString()
+	}
 	exp := time.Hour
 	return &model.Link{
 		URL: url,
@@ -89,7 +107,9 @@ func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.L
 	return d.limitLink(ctx, file)
 }
 
-func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	nowTime, _ := getNowTime()
+	newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
 	_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
@@ -97,12 +117,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
 			"name":            dirName,
 			"type":            "folder",
 			"check_name_mode": "refuse",
-		})
+		}).SetResult(&newDir)
 	})
-	return err
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newDir), nil
 }
 
-func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
+	var resp MoveOrCopyResp
 	_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
@@ -110,20 +134,36 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) er
 			"to_parent_file_id": dstDir.GetID(),
 			"check_name_mode":   "refuse", // optional:ignore,auto_rename,refuse
 			//"new_name": "newName", // The new name to use when a file of the same name exists
-		})
+		}).SetResult(&resp)
 	})
-	return err
+	if err != nil {
+		return nil, err
+	}
+	if resp.Exist {
+		return nil, errors.New("existence of files with the same name")
+	}
+
+	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
+		srcObj.ID = resp.FileID
+		srcObj.Modified = time.Now()
+		return srcObj, nil
+	}
+	return nil, nil
 }
 
-func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
+	var newFile File
 	_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"drive_id": d.DriveId,
 			"file_id":  srcObj.GetID(),
 			"name":     newName,
-		})
+		}).SetResult(&newFile)
 	})
-	return err
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newFile), nil
 }
 
 func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -152,75 +192,8 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }
 
-func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	// rapid_upload is not currently supported
-	// 1. create
-	// Part Size Unit: Bytes, Default: 20MB,
-	// Maximum number of slices 10,000, ≈195.3125GB
-	var partSize int64 = 20 * 1024 * 1024
-	createData := base.Json{
-		"drive_id":        d.DriveId,
-		"parent_file_id":  dstDir.GetID(),
-		"name":            stream.GetName(),
-		"type":            "file",
-		"check_name_mode": "ignore",
-	}
-	count := 1
-	if stream.GetSize() > partSize {
-		if stream.GetSize() > 1*1024*1024*1024*1024 { // file Size over 1TB
-			partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
-		} else if stream.GetSize() > 768*1024*1024*1024 { // over 768GB
-			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
-		} else if stream.GetSize() > 512*1024*1024*1024 { // over 512GB
-			partSize = 82463373 // ≈ 78.6432MB
-		} else if stream.GetSize() > 384*1024*1024*1024 { // over 384GB
-			partSize = 54975582 // ≈ 52.4288MB
-		} else if stream.GetSize() > 256*1024*1024*1024 { // over 256GB
-			partSize = 41231687 // ≈ 39.3216MB
-		} else if stream.GetSize() > 128*1024*1024*1024 { // over 128GB
-			partSize = 27487791 // ≈ 26.2144MB
-		}
-		count = int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
-		createData["part_info_list"] = makePartInfos(count)
-	}
-	var createResp CreateResp
-	_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
-		req.SetBody(createData).SetResult(&createResp)
-	})
-	if err != nil {
-		return err
-	}
-	// 2. upload
-	preTime := time.Now()
-	for i := 1; i <= len(createResp.PartInfoList); i++ {
-		if utils.IsCanceled(ctx) {
-			return ctx.Err()
-		}
-		err = d.uploadPart(ctx, i, count, utils.NewMultiReadable(io.LimitReader(stream, partSize)), &createResp, true)
-		if err != nil {
-			return err
-		}
-		if count > 0 {
-			up(i * 100 / count)
-		}
-		// refresh upload url if 50 minutes passed
-		if time.Since(preTime) > 50*time.Minute {
-			createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
-			if err != nil {
-				return err
-			}
-			preTime = time.Now()
-		}
-	}
-	// 3. complete
-	_, err = d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
-		req.SetBody(base.Json{
-			"drive_id":  d.DriveId,
-			"file_id":   createResp.FileId,
-			"upload_id": createResp.UploadId,
-		})
-	})
-	return err
+func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	return d.upload(ctx, dstDir, stream, up)
 }
 
 func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -248,3 +221,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
 }
 
 var _ driver.Driver = (*AliyundriveOpen)(nil)
+var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
+var _ driver.MoveResult = (*AliyundriveOpen)(nil)
+var _ driver.RenameResult = (*AliyundriveOpen)(nil)
+var _ driver.PutResult = (*AliyundriveOpen)(nil)
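Init now wraps list and link in rateg.LimitFnCtx, a context-aware rate limiter, instead of the old utils.LimitRateCtx. A minimal sketch of a comparable wrapper built on golang.org/x/time/rate; the real rateg package (github.com/Xhofe/rateg) may differ in details, so treat the Limit/Bucket semantics below (calls per second and burst size) as assumptions:

package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

// limitFnCtx wraps fn so calls are throttled to `limit` per second with a
// burst of `bucket`, and abort early if ctx is canceled while waiting.
func limitFnCtx[I, O any](fn func(context.Context, I) (O, error), limit float64, bucket int) func(context.Context, I) (O, error) {
	l := rate.NewLimiter(rate.Limit(limit), bucket)
	return func(ctx context.Context, in I) (O, error) {
		if err := l.Wait(ctx); err != nil { // blocks until a token is free
			var zero O
			return zero, err
		}
		return fn(ctx, in)
	}
}

func main() {
	list := func(ctx context.Context, dir string) ([]string, error) {
		return []string{dir + "/a", dir + "/b"}, nil
	}
	limited := limitFnCtx(list, 4, 1) // at most 4 list calls per second
	files, err := limited(context.Background(), "/root")
	fmt.Println(files, err)
}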
@@ -6,16 +6,19 @@ import (
 )
 
 type Addition struct {
+	DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
 	driver.RootID
 	RefreshToken   string `json:"refresh_token" required:"true"`
 	OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
 	OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
 	OauthTokenURL  string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/ali_open/token"`
 	ClientID       string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
 	ClientSecret   string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
 	RemoveWay      string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
-	InternalUpload bool   `json:"internal_upload" help:"If you are using Aliyun ECS is located in Beijing, you can turn it on to boost the upload speed"`
-	AccessToken    string
+	RapidUpload        bool   `json:"rapid_upload" help:"If you enable this option, the file will be uploaded to the server first, so the progress will be incorrect"`
+	InternalUpload     bool   `json:"internal_upload" help:"If you are using Aliyun ECS is located in Beijing, you can turn it on to boost the upload speed"`
+	LIVPDownloadFormat string `json:"livp_download_format" type:"select" options:"jpeg,mov" default:"jpeg"`
+	AccessToken        string
 }
 
 var config = driver.Config{
@@ -17,22 +17,28 @@ type Files struct {
 }
 
 type File struct {
 	DriveId       string `json:"drive_id"`
 	FileId        string `json:"file_id"`
 	ParentFileId  string `json:"parent_file_id"`
 	Name          string `json:"name"`
 	Size          int64  `json:"size"`
 	FileExtension string `json:"file_extension"`
 	ContentHash   string `json:"content_hash"`
 	Category      string `json:"category"`
 	Type          string `json:"type"`
 	Thumbnail     string `json:"thumbnail"`
 	Url           string `json:"url"`
-	CreatedAt     *time.Time `json:"created_at"`
+	CreatedAt     time.Time `json:"created_at"`
 	UpdatedAt     time.Time `json:"updated_at"`
+
+	// create only
+	FileName string `json:"file_name"`
 }
 
 func fileToObj(f File) *model.ObjThumb {
+	if f.Name == "" {
+		f.Name = f.FileName
+	}
 	return &model.ObjThumb{
 		Object: model.Object{
 			ID: f.FileId,
@@ -67,3 +73,9 @@ type CreateResp struct {
 	RapidUpload  bool       `json:"rapid_upload"`
 	PartInfoList []PartInfo `json:"part_info_list"`
 }
+
+type MoveOrCopyResp struct {
+	Exist   bool   `json:"exist"`
+	DriveID string `json:"drive_id"`
+	FileID  string `json:"file_id"`
+}
drivers/aliyundrive_open/upload.go
Normal file
263
drivers/aliyundrive_open/upload.go
Normal file
@ -0,0 +1,263 @@
|
|||||||
|
package aliyundrive_open
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
|
"github.com/avast/retry-go"
|
||||||
|
"github.com/go-resty/resty/v2"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
func makePartInfos(size int) []base.Json {
|
||||||
|
partInfoList := make([]base.Json, size)
|
||||||
|
for i := 0; i < size; i++ {
|
||||||
|
partInfoList[i] = base.Json{"part_number": 1 + i}
|
||||||
|
}
|
||||||
|
return partInfoList
|
||||||
|
}
|
||||||
|
|
||||||
|
func calPartSize(fileSize int64) int64 {
|
||||||
|
var partSize int64 = 20 * 1024 * 1024
|
||||||
|
if fileSize > partSize {
|
||||||
|
if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB
|
||||||
|
partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
|
||||||
|
} else if fileSize > 768*1024*1024*1024 { // over 768GB
|
||||||
|
partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
|
||||||
|
} else if fileSize > 512*1024*1024*1024 { // over 512GB
|
||||||
|
partSize = 82463373 // ≈ 78.6432MB
|
||||||
|
} else if fileSize > 384*1024*1024*1024 { // over 384GB
|
||||||
|
partSize = 54975582 // ≈ 52.4288MB
|
||||||
|
} else if fileSize > 256*1024*1024*1024 { // over 256GB
|
||||||
|
partSize = 41231687 // ≈ 39.3216MB
|
||||||
|
} else if fileSize > 128*1024*1024*1024 { // over 128GB
|
||||||
|
partSize = 27487791 // ≈ 26.2144MB
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return partSize
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
|
||||||
|
partInfoList := makePartInfos(count)
|
||||||
|
var resp CreateResp
|
||||||
|
_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(base.Json{
|
||||||
|
"drive_id": d.DriveId,
|
||||||
|
"file_id": fileId,
|
||||||
|
"part_info_list": partInfoList,
|
||||||
|
"upload_id": uploadId,
|
||||||
|
}).SetResult(&resp)
|
||||||
|
})
|
||||||
|
return resp.PartInfoList, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
|
||||||
|
uploadUrl := partInfo.UploadUrl
|
||||||
|
if d.InternalUpload {
|
||||||
|
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
|
||||||
|
}
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
res, err := base.HttpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
res.Body.Close()
|
||||||
|
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
|
||||||
|
return fmt.Errorf("upload status: %d", res.StatusCode)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
|
||||||
|
// 3. complete
|
||||||
|
var newFile File
|
||||||
|
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(base.Json{
|
||||||
|
"drive_id": d.DriveId,
|
||||||
|
"file_id": fileId,
|
||||||
|
"upload_id": uploadId,
|
||||||
|
}).SetResult(&newFile)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return fileToObj(newFile), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type ProofRange struct {
|
||||||
|
Start int64
|
||||||
|
End int64
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProofRange(input string, size int64) (*ProofRange, error) {
|
||||||
|
if size == 0 {
|
||||||
|
return &ProofRange{}, nil
|
||||||
|
}
|
||||||
|
tmpStr := utils.GetMD5EncodeStr(input)[0:16]
|
||||||
|
tmpInt, err := strconv.ParseUint(tmpStr, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
index := tmpInt % uint64(size)
|
||||||
|
pr := &ProofRange{
|
||||||
|
Start: int64(index),
|
||||||
|
End: int64(index) + 8,
|
||||||
|
}
|
||||||
|
if pr.End >= size {
|
||||||
|
pr.End = size
|
||||||
|
}
|
||||||
|
return pr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
|
||||||
|
proofRange, err := getProofRange(d.AccessToken, fileSize)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
buf := make([]byte, proofRange.End-proofRange.Start)
|
||||||
|
_, err = file.ReadAt(buf, proofRange.Start)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return base64.StdEncoding.EncodeToString(buf), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
|
||||||
|
// 1. create
|
||||||
|
// Part Size Unit: Bytes, Default: 20MB,
|
||||||
|
// Maximum number of slices 10,000, ≈195.3125GB
|
||||||
|
var partSize = calPartSize(stream.GetSize())
|
||||||
|
createData := base.Json{
|
||||||
|
"drive_id": d.DriveId,
|
||||||
|
"parent_file_id": dstDir.GetID(),
|
||||||
|
"name": stream.GetName(),
|
||||||
|
"type": "file",
|
||||||
|
"check_name_mode": "ignore",
|
||||||
|
}
|
||||||
|
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
|
||||||
|
createData["part_info_list"] = makePartInfos(count)
|
||||||
|
// rapid upload
|
||||||
|
rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
|
||||||
|
if rapidUpload {
|
||||||
|
log.Debugf("[aliyundrive_open] start cal pre_hash")
|
||||||
|
// read 1024 bytes to calculate pre hash
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
_, err := io.CopyN(buf, stream, 1024)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
createData["size"] = stream.GetSize()
|
||||||
|
createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
|
||||||
|
// if support seek, seek to start
|
||||||
|
if localFile, ok := stream.(io.Seeker); ok {
|
||||||
|
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Put spliced head back to stream
|
||||||
|
stream.SetReadCloser(struct {
|
||||||
|
io.Reader
|
||||||
|
io.Closer
|
||||||
|
}{
|
||||||
|
Reader: io.MultiReader(buf, stream.GetReadCloser()),
|
||||||
|
Closer: stream.GetReadCloser(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var createResp CreateResp
|
||||||
|
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(createData).SetResult(&createResp)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if e.Code != "PreHashMatched" || !rapidUpload {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
|
||||||
|
// convert to local file
|
||||||
|
file, err := utils.CreateTempFile(stream, stream.GetSize())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_ = stream.GetReadCloser().Close()
|
||||||
|
stream.SetReadCloser(file)
|
||||||
|
// calculate full hash
|
||||||
|
h := sha1.New()
|
||||||
|
_, err = io.Copy(h, file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
delete(createData, "pre_hash")
|
||||||
|
createData["proof_version"] = "v1"
|
||||||
|
createData["content_hash_name"] = "sha1"
|
||||||
|
createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
|
||||||
|
createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
|
||||||
|
}
|
||||||
|
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
|
||||||
|
req.SetBody(createData).SetResult(&createResp)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// seek to start
|
||||||
|
if _, err = file.Seek(0, io.SeekStart); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !createResp.RapidUpload {
|
||||||
|
// 2. upload
|
||||||
|
log.Debugf("[aliyundive_open] normal upload")
|
||||||
|
|
||||||
|
preTime := time.Now()
|
||||||
|
for i := 0; i < len(createResp.PartInfoList); i++ {
|
||||||
|
if utils.IsCanceled(ctx) {
|
||||||
|
return nil, ctx.Err()
|
||||||
|
}
|
||||||
|
// refresh upload url if 50 minutes passed
|
||||||
|
if time.Since(preTime) > 50*time.Minute {
|
||||||
|
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
preTime = time.Now()
|
||||||
|
}
|
||||||
|
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
|
||||||
|
err = retry.Do(func() error {
|
||||||
|
rd.Reset()
|
||||||
|
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
|
||||||
|
},
|
||||||
|
retry.Attempts(3),
|
||||||
|
retry.DelayType(retry.BackOffDelay),
|
||||||
|
retry.Delay(time.Second))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
|
||||||
|
// 3. complete
|
||||||
|
return d.completeUpload(createResp.FileId, createResp.UploadId)
|
||||||
|
}
|
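getProofRange above determines which 8 bytes of the file must be proven for rapid upload: the first 16 hex characters of md5(access_token) are parsed as a uint64 and taken modulo the file size to pick the start offset, then clamped at the file end. A worked standalone version of that arithmetic (the token value is made up):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strconv"
)

func proofOffset(accessToken string, size int64) (start, end int64, err error) {
	if size == 0 {
		return 0, 0, nil
	}
	sum := md5.Sum([]byte(accessToken))
	tmpStr := hex.EncodeToString(sum[:])[0:16] // first 16 hex chars = high 8 bytes
	tmpInt, err := strconv.ParseUint(tmpStr, 16, 64)
	if err != nil {
		return 0, 0, err
	}
	start = int64(tmpInt % uint64(size)) // deterministic per token and size
	end = start + 8
	if end >= size {
		end = size // window shrinks when it would run past the file end
	}
	return start, end, nil
}

func main() {
	start, end, _ := proofOffset("example-access-token", 1<<20) // 1 MiB file
	fmt.Printf("read bytes [%d, %d) and base64-encode them as proof_code\n", start, end)
}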
@@ -2,27 +2,30 @@ package aliyundrive_open
 
 import (
 	"context"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"net/http"
 	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
 )
 
 // do others that not defined in Driver interface
 
-func (d *AliyundriveOpen) refreshToken() error {
+func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
 	url := d.base + "/oauth/access_token"
 	if d.OauthTokenURL != "" && d.ClientID == "" {
 		url = d.OauthTokenURL
 	}
-	var resp base.TokenResp
+	//var resp base.TokenResp
 	var e ErrResp
-	_, err := base.RestyClient.R().
+	res, err := base.RestyClient.R().
 		ForceContentType("application/json").
 		SetBody(base.Json{
 			"client_id": d.ClientID,
@@ -30,24 +33,71 @@ func (d *AliyundriveOpen) refreshToken() error {
 			"grant_type":    "refresh_token",
 			"refresh_token": d.RefreshToken,
 		}).
-		SetResult(&resp).
+		//SetResult(&resp).
 		SetError(&e).
 		Post(url)
+	if err != nil {
+		return "", "", err
+	}
+	log.Debugf("[ali_open] refresh token response: %s", res.String())
+	if e.Code != "" {
+		return "", "", fmt.Errorf("failed to refresh token: %s", e.Message)
+	}
+	refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
+	if refresh == "" {
+		return "", "", errors.New("failed to refresh token: refresh token is empty")
+	}
+	curSub, err := getSub(d.RefreshToken)
+	if err != nil {
+		return "", "", err
+	}
+	newSub, err := getSub(refresh)
+	if err != nil {
+		return "", "", err
+	}
+	if curSub != newSub {
+		return "", "", errors.New("failed to refresh token: sub not match")
+	}
+	return refresh, access, nil
+}
+
+func getSub(token string) (string, error) {
+	segments := strings.Split(token, ".")
+	if len(segments) != 3 {
+		return "", errors.New("not a jwt token because of invalid segments")
+	}
+	bs, err := base64.RawStdEncoding.DecodeString(segments[1])
+	if err != nil {
+		return "", errors.New("failed to decode jwt token")
+	}
+	return utils.Json.Get(bs, "sub").ToString(), nil
+}
+
+func (d *AliyundriveOpen) refreshToken() error {
+	refresh, access, err := d._refreshToken()
+	for i := 0; i < 3; i++ {
+		if err == nil {
+			break
+		} else {
+			log.Errorf("[ali_open] failed to refresh token: %s", err)
+		}
+		refresh, access, err = d._refreshToken()
+	}
 	if err != nil {
 		return err
 	}
-	if e.Code != "" {
-		return fmt.Errorf("failed to refresh token: %s", e.Message)
-	}
-	if resp.RefreshToken == "" {
-		return errors.New("failed to refresh token: refresh token is empty")
-	}
-	d.RefreshToken, d.AccessToken = resp.RefreshToken, resp.AccessToken
+	log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
+	d.RefreshToken, d.AccessToken = refresh, access
 	op.MustSaveDriverStorage(d)
 	return nil
 }
 
 func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
+	b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
+	return b, err
+}
+
+func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
 	req := base.RestyClient.R()
 	// TODO check whether access_token is expired
 	req.SetHeader("Authorization", "Bearer "+d.AccessToken)
@@ -61,20 +111,23 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback,
 	req.SetError(&e)
 	res, err := req.Execute(method, d.base+uri)
 	if err != nil {
-		return nil, err
+		if res != nil {
+			log.Errorf("[aliyundrive_open] request error: %s", res.String())
+		}
+		return nil, err, nil
 	}
 	isRetry := len(retry) > 0 && retry[0]
 	if e.Code != "" {
 		if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") {
 			err = d.refreshToken()
 			if err != nil {
-				return nil, err
+				return nil, err, nil
			}
-			return d.request(uri, method, callback, true)
+			return d.requestReturnErrResp(uri, method, callback, true)
 		}
-		return nil, fmt.Errorf("%s:%s", e.Code, e.Message)
+		return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
 	}
-	return res.Body(), nil
+	return res.Body(), nil, nil
 }
 
 func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
@@ -118,58 +171,8 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
 	return res, nil
 }
 
-func makePartInfos(size int) []base.Json {
-	partInfoList := make([]base.Json, size)
-	for i := 0; i < size; i++ {
-		partInfoList[i] = base.Json{"part_number": 1 + i}
-	}
-	return partInfoList
-}
-
-func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
-	partInfoList := makePartInfos(count)
-	var resp CreateResp
-	_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
-		req.SetBody(base.Json{
-			"drive_id":       d.DriveId,
-			"file_id":        fileId,
-			"part_info_list": partInfoList,
-			"upload_id":      uploadId,
-		}).SetResult(&resp)
-	})
-	return resp.PartInfoList, err
-}
-
-func (d *AliyundriveOpen) uploadPart(ctx context.Context, i, count int, reader *utils.MultiReadable, resp *CreateResp, retry bool) error {
-	partInfo := resp.PartInfoList[i-1]
-	uploadUrl := partInfo.UploadUrl
-	if d.InternalUpload {
-		uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
-	}
-	req, err := http.NewRequest("PUT", uploadUrl, reader)
-	if err != nil {
-		return err
-	}
-	req = req.WithContext(ctx)
-	res, err := base.HttpClient.Do(req)
-	if err != nil {
-		if retry {
-			reader.Reset()
-			return d.uploadPart(ctx, i, count, reader, resp, false)
-		}
-		return err
-	}
-	res.Body.Close()
-	if retry && res.StatusCode == http.StatusForbidden {
-		resp.PartInfoList, err = d.getUploadUrl(count, resp.FileId, resp.UploadId)
-		if err != nil {
-			return err
-		}
-		reader.Reset()
-		return d.uploadPart(ctx, i, count, reader, resp, false)
-	}
-	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
-		return fmt.Errorf("upload status: %d", res.StatusCode)
-	}
-	return nil
-}
+func getNowTime() (time.Time, string) {
+	nowTime := time.Now()
+	nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
+	return nowTime, nowTimeStr
+}
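getSub above guards the token exchange by reading the `sub` claim straight out of the refresh token's JWT payload and rejecting a refresh whose subject does not match. A self-contained sketch of that decode using only the standard library (the diff decodes the payload with base64.RawStdEncoding; strictly, JWT payloads are base64url, which this sketch uses, and it unmarshals with encoding/json where the real code uses utils.Json):

package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

func getSub(token string) (string, error) {
	segments := strings.Split(token, ".")
	if len(segments) != 3 {
		return "", errors.New("not a jwt token because of invalid segments")
	}
	// JWT payloads are unpadded base64url.
	bs, err := base64.RawURLEncoding.DecodeString(segments[1])
	if err != nil {
		return "", errors.New("failed to decode jwt token")
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(bs, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}

func main() {
	// header.payload.signature, where the payload is {"sub":"user-123"}.
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"user-123"}`))
	sub, err := getSub("eyJhbGciOiJIUzI1NiJ9." + payload + ".sig")
	fmt.Println(sub, err) // user-123 <nil>
}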
@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/Xhofe/rateg"
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -52,8 +53,14 @@ func (d *AliyundriveShare) Init(ctx context.Context) error {
 			log.Errorf("%+v", err)
 		}
 	})
-	d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
-	d.limitLink = utils.LimitRateCtx(d.link, time.Second)
+	d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
+		Limit:  4,
+		Bucket: 1,
+	})
+	d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
+		Limit:  1,
+		Bucket: 1,
+	})
 	return nil
 }
@@ -3,6 +3,7 @@ package drivers
 import (
 	_ "github.com/alist-org/alist/v3/drivers/115"
 	_ "github.com/alist-org/alist/v3/drivers/123"
+	_ "github.com/alist-org/alist/v3/drivers/123_share"
 	_ "github.com/alist-org/alist/v3/drivers/139"
 	_ "github.com/alist-org/alist/v3/drivers/189"
 	_ "github.com/alist-org/alist/v3/drivers/189pc"
@@ -16,6 +17,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
+	_ "github.com/alist-org/alist/v3/drivers/crypt"
 	_ "github.com/alist-org/alist/v3/drivers/dropbox"
 	_ "github.com/alist-org/alist/v3/drivers/ftp"
 	_ "github.com/alist-org/alist/v3/drivers/google_drive"
@@ -43,6 +45,7 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/uss"
 	_ "github.com/alist-org/alist/v3/drivers/virtual"
 	_ "github.com/alist-org/alist/v3/drivers/webdav"
+	_ "github.com/alist-org/alist/v3/drivers/weiyun"
 	_ "github.com/alist-org/alist/v3/drivers/wopan"
 	_ "github.com/alist-org/alist/v3/drivers/yandex_disk"
 )
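drivers/all.go wires new drivers in purely through blank imports: each driver package registers itself in an init func, so importing it for side effects is enough to make it available. A minimal single-file sketch of the pattern (the registry names below are illustrative, not alist's actual API):

package main

import "fmt"

// drivers stands in for a central registry package (assumption).
var drivers = map[string]func() string{}

// RegisterDriver is what each driver package would call from its init.
func RegisterDriver(name string, ctor func() string) { drivers[name] = ctor }

// In the real layout this init lives inside the driver package itself, so a
// blank import of that package is what triggers the registration.
func init() {
	RegisterDriver("weiyun", func() string { return "weiyun driver instance" })
}

func main() {
	// The importer never references the driver package directly;
	// the blank import alone fills the registry before main runs.
	fmt.Println(drivers["weiyun"]())
}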
@@ -1,30 +1,38 @@
 package baidu_netdisk
 
 import (
-	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"math"
+	"net/url"
 	"os"
 	stdpath "path"
 	"strconv"
-	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	log "github.com/sirupsen/logrus"
 )
 
 type BaiduNetdisk struct {
 	model.Storage
 	Addition
+
+	uploadThread int
 }
+
+const DefaultSliceSize int64 = 4 * 1024 * 1024
 
 func (d *BaiduNetdisk) Config() driver.Config {
 	return config
 }
@@ -34,6 +42,15 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
 }
 
 func (d *BaiduNetdisk) Init(ctx context.Context) error {
+	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
+	if d.uploadThread < 1 || d.uploadThread > 32 {
+		d.uploadThread, d.UploadThread = 3, "3"
+	}
+
+	if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
+		d.UploadAPI = "https://d.pcs.baidu.com"
+	}
+
 	res, err := d.get("/xpan/nas", map[string]string{
 		"method": "uinfo",
 	}, nil)
@@ -62,12 +79,16 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
 	return d.linkOfficial(file, args)
 }
 
-func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
-	return err
+func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
+	var newDir File
+	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir)
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newDir), nil
 }
 
-func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	data := []base.Json{
 		{
 			"path": srcObj.GetPath(),
@@ -76,10 +97,18 @@ func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error
 		},
 	}
 	_, err := d.manage("move", data)
-	return err
+	if err != nil {
+		return nil, err
+	}
+	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
+		srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
+		srcObj.Modified = time.Now()
+		return srcObj, nil
+	}
+	return nil, nil
 }
 
-func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
 	data := []base.Json{
 		{
 			"path": srcObj.GetPath(),
@@ -87,7 +116,17 @@ func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName str
 		},
 	}
 	_, err := d.manage("rename", data)
-	return err
+	if err != nil {
+		return nil, err
+	}
+
+	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
+		srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
+		srcObj.Name = newName
+		srcObj.Modified = time.Now()
+		return srcObj, nil
+	}
+	return nil, nil
 }
 
 func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -108,126 +147,144 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }
 
-func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer func() {
 		_ = tempFile.Close()
 		_ = os.Remove(tempFile.Name())
 	}()
-	var Default int64 = 4 * 1024 * 1024
-	defaultByteData := make([]byte, Default)
-	count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
-	var SliceSize int64 = 256 * 1024
-	// cal md5
-	h1 := md5.New()
-	h2 := md5.New()
-	block_list := make([]string, 0)
-	content_md5 := ""
-	slice_md5 := ""
-	left := stream.GetSize()
-	for i := 0; i < count; i++ {
-		byteSize := Default
-		var byteData []byte
-		if left < Default {
-			byteSize = left
-			byteData = make([]byte, byteSize)
-		} else {
-			byteData = defaultByteData
-		}
-		left -= byteSize
-		_, err = io.ReadFull(tempFile, byteData)
-		if err != nil {
-			return err
-		}
-		h1.Write(byteData)
-		h2.Write(byteData)
-		block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
-		h2.Reset()
-	}
-	content_md5 = hex.EncodeToString(h1.Sum(nil))
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	if stream.GetSize() <= SliceSize {
-		slice_md5 = content_md5
-	} else {
-		sliceData := make([]byte, SliceSize)
-		_, err = io.ReadFull(tempFile, sliceData)
-		if err != nil {
-			return err
-		}
-		h2.Write(sliceData)
-		slice_md5 = hex.EncodeToString(h2.Sum(nil))
-		_, err = tempFile.Seek(0, io.SeekStart)
-		if err != nil {
-			return err
-		}
-	}
+	streamSize := stream.GetSize()
+	count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1))
+	lastBlockSize := streamSize % DefaultSliceSize
+	if streamSize > 0 && lastBlockSize == 0 {
+		lastBlockSize = DefaultSliceSize
+	}
+
+	//cal md5 for first 256k data
+	const SliceSize int64 = 256 * 1024
+	// cal md5
+	blockList := make([]string, 0, count)
+	byteSize := DefaultSliceSize
+	fileMd5H := md5.New()
+	sliceMd5H := md5.New()
+	sliceMd5H2 := md5.New()
+	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
+	for i := 1; i <= count; i++ {
+		if utils.IsCanceled(ctx) {
+			return nil, ctx.Err()
+		}
+		if i == count {
+			byteSize = lastBlockSize
+		}
+		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		if err != nil && err != io.EOF {
+			return nil, err
+		}
+		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
+		sliceMd5H.Reset()
+	}
+	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
+	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
+	blockListStr, _ := utils.Json.MarshalToString(blockList)
+
 	rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
 	path := encodeURIComponent(rawPath)
-	block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ","))
-	data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s&content-md5=%s&slice-md5=%s",
-		path, stream.GetSize(),
-		block_list_str,
-		content_md5, slice_md5)
-	params := map[string]string{
-		"method": "precreate",
-	}
-	var precreateResp PrecreateResp
-	_, err = d.post("/xpan/file", params, data, &precreateResp)
+
+	// step.1 precreate
+	// try to fetch previous progress
+	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
+	if !ok {
+		data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s",
+			path, streamSize,
+			blockListStr,
+			contentMd5, sliceMd5)
+		params := map[string]string{
+			"method": "precreate",
+		}
+		log.Debugf("[baidu_netdisk] precreate data: %s", data)
+		_, err = d.post("/xpan/file", params, data, &precreateResp)
+		if err != nil {
+			return nil, err
+		}
+		log.Debugf("%+v", precreateResp)
+		if precreateResp.ReturnType == 2 {
+			//rapid upload, since got md5 match from baidu server
+			if err != nil {
+				return nil, err
+			}
+			return fileToObj(precreateResp.File), nil
+		}
+	}
+	// step.2 upload the slices
+	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+		retry.Attempts(3),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
+	for i, partseq := range precreateResp.BlockList {
+		if utils.IsCanceled(upCtx) {
+			break
+		}
+
+		i, partseq, offset, byteSize := i, partseq, int64(partseq)*DefaultSliceSize, DefaultSliceSize
+		if partseq+1 == count {
+			byteSize = lastBlockSize
+		}
+		threadG.Go(func(ctx context.Context) error {
+			params := map[string]string{
+				"method":       "upload",
+				"access_token": d.AccessToken,
+				"type":         "tmpfile",
+				"path":         path,
+				"uploadid":     precreateResp.Uploadid,
+				"partseq":      strconv.Itoa(partseq),
+			}
+			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+			if err != nil {
+				return err
+			}
+			up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
+			precreateResp.BlockList[i] = -1
+			return nil
+		})
+	}
+	if err = threadG.Wait(); err != nil {
+		// if canceled by the user, save the upload progress
+		if errors.Is(err, context.Canceled) {
+			precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
+			base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+		}
+		return nil, err
+	}
+
+	// step.3 create the file
+	var newFile File
+	_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile)
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newFile), nil
+}
+
+func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
|
||||||
|
res, err := base.RestyClient.R().
|
||||||
|
SetContext(ctx).
|
||||||
|
SetQueryParams(params).
|
||||||
|
SetFileReader("file", fileName, file).
|
||||||
|
Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("%+v", precreateResp)
|
log.Debugln(res.RawResponse.Status + res.String())
|
||||||
if precreateResp.ReturnType == 2 {
|
errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
|
||||||
return nil
|
errNo := utils.Json.Get(res.Body(), "errno").ToInt()
|
||||||
|
if errCode != 0 || errNo != 0 {
|
||||||
|
return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
|
||||||
}
|
}
|
||||||
params = map[string]string{
|
return nil
|
||||||
"method": "upload",
|
|
||||||
"access_token": d.AccessToken,
|
|
||||||
"type": "tmpfile",
|
|
||||||
"path": path,
|
|
||||||
"uploadid": precreateResp.Uploadid,
|
|
||||||
}
|
|
||||||
left = stream.GetSize()
|
|
||||||
for i, partseq := range precreateResp.BlockList {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
byteSize := Default
|
|
||||||
var byteData []byte
|
|
||||||
if left < Default {
|
|
||||||
byteSize = left
|
|
||||||
byteData = make([]byte, byteSize)
|
|
||||||
} else {
|
|
||||||
byteData = defaultByteData
|
|
||||||
}
|
|
||||||
left -= byteSize
|
|
||||||
_, err = io.ReadFull(tempFile, byteData)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
u := "https://d.pcs.baidu.com/rest/2.0/pcs/superfile2"
|
|
||||||
params["partseq"] = strconv.Itoa(partseq)
|
|
||||||
res, err := base.RestyClient.R().
|
|
||||||
SetContext(ctx).
|
|
||||||
SetQueryParams(params).
|
|
||||||
SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
|
|
||||||
Post(u)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debugln(res.String())
|
|
||||||
if len(precreateResp.BlockList) > 0 {
|
|
||||||
up(i * 100 / len(precreateResp.BlockList))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ driver.Driver = (*BaiduNetdisk)(nil)
|
var _ driver.Driver = (*BaiduNetdisk)(nil)
|
||||||
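The rewritten Put hashes the stream in a single pass: one io.CopyN per slice fans the bytes out to three MD5 states through io.MultiWriter, and utils.LimitWriter caps the third hash at the first 256 KB. A self-contained sketch of that pattern follows (plain stdlib; the local limitWriter stands in for utils.LimitWriter, whose exact semantics are assumed, and the 4-byte slice size is illustrative, the driver uses 4 MiB):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// limitWriter mirrors what utils.LimitWriter is assumed to do: pass through
// at most n bytes to the wrapped writer, then silently discard the rest.
type limitWriter struct {
	w io.Writer
	n int64
}

func (l *limitWriter) Write(p []byte) (int, error) {
	if l.n > 0 {
		keep := int64(len(p))
		if keep > l.n {
			keep = l.n
		}
		if _, err := l.w.Write(p[:keep]); err != nil {
			return 0, err
		}
		l.n -= keep
	}
	return len(p), nil // report the full length so io.MultiWriter keeps going
}

func main() {
	const sliceSize int64 = 4 // stand-in for DefaultSliceSize
	data := "hello, baidu slices"
	streamSize := int64(len(data))
	count := (streamSize + sliceSize - 1) / sliceSize
	lastBlockSize := streamSize % sliceSize
	if lastBlockSize == 0 {
		lastBlockSize = sliceSize
	}

	fileMd5H := md5.New()  // md5 of the whole file
	sliceMd5H := md5.New() // md5 of the current slice, reset each round
	firstH := md5.New()    // md5 of only the first sliceSize bytes
	firstW := &limitWriter{w: firstH, n: sliceSize}

	src := strings.NewReader(data)
	var blockList []string
	byteSize := sliceSize
	for i := int64(1); i <= count; i++ {
		if i == count {
			byteSize = lastBlockSize
		}
		if _, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, firstW), src, byteSize); err != nil && err != io.EOF {
			panic(err)
		}
		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
		sliceMd5H.Reset()
	}
	fmt.Println("block_list:", blockList)
	fmt.Println("content-md5:", hex.EncodeToString(fileMd5H.Sum(nil)))
	fmt.Println("slice-md5:", hex.EncodeToString(firstH.Sum(nil)))
}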
@@ -15,6 +15,8 @@ type Addition struct {
 	ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
 	CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
 	AccessToken string
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
+	UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
 }

 var config = driver.Config{
@@ -1,6 +1,7 @@
 package baidu_netdisk

 import (
+	"path"
 	"strconv"
 	"time"

@@ -17,10 +18,8 @@ type File struct {
 	//OwnerType int `json:"owner_type"`
 	//Category int `json:"category"`
 	//RealCategory string `json:"real_category"`
 	FsId int64 `json:"fs_id"`
-	ServerMtime int64 `json:"server_mtime"`
 	//OperId int `json:"oper_id"`
-	//ServerCtime int `json:"server_ctime"`
 	Thumbs struct {
 		//Icon string `json:"icon"`
 		Url3 string `json:"url3"`
@@ -28,25 +27,41 @@ type File struct {
 		//Url1 string `json:"url1"`
 	} `json:"thumbs"`
 	//Wpfile int `json:"wpfile"`
-	//LocalMtime int `json:"local_mtime"`
 	Size int64 `json:"size"`
 	//ExtentTinyint7 int `json:"extent_tinyint7"`
 	Path string `json:"path"`
 	//Share int `json:"share"`
-	//ServerAtime int `json:"server_atime"`
 	//Pl int `json:"pl"`
-	//LocalCtime int `json:"local_ctime"`
 	ServerFilename string `json:"server_filename"`
-	//Md5 string `json:"md5"`
+	Md5 string `json:"md5"`
 	//OwnerId int `json:"owner_id"`
 	//Unlist int `json:"unlist"`
 	Isdir int `json:"isdir"`
+
+	// list resp
+	//ServerCtime int64 `json:"server_ctime"`
+	ServerMtime int64 `json:"server_mtime"`
+	//ServerAtime int64 `json:"server_atime"`
+	//LocalCtime int64 `json:"local_ctime"`
+	//LocalMtime int64 `json:"local_mtime"`
+
+	// only create and precreate resp
+	Ctime int64 `json:"ctime"`
+	Mtime int64 `json:"mtime"`
 }

 func fileToObj(f File) *model.ObjThumb {
+	if f.ServerFilename == "" {
+		f.ServerFilename = path.Base(f.Path)
+	}
+	if f.ServerMtime == 0 {
+		f.ServerMtime = int64(f.Mtime)
+	}
 	return &model.ObjThumb{
 		Object: model.Object{
 			ID: strconv.FormatInt(f.FsId, 10),
+			Path: f.Path,
 			Name: f.ServerFilename,
 			Size: f.Size,
 			Modified: time.Unix(f.ServerMtime, 0),
@@ -154,10 +169,15 @@ type DownloadResp2 struct {
 }

 type PrecreateResp struct {
-	Path string `json:"path"`
-	Uploadid string `json:"uploadid"`
+	Errno int `json:"errno"`
+	RequestId int64 `json:"request_id"`
 	ReturnType int `json:"return_type"`
-	BlockList []int `json:"block_list"`
-	Errno int `json:"errno"`
-	RequestId int64 `json:"request_id"`
+
+	// return_type=1
+	Path string `json:"path"`
+	Uploadid string `json:"uploadid"`
+	BlockList []int `json:"block_list"`
+
+	// return_type=2
+	File File `json:"info"`
 }
@@ -6,13 +6,16 @@ import (
 	"net/url"
 	"strconv"
 	"strings"
+	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
+	log "github.com/sirupsen/logrus"
 )

 // do others that not defined in Driver interface
@@ -50,30 +53,45 @@ func (d *BaiduNetdisk) _refreshToken() error {
 }

 func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
-	req := base.RestyClient.R()
-	req.SetQueryParam("access_token", d.AccessToken)
-	if callback != nil {
-		callback(req)
-	}
-	if resp != nil {
-		req.SetResult(resp)
-	}
-	res, err := req.Execute(method, furl)
-	if err != nil {
-		return nil, err
-	}
-	errno := utils.Json.Get(res.Body(), "errno").ToInt()
-	if errno != 0 {
-		if errno == -6 {
-			err = d.refreshToken()
-			if err != nil {
-				return nil, err
-			}
-			return d.request(furl, method, callback, resp)
-		}
-		return nil, fmt.Errorf("errno: %d, refer to https://pan.baidu.com/union/doc/", errno)
-	}
-	return res.Body(), nil
+	var result []byte
+	err := retry.Do(func() error {
+		req := base.RestyClient.R()
+		req.SetQueryParam("access_token", d.AccessToken)
+		if callback != nil {
+			callback(req)
+		}
+		if resp != nil {
+			req.SetResult(resp)
+		}
+		res, err := req.Execute(method, furl)
+		if err != nil {
+			return err
+		}
+		log.Debugf("[baidu_netdisk] req: %s, resp: %s", furl, res.String())
+		errno := utils.Json.Get(res.Body(), "errno").ToInt()
+		if errno != 0 {
+			if utils.SliceContains([]int{111, -6}, errno) {
+				log.Info("refreshing baidu_netdisk token.")
+				err2 := d.refreshToken()
+				if err2 != nil {
+					return err2
+				}
+			}
+
+			err2 := fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
+			if !utils.SliceContains([]int{2}, errno) {
+				err2 = retry.Unrecoverable(err2)
+			}
+			return err2
+		}
+		result = res.Body()
+		return nil
+	},
+		retry.LastErrorOnly(true),
+		retry.Attempts(5),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
+	return result, err
 }

 func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) {
@@ -170,20 +188,17 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 	}, nil
 }

-func (d *BaiduNetdisk) manage(opera string, filelist interface{}) ([]byte, error) {
+func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
 	params := map[string]string{
 		"method": "filemanager",
 		"opera": opera,
 	}
-	marshal, err := utils.Json.Marshal(filelist)
-	if err != nil {
-		return nil, err
-	}
-	data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
+	marshal, _ := utils.Json.MarshalToString(filelist)
+	data := fmt.Sprintf("async=0&filelist=%s&ondup=fail", marshal)
 	return d.post("/xpan/file", params, data, nil)
 }

-func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
+func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any) ([]byte, error) {
 	params := map[string]string{
 		"method": "create",
 	}
@@ -191,7 +206,7 @@ func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, bloc
 	if uploadid != "" {
 		data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
 	}
-	return d.post("/xpan/file", params, data, nil)
+	return d.post("/xpan/file", params, data, resp)
 }

 func encodeURIComponent(str string) string {
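The request wrapper above treats only errno 2 as transient: every other errno is wrapped in retry.Unrecoverable so retry.Do stops immediately, while errno 111/-6 additionally triggers a token refresh before failing the attempt. A minimal sketch of that error classification with github.com/avast/retry-go (doRequest and the errno sequence are illustrative stand-ins, not the driver's HTTP call):

package main

import (
	"fmt"
	"time"

	"github.com/avast/retry-go"
)

var errnoSequence = []int{2, 2, 0} // illustrative: two transient failures, then success

func doRequest(attempt *int) int { // stand-in for the real HTTP call
	errno := errnoSequence[*attempt]
	*attempt++
	return errno
}

func main() {
	attempt := 0
	err := retry.Do(func() error {
		errno := doRequest(&attempt)
		if errno != 0 {
			err := fmt.Errorf("errno: %d", errno)
			if errno != 2 { // only errno 2 is considered retryable in the driver
				return retry.Unrecoverable(err) // stops retry.Do immediately
			}
			return err
		}
		return nil
	},
		retry.LastErrorOnly(true),
		retry.Attempts(5),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))
	fmt.Println("result:", err, "attempts used:", attempt)
}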
@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -11,11 +12,15 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
 )

@@ -26,6 +31,8 @@ type BaiduPhoto struct {
 	AccessToken string
 	Uk int64
 	root model.Obj
+
+	uploadThread int
 }

 func (d *BaiduPhoto) Config() driver.Config {
@@ -37,6 +44,11 @@ func (d *BaiduPhoto) GetAddition() driver.Additional {
 }

 func (d *BaiduPhoto) Init(ctx context.Context) error {
+	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
+	if d.uploadThread < 1 || d.uploadThread > 32 {
+		d.uploadThread, d.UploadThread = 3, "3"
+	}
+
 	if err := d.refreshToken(); err != nil {
 		return err
 	}
@@ -126,7 +138,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
 	case *File:
 		return d.linkFile(ctx, file, args)
 	case *AlbumFile:
-		return d.linkAlbum(ctx, file, args)
+		f, err := d.CopyAlbumFile(ctx, file)
+		if err != nil {
+			return nil, err
+		}
+		return d.linkFile(ctx, f, args)
+		// the album link occasionally fails to resolve
+		//return d.linkAlbum(ctx, file, args)
 	}
 	return nil, errs.NotFile
 }
@@ -169,9 +187,9 @@ func (d *BaiduPhoto) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.
 }

 func (d *BaiduPhoto) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
-	// only moving between albums is supported
 	if file, ok := srcObj.(*AlbumFile); ok {
-		if _, ok := dstDir.(*Album); ok {
+		switch dstDir.(type) {
+		case *Album, *Root: // albumfile -> root -> album or albumfile -> root
 			newObj, err := d.Copy(ctx, srcObj, dstDir)
 			if err != nil {
 				return nil, err
@@ -205,8 +223,13 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	// zero-size files are not supported
+	if stream.GetSize() == 0 {
+		return nil, fmt.Errorf("file size cannot be zero")
+	}
+
 	// the full-file md5 is required, so io.Seek must be supported
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return nil, err
 	}
@@ -215,35 +238,43 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		_ = os.Remove(tempFile.Name())
 	}()

-	// compute the required values
-	const DEFAULT = 1 << 22
-	const SliceSize = 1 << 18
-	count := int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
+	const DEFAULT int64 = 1 << 22
+	const SliceSize int64 = 1 << 18
+
+	// compute the required values
+	streamSize := stream.GetSize()
+	count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
+	lastBlockSize := streamSize % DEFAULT
+	if lastBlockSize == 0 {
+		lastBlockSize = DEFAULT
+	}
+
+	// step.1 compute MD5
 	sliceMD5List := make([]string, 0, count)
-	fileMd5 := md5.New()
-	sliceMd5 := md5.New()
-	sliceMd52 := md5.New()
-	slicemd52Write := utils.LimitWriter(sliceMd52, SliceSize)
+	byteSize := int64(DEFAULT)
+	fileMd5H := md5.New()
+	sliceMd5H := md5.New()
+	sliceMd5H2 := md5.New()
+	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
 	for i := 1; i <= count; i++ {
 		if utils.IsCanceled(ctx) {
 			return nil, ctx.Err()
 		}
-		_, err := io.CopyN(io.MultiWriter(fileMd5, sliceMd5, slicemd52Write), tempFile, DEFAULT)
-		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		if i == count {
+			byteSize = lastBlockSize
+		}
+		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
+		if err != nil && err != io.EOF {
 			return nil, err
 		}
-		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5.Sum(nil)))
-		sliceMd5.Reset()
+		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
+		sliceMd5H.Reset()
 	}
-	if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
-		return nil, err
-	}
-	content_md5 := hex.EncodeToString(fileMd5.Sum(nil))
-	slice_md5 := hex.EncodeToString(sliceMd52.Sum(nil))
+	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
+	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
+	blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)

-	// start the upload
+	// step.2 precreate
 	params := map[string]string{
 		"autoinit": "1",
 		"isdir": "0",
@@ -251,46 +282,69 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		"ctype": "11",
 		"path": fmt.Sprintf("/%s", stream.GetName()),
 		"size": fmt.Sprint(stream.GetSize()),
-		"slice-md5": slice_md5,
-		"content-md5": content_md5,
-		"block_list": MustString(utils.Json.MarshalToString(sliceMD5List)),
+		"slice-md5": sliceMd5,
+		"content-md5": contentMd5,
+		"block_list": blockListStr,
 	}

-	// precreate
-	var precreateResp PrecreateResp
-	_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
-		r.SetContext(ctx)
-		r.SetFormData(params)
-	}, &precreateResp)
-	if err != nil {
-		return nil, err
+	// try to resume a previously saved upload
+	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
+	if !ok {
+		_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
+			r.SetContext(ctx)
+			r.SetFormData(params)
+		}, &precreateResp)
+		if err != nil {
+			return nil, err
+		}
 	}

 	switch precreateResp.ReturnType {
-	case 1: // upload the file
-		uploadParams := map[string]string{
-			"method": "upload",
-			"path": params["path"],
-			"uploadid": precreateResp.UploadID,
-		}
+	case 1: // step.3 upload the file slices
+		threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+			retry.Attempts(3),
+			retry.Delay(time.Second),
+			retry.DelayType(retry.BackOffDelay))
+		for i, partseq := range precreateResp.BlockList {
+			if utils.IsCanceled(upCtx) {
+				break
+			}

-		for i := 0; i < count; i++ {
-			if utils.IsCanceled(ctx) {
-				return nil, ctx.Err()
+			i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
+			if partseq+1 == count {
+				byteSize = lastBlockSize
 			}
-			uploadParams["partseq"] = fmt.Sprint(i)
-			_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
-				r.SetContext(ctx)
-				r.SetQueryParams(uploadParams)
-				r.SetFileReader("file", stream.GetName(), io.LimitReader(tempFile, DEFAULT))
-			}, nil)
-			if err != nil {
-				return nil, err
+
+			threadG.Go(func(ctx context.Context) error {
+				uploadParams := map[string]string{
+					"method": "upload",
+					"path": params["path"],
+					"partseq": fmt.Sprint(partseq),
+					"uploadid": precreateResp.UploadID,
+				}
+
+				_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
+					r.SetContext(ctx)
+					r.SetQueryParams(uploadParams)
+					r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
+				}, nil)
+				if err != nil {
+					return err
+				}
+				up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
+				precreateResp.BlockList[i] = -1
+				return nil
+			})
+		}
+		if err = threadG.Wait(); err != nil {
+			if errors.Is(err, context.Canceled) {
+				precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
+				base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
 			}
-			up(i * 100 / count)
+			return nil, err
 		}
 		fallthrough
-	case 2: // create the file
+	case 2: // step.4 create the file
 		params["uploadid"] = precreateResp.UploadID
 		_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
 			r.SetContext(ctx)
@@ -300,7 +354,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		return nil, err
 	}
 	fallthrough
-	case 3: // add to the album
+	case 3: // step.5 add to the album
 		rootfile := precreateResp.Data.toFile()
 		if album, ok := dstDir.(*Album); ok {
 			return d.AddAlbumFile(ctx, album, rootfile)
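Both new Put implementations drive slice uploads through a bounded worker pool and mark each finished slice with -1 in BlockList, so that on cancellation the unfinished entries can be filtered out and cached for resumption. The project's pkg/errgroup additionally layers per-task retries on top; the reduced sketch below uses the standard golang.org/x/sync/errgroup as a stand-in to show just the marking-and-filtering idea:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// stand-in for the real HTTP slice upload
func uploadSlice(ctx context.Context, partseq int) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	blockList := []int{0, 1, 2, 3, 4} // slice numbers the server still wants
	g, upCtx := errgroup.WithContext(ctx)
	g.SetLimit(3) // bounded concurrency, like d.uploadThread

	for i, partseq := range blockList {
		i, partseq := i, partseq
		g.Go(func() error {
			if err := uploadSlice(upCtx, partseq); err != nil {
				return err
			}
			blockList[i] = -1 // mark as done; safe because each goroutine owns a distinct i
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		// keep only the slices that did not finish, then persist them for resume
		remaining := blockList[:0]
		for _, s := range blockList {
			if s >= 0 {
				remaining = append(remaining, s)
			}
		}
		fmt.Println("would save for resume:", remaining, "err:", err)
		return
	}
	fmt.Println("all slices uploaded")
}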
@@ -69,3 +69,10 @@ func renameAlbum(album *Album, newName string) *Album {
 		Mtime: time.Now().Unix(),
 	}
 }
+
+func BoolToIntStr(b bool) string {
+	if b {
+		return "1"
+	}
+	return "0"
+}
@@ -10,8 +10,10 @@ type Addition struct {
 	ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
 	AlbumID string `json:"album_id"`
 	//AlbumPassword string `json:"album_password"`
+	DeleteOrigin bool `json:"delete_origin"`
 	ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
 	ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 }

 var config = driver.Config{
@@ -160,9 +160,9 @@ type (
 		CreateFileResp

 		// returned when the file does not exist
 		Path string `json:"path"`
 		UploadID string `json:"uploadid"`
-		Blocklist []int64 `json:"block_list"`
+		BlockList []int `json:"block_list"`
 	}
 )
@@ -21,7 +21,7 @@ const (
 	FILE_API_URL_V2 = API_URL + "/file/v2"
 )

-func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
 	req := base.RestyClient.R().
 		SetQueryParam("access_token", d.AccessToken)
 	if callback != nil {
@@ -52,9 +52,17 @@ func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallba
 	default:
 		return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
 	}
-	return res.Body(), nil
+	return res, nil
 }

+//func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+//	res, err := d.request(furl, method, callback, resp)
+//	if err != nil {
+//		return nil, err
+//	}
+//	return res.Body(), nil
+//}
+
 func (d *BaiduPhoto) refreshToken() error {
 	u := "https://openapi.baidu.com/oauth/2.0/token"
 	var resp base.TokenResp
@@ -79,11 +87,11 @@ func (d *BaiduPhoto) refreshToken() error {
 	return nil
 }

-func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
 	return d.Request(furl, http.MethodGet, callback, resp)
 }

-func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
 	return d.Request(furl, http.MethodPost, callback, resp)
 }

@@ -223,7 +231,7 @@ func (d *BaiduPhoto) DeleteAlbum(ctx context.Context, album *Album) error {
 		r.SetFormData(map[string]string{
 			"album_id": album.AlbumID,
 			"tid": fmt.Sprint(album.Tid),
-			"delete_origin_image": "0", // delete the original image? 0 keep, 1 delete
+			"delete_origin_image": BoolToIntStr(d.DeleteOrigin), // delete the original image? 0 keep, 1 delete
 		})
 	}, nil)
 	return err
@@ -237,7 +245,7 @@ func (d *BaiduPhoto) DeleteAlbumFile(ctx context.Context, file *AlbumFile) error
 			"album_id": fmt.Sprint(file.AlbumID),
 			"tid": fmt.Sprint(file.Tid),
 			"list": fmt.Sprintf(`[{"fsid":%d,"uk":%d}]`, file.Fsid, file.Uk),
-			"del_origin": "0", // delete the original image? 0 keep, 1 delete
+			"del_origin": BoolToIntStr(d.DeleteOrigin), // delete the original image? 0 keep, 1 delete
 		})
 	}, nil)
 	return err
@@ -391,6 +399,49 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
 	return link, nil
 }

+/*func (d *BaiduPhoto) linkStreamAlbum(ctx context.Context, file *AlbumFile) (*model.Link, error) {
+	return &model.Link{
+		Header: http.Header{},
+		Writer: func(w io.Writer) error {
+			res, err := d.Get(ALBUM_API_URL+"/streaming", func(r *resty.Request) {
+				r.SetContext(ctx)
+				r.SetQueryParams(map[string]string{
+					"fsid": fmt.Sprint(file.Fsid),
+					"album_id": file.AlbumID,
+					"tid": fmt.Sprint(file.Tid),
+					"uk": fmt.Sprint(file.Uk),
+				}).SetDoNotParseResponse(true)
+			}, nil)
+			if err != nil {
+				return err
+			}
+			defer res.RawBody().Close()
+			_, err = io.Copy(w, res.RawBody())
+			return err
+		},
+	}, nil
+}*/
+
+/*func (d *BaiduPhoto) linkStream(ctx context.Context, file *File) (*model.Link, error) {
+	return &model.Link{
+		Header: http.Header{},
+		Writer: func(w io.Writer) error {
+			res, err := d.Get(FILE_API_URL_V1+"/streaming", func(r *resty.Request) {
+				r.SetContext(ctx)
+				r.SetQueryParams(map[string]string{
+					"fsid": fmt.Sprint(file.Fsid),
+				}).SetDoNotParseResponse(true)
+			}, nil)
+			if err != nil {
+				return err
+			}
+			defer res.RawBody().Close()
+			_, err = io.Copy(w, res.RawBody())
+			return err
+		},
+	}, nil
+}*/
+
 // fetch uk
 func (d *BaiduPhoto) uInfo() (*UInfo, error) {
 	var info UInfo
drivers/base/upload.go (new file, 31 lines)
@@ -0,0 +1,31 @@
+package base
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/Xhofe/go-cache"
+	"github.com/alist-org/alist/v3/internal/driver"
+)
+
+// storage upload progress, for upload recovery
+var UploadStateCache = cache.NewMemCache(cache.WithShards[any](32))
+
+// Save upload progress for 20 minutes
+func SaveUploadProgress(driver driver.Driver, state any, keys ...string) bool {
+	return UploadStateCache.Set(
+		fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")),
+		state,
+		cache.WithEx[any](time.Minute*20))
+}
+
+// An upload progress can only be made by one process alone,
+// so here you need to get it and then delete it.
+func GetUploadProgress[T any](driver driver.Driver, keys ...string) (state T, ok bool) {
+	v, ok := UploadStateCache.GetDel(fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")))
+	if ok {
+		state, ok = v.(T)
+	}
+	return
+}
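The cache key is the driver name joined with the caller-supplied keys (the drivers above pass the access token and the file's content MD5), and GetUploadProgress deletes on read so only one resuming upload can claim the saved state. A simplified, self-contained sketch of the intended semantics (a plain map with expiry stands in for the sharded go-cache; types and names are illustrative):

package main

import (
	"fmt"
	"time"
)

type entry struct {
	state  any
	expiry time.Time
}

var uploadState = map[string]entry{}

func saveUploadProgress(driverName string, state any, keys ...string) {
	k := driverName + "-upload-" + fmt.Sprint(keys)
	uploadState[k] = entry{state: state, expiry: time.Now().Add(20 * time.Minute)}
}

// get-and-delete: only one resuming process may own the progress
func getUploadProgress[T any](driverName string, keys ...string) (T, bool) {
	var zero T
	k := driverName + "-upload-" + fmt.Sprint(keys)
	e, ok := uploadState[k]
	delete(uploadState, k) // GetDel semantics
	if !ok || time.Now().After(e.expiry) {
		return zero, false
	}
	state, ok := e.state.(T)
	return state, ok
}

type precreateResp struct {
	Uploadid  string
	BlockList []int
}

func main() {
	// on cancel: persist the slices that are still missing
	saveUploadProgress("BaiduNetdisk", &precreateResp{
		Uploadid:  "N1-example",
		BlockList: []int{3, 4},
	}, "access-token", "d41d8cd9...")

	// on the next Put with the same token+md5: resume instead of precreating
	if resp, ok := getUploadProgress[*precreateResp]("BaiduNetdisk", "access-token", "d41d8cd9..."); ok {
		fmt.Println("resuming upload", resp.Uploadid, "slices left:", resp.BlockList)
	}
}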
@@ -1,30 +1 @@
 package base
-
-import (
-	"io"
-	"net/http"
-	"strconv"
-
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/pkg/http_range"
-	"github.com/alist-org/alist/v3/pkg/utils"
-)
-
-func HandleRange(link *model.Link, file io.ReadSeekCloser, header http.Header, size int64) {
-	if header.Get("Range") != "" {
-		r, err := http_range.ParseRange(header.Get("Range"), size)
-		if err == nil && len(r) > 0 {
-			_, err := file.Seek(r[0].Start, io.SeekStart)
-			if err == nil {
-				link.Data = utils.NewLimitReadCloser(file, func() error {
-					return file.Close()
-				}, r[0].Length)
-				link.Status = http.StatusPartialContent
-				link.Header = http.Header{
-					"Content-Range": []string{r[0].ContentRange(size)},
-					"Content-Length": []string{strconv.FormatInt(r[0].Length, 10)},
-				}
-			}
-		}
-	}
-}
@@ -13,6 +13,7 @@ type Addition struct {
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Cookie string `json:"cookie"`
+	CustomUA string `json:"custom_ua"`
 }

 var config = driver.Config{
@@ -22,15 +22,18 @@ const loginPath = "/user/session"

 func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
 	u := d.Address + "/api/v3" + path
+	ua := d.CustomUA
+	if ua == "" {
+		ua = base.UserAgent
+	}
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie": "cloudreve-session=" + d.Cookie,
 		"Accept": "application/json, text/plain, */*",
-		"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
+		"User-Agent": ua,
 	})

 	var r Resp

 	req.SetResult(&r)

 	if callback != nil {
drivers/crypt/driver.go (new file, 411 lines)
@@ -0,0 +1,411 @@
+package crypt
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	stdpath "path"
+	"regexp"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/fs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/net"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	rcCrypt "github.com/rclone/rclone/backend/crypt"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/obscure"
+	log "github.com/sirupsen/logrus"
+)
+
+type Crypt struct {
+	model.Storage
+	Addition
+	cipher *rcCrypt.Cipher
+	remoteStorage driver.Driver
+}
+
+const obfuscatedPrefix = "___Obfuscated___"
+
+func (d *Crypt) Config() driver.Config {
+	return config
+}
+
+func (d *Crypt) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *Crypt) Init(ctx context.Context) error {
+	//obfuscate credentials if it's updated or just created
+	err := d.updateObfusParm(&d.Password)
+	if err != nil {
+		return fmt.Errorf("failed to obfuscate password: %w", err)
+	}
+	err = d.updateObfusParm(&d.Salt)
+	if err != nil {
+		return fmt.Errorf("failed to obfuscate salt: %w", err)
+	}
+
+	isCryptExt := regexp.MustCompile(`^[.][A-Za-z0-9-_]{2,}$`).MatchString
+	if !isCryptExt(d.EncryptedSuffix) {
+		return fmt.Errorf("EncryptedSuffix is Illegal")
+	}
+
+	op.MustSaveDriverStorage(d)
+
+	//need remote storage exist
+	storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
+	if err != nil {
+		return fmt.Errorf("can't find remote storage: %w", err)
+	}
+	d.remoteStorage = storage
+
+	p, _ := strings.CutPrefix(d.Password, obfuscatedPrefix)
+	p2, _ := strings.CutPrefix(d.Salt, obfuscatedPrefix)
+	config := configmap.Simple{
+		"password": p,
+		"password2": p2,
+		"filename_encryption": d.FileNameEnc,
+		"directory_name_encryption": d.DirNameEnc,
+		"filename_encoding": "base64",
+		"suffix": d.EncryptedSuffix,
+		"pass_bad_blocks": "",
+	}
+	c, err := rcCrypt.NewCipher(config)
+	if err != nil {
+		return fmt.Errorf("failed to create Cipher: %w", err)
+	}
+	d.cipher = c
+
+	//c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil)
+	return nil
+}
+
+func (d *Crypt) updateObfusParm(str *string) error {
+	temp := *str
+	if !strings.HasPrefix(temp, obfuscatedPrefix) {
+		temp, err := obscure.Obscure(temp)
+		if err != nil {
+			return err
+		}
+		temp = obfuscatedPrefix + temp
+		*str = temp
+	}
+	return nil
+}
+
+func (d *Crypt) Drop(ctx context.Context) error {
+	return nil
+}
+
+func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	path := dir.GetPath()
+	//return d.list(ctx, d.RemotePath, path)
+	//remoteFull
+
+	objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
+	// the obj must implement the model.SetPath interface
+	// return objs, err
+	if err != nil {
+		return nil, err
+	}
+
+	var result []model.Obj
+	for _, obj := range objs {
+		if obj.IsDir() {
+			name, err := d.cipher.DecryptDirName(obj.GetName())
+			if err != nil {
+				//filter illegal files
+				continue
+			}
+			objRes := model.Object{
+				Name: name,
+				Size: 0,
+				Modified: obj.ModTime(),
+				IsFolder: obj.IsDir(),
+			}
+			result = append(result, &objRes)
+		} else {
+			thumb, ok := model.GetThumb(obj)
+			size, err := d.cipher.DecryptedSize(obj.GetSize())
+			if err != nil {
+				//filter illegal files
+				continue
+			}
+			name, err := d.cipher.DecryptFileName(obj.GetName())
+			if err != nil {
+				//filter illegal files
+				continue
+			}
+			objRes := model.Object{
+				Name: name,
+				Size: size,
+				Modified: obj.ModTime(),
+				IsFolder: obj.IsDir(),
+			}
+			if !ok {
+				result = append(result, &objRes)
+			} else {
+				objWithThumb := model.ObjThumb{
+					Object: objRes,
+					Thumbnail: model.Thumbnail{
+						Thumbnail: thumb,
+					},
+				}
+				result = append(result, &objWithThumb)
+			}
+		}
+	}
+
+	return result, nil
+}
+
+func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
+	if utils.PathEqual(path, "/") {
+		return &model.Object{
+			Name: "Root",
+			IsFolder: true,
+			Path: "/",
+		}, nil
+	}
+	remoteFullPath := ""
+	var remoteObj model.Obj
+	var err, err2 error
+	firstTryIsFolder, secondTry := guessPath(path)
+	remoteFullPath = d.getPathForRemote(path, firstTryIsFolder)
+	remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
+	if err != nil {
+		if errs.IsObjectNotFound(err) && secondTry {
+			//try the opposite
+			remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
+			remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
+			if err2 != nil {
+				return nil, err2
+			}
+		} else {
+			return nil, err
+		}
+	}
+	var size int64 = 0
+	name := ""
+	if !remoteObj.IsDir() {
+		size, err = d.cipher.DecryptedSize(remoteObj.GetSize())
+		if err != nil {
+			log.Warnf("DecryptedSize failed for %s ,will use original size, err:%s", path, err)
+			size = remoteObj.GetSize()
+		}
+		name, err = d.cipher.DecryptFileName(remoteObj.GetName())
+		if err != nil {
+			log.Warnf("DecryptFileName failed for %s ,will use original name, err:%s", path, err)
+			name = remoteObj.GetName()
+		}
+	} else {
+		name, err = d.cipher.DecryptDirName(remoteObj.GetName())
+		if err != nil {
+			log.Warnf("DecryptDirName failed for %s ,will use original name, err:%s", path, err)
+			name = remoteObj.GetName()
+		}
+	}
+	obj := &model.Object{
+		Path: path,
+		Name: name,
+		Size: size,
+		Modified: remoteObj.ModTime(),
+		IsFolder: remoteObj.IsDir(),
+	}
+	return obj, nil
+	//return nil, errs.ObjectNotFound
+}
+
+func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
+	if err != nil {
+		return nil, fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	remoteLink, remoteFile, err := op.Link(ctx, d.remoteStorage, dstDirActualPath, args)
+	if err != nil {
+		return nil, err
+	}
+
+	if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 {
+		return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
+	}
+	remoteFileSize := remoteFile.GetSize()
+	remoteClosers := utils.NewClosers()
+	rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
+		length := underlyingLength
+		if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
+			length = -1
+		}
+		if remoteLink.RangeReadCloser.RangeReader != nil {
+			//remoteRangeReader, err :=
+			remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length})
+			remoteClosers.Add(remoteLink.RangeReadCloser.Closers)
+			if err != nil {
+				return nil, err
+			}
+			return remoteReader, nil
+		}
+		if remoteLink.ReadSeekCloser != nil {
+			_, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart)
+			if err != nil {
+				return nil, err
+			}
+			//remoteClosers.Add(remoteLink.ReadSeekCloser)
+			//keep reuse same ReadSeekCloser and close at last.
+			return io.NopCloser(remoteLink.ReadSeekCloser), nil
+		}
+		if len(remoteLink.URL) > 0 {
+			rangedRemoteLink := &model.Link{
+				URL: remoteLink.URL,
+				Header: remoteLink.Header,
+			}
+			response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length)
+			//remoteClosers.Add(response.Body)
+			if err != nil {
+				return nil, fmt.Errorf("remote storage http request failure,status: %d err:%s", response.StatusCode, err)
+			}
+			if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent {
+				return response.Body, nil
+			} else if response.StatusCode == http.StatusOK {
+				log.Warnf("remote http server not supporting range request, expect low perfromace!")
+				readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length)
+				if err != nil {
+					return nil, err
+				}
+				return readCloser, nil
+			}
+
+			return response.Body, nil
+		}
+		//if remoteLink.Data != nil {
+		//	log.Warnf("remote storage not supporting range request, expect low perfromace!")
+		//	readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length)
+		//	remoteCloser = remoteLink.Data
+		//	if err != nil {
+		//		return nil, err
+		//	}
+		//	return readCloser, nil
+		//}
+		return nil, errs.NotSupport
+	}
+	resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+		readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
+		if err != nil {
+			return nil, err
+		}
+		return readSeeker, nil
+	}
+
+	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
+	resultLink := &model.Link{
+		Header: remoteLink.Header,
+		RangeReadCloser: *resultRangeReadCloser,
+		Expiration: remoteLink.Expiration,
+	}
+
+	return resultLink, nil
+}
+
+func (d *Crypt) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	dstDirActualPath, err := d.getActualPathForRemote(parentDir.GetPath(), true)
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	dir := d.cipher.EncryptDirName(dirName)
+	return op.MakeDir(ctx, d.remoteStorage, stdpath.Join(dstDirActualPath, dir))
+}
+
+func (d *Crypt) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	return op.Move(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
+}
+
+func (d *Crypt) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	remoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	var newEncryptedName string
+	if srcObj.IsDir() {
+		newEncryptedName = d.cipher.EncryptDirName(newName)
+	} else {
+		newEncryptedName = d.cipher.EncryptFileName(newName)
+	}
+	return op.Rename(ctx, d.remoteStorage, remoteActualPath, newEncryptedName)
+}
+
+func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
+}
+
+func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
+	remoteActualPath, err := d.getActualPathForRemote(obj.GetPath(), obj.IsDir())
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+	return op.Remove(ctx, d.remoteStorage, remoteActualPath)
+}
+
+func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+	dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
+	if err != nil {
+		return fmt.Errorf("failed to convert path to remote path: %w", err)
+	}
+
+	in := stream.GetReadCloser()
+	// Encrypt the data into wrappedIn
+	wrappedIn, err := d.cipher.EncryptData(in)
+	if err != nil {
+		return fmt.Errorf("failed to EncryptData: %w", err)
+	}
+
+	streamOut := &model.FileStream{
+		Obj: &model.Object{
+			ID: stream.GetID(),
+			Path: stream.GetPath(),
+			Name: d.cipher.EncryptFileName(stream.GetName()),
+			Size: d.cipher.EncryptedSize(stream.GetSize()),
+			Modified: stream.ModTime(),
+			IsFolder: stream.IsDir(),
+		},
+		ReadCloser: io.NopCloser(wrappedIn),
+		Mimetype: "application/octet-stream",
+		WebPutAsTask: stream.NeedStore(),
+		Old: stream.GetOld(),
+	}
+	err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
+//	return nil, errs.NotSupport
+//}
+
+var _ driver.Driver = (*Crypt)(nil)
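Link above inverts the usual flow: the caller asks for a plaintext range, and rclone's cipher calls back into rangeReaderFunc for whatever ciphertext ranges it needs from the remote. A runnable sketch of that callback contract, assuming the rclone crypt API exactly as imported by the new driver (an in-memory ciphertext buffer stands in for the remote storage, and the password is illustrative):

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	rcCrypt "github.com/rclone/rclone/backend/crypt"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	pw, _ := obscure.Obscure("test-password")
	salt, _ := obscure.Obscure("")
	c, err := rcCrypt.NewCipher(configmap.Simple{
		"password":                  pw,
		"password2":                 salt,
		"filename_encryption":       "off",
		"directory_name_encryption": "false",
		"filename_encoding":         "base64",
		"suffix":                    ".bin",
		"pass_bad_blocks":           "",
	})
	if err != nil {
		panic(err)
	}

	// encrypt some plaintext, as the driver's Put does with cipher.EncryptData
	plain := bytes.Repeat([]byte("0123456789"), 100)
	encReader, err := c.EncryptData(bytes.NewReader(plain))
	if err != nil {
		panic(err)
	}
	ciphertext, _ := io.ReadAll(encReader)

	// range-read it back: the cipher asks openRange for ciphertext ranges,
	// which is exactly the role rangeReaderFunc plays against the remote
	openRange := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
		if offset > int64(len(ciphertext)) {
			offset = int64(len(ciphertext))
		}
		end := int64(len(ciphertext))
		if limit >= 0 && offset+limit < end {
			end = offset + limit
		}
		return io.NopCloser(bytes.NewReader(ciphertext[offset:end])), nil
	}
	rc, err := c.DecryptDataSeek(context.Background(), openRange, 500, 10)
	if err != nil {
		panic(err)
	}
	got, _ := io.ReadAll(io.LimitReader(rc, 10))
	fmt.Printf("plaintext[500:510] = %q\n", got)
}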
drivers/crypt/meta.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package crypt

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	//driver.RootPath
	//driver.RootID
	// define other

	FileNameEnc string `json:"filename_encryption" type:"select" required:"true" options:"off,standard,obfuscate" default:"off"`
	DirNameEnc  string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
	RemotePath  string `json:"remote_path" required:"true" help:"This is where the encrypted data is stored"`

	Password        string `json:"password" required:"true" confidential:"true" help:"the main password"`
	Salt            string `json:"salt" confidential:"true" help:"If you don't know what salt is, treat it as a second password. Optional but recommended"`
	EncryptedSuffix string `json:"encrypted_suffix" required:"true" default:".bin" help:"encrypted files will have this suffix"`
}

/*// inMemory contains decrypted confidential info and other temp data. will not persist these info anywhere
type inMemory struct {
	password string
	salt     string
}*/

var config = driver.Config{
	Name:              "Crypt",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         true,
	NoCache:           true,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Crypt{}
	})
}
drivers/crypt/types.go (new file, 1 line)
@@ -0,0 +1 @@
package crypt
drivers/crypt/util.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package crypt

import (
	"net/http"
	stdpath "path"
	"path/filepath"
	"strings"

	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/net"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/http_range"
)

func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) {
	header := net.ProcessHeader(http.Header{}, link.Header)
	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)

	return net.RequestHttp("GET", header, link.URL)
}

// will give the best guess based on the path
func guessPath(path string) (isFolder, secondTry bool) {
	if strings.HasSuffix(path, "/") {
		// confirmed a folder
		return true, false
	}
	lastSlash := strings.LastIndex(path, "/")
	if strings.Index(path[lastSlash:], ".") < 0 {
		// no dot: try folder first, then file
		return true, true
	}
	return false, true
}

func (d *Crypt) getPathForRemote(path string, isFolder bool) (remoteFullPath string) {
	if isFolder && !strings.HasSuffix(path, "/") {
		path = path + "/"
	}
	dir, fileName := filepath.Split(path)

	remoteDir := d.cipher.EncryptDirName(dir)
	remoteFileName := ""
	if len(strings.TrimSpace(fileName)) > 0 {
		remoteFileName = d.cipher.EncryptFileName(fileName)
	}
	return stdpath.Join(d.RemotePath, remoteDir, remoteFileName)
}

// actual path is used internally only. any link for the user should come from remoteFullPath
func (d *Crypt) getActualPathForRemote(path string, isFolder bool) (string, error) {
	_, remoteActualPath, err := op.GetStorageAndActualPath(d.getPathForRemote(path, isFolder))
	return remoteActualPath, err
}
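For clarity, the two return values of guessPath drive a two-pass lookup (first guess, then retry the other kind if secondTry is set). The values below follow directly from the function above:

// guessPath("/movies/")          -> isFolder=true,  secondTry=false  (trailing slash: definitely a folder)
// guessPath("/movies/readme")    -> isFolder=true,  secondTry=true   (no dot in last segment: try folder, then file)
// guessPath("/movies/a.mkv.bin") -> isFolder=false, secondTry=true   (dot present: try file, then folder)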
@@ -4,7 +4,6 @@ import (
 	"context"
 	stdpath "path"
 
-	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -67,9 +66,8 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 
 	r := NewFTPFileReader(d.conn, file.GetPath())
 	link := &model.Link{
-		Data: r,
+		ReadSeekCloser: r,
 	}
-	base.HandleRange(link, r, args.Header, file.GetSize())
 	return link, nil
 }
 
@@ -2,9 +2,7 @@ package lanzou
 
 import (
 	"context"
-	"fmt"
 	"net/http"
-	"regexp"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -19,6 +17,8 @@ type LanZou struct {
 	model.Storage
 	uid string
 	vei string
+
+	flag int32
 }
 
 func (d *LanZou) Config() driver.Config {
@@ -30,16 +30,18 @@ func (d *LanZou) GetAddition() driver.Additional {
 }
 
 func (d *LanZou) Init(ctx context.Context) (err error) {
-	if d.IsCookie() {
+	switch d.Type {
+	case "account":
+		_, err := d.Login()
+		if err != nil {
+			return err
+		}
+		fallthrough
+	case "cookie":
 		if d.RootFolderID == "" {
 			d.RootFolderID = "-1"
 		}
-		ylogin := regexp.MustCompile("ylogin=(.*?);").FindStringSubmatch(d.Cookie)
-		if len(ylogin) < 2 {
-			return fmt.Errorf("cookie does not contain ylogin")
-		}
-		d.uid = ylogin[1]
-		d.vei, err = d.getVei()
+		d.vei, d.uid, err = d.getVeiAndUid()
 	}
 	return
 }
@@ -51,7 +53,7 @@ func (d *LanZou) Drop(ctx context.Context) error {
 
 // the size and time obtained here are not accurate
 func (d *LanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		return d.GetAllFiles(dir.GetID())
 	} else {
 		return d.GetFileOrFolderByShareUrl(dir.GetID(), d.SharePassword)
@@ -119,7 +121,7 @@ func (d *LanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }
 
 func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		data, err := d.doupload(func(req *resty.Request) {
 			req.SetContext(ctx)
 			req.SetFormData(map[string]string{
@@ -137,11 +139,11 @@ func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 			FolID: utils.Json.Get(data, "text").ToString(),
 		}, nil
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }
 
 func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		if !srcObj.IsDir() {
 			_, err := d.doupload(func(req *resty.Request) {
 				req.SetContext(ctx)
@@ -157,11 +159,11 @@ func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj,
 			return srcObj, nil
 		}
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }
 
 func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		if !srcObj.IsDir() {
 			_, err := d.doupload(func(req *resty.Request) {
 				req.SetContext(ctx)
@@ -179,11 +181,11 @@ func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (
 			return srcObj, nil
 		}
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }
 
 func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		_, err := d.doupload(func(req *resty.Request) {
 			req.SetContext(ctx)
 			if obj.IsDir() {
@@ -200,13 +202,13 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
 		}, nil)
 		return err
 	}
-	return errs.NotImplement
+	return errs.NotSupport
 }
 
 func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	if d.IsCookie() {
+	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
-		_, err := d._post(d.BaseUrl+"/fileup.php", func(req *resty.Request) {
+		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
 			req.SetFormData(map[string]string{
 				"task": "1",
 				"vie":  "2",
@@ -221,5 +223,5 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		}
 		return &resp.Text[0], nil
 	}
-	return nil, errs.NotImplement
+	return nil, errs.NotSupport
 }
 
@@ -3,6 +3,7 @@ package lanzou
 import (
 	"bytes"
 	"fmt"
+	"net/http"
 	"regexp"
 	"strconv"
 	"strings"
@@ -124,6 +125,83 @@ func findJSVarFunc(key, data string) string {
 	return values[1]
 }
 
+var findFunction = regexp.MustCompile(`(?ims)^function[^{]+`)
+var findFunctionAll = regexp.MustCompile(`(?is)function[^{]+`)
+
+// find the position of every function in the page
+func findJSFunctionIndex(data string, all bool) [][2]int {
+	findFunction := findFunction
+	if all {
+		findFunction = findFunctionAll
+	}
+
+	indexs := findFunction.FindAllStringIndex(data, -1)
+	fIndexs := make([][2]int, 0, len(indexs))
+
+	for _, index := range indexs {
+		if len(index) != 2 {
+			continue
+		}
+		count, data := 0, data[index[1]:]
+		for ii, v := range data {
+			if v == ' ' && count == 0 {
+				continue
+			}
+			if v == '{' {
+				count++
+			}
+
+			if v == '}' {
+				count--
+			}
+			if count == 0 {
+				fIndexs = append(fIndexs, [2]int{index[0], index[1] + ii + 1})
+				break
+			}
+		}
+	}
+	return fIndexs
+}
+
+// remove global JS functions
+func removeJSGlobalFunction(html string) string {
+	indexs := findJSFunctionIndex(html, false)
+	block := make([]string, len(indexs))
+	for i, next := len(indexs)-1, len(html); i >= 0; i-- {
+		index := indexs[i]
+		block[i] = html[index[1]:next]
+		next = index[0]
+	}
+	return strings.Join(block, "")
+}
+
+// get a function by name
+func getJSFunctionByName(html string, name string) (string, error) {
+	indexs := findJSFunctionIndex(html, true)
+	for _, index := range indexs {
+		data := html[index[0]:index[1]]
+		if regexp.MustCompile(`function\s+` + name + `[()\s]+{`).MatchString(data) {
+			return data, nil
+		}
+	}
+	return "", fmt.Errorf("not find %s function", name)
+}
+
+// parse the JSON embedded in the HTML, choosing the longest data blob
+func htmlJsonToMap2(html string) (map[string]string, error) {
+	datas := findDataReg.FindAllStringSubmatch(html, -1)
+	var sData string
+	for _, data := range datas {
+		if len(datas) > 0 && len(data[1]) > len(sData) {
+			sData = data[1]
+		}
+	}
+	if sData == "" {
+		return nil, fmt.Errorf("not find data")
+	}
+	return jsonToMap(sData, html), nil
+}
+
 // parse the JSON embedded in the HTML
 func htmlJsonToMap(html string) (map[string]string, error) {
 	datas := findDataReg.FindStringSubmatch(html)
@@ -190,3 +268,14 @@ func GetExpirationTime(url string) (etime time.Duration) {
 	etime = time.Duration(timestamp-time.Now().Unix()) * time.Second
 	return
 }
+
+func CookieToString(cookies []*http.Cookie) string {
+	if cookies == nil {
+		return ""
+	}
+	cookieStrings := make([]string, len(cookies))
+	for i, cookie := range cookies {
+		cookieStrings[i] = cookie.Name + "=" + cookie.Value
+	}
+	return strings.Join(cookieStrings, ";")
+}
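A quick usage sketch of the helpers above (this assumes it lives in the same package; the HTML literal is made up for illustration): findJSFunctionIndex brace-matches each `function ...` header, and getJSFunctionByName then picks the one whose name matches.

func exampleExtractDownP() (string, error) {
	html := `function init(){a();}function down_p(){var pdata = {"action":"downprocess"};}`
	fn, err := getJSFunctionByName(html, "down_p")
	if err != nil {
		return "", err // down_p missing: the share page layout changed
	}
	// fn now holds the full `function down_p(){...}` text, ready for htmlJsonToMap
	return fn, nil
}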
@@ -6,8 +6,13 @@ import (
 )
 
 type Addition struct {
-	Type   string `json:"type" type:"select" options:"cookie,url" default:"cookie"`
-	Cookie string `json:"cookie" required:"true" help:"about 15 days valid, ignore if shareUrl is used"`
+	Type string `json:"type" type:"select" options:"account,cookie,url" default:"cookie"`
+
+	Account  string `json:"account"`
+	Password string `json:"password"`
+
+	Cookie string `json:"cookie" help:"about 15 days valid, ignore if shareUrl is used"`
+
 	driver.RootID
 	SharePassword string `json:"share_password"`
 	BaseUrl       string `json:"baseUrl" required:"true" default:"https://pc.woozooo.com" help:"basic URL for file operation"`
@@ -19,6 +24,10 @@ func (a *Addition) IsCookie() bool {
 	return a.Type == "cookie"
 }
 
+func (a *Addition) IsAccount() bool {
+	return a.Type == "account"
+}
+
 var config = driver.Config{
 	Name:      "Lanzou",
 	LocalSort: true,
@@ -8,6 +8,7 @@ import (
 
 var ErrFileShareCancel = errors.New("file sharing cancellation")
 var ErrFileNotExist = errors.New("file does not exist")
+var ErrCookieExpiration = errors.New("cookie expiration")
 
 type RespText[T any] struct {
 	Text T `json:"text"`
@@ -5,13 +5,16 @@ import (
 	"fmt"
 	"net/http"
 	"regexp"
+	"runtime"
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"
@@ -37,7 +40,24 @@ func (d *LanZou) get(url string, callback base.ReqCallback) ([]byte, error) {
 }
 
 func (d *LanZou) post(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
-	return d._post(url, callback, resp, false)
+	data, err := d._post(url, callback, resp, false)
+	if err == ErrCookieExpiration && d.IsAccount() {
+		if atomic.CompareAndSwapInt32(&d.flag, 0, 1) {
+			_, err2 := d.Login()
+			atomic.SwapInt32(&d.flag, 0)
+			if err2 != nil {
+				err = errors.Join(err, err2)
+				d.Status = err.Error()
+				op.MustSaveDriverStorage(d)
+				return data, err
+			}
+		}
+		for atomic.LoadInt32(&d.flag) != 0 {
+			runtime.Gosched()
+		}
+		return d._post(url, callback, resp, false)
+	}
+	return data, err
 }
 
 func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{}, up bool) ([]byte, error) {
@@ -49,10 +69,12 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
 			}
 			return false
 		})
-		callback(req)
+		if callback != nil {
+			callback(req)
+		}
 	}, up)
 	if err != nil {
-		return nil, err
+		return data, err
 	}
 	switch utils.Json.Get(data, "zt").ToInt() {
 	case 1, 2, 4:
@@ -61,12 +83,14 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
 			utils.Json.Unmarshal(data, resp)
 		}
 		return data, nil
+	case 9: // login expired
+		return data, ErrCookieExpiration
 	default:
 		info := utils.Json.Get(data, "inf").ToString()
 		if info == "" {
 			info = utils.Json.Get(data, "info").ToString()
 		}
-		return nil, fmt.Errorf(info)
+		return data, fmt.Errorf(info)
 	}
 }
 
@@ -101,6 +125,28 @@ func (d *LanZou) request(url string, method string, callback base.ReqCallback, u
 	return res.Body(), err
 }
 
+func (d *LanZou) Login() ([]*http.Cookie, error) {
+	resp, err := base.NewRestyClient().SetRedirectPolicy(resty.NoRedirectPolicy()).
+		R().SetFormData(map[string]string{
+		"task":         "3",
+		"uid":          d.Account,
+		"pwd":          d.Password,
+		"setSessionId": "",
+		"setSig":       "",
+		"setScene":     "",
+		"setTocen":     "",
+		"formhash":     "",
+	}).Post("https://up.woozooo.com/mlogin.php")
+	if err != nil {
+		return nil, err
+	}
+	if utils.Json.Get(resp.Body(), "zt").ToInt() != 1 {
+		return nil, fmt.Errorf("login err: %s", resp.Body())
+	}
+	d.Cookie = CookieToString(resp.Cookies())
+	return resp.Cookies(), nil
+}
+
 /*
 fetch data using the cookie
 */
@@ -300,7 +346,11 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
 
 	// password required
 	if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
-		param, err := htmlFormToMap(sharePageData)
+		sharePageData, err := getJSFunctionByName(sharePageData, "down_p")
+		if err != nil {
+			return nil, err
+		}
+		param, err := htmlJsonToMap(sharePageData)
 		if err != nil {
 			return nil, err
 		}
@@ -324,8 +374,7 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
 		if err != nil {
 			return nil, err
 		}
-		nextPageData := RemoveNotes(string(data))
+		nextPageData := removeJSGlobalFunction(RemoveNotes(string(data)))
 
 		param, err = htmlJsonToMap(nextPageData)
 		if err != nil {
 			return nil, err
@@ -451,21 +500,32 @@ func (d *LanZou) getFileRealInfo(downURL string) (*int64, *time.Time) {
 	return &size, &time
 }
 
-func (d *LanZou) getVei() (string, error) {
-	resp, err := d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
+func (d *LanZou) getVeiAndUid() (vei string, uid string, err error) {
+	var resp []byte
+	resp, err = d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
 		req.SetQueryParams(map[string]string{
 			"item":   "files",
 			"action": "index",
-			"u":      d.uid,
 		})
 	})
 	if err != nil {
-		return "", err
+		return
 	}
+	// uid
+	uids := regexp.MustCompile(`uid=([^'"&;]+)`).FindStringSubmatch(string(resp))
+	if len(uids) < 2 {
+		err = fmt.Errorf("uid variable not find")
+		return
+	}
+	uid = uids[1]
+
+	// vei
 	html := RemoveNotes(string(resp))
 	data, err := htmlJsonToMap(html)
 	if err != nil {
-		return "", err
+		return
 	}
-	return data["vei"], nil
+	vei = data["vei"]
+
+	return
 }
|
|||||||
package local
|
package local
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
stdpath "path"
|
stdpath "path"
|
||||||
@ -80,36 +82,54 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
|||||||
if !d.ShowHidden && strings.HasPrefix(f.Name(), ".") {
|
if !d.ShowHidden && strings.HasPrefix(f.Name(), ".") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
thumb := ""
|
file := d.FileInfoToObj(f, args.ReqPath, fullPath)
|
||||||
if d.Thumbnail {
|
files = append(files, file)
|
||||||
typeName := utils.GetFileType(f.Name())
|
|
||||||
if typeName == conf.IMAGE || typeName == conf.VIDEO {
|
|
||||||
thumb = common.GetApiUrl(nil) + stdpath.Join("/d", args.ReqPath, f.Name())
|
|
||||||
thumb = utils.EncodePath(thumb, true)
|
|
||||||
thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(args.ReqPath, f.Name()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
|
|
||||||
var size int64
|
|
||||||
if !isFolder {
|
|
||||||
size = f.Size()
|
|
||||||
}
|
|
||||||
file := model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
Path: filepath.Join(dir.GetPath(), f.Name()),
|
|
||||||
Name: f.Name(),
|
|
||||||
Modified: f.ModTime(),
|
|
||||||
Size: size,
|
|
||||||
IsFolder: isFolder,
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{
|
|
||||||
Thumbnail: thumb,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
files = append(files, &file)
|
|
||||||
}
|
}
|
||||||
return files, nil
|
return files, nil
|
||||||
}
|
}
|
||||||
|
func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) model.Obj {
|
||||||
|
thumb := ""
|
||||||
|
if d.Thumbnail {
|
||||||
|
typeName := utils.GetFileType(f.Name())
|
||||||
|
if typeName == conf.IMAGE || typeName == conf.VIDEO {
|
||||||
|
thumb = common.GetApiUrl(nil) + stdpath.Join("/d", reqPath, f.Name())
|
||||||
|
thumb = utils.EncodePath(thumb, true)
|
||||||
|
thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
|
||||||
|
var size int64
|
||||||
|
if !isFolder {
|
||||||
|
size = f.Size()
|
||||||
|
}
|
||||||
|
file := model.ObjThumb{
|
||||||
|
Object: model.Object{
|
||||||
|
Path: filepath.Join(fullPath, f.Name()),
|
||||||
|
Name: f.Name(),
|
||||||
|
Modified: f.ModTime(),
|
||||||
|
Size: size,
|
||||||
|
IsFolder: isFolder,
|
||||||
|
},
|
||||||
|
Thumbnail: model.Thumbnail{
|
||||||
|
Thumbnail: thumb,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return &file
|
||||||
|
|
||||||
|
}
|
||||||
|
func (d *Local) GetMeta(ctx context.Context, path string) (model.Obj, error) {
|
||||||
|
f, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
file := d.FileInfoToObj(f, path, path)
|
||||||
|
//h := "123123"
|
||||||
|
//if s, ok := f.(model.SetHash); ok && file.GetHash() == ("","") {
|
||||||
|
// s.SetHash(h,"SHA1")
|
||||||
|
//}
|
||||||
|
return file, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
|
func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
|
||||||
path = filepath.Join(d.GetRootPath(), path)
|
path = filepath.Join(d.GetRootPath(), path)
|
||||||
@ -147,13 +167,29 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
|||||||
"Content-Type": []string{"image/png"},
|
"Content-Type": []string{"image/png"},
|
||||||
}
|
}
|
||||||
if thumbPath != nil {
|
if thumbPath != nil {
|
||||||
link.FilePath = thumbPath
|
open, err := os.Open(*thumbPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
link.ReadSeekCloser = open
|
||||||
} else {
|
} else {
|
||||||
link.Data = io.NopCloser(buf)
|
link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes()))
|
||||||
link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
|
//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
link.FilePath = &fullPath
|
open, err := os.Open(fullPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
link.ReadSeekCloser = struct {
|
||||||
|
io.Reader
|
||||||
|
io.Seeker
|
||||||
|
io.Closer
|
||||||
|
}{
|
||||||
|
Reader: open,
|
||||||
|
Seeker: open,
|
||||||
|
Closer: open,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return &link, nil
|
return &link, nil
|
||||||
}
|
}
|
||||||
|
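Why wrap an *os.File that already satisfies read/seek/close? A plausible reading is that the anonymous struct strips every other method (ReadAt, WriteTo, Fd, ...), so downstream type assertions and io.Copy fast paths see only the three interfaces. A self-contained illustration of that effect:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "demo-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Wrap the file so only Read/Seek/Close remain visible.
	var rsc io.ReadSeekCloser = struct {
		io.Reader
		io.Seeker
		io.Closer
	}{f, f, f}

	_, hidden := rsc.(io.ReaderAt)            // false: the wrapper hides ReadAt
	_, direct := interface{}(f).(io.ReaderAt) // true: *os.File has ReadAt
	fmt.Println(hidden, direct)
}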
@@ -64,7 +64,7 @@ func readDir(dirname string) ([]fs.FileInfo, error) {
 func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
 	fullPath := file.GetPath()
 	thumbPrefix := "alist_thumb_"
-	thumbName := thumbPrefix + utils.GetMD5Encode(fullPath) + ".png"
+	thumbName := thumbPrefix + utils.GetMD5EncodeStr(fullPath) + ".png"
 	if d.ThumbCacheFolder != "" {
 		// skip if the file is a thumbnail
 		if strings.HasPrefix(file.GetName(), thumbPrefix) {
@@ -91,7 +91,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
 		srcBuf = imgBuf
 	}
 
-	image, err := imaging.Decode(srcBuf)
+	image, err := imaging.Decode(srcBuf, imaging.AutoOrientation(true))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -181,7 +181,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	if err != nil {
 		return err
 	}
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return err
 	}
@@ -4,7 +4,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/rclone/rclone/lib/readers"
 	"io"
+	"time"
 
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -64,51 +67,41 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
 
 func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	if node, ok := file.(*MegaNode); ok {
-		//link, err := d.c.Link(node.Node, true)
+		//down, err := d.c.NewDownload(node.Node)
 		//if err != nil {
-		//	return nil, err
+		//	return nil, fmt.Errorf("open download file failed: %w", err)
 		//}
-		//return &model.Link{URL: link}, nil
-		down, err := d.c.NewDownload(node.Node)
-		if err != nil {
-			return nil, err
-		}
-		//u := down.GetResourceUrl()
-		//u = strings.Replace(u, "http", "https", 1)
-		//return &model.Link{URL: u}, nil
-		r, w := io.Pipe()
-		go func() {
-			defer func() {
-				_ = recover()
-			}()
-			log.Debugf("chunk size: %d", down.Chunks())
-			var (
-				chunk []byte
-				err   error
-			)
-			for id := 0; id < down.Chunks(); id++ {
-				chunk, err = down.DownloadChunk(id)
-				if err != nil {
-					log.Errorf("mega down: %+v", err)
-					break
-				}
-				log.Debugf("id: %d,len: %d", id, len(chunk))
-				//_, _, err = down.ChunkLocation(id)
-				//if err != nil {
-				//	log.Errorf("mega down: %+v", err)
-				//	return
-				//}
-				//_, err = c.Write(chunk)
-				if _, err = w.Write(chunk); err != nil {
-					break
-				}
-			}
-			err = w.CloseWithError(err)
-			if err != nil {
-				log.Errorf("mega down: %+v", err)
-			}
-		}()
-		return &model.Link{Data: r}, nil
+		size := file.GetSize()
+		var finalClosers utils.Closers
+		resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+			length := httpRange.Length
+			if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
+				length = -1
+			}
+			var down *mega.Download
+			err := utils.Retry(3, time.Second, func() (err error) {
+				down, err = d.c.NewDownload(node.Node)
+				return err
+			})
+			if err != nil {
+				return nil, fmt.Errorf("open download file failed: %w", err)
+			}
+			oo := &openObject{
+				ctx:  ctx,
+				d:    down,
+				skip: httpRange.Start,
+			}
+			finalClosers.Add(oo)
+
+			return readers.NewLimitedReadCloser(oo, length), nil
+		}
+		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers}
+		resultLink := &model.Link{
+			RangeReadCloser: *resultRangeReadCloser,
+		}
+		return resultLink, nil
 	}
 	return nil, fmt.Errorf("unable to convert dir to mega node")
 }
@@ -1,3 +1,92 @@
 package mega
 
+import (
+	"context"
+	"fmt"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/t3rm1n4l/go-mega"
+	"io"
+	"sync"
+	"time"
+)
+
 // do others that not defined in Driver interface
+
+// openObject represents a download in progress
+type openObject struct {
+	ctx    context.Context
+	mu     sync.Mutex
+	d      *mega.Download
+	id     int
+	skip   int64
+	chunk  []byte
+	closed bool
+}
+
+// get the next chunk
+func (oo *openObject) getChunk(ctx context.Context) (err error) {
+	if oo.id >= oo.d.Chunks() {
+		return io.EOF
+	}
+	var chunk []byte
+	err = utils.Retry(3, time.Second, func() (err error) {
+		chunk, err = oo.d.DownloadChunk(oo.id)
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	oo.id++
+	oo.chunk = chunk
+	return nil
+}
+
+// Read reads up to len(p) bytes into p.
+func (oo *openObject) Read(p []byte) (n int, err error) {
+	oo.mu.Lock()
+	defer oo.mu.Unlock()
+	if oo.closed {
+		return 0, fmt.Errorf("read on closed file")
+	}
+	// Skip data at the start if requested
+	for oo.skip > 0 {
+		_, size, err := oo.d.ChunkLocation(oo.id)
+		if err != nil {
+			return 0, err
+		}
+		if oo.skip < int64(size) {
+			break
+		}
+		oo.id++
+		oo.skip -= int64(size)
+	}
+	if len(oo.chunk) == 0 {
+		err = oo.getChunk(oo.ctx)
+		if err != nil {
+			return 0, err
+		}
+		if oo.skip > 0 {
+			oo.chunk = oo.chunk[oo.skip:]
+			oo.skip = 0
+		}
+	}
+	n = copy(p, oo.chunk)
+	oo.chunk = oo.chunk[n:]
+	return n, nil
+}
+
+// Close closes the file - MAC errors are reported here
+func (oo *openObject) Close() (err error) {
+	oo.mu.Lock()
+	defer oo.mu.Unlock()
+	if oo.closed {
+		return nil
+	}
+	err = utils.Retry(3, 500*time.Millisecond, func() (err error) {
+		return oo.d.Finish()
+	})
+	if err != nil {
+		return fmt.Errorf("failed to finish download: %w", err)
+	}
+	oo.closed = true
+	return nil
+}
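Worked example of the skip loop in Read above, assuming (purely for illustration) uniform 128 KiB chunks and skip = 300 KiB: ChunkLocation reports each chunk's size, so the loop advances id past two whole chunks (256 KiB), leaving skip = 44 KiB; the first DownloadChunk then has its leading 44 KiB sliced off before any bytes are returned to the caller. This is what lets the range reader in Link start mid-file without downloading the prefix.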
@@ -7,12 +7,14 @@ import (
 	"io"
 	"net/http"
 	"os"
+	"strconv"
 	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/foxxorcat/mopan-sdk-go"
@@ -23,7 +25,8 @@ type MoPan struct {
 	Addition
 	client *mopan.MoClient
 
-	userID string
+	userID       string
+	uploadThread int
 }
 
 func (d *MoPan) Config() driver.Config {
@@ -35,6 +38,10 @@ func (d *MoPan) GetAddition() driver.Additional {
 }
 
 func (d *MoPan) Init(ctx context.Context) error {
+	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
+	if d.uploadThread < 1 || d.uploadThread > 32 {
+		d.uploadThread, d.UploadThread = 3, "3"
+	}
 	login := func() error {
 		data, err := d.client.Login(d.Phone, d.Password)
 		if err != nil {
@@ -49,7 +56,7 @@ func (d *MoPan) Init(ctx context.Context) error {
 		d.userID = info.UserID
 		return nil
 	}
-	d.client = mopan.NewMoClient().
+	d.client = mopan.NewMoClientWithRestyClient(base.NewRestyClient()).
 		SetRestyClient(base.RestyClient).
 		SetOnAuthorizationExpired(func(_ error) error {
 			err := login()
@@ -212,7 +219,7 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	file, err := utils.CreateTempFile(stream)
+	file, err := utils.CreateTempFile(stream, stream.GetSize())
 	if err != nil {
 		return nil, err
 	}
@@ -221,59 +228,80 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 		_ = os.Remove(file.Name())
 	}()
 
-	initUpdload, err := d.client.InitMultiUpload(ctx, mopan.UpdloadFileParam{
+	// step.1
+	uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{
 		ParentFolderId: dstDir.GetID(),
 		FileName:       stream.GetName(),
 		FileSize:       stream.GetSize(),
 		File:           file,
-	}, mopan.WarpParamOption(
-		mopan.ParamOptionShareFile(d.CloudID),
-	))
+	})
 	if err != nil {
 		return nil, err
 	}
 
-	if !initUpdload.FileDataExists {
-		parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfo)
+	// try to resume upload progress
+	initUpdload, ok := base.GetUploadProgress[*mopan.InitMultiUploadData](d, d.client.Authorization, uploadPartData.FileMd5)
+	if !ok {
+		// step.2
+		initUpdload, err = d.client.InitMultiUpload(ctx, *uploadPartData, mopan.WarpParamOption(
+			mopan.ParamOptionShareFile(d.CloudID),
+		))
 		if err != nil {
 			return nil, err
 		}
-		d.client.CloudDiskStartBusiness()
+	}
+
+	if !initUpdload.FileDataExists {
+		fmt.Println(d.client.CloudDiskStartBusiness())
+
+		threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+			retry.Attempts(3),
+			retry.Delay(time.Second),
+			retry.DelayType(retry.BackOffDelay))
+
+		// step.3
+		parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
+		if err != nil {
+			return nil, err
+		}
+
 		for i, part := range parts {
-			if utils.IsCanceled(ctx) {
-				return nil, ctx.Err()
+			if utils.IsCanceled(upCtx) {
+				break
 			}
+			i, part, byteSize := i, part, initUpdload.PartSize
+			if part.PartNumber == uploadPartData.PartTotal {
+				byteSize = initUpdload.LastPartSize
+			}
 
-			err := retry.Do(func() error {
-				if _, err := file.Seek(int64(part.PartNumber-1)*int64(initUpdload.PartSize), io.SeekStart); err != nil {
-					return retry.Unrecoverable(err)
-				}
-
-				req, err := part.NewRequest(ctx, io.LimitReader(file, int64(initUpdload.PartSize)))
+			// step.4
+			threadG.Go(func(ctx context.Context) error {
+				req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
 				if err != nil {
 					return err
 				}

 				resp, err := base.HttpClient.Do(req)
 				if err != nil {
 					return err
 				}
+				resp.Body.Close()
 				if resp.StatusCode != http.StatusOK {
 					return fmt.Errorf("upload err,code=%d", resp.StatusCode)
 				}
+				up(100 * int(threadG.Success()) / len(parts))
+				initUpdload.PartInfos[i] = ""
 				return nil
-			},
-				retry.Context(ctx),
-				retry.Attempts(3),
-				retry.Delay(time.Second),
-				retry.MaxDelay(5*time.Second))
-			if err != nil {
-				return nil, err
-			}
-			up(100 * (i + 1) / len(parts))
+			})
+		}
+		if err = threadG.Wait(); err != nil {
+			if errors.Is(err, context.Canceled) {
+				initUpdload.PartInfos = utils.SliceFilter(initUpdload.PartInfos, func(s string) bool { return s != "" })
+				base.SaveUploadProgress(d, initUpdload, d.client.Authorization, uploadPartData.FileMd5)
+			}
+			return nil, err
 		}
 	}
+	//step.5
 	uFile, err := d.client.CommitMultiUploadFile(initUpdload.UploadFileID, nil)
 	if err != nil {
 		return nil, err
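The new Put is a bounded-parallel multipart upload: a worker-group capped at uploadThread uploads io.NewSectionReader slices, completed parts are blanked out of PartInfos, and on cancellation the remaining part list is persisted so a later Put can resume. alist uses its own pkg/errgroup for this; a minimal sketch of the same shape with golang.org/x/sync/errgroup (not the alist API):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	parts := []int{1, 2, 3, 4, 5, 6}
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(3) // cap in-flight uploads, like d.uploadThread

	done := make([]bool, len(parts)) // stands in for the PartInfos bookkeeping
	for i, part := range parts {
		i, part := i, part
		g.Go(func() error {
			if ctx.Err() != nil {
				return ctx.Err() // stop scheduling once something failed
			}
			fmt.Printf("uploading part %d\n", part)
			done[i] = true // on cancel, the false entries are what we'd persist and resume
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		// here the real driver filters unfinished parts and saves progress
		fmt.Println("upload failed:", err)
	}
}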
@@ -17,6 +17,8 @@ type Addition struct {
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
 
 	DeviceInfo string `json:"device_info"`
+
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 }
 
 func (a *Addition) GetRootId() string {
@@ -24,7 +26,7 @@ func (a *Addition) GetRootId() string {
 }
 
 var config = driver.Config{
 	Name: "MoPan",
 	// DefaultRoot: "root, / or other",
 	CheckStatus: true,
 	Alert: "warning|This network disk may store your password in clear text. Please set your password carefully",
@@ -12,7 +12,7 @@ func fileToObj(f mopan.File) model.Obj {
 		Object: model.Object{
 			ID:   string(f.ID),
 			Name: f.Name,
-			Size: f.Size,
+			Size: int64(f.Size),
 			Modified: time.Time(f.LastOpTime),
 		},
 		Thumbnail: model.Thumbnail{
@@ -2,8 +2,6 @@ package pikpak
 
 import (
 	"context"
-	"crypto/sha1"
-	"encoding/hex"
 	"fmt"
 	"io"
 	"net/http"
@@ -19,7 +17,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/go-resty/resty/v2"
-	jsoniter "github.com/json-iterator/go"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -66,7 +63,7 @@ func (d *PikPak) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 	link := model.Link{
 		URL: resp.WebContentLink,
 	}
-	if len(resp.Medias) > 0 && resp.Medias[0].Link.Url != "" {
+	if !d.DisableMediaLink && len(resp.Medias) > 0 && resp.Medias[0].Link.Url != "" {
 		log.Debugln("use media link")
 		link.URL = resp.Medias[0].Link.Url
 	}
@@ -127,7 +124,7 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return err
 	}
@@ -135,9 +132,8 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		_ = tempFile.Close()
 		_ = os.Remove(tempFile.Name())
 	}()
-	// cal sha1
-	s := sha1.New()
-	_, err = io.Copy(s, tempFile)
+	// cal gcid
+	sha1Str, err := getGcid(tempFile, stream.GetSize())
 	if err != nil {
 		return err
 	}
@@ -145,37 +141,33 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	if err != nil {
 		return err
 	}
-	sha1Str := hex.EncodeToString(s.Sum(nil))
-	data := base.Json{
-		"kind":        "drive#file",
-		"name":        stream.GetName(),
-		"size":        stream.GetSize(),
-		"hash":        strings.ToUpper(sha1Str),
-		"upload_type": "UPLOAD_TYPE_RESUMABLE",
-		"objProvider": base.Json{"provider": "UPLOAD_TYPE_UNKNOWN"},
-		"parent_id":   dstDir.GetID(),
-	}
+	var resp UploadTaskData
 	res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
-		req.SetBody(data)
-	}, nil)
+		req.SetBody(base.Json{
+			"kind":        "drive#file",
+			"name":        stream.GetName(),
+			"size":        stream.GetSize(),
+			"hash":        strings.ToUpper(sha1Str),
+			"upload_type": "UPLOAD_TYPE_RESUMABLE",
+			"objProvider": base.Json{"provider": "UPLOAD_TYPE_UNKNOWN"},
+			"parent_id":   dstDir.GetID(),
+			"folder_type": "NORMAL",
+		})
	}, &resp)
 	if err != nil {
 		return err
 	}
-	if stream.GetSize() == 0 {
+	// instant (rapid) upload succeeded
+	if resp.Resumable == nil {
 		log.Debugln(string(res))
 		return nil
 	}
-	params := jsoniter.Get(res, "resumable").Get("params")
-	endpoint := params.Get("endpoint").ToString()
-	endpointS := strings.Split(endpoint, ".")
-	endpoint = strings.Join(endpointS[1:], ".")
-	accessKeyId := params.Get("access_key_id").ToString()
-	accessKeySecret := params.Get("access_key_secret").ToString()
-	securityToken := params.Get("security_token").ToString()
-	key := params.Get("key").ToString()
-	bucket := params.Get("bucket").ToString()
+	params := resp.Resumable.Params
+	endpoint := strings.Join(strings.Split(params.Endpoint, ".")[1:], ".")
 	cfg := &aws.Config{
-		Credentials: credentials.NewStaticCredentials(accessKeyId, accessKeySecret, securityToken),
+		Credentials: credentials.NewStaticCredentials(params.AccessKeyID, params.AccessKeySecret, params.SecurityToken),
 		Region:      aws.String("pikpak"),
 		Endpoint:    &endpoint,
 	}
@@ -185,8 +177,8 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	}
 	uploader := s3manager.NewUploader(ss)
 	input := &s3manager.UploadInput{
-		Bucket: &bucket,
-		Key:    &key,
+		Bucket: &params.Bucket,
+		Key:    &params.Key,
 		Body:   tempFile,
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
@@ -7,8 +7,9 @@ import (
 
 type Addition struct {
 	driver.RootID
 	Username string `json:"username" required:"true"`
 	Password string `json:"password" required:"true"`
+	DisableMediaLink bool `json:"disable_media_link"`
 }
 
 var config = driver.Config{
@@ -73,3 +73,23 @@ type Media struct {
 	IsVisible bool   `json:"is_visible"`
 	Category  string `json:"category"`
 }
+
+type UploadTaskData struct {
+	UploadType string `json:"upload_type"`
+	//UPLOAD_TYPE_RESUMABLE
+	Resumable *struct {
+		Kind   string `json:"kind"`
+		Params struct {
+			AccessKeyID     string    `json:"access_key_id"`
+			AccessKeySecret string    `json:"access_key_secret"`
+			Bucket          string    `json:"bucket"`
+			Endpoint        string    `json:"endpoint"`
+			Expiration      time.Time `json:"expiration"`
+			Key             string    `json:"key"`
+			SecurityToken   string    `json:"security_token"`
+		} `json:"params"`
+		Provider string `json:"provider"`
+	} `json:"resumable"`
+
+	File File `json:"file"`
+}
@@ -1,7 +1,10 @@
 package pikpak
 
 import (
+	"crypto/sha1"
+	"encoding/hex"
 	"errors"
+	"io"
 	"net/http"
 
 	"github.com/alist-org/alist/v3/drivers/base"
@@ -123,3 +126,28 @@ func (d *PikPak) getFiles(id string) ([]File, error) {
 	}
 	return res, nil
 }
+
+func getGcid(r io.Reader, size int64) (string, error) {
+	calcBlockSize := func(j int64) int64 {
+		var psize int64 = 0x40000
+		for float64(j)/float64(psize) > 0x200 && psize < 0x200000 {
+			psize = psize << 1
+		}
+		return psize
+	}
+
+	hash1 := sha1.New()
+	hash2 := sha1.New()
+	readSize := calcBlockSize(size)
+	for {
+		hash2.Reset()
+		if n, err := io.CopyN(hash2, r, readSize); err != nil && n == 0 {
+			if err != io.EOF {
+				return "", err
+			}
+			break
+		}
+		hash1.Write(hash2.Sum(nil))
+	}
+	return hex.EncodeToString(hash1.Sum(nil)), nil
+}
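Worked example of calcBlockSize in getGcid above: the block size starts at 0x40000 (256 KiB) and doubles while size/block > 0x200 (512), capped at 0x200000 (2 MiB).

//   size = 10 MiB  -> 10 MiB / 256 KiB = 40  <= 512 -> 256 KiB blocks
//   size = 200 MiB -> doubles once: 200 MiB / 512 KiB = 400 <= 512 -> 512 KiB blocks
//   size = 1 GiB   -> hits the 2 MiB cap -> 2 MiB blocks

Each block is SHA-1 hashed on its own (hash2) and the per-block digests are chained through an outer SHA-1 (hash1); the hex digest of hash1 is the gcid-style hash the upload request sends in place of the old whole-file SHA-1.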
@ -5,18 +5,15 @@ import (
|
|||||||
"crypto/md5"
|
"crypto/md5"
|
||||||
"crypto/sha1"
|
"crypto/sha1"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/drivers/base"
|
"github.com/alist-org/alist/v3/drivers/base"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
"github.com/alist-org/alist/v3/internal/driver"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
|
||||||
"github.com/alist-org/alist/v3/pkg/utils"
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
"github.com/go-resty/resty/v2"
|
"github.com/go-resty/resty/v2"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
@ -69,62 +66,17 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
u := resp.Data[0].DownloadUrl
|
|
||||||
start, end := int64(0), file.GetSize()
|
|
||||||
link := model.Link{
|
|
||||||
Header: http.Header{},
|
|
||||||
}
|
|
||||||
if rg := args.Header.Get("Range"); rg != "" {
|
|
||||||
parseRange, err := http_range.ParseRange(rg, file.GetSize())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
start, end = parseRange[0].Start, parseRange[0].Start+parseRange[0].Length
|
|
||||||
link.Header.Set("Content-Range", parseRange[0].ContentRange(file.GetSize()))
|
|
||||||
link.Header.Set("Content-Length", strconv.FormatInt(parseRange[0].Length, 10))
|
|
||||||
link.Status = http.StatusPartialContent
|
|
||||||
} else {
|
|
||||||
link.Header.Set("Content-Length", strconv.FormatInt(file.GetSize(), 10))
|
|
||||||
link.Status = http.StatusOK
|
|
||||||
}
|
|
||||||
link.Writer = func(w io.Writer) error {
|
|
||||||
// request 10 MB at a time
|
|
||||||
chunkSize := int64(10 * 1024 * 1024)
|
|
||||||
for start < end {
|
|
||||||
_end := start + chunkSize
|
|
||||||
if _end > end {
|
|
||||||
_end = end
|
|
||||||
}
|
|
||||||
_range := "bytes=" + strconv.FormatInt(start, 10) + "-" + strconv.FormatInt(_end-1, 10)
|
|
||||||
start = _end
|
|
||||||
err = func() error {
|
|
||||||
req, err := http.NewRequest(http.MethodGet, u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
req.Header.Set("Range", _range)
|
|
||||||
req.Header.Set("User-Agent", ua)
|
|
||||||
req.Header.Set("Cookie", d.Cookie)
|
|
||||||
req.Header.Set("Referer", d.conf.referer)
|
|
||||||
resp, err := base.HttpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != http.StatusPartialContent {
|
|
||||||
return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
|
|
||||||
}
|
|
||||||
_, err = io.Copy(w, resp.Body)
|
|
||||||
return err
|
|
||||||
}()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
return &model.Link{
|
||||||
return nil
|
URL: resp.Data[0].DownloadUrl,
|
||||||
}
|
Header: http.Header{
|
||||||
return &link, nil
|
"Cookie": []string{d.Cookie},
|
||||||
|
"Referer": []string{d.conf.referer},
|
||||||
|
"User-Agent": []string{ua},
|
||||||
|
},
|
||||||
|
Concurrency: 2,
|
||||||
|
PartSize: 10 * 1024 * 1024,
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *QuarkOrUC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
func (d *QuarkOrUC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
||||||
@@ -184,7 +136,7 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return err
 	}
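Note: utils.CreateTempFile now also receives the stream's declared size (the same change recurs in the Terabox and Thunder drivers below). A plausible reading is that the helper verifies the byte count it actually copied, failing early on truncated uploads; a sketch under that assumption (signature and temp-name pattern are assumed, the real pkg/utils implementation may differ):

// CreateTempFile spools r to a temp file and rewinds it; size, when
// positive, is the expected byte count (assumed semantics).
func CreateTempFile(r io.Reader, size int64) (*os.File, error) {
	f, err := os.CreateTemp("", "alist-upload-*") // name pattern is hypothetical
	if err != nil {
		return nil, err
	}
	cleanup := func() { _ = f.Close(); _ = os.Remove(f.Name()) }
	n, err := io.Copy(f, r)
	if err != nil {
		cleanup()
		return nil, err
	}
	if size > 0 && n != size {
		cleanup()
		return nil, fmt.Errorf("stream size mismatch: copied %d, expected %d", n, size)
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		cleanup()
		return nil, err
	}
	return f, nil
}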
@@ -53,9 +53,9 @@ func (d *S3) Drop(ctx context.Context) error {

 func (d *S3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
 	if d.ListObjectVersion == "v2" {
-		return d.listV2(dir.GetPath())
+		return d.listV2(dir.GetPath(), args)
 	}
-	return d.listV1(dir.GetPath())
+	return d.listV1(dir.GetPath(), args)
 }

 func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
@@ -12,6 +12,7 @@ type Addition struct {
 	Region          string `json:"region"`
 	AccessKeyID     string `json:"access_key_id" required:"true"`
 	SecretAccessKey string `json:"secret_access_key" required:"true"`
+	SessionToken    string `json:"session_token"`
 	CustomHost      string `json:"custom_host"`
 	SignURLExpire   int    `json:"sign_url_expire" type:"number" default:"4"`
 	Placeholder     string `json:"placeholder"`
@@ -22,7 +22,7 @@ import (

 func (d *S3) initSession() error {
 	cfg := &aws.Config{
-		Credentials:      credentials.NewStaticCredentials(d.AccessKeyID, d.SecretAccessKey, ""),
+		Credentials:      credentials.NewStaticCredentials(d.AccessKeyID, d.SecretAccessKey, d.SessionToken),
 		Region:           &d.Region,
 		Endpoint:         &d.Endpoint,
 		S3ForcePathStyle: aws.Bool(d.ForcePathStyle),
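Note: the third argument of credentials.NewStaticCredentials in aws-sdk-go is the session token, so wiring the new SessionToken field through makes temporary STS-issued credentials usable. Placeholder values for illustration:

// Long-lived keys pass an empty token; STS-issued ones must include it.
creds := credentials.NewStaticCredentials(
	"AKIA...",           // access key ID (placeholder)
	"wJalr...",          // secret access key (placeholder)
	"IQoJb3JpZ2luX2...", // session token, "" for permanent credentials
)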
@@ -69,7 +69,7 @@ func getPlaceholderName(placeholder string) string {
 	return placeholder
 }

-func (d *S3) listV1(prefix string) ([]model.Obj, error) {
+func (d *S3) listV1(prefix string, args model.ListArgs) ([]model.Obj, error) {
 	prefix = getKey(prefix, true)
 	log.Debugf("list: %s", prefix)
 	files := make([]model.Obj, 0)
@@ -97,7 +97,7 @@ func (d *S3) listV1(prefix string) ([]model.Obj, error) {
 	}
 	for _, object := range listObjectsResult.Contents {
 		name := path.Base(*object.Key)
-		if name == getPlaceholderName(d.Placeholder) || name == d.Placeholder {
+		if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
 			continue
 		}
 		file := model.Object{
@@ -120,7 +120,7 @@ func (d *S3) listV1(prefix string) ([]model.Obj, error) {
 	return files, nil
 }

-func (d *S3) listV2(prefix string) ([]model.Obj, error) {
+func (d *S3) listV2(prefix string, args model.ListArgs) ([]model.Obj, error) {
 	prefix = getKey(prefix, true)
 	files := make([]model.Obj, 0)
 	var continuationToken, startAfter *string
@@ -152,7 +152,7 @@ func (d *S3) listV2(prefix string) ([]model.Obj, error) {
 			continue
 		}
 		name := path.Base(*object.Key)
-		if name == getPlaceholderName(d.Placeholder) || name == d.Placeholder {
+		if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
 			continue
 		}
 		file := model.Object{
@@ -198,7 +198,7 @@ func (d *S3) copyFile(ctx context.Context, src string, dst string) error {
 }

 func (d *S3) copyDir(ctx context.Context, src string, dst string) error {
-	objs, err := op.List(ctx, d, src, model.ListArgs{})
+	objs, err := op.List(ctx, d, src, model.ListArgs{S3ShowPlaceholder: true})
 	if err != nil {
 		return err
 	}
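Note: because S3 has no real directories, the driver stores a placeholder object inside otherwise-empty prefixes. Ordinary listings filter it out, but copyDir now lists with S3ShowPlaceholder: true so the placeholder is copied along and empty directories survive a copy. Schematically (the path is a placeholder):

// Regular listing: the placeholder object stays hidden.
objs, _ := op.List(ctx, d, "/dir", model.ListArgs{})
// copyDir's listing: placeholders included so empty dirs are preserved.
all, _ := op.List(ctx, d, "/dir", model.ListArgs{S3ShowPlaceholder: true})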
@@ -5,12 +5,12 @@ import (
 	"os"
 	"path"

-	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/pkg/sftp"
+	log "github.com/sirupsen/logrus"
 )

 type SFTP struct {
@@ -39,13 +39,15 @@ func (d *SFTP) Drop(ctx context.Context) error {
 }

 func (d *SFTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	log.Debugf("[sftp] list dir: %s", dir.GetPath())
 	files, err := d.client.ReadDir(dir.GetPath())
 	if err != nil {
 		return nil, err
 	}
-	return utils.SliceConvert(files, func(src os.FileInfo) (model.Obj, error) {
-		return fileToObj(src), nil
+	objs, err := utils.SliceConvert(files, func(src os.FileInfo) (model.Obj, error) {
+		return d.fileToObj(src, dir.GetPath())
 	})
+	return objs, err
 }

 func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
@@ -54,9 +56,8 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
 		return nil, err
 	}
 	link := &model.Link{
-		Data: remoteFile,
+		ReadSeekCloser: remoteFile,
 	}
-	base.HandleRange(link, remoteFile, args.Header, file.GetSize())
 	return link, nil
 }
@@ -2,15 +2,44 @@ package sftp

 import (
 	"os"
+	stdpath "path"
+	"strings"

 	"github.com/alist-org/alist/v3/internal/model"
+	log "github.com/sirupsen/logrus"
 )

-func fileToObj(f os.FileInfo) model.Obj {
-	return &model.Object{
-		Name:     f.Name(),
-		Size:     f.Size(),
-		Modified: f.ModTime(),
-		IsFolder: f.IsDir(),
+func (d *SFTP) fileToObj(f os.FileInfo, dir string) (model.Obj, error) {
+	symlink := f.Mode()&os.ModeSymlink != 0
+	if !symlink {
+		return &model.Object{
+			Name:     f.Name(),
+			Size:     f.Size(),
+			Modified: f.ModTime(),
+			IsFolder: f.IsDir(),
+		}, nil
 	}
+	path := stdpath.Join(dir, f.Name())
+	// set target path
+	target, err := d.client.ReadLink(path)
+	if err != nil {
+		return nil, err
+	}
+	if !strings.HasPrefix(target, "/") {
+		target = stdpath.Join(dir, target)
+	}
+	_f, err := d.client.Stat(target)
+	if err != nil {
+		return nil, err
+	}
+	// set basic info
+	obj := &model.Object{
+		Name:     f.Name(),
+		Size:     _f.Size(),
+		Modified: _f.ModTime(),
+		IsFolder: _f.IsDir(),
+		Path:     target,
+	}
+	log.Debugf("[sftp] obj: %+v, is symlink: %v", obj, symlink)
+	return obj, nil
 }
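Note: fileToObj now follows symlinks: a relative target is joined onto the containing directory before Stat, so the returned object carries the target's size, mtime, and directory flag rather than the link's. For instance (hypothetical paths, mirroring the resolution step above):

// A link /data/docs/latest -> "v2" resolves to /data/docs/v2;
// an absolute target like "/srv/shared" is used as-is.
target := "v2"
if !strings.HasPrefix(target, "/") {
	target = stdpath.Join("/data/docs", target) // -> /data/docs/v2
}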
@@ -6,7 +6,6 @@ import (
 	"path/filepath"
 	"strings"

-	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -80,9 +79,8 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 		return nil, err
 	}
 	link := &model.Link{
-		Data: remoteFile,
+		ReadSeekCloser: remoteFile,
 	}
-	base.HandleRange(link, remoteFile, args.Header, file.GetSize())
 	d.updateLastConnTime()
 	return link, nil
 }
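Note: here (and in the SFTP driver above) model.Link switches from a raw Data reader plus base.HandleRange to a ReadSeekCloser. A seekable body is all an HTTP layer needs to serve Range requests the standard way; the Go standard library, for example, derives partial responses from Seek alone. A sketch of that idea (not alist's proxy code):

// http.ServeContent handles Range/If-Range itself given an io.ReadSeeker.
func serve(w http.ResponseWriter, r *http.Request, name string, mod time.Time, body io.ReadSeeker) {
	http.ServeContent(w, r, name, mod, body)
}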
@@ -116,7 +116,7 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return err
 	}
@@ -3,15 +3,16 @@ package terbox

 import (
 	"encoding/base64"
 	"fmt"
-	"github.com/alist-org/alist/v3/drivers/base"
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/pkg/utils"
-	"github.com/go-resty/resty/v2"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
 	"time"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
 )

 func (d *Terabox) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
@@ -139,6 +140,11 @@ func (d *Terabox) linkOfficial(file model.Obj, args model.LinkArgs) (*model.Link
 	if err != nil {
 		return nil, err
 	}
+
+	if len(resp.Dlink) == 0 {
+		return nil, fmt.Errorf("fid %s no dlink found, errno: %d", file.GetID(), resp.Errno)
+	}
+
 	res, err := base.NoRedirectClient.R().SetHeader("Cookie", d.Cookie).SetHeader("User-Agent", base.UserAgent).Get(resp.Dlink[0].Dlink)
 	if err != nil {
 		return nil, err
@@ -56,7 +56,7 @@ func (x *Thunder) Init(ctx context.Context) (err error) {
 				"j",
 				"4scKJNdd7F27Hv7tbt",
 			},
-			DeviceID:      utils.GetMD5Encode(x.Username + x.Password),
+			DeviceID:      utils.GetMD5EncodeStr(x.Username + x.Password),
 			ClientID:      "Xp6vsxz_7IYVw2BB",
 			ClientSecret:  "Xp6vsy4tN9toTVdMSpomVdXpRmES",
 			ClientVersion: "7.51.0.8196",
@@ -137,7 +137,7 @@ func (x *ThunderExpert) Init(ctx context.Context) (err error) {

 			DeviceID: func() string {
 				if len(x.DeviceID) != 32 {
-					return utils.GetMD5Encode(x.DeviceID)
+					return utils.GetMD5EncodeStr(x.DeviceID)
 				}
 				return x.DeviceID
 			}(),
@@ -333,7 +333,7 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
 	if err != nil {
 		return err
 	}
@@ -78,7 +78,7 @@ type Addition struct {

 // login fingerprint, used to decide whether a re-login is needed
 func (i *Addition) GetIdentity() string {
-	return utils.GetMD5Encode(i.Username + i.Password)
+	return utils.GetMD5EncodeStr(i.Username + i.Password)
 }

 var config = driver.Config{
@@ -100,7 +100,7 @@ func (c *Common) GetCaptchaSign() (timestamp, sign string) {
 	timestamp = fmt.Sprint(time.Now().UnixMilli())
 	str := fmt.Sprint(c.ClientID, c.ClientVersion, c.PackageName, c.DeviceID, timestamp)
 	for _, algorithm := range c.Algorithms {
-		str = utils.GetMD5Encode(str + algorithm)
+		str = utils.GetMD5EncodeStr(str + algorithm)
 	}
 	sign = "1." + str
 	return
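Note: the recurring GetMD5Encode → GetMD5EncodeStr rename (Thunder above, USS below) points at a string-in, hex-string-out MD5 helper. Its assumed shape, for orientation only — the real pkg/utils code may differ:

import (
	"crypto/md5"
	"encoding/hex"
)

func GetMD5EncodeStr(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}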
@@ -81,7 +81,7 @@ func (d *USS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 	expireAt := time.Now().Add(downExp).Unix()
 	upd := url.QueryEscape(path.Base(file.GetPath()))
 	signStr := strings.Join([]string{d.OperatorPassword, fmt.Sprint(expireAt), fmt.Sprintf("/%s", key)}, "&")
-	upt := utils.GetMD5Encode(signStr)[12:20] + fmt.Sprint(expireAt)
+	upt := utils.GetMD5EncodeStr(signStr)[12:20] + fmt.Sprint(expireAt)
 	link := fmt.Sprintf("%s?_upd=%s&_upt=%s", u, upd, upt)
 	return &model.Link{URL: link}, nil
 }
@@ -52,9 +52,18 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
 	return res, nil
 }

+type nopReadSeekCloser struct {
+	io.Reader
+}
+
+func (nopReadSeekCloser) Seek(offset int64, whence int) (int64, error) {
+	return offset, nil
+}
+func (nopReadSeekCloser) Close() error { return nil }
+
 func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	return &model.Link{
-		Data: io.NopCloser(io.LimitReader(random.Rand, file.GetSize())),
+		ReadSeekCloser: nopReadSeekCloser{io.LimitReader(random.Rand, file.GetSize())},
 	}, nil
 }
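Note: the no-op Seek is sound here only because the virtual driver serves random bytes, where every position is as good as any other; Close is likewise a no-op since there is no underlying resource to release. A real driver must return a genuinely seekable stream, as the SFTP and SMB changes above do.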
402	drivers/weiyun/driver.go	Normal file
@@ -0,0 +1,402 @@
package weiyun

import (
	"context"
	"fmt"
	"io"
	"math"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/cron"
	"github.com/alist-org/alist/v3/pkg/errgroup"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/avast/retry-go"
	weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go"
)

type WeiYun struct {
	model.Storage
	Addition

	client     *weiyunsdkgo.WeiYunClient
	cron       *cron.Cron
	rootFolder *Folder

	uploadThread int
}

func (d *WeiYun) Config() driver.Config {
	return config
}

func (d *WeiYun) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *WeiYun) Init(ctx context.Context) error {
	// limit the number of upload threads
	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
	if d.uploadThread < 4 || d.uploadThread > 32 {
		d.uploadThread, d.UploadThread = 4, "4"
	}

	d.client = weiyunsdkgo.NewWeiYunClientWithRestyClient(base.NewRestyClient())
	err := d.client.SetCookiesStr(d.Cookies).RefreshCtoken()
	if err != nil {
		return err
	}

	// callback on cookie expiration
	d.client.SetOnCookieExpired(func(err error) {
		d.Status = err.Error()
		op.MustSaveDriverStorage(d)
	})

	// callback on cookie refresh
	d.client.SetOnCookieUpload(func(c []*http.Cookie) {
		d.Cookies = weiyunsdkgo.CookieToString(weiyunsdkgo.ClearCookie(c))
		op.MustSaveDriverStorage(d)
	})

	// keep the QQ cookie alive
	if d.client.LoginType() == 1 {
		d.cron = cron.NewCron(time.Minute * 5)
		d.cron.Do(func() {
			d.client.KeepAlive()
		})
	}

	// fetch the default root directory's dirKey
	if d.RootFolderID == "" {
		userInfo, err := d.client.DiskUserInfoGet()
		if err != nil {
			return err
		}
		d.RootFolderID = userInfo.MainDirKey
	}

	// resolve the directory ID to find its PdirKey
	folders, err := d.client.LibDirPathGet(d.RootFolderID)
	if err != nil {
		return err
	}
	if len(folders) == 0 {
		return fmt.Errorf("invalid directory ID")
	}

	folder := folders[len(folders)-1]
	d.rootFolder = &Folder{
		PFolder: &Folder{
			Folder: weiyunsdkgo.Folder{
				DirKey: folder.PdirKey,
			},
		},
		Folder: folder.Folder,
	}
	return nil
}

func (d *WeiYun) Drop(ctx context.Context) error {
	d.client = nil
	if d.cron != nil {
		d.cron.Stop()
		d.cron = nil
	}
	return nil
}

func (d *WeiYun) GetRoot(ctx context.Context) (model.Obj, error) {
	return d.rootFolder, nil
}

func (d *WeiYun) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if folder, ok := dir.(*Folder); ok {
		var files []model.Obj
		for {
			data, err := d.client.DiskDirFileList(folder.GetID(), weiyunsdkgo.WarpParamOption(
				weiyunsdkgo.QueryFileOptionOffest(int64(len(files))),
				weiyunsdkgo.QueryFileOptionGetType(weiyunsdkgo.FileAndDir),
				weiyunsdkgo.QueryFileOptionSort(func() weiyunsdkgo.OrderBy {
					switch d.OrderBy {
					case "name":
						return weiyunsdkgo.FileName
					case "size":
						return weiyunsdkgo.FileSize
					case "updated_at":
						return weiyunsdkgo.FileMtime
					default:
						return weiyunsdkgo.FileName
					}
				}(), d.OrderDirection == "desc"),
			))
			if err != nil {
				return nil, err
			}

			if files == nil {
				files = make([]model.Obj, 0, data.TotalDirCount+data.TotalFileCount)
			}

			for _, dir := range data.DirList {
				files = append(files, &Folder{
					PFolder: folder,
					Folder:  dir,
				})
			}

			for _, file := range data.FileList {
				files = append(files, &File{
					PFolder: folder,
					File:    file,
				})
			}

			if data.FinishFlag || len(data.DirList)+len(data.FileList) == 0 {
				return files, nil
			}
		}
	}
	return nil, errs.NotSupport
}

func (d *WeiYun) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if file, ok := file.(*File); ok {
		data, err := d.client.DiskFileDownload(weiyunsdkgo.FileParam{PdirKey: file.GetPKey(), FileID: file.GetID()})
		if err != nil {
			return nil, err
		}
		return &model.Link{
			URL: data.DownloadUrl,
			Header: http.Header{
				"Cookie": []string{data.CookieName + "=" + data.CookieValue},
			},
		}, nil
	}
	return nil, errs.NotSupport
}

func (d *WeiYun) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if folder, ok := parentDir.(*Folder); ok {
		newFolder, err := d.client.DiskDirCreate(weiyunsdkgo.FolderParam{
			PPdirKey: folder.GetPKey(),
			PdirKey:  folder.DirKey,
			DirName:  dirName,
		})
		if err != nil {
			return nil, err
		}
		return &Folder{
			PFolder: folder,
			Folder:  *newFolder,
		}, nil
	}
	return nil, errs.NotSupport
}

func (d *WeiYun) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	// TODO: the default strategy is rename-on-conflict; with caching this may collide.
	// The WeiYun app has the same conflict; unclear how Tencent handles it.
	if dstDir, ok := dstDir.(*Folder); ok {
		dstParam := weiyunsdkgo.FolderParam{
			PdirKey: dstDir.GetPKey(),
			DirKey:  dstDir.GetID(),
			DirName: dstDir.GetName(),
		}
		switch srcObj := srcObj.(type) {
		case *File:
			err := d.client.DiskFileMove(weiyunsdkgo.FileParam{
				PPdirKey: srcObj.PFolder.GetPKey(),
				PdirKey:  srcObj.GetPKey(),
				FileID:   srcObj.GetID(),
				FileName: srcObj.GetName(),
			}, dstParam)
			if err != nil {
				return nil, err
			}
			return &File{
				PFolder: dstDir,
				File:    srcObj.File,
			}, nil
		case *Folder:
			err := d.client.DiskDirMove(weiyunsdkgo.FolderParam{
				PPdirKey: srcObj.PFolder.GetPKey(),
				PdirKey:  srcObj.GetPKey(),
				DirKey:   srcObj.GetID(),
				DirName:  srcObj.GetName(),
			}, dstParam)
			if err != nil {
				return nil, err
			}
			return &Folder{
				PFolder: dstDir,
				Folder:  srcObj.Folder,
			}, nil
		}
	}
	return nil, errs.NotSupport
}

func (d *WeiYun) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	switch srcObj := srcObj.(type) {
	case *File:
		err := d.client.DiskFileRename(weiyunsdkgo.FileParam{
			PPdirKey: srcObj.PFolder.GetPKey(),
			PdirKey:  srcObj.GetPKey(),
			FileID:   srcObj.GetID(),
			FileName: srcObj.GetName(),
		}, newName)
		if err != nil {
			return nil, err
		}
		newFile := srcObj.File
		newFile.FileName = newName
		newFile.FileCtime = weiyunsdkgo.TimeStamp(time.Now())
		return &File{
			PFolder: srcObj.PFolder,
			File:    newFile,
		}, nil
	case *Folder:
		err := d.client.DiskDirAttrModify(weiyunsdkgo.FolderParam{
			PPdirKey: srcObj.PFolder.GetPKey(),
			PdirKey:  srcObj.GetPKey(),
			DirKey:   srcObj.GetID(),
			DirName:  srcObj.GetName(),
		}, newName)
		if err != nil {
			return nil, err
		}

		newFolder := srcObj.Folder
		newFolder.DirName = newName
		newFolder.DirCtime = weiyunsdkgo.TimeStamp(time.Now())
		return &Folder{
			PFolder: srcObj.PFolder,
			Folder:  newFolder,
		}, nil
	}
	return nil, errs.NotSupport
}

func (d *WeiYun) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	return errs.NotImplement
}

func (d *WeiYun) Remove(ctx context.Context, obj model.Obj) error {
	switch obj := obj.(type) {
	case *File:
		return d.client.DiskFileDelete(weiyunsdkgo.FileParam{
			PPdirKey: obj.PFolder.GetPKey(),
			PdirKey:  obj.GetPKey(),
			FileID:   obj.GetID(),
			FileName: obj.GetName(),
		})
	case *Folder:
		return d.client.DiskDirDelete(weiyunsdkgo.FolderParam{
			PPdirKey: obj.PFolder.GetPKey(),
			PdirKey:  obj.GetPKey(),
			DirKey:   obj.GetID(),
			DirName:  obj.GetName(),
		})
	}
	return errs.NotSupport
}

func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	if folder, ok := dstDir.(*Folder); ok {
		file, err := utils.CreateTempFile(stream, stream.GetSize())
		if err != nil {
			return nil, err
		}
		defer func() {
			_ = file.Close()
			_ = os.Remove(file.Name())
		}()

		// step 1.
		preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
			PdirKey: folder.GetPKey(),
			DirKey:  folder.DirKey,

			FileName: stream.GetName(),
			FileSize: stream.GetSize(),
			File:     file,

			ChannelCount:    4,
			FileExistOption: 1,
		})
		if err != nil {
			return nil, err
		}

		// fast upload
		if !preData.FileExist {
			// step 2: add upload channels
			if len(preData.ChannelList) < d.uploadThread {
				newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
				if err != nil {
					return nil, err
				}
				preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
			}
			// step 3: upload
			threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
				retry.Attempts(3),
				retry.Delay(time.Second),
				retry.DelayType(retry.BackOffDelay))

			for _, channel := range preData.ChannelList {
				if utils.IsCanceled(upCtx) {
					break
				}

				var channel = channel
				threadG.Go(func(ctx context.Context) error {
					for {
						channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
						upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
							io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
						if err != nil {
							return err
						}
						// upload finished
						if upData.UploadState != 1 {
							return nil
						}
						channel = upData.Channel
					}
				})
			}
			if err = threadG.Wait(); err != nil {
				return nil, err
			}
		}

		return &File{
			PFolder: folder,
			File:    preData.File,
		}, nil
	}
	return nil, errs.NotSupport
}

// func (d *WeiYun) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
// }

var _ driver.Driver = (*WeiYun)(nil)
var _ driver.GetRooter = (*WeiYun)(nil)
var _ driver.MkdirResult = (*WeiYun)(nil)

// var _ driver.CopyResult = (*WeiYun)(nil)
var _ driver.MoveResult = (*WeiYun)(nil)
var _ driver.Remove = (*WeiYun)(nil)

var _ driver.PutResult = (*WeiYun)(nil)
var _ driver.RenameResult = (*WeiYun)(nil)
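Note on the upload flow in Put above: PreUpload registers the file first, and if the server already has a copy (preData.FileExist) no bytes are transferred at all. Otherwise each upload channel describes an (Offset, Len) window of the spooled temp file; a worker feeds its window through io.NewSectionReader and the server hands back the next window in upData.Channel until UploadState signals completion. The errgroup/retry-go pairing caps workers at the configured thread count (clamped to 4..32 in Init) and retries transient failures with exponential backoff.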
29	drivers/weiyun/meta.go	Normal file
@@ -0,0 +1,29 @@
package weiyun

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	RootFolderID   string `json:"root_folder_id"`
	Cookies        string `json:"cookies" required:"true"`
	OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at" default:"name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	UploadThread   string `json:"upload_thread" default:"4" help:"4<=thread<=32"`
}

var config = driver.Config{
	Name:              "WeiYun",
	LocalSort:         false,
	OnlyProxy:         true,
	CheckStatus:       true,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &WeiYun{}
	})
}
39	drivers/weiyun/types.go	Normal file
@@ -0,0 +1,39 @@
package weiyun

import (
	"time"

	weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go"
)

type File struct {
	PFolder *Folder
	weiyunsdkgo.File
}

func (f *File) GetID() string      { return f.FileID }
func (f *File) GetSize() int64     { return f.FileSize }
func (f *File) GetName() string    { return f.FileName }
func (f *File) ModTime() time.Time { return time.Time(f.FileMtime) }
func (f *File) IsDir() bool        { return false }
func (f *File) GetPath() string    { return "" }

func (f *File) GetPKey() string {
	return f.PFolder.DirKey
}

type Folder struct {
	PFolder *Folder
	weiyunsdkgo.Folder
}

func (f *Folder) GetID() string      { return f.DirKey }
func (f *Folder) GetSize() int64     { return 0 }
func (f *Folder) GetName() string    { return f.DirName }
func (f *Folder) ModTime() time.Time { return time.Time(f.DirMtime) }
func (f *Folder) IsDir() bool        { return true }
func (f *Folder) GetPath() string    { return "" }

func (f *Folder) GetPKey() string {
	return f.PFolder.DirKey
}
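Note: both object types carry a PFolder pointer to their parent, so GetPKey() yields the parent directory key that nearly every WeiYun API call requires without an extra lookup; this is also why Init synthesizes a parent wrapping only the PdirKey for the root folder.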
@@ -3,6 +3,7 @@ package template
 import (
 	"context"
 	"fmt"
+	"strconv"

 	"github.com/Xhofe/wopan-sdk-go"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -15,7 +16,8 @@ import (
 type Wopan struct {
 	model.Storage
 	Addition
 	client *wopan.WoClient
+	defaultFamilyID string
 }

 func (d *Wopan) Config() driver.Config {
@@ -34,6 +36,11 @@ func (d *Wopan) Init(ctx context.Context) error {
 		d.RefreshToken = refreshToken
 		op.MustSaveDriverStorage(d)
 	})
+	fml, err := d.client.FamilyUserCurrentEncode()
+	if err != nil {
+		return err
+	}
+	d.defaultFamilyID = strconv.Itoa(fml.DefaultHomeId)
 	return d.client.InitData()
 }

@@ -81,7 +88,11 @@ func (d *Wopan) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 }

 func (d *Wopan) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-	_, err := d.client.CreateDirectory(d.getSpaceType(), parentDir.GetID(), dirName, d.FamilyID, func(req *resty.Request) {
+	familyID := d.FamilyID
+	if familyID == "" {
+		familyID = d.defaultFamilyID
+	}
+	_, err := d.client.CreateDirectory(d.getSpaceType(), parentDir.GetID(), dirName, familyID, func(req *resty.Request) {
 		req.SetContext(ctx)
 	})
 	return err
Some files were not shown because too many files have changed in this diff.