Compare commits
141 Commits
SHA1 (author, date, and message columns were not captured):
a64dd4885e
0f03a747d8
30977cdc6d
106cf720c1
882112ed1c
2a6ab77295
f0981a0c8d
57eea4db17
234852ca61
809105b67e
02e8c31506
19b39a5c04
28e2731594
b1a279cbcc
352a6a741a
109015567a
9e0fa77ca2
335b11c698
8e433355e6
3504f017b9
cd2f8077fa
d5b68a91d2
623c7dcea5
ecbd6d86cd
7200344ace
b313ac4daa
f2f312b43a
6f6d20e1ba
3231c3d930
b604e21c69
3c66db9845
f6ab1f7f61
8e40465e86
37dffd0fce
e7c0d94b44
8102142007
7c6dec5d47
dd10c0c5d0
34fadecc2c
cb8867fcc1
092ed06833
6308f1c35d
ce10c9f120
6c4736fc8f
b301b791c7
19d34e2eb8
a3748af772
9b765ef696
8f493cccc4
31a033dff1
8c3337b88b
7238243664
ba2b15ab24
28dc8822b7
358c5055e9
b6cd40e6d3
7d96d8070d
d482fb5f26
60402ce1fc
1e3950c847
ed550594da
3bbae29f93
3b74f8cd9a
e9bdb91e01
1aa024ed6b
13e8d36e1a
5606c23768
0b675d6c02
c1db3a36ad
c59dbb4f9e
df6b306fce
9d45718e5f
b91ed7a78a
95386d777b
635809c376
af6bb2a6aa
a797494aa3
353dd7f796
1c00d64952
ff5cf3f4fa
5b6b2f427a
7877184bee
e9cb37122e
a425392a2b
75acbcc115
30415cefbe
1d06a0019f
3686075a7f
6c1c7e5cc0
c4f901b201
4b7acb1389
15b7169df4
861948bcf3
e5ffd39cf2
8b353da0d2
49bde82426
3e285aaec4
355fc576b1
a69d72aa20
e5d123c5d3
220eb33f88
5238850036
81ac963567
3c21a9a520
1dc1dd1f07
c9ea9bce81
9f08353d31
ce0c3626c2
06f46206db
579f0c06af
b12d92acc9
e700ce15e5
7dbef7d559
7e9cdd8b07
cee6bc6b5d
cfd23c05b4
0c1acd72ca
e2ca06dcca
0828fd787d
2e23ea68d4
4afa822bec
f2ca9b40db
4c2535cb22
d4ea8787c9
a4de04528a
f60aae7499
de8f9e9eee
cace9db12f
ec2fb82836
afcfbf02ea
cad04e07dd
30f732138c
04034bd03b
6ec9a8d4c7
3f7882b467
a4511c1963
9d1f122717
5dd73d80d8
fce872bc1b
df6c4c80c2
d2ff040cf8
.github/FUNDING.yml (vendored): 2 changes
@@ -3,7 +3,7 @@
 github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 patreon: # Replace with a single Patreon username
 open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
+ko_fi: xhofe # Replace with a single Ko-fi username
 tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 liberapay: # Replace with a single Liberapay username
.github/ISSUE_TEMPLATE/bug_report.yml (vendored): 43 changes
@@ -7,28 +7,44 @@ body:
       value: |
         Thanks for taking the time to fill out this bug report, please **confirm that your issue is not a duplicate issue and not because of your operation or version issues**
         感谢您花时间填写此错误报告,请**务必确认您的issue不是重复的且不是因为您的操作或版本问题**
   - type: checkboxes
     attributes:
       label: Please make sure of the following things
-      description: You may select more than one, even select all.
+      description: |
+        You must check all the following, otherwise your issue may be closed directly. Or you can go to the [discussions](https://github.com/alist-org/alist/discussions)
+        您必须勾选以下所有内容,否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
       options:
-        - label: I have read the [documentation](https://alist.nn.ci).
-        - label: I'm sure there are no duplicate issues or discussions.
-        - label: I'm sure it's due to `alist` and not something else(such as `Dependencies` or `Operational`).
-        - label: I'm sure I'm using the latest version
+        - label: |
+            I have read the [documentation](https://alist.nn.ci).
+            我已经阅读了[文档](https://alist.nn.ci)。
+        - label: |
+            I'm sure there are no duplicate issues or discussions.
+            我确定没有重复的issue或讨论。
+        - label: |
+            I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
+            我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
+        - label: |
+            I'm sure this issue is not fixed in the latest version.
+            我确定这个问题在最新版本中没有被修复。
   - type: input
     id: version
     attributes:
-      label: Alist Version / Alist 版本
-      description: What version of our software are you running?
-      placeholder: v2.0.0
+      label: AList Version / AList 版本
+      description: |
+        What version of our software are you running? Do not use `latest` or `master` as an answer.
+        您使用的是哪个版本的软件?请不要使用`latest`或`master`作为答案。
+      placeholder: v3.xx.xx
     validations:
       required: true
   - type: input
     id: driver
     attributes:
       label: Driver used / 使用的存储驱动
-      description: What storage driver are you using?
+      description: |
+        What storage driver are you using?
+        您使用的是哪个存储驱动?
       placeholder: "for example: Onedrive"
     validations:
       required: true
@@ -47,6 +63,15 @@ body:
         请提供能复现此问题的链接,请知悉如果不提供它你的issue可能会被直接关闭。
     validations:
       required: true
+  - type: textarea
+    id: config
+    attributes:
+      label: Config / 配置
+      description: |
+        Please provide the configuration file of your `AList` application and take a screenshot of the relevant storage configuration. (hide privacy field)
+        请提供您的`AList`应用的配置文件,并截图相关存储配置。(隐藏隐私字段)
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
.github/workflows/auto_lang.yml (vendored): 8 changes
@@ -11,6 +11,10 @@ on:
       - 'cmd/lang.go'
   workflow_dispatch:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   auto_lang:
     strategy:
@@ -53,8 +57,8 @@ jobs:
         run: |
           cd alist-web
           git add .
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git commit -m "chore: auto update i18n file" -a 2>/dev/null || :
           cd ..
.github/workflows/build.yml (vendored): 4 changes
@@ -6,6 +6,10 @@ on:
   pull_request:
     branches: [ 'main' ]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
     strategy:
.github/workflows/build_docker.yml (vendored): 8 changes
@@ -4,6 +4,10 @@ on:
   push:
     branches: [ main ]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build_docker:
     name: Build docker
@@ -53,8 +57,8 @@ jobs:
       - name: Commit
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Noah Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git commit --allow-empty -m "Trigger build for ${{ github.sha }}"

       - name: Push commit
.github/workflows/issue_question.yml (vendored): 2 changes
@@ -10,7 +10,7 @@ jobs:
     if: github.event.label.name == 'question'
     steps:
       - name: Create comment
-        uses: actions-cool/issues-helper@v3.4.0
+        uses: actions-cool/issues-helper@v3.5.2
         with:
           actions: 'create-comment'
           token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/issue_rm_working.yml (vendored, new file): 17 additions
@@ -0,0 +1,17 @@
+name: Remove working label when issue closed
+
+on:
+  issues:
+    types: [closed]
+
+jobs:
+  rm-working:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Remove working label
+        uses: actions-cool/issues-helper@v3
+        with:
+          actions: 'remove-labels'
+          token: ${{ secrets.GITHUB_TOKEN }}
+          issue-number: ${{ github.event.issue.number }}
+          labels: 'working'
.github/workflows/release.yml (vendored): 12 changes
@@ -41,17 +41,11 @@ jobs:
         run: |
           bash build.sh release

-      - name: Release latest
-        uses: irongut/EditRelease@v1.2.0
-        with:
-          token: ${{ secrets.MY_TOKEN }}
-          id: ${{ github.event.release.id }}
-          prerelease: false
-
       - name: Upload assets
         uses: softprops/action-gh-release@v1
         with:
           files: build/compress/*
+          prerelease: false

   release_desktop:
     needs: release
@@ -68,8 +62,8 @@ jobs:
       - name: Add tag
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           version=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
           git tag -a $version -m "release $version"
.github/workflows/release_docker.yml (vendored): 4 changes
@@ -56,8 +56,8 @@ jobs:
       - name: Add tag
         run: |
-          git config --local user.email "i@nn.ci"
-          git config --local user.name "Andy Hsu"
+          git config --local user.email "bot@nn.ci"
+          git config --local user.name "IlaBot"
           git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}"

       - name: Push tags
.github/workflows/release_linux_musl.yml (vendored, new file): 34 additions
@@ -0,0 +1,34 @@
+name: release_linux_musl
+
+on:
+  release:
+    types: [ published ]
+
+jobs:
+  release_linux_musl:
+    strategy:
+      matrix:
+        platform: [ ubuntu-latest ]
+        go-version: [ '1.20' ]
+    name: Release
+    runs-on: ${{ matrix.platform }}
+    steps:
+
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ matrix.go-version }}
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Build
+        run: |
+          bash build.sh release linux_musl
+
+      - name: Upload assets
+        uses: softprops/action-gh-release@v1
+        with:
+          files: build/compress/*
.github/workflows/release_linux_musl_arm.yml (vendored, new file): 34 additions
@@ -0,0 +1,34 @@
+name: release_linux_musl_arm
+
+on:
+  release:
+    types: [ published ]
+
+jobs:
+  release_linux_musl_arm:
+    strategy:
+      matrix:
+        platform: [ ubuntu-latest ]
+        go-version: [ '1.20' ]
+    name: Release
+    runs-on: ${{ matrix.platform }}
+    steps:
+
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ matrix.go-version }}
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Build
+        run: |
+          bash build.sh release linux_musl_arm
+
+      - name: Upload assets
+        uses: softprops/action-gh-release@v1
+        with:
+          files: build/compress/*
.gitignore (vendored): 2 changes
@@ -29,3 +29,5 @@ output/
 /daemon/
 /public/dist/*
 /!public/dist/README.md
+
+.VSCodeCounter
(file header not captured)
@@ -7,7 +7,7 @@
 Prerequisites:

 - [git](https://git-scm.com)
-- [Go 1.19+](https://golang.org/doc/install)
+- [Go 1.20+](https://golang.org/doc/install)
 - [gcc](https://gcc.gnu.org/)
 - [nodejs](https://nodejs.org/)
README.md (Executable file → Normal file): 6 changes
@@ -39,7 +39,7 @@

 ---

-English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

 ## Features

@@ -91,6 +91,7 @@ English | [中文](./README_cn.md) | [Contributing](./CONTRIBUTING.md) | [CODE_O
 - [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
 - [x] Offline download
 - [x] Copy files between two storage
+- [x] Multi-thread downloading acceleration for single-thread download/stream

 ## Document

@@ -112,8 +113,7 @@ https://alist.nn.ci/guide/sponsor.html
 ### Special sponsors

 - [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
-- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.la/)
-- [KinhDown 百度云盘不限速下载!永久免费!已稳定运行3年!非常可靠!Q群 -> 786799372](https://kinhdown.com)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
 - [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

 ## Contributors
(README_cn.md, file header not captured)
@@ -39,7 +39,7 @@

 ---

-[English](./README.md) | 中文 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+[English](./README.md) | 中文 | [日本語](./README_ja.md) | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)

 ## 功能

@@ -90,6 +90,7 @@
 - [x] 网页上传(可以允许访客上传),删除,新建文件夹,重命名,移动,复制
 - [x] 离线下载
 - [x] 跨存储复制文件
+- [x] 单线程下载/串流的多线程下载加速

 ## 文档

@@ -110,8 +111,7 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我
 ### 特别赞助

 - [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (国内API服务器赞助)
-- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.la/)
-- [KinhDown 百度云盘不限速下载!永久免费!已稳定运行3年!非常可靠!Q群 -> 786799372](https://kinhdown.com)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
 - [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)

 ## 贡献者
README_ja.md (new file): 138 additions
@@ -0,0 +1,138 @@
+<div align="center">
+  <a href="https://alist.nn.ci"><img height="100px" alt="logo" src="https://cdn.jsdelivr.net/gh/alist-org/logo@main/logo.svg"/></a>
+  <p><em>🗂️Gin と Solidjs による、複数のストレージをサポートするファイルリストプログラム。</em></p>
+  <div>
+    <a href="https://goreportcard.com/report/github.com/alist-org/alist/v3">
+      <img src="https://goreportcard.com/badge/github.com/alist-org/alist/v3" alt="latest version" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/blob/main/LICENSE">
+      <img src="https://img.shields.io/github/license/Xhofe/alist" alt="License" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/actions?query=workflow%3ABuild">
+      <img src="https://img.shields.io/github/actions/workflow/status/Xhofe/alist/build.yml?branch=main" alt="Build status" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/releases">
+      <img src="https://img.shields.io/github/release/Xhofe/alist" alt="latest version" />
+    </a>
+    <a title="Crowdin" target="_blank" href="https://crwd.in/alist">
+      <img src="https://badges.crowdin.net/alist/localized.svg">
+    </a>
+  </div>
+  <div>
+    <a href="https://github.com/Xhofe/alist/discussions">
+      <img src="https://img.shields.io/github/discussions/Xhofe/alist?color=%23ED8936" alt="discussions" />
+    </a>
+    <a href="https://discord.gg/F4ymsH4xv2">
+      <img src="https://img.shields.io/discord/1018870125102895134?logo=discord" alt="discussions" />
+    </a>
+    <a href="https://github.com/Xhofe/alist/releases">
+      <img src="https://img.shields.io/github/downloads/Xhofe/alist/total?color=%239F7AEA&logo=github" alt="Downloads" />
+    </a>
+    <a href="https://hub.docker.com/r/xhofe/alist">
+      <img src="https://img.shields.io/docker/pulls/xhofe/alist?color=%2348BB78&logo=docker&label=pulls" alt="Downloads" />
+    </a>
+    <a href="https://alist.nn.ci/guide/sponsor.html">
+      <img src="https://img.shields.io/badge/%24-sponsor-F87171.svg" alt="sponsor" />
+    </a>
+  </div>
+</div>
+
+---
+
+[English](./README.md) | [中文](./README_cn.md) | 日本語 | [Contributing](./CONTRIBUTING.md) | [CODE_OF_CONDUCT](./CODE_OF_CONDUCT.md)
+
+## 特徴
+
+- [x] マルチストレージ
+  - [x] ローカルストレージ
+  - [x] [Aliyundrive](https://www.aliyundrive.com/)
+  - [x] OneDrive / Sharepoint ([グローバル](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
+  - [x] [189cloud](https://cloud.189.cn) (Personal, Family)
+  - [x] [GoogleDrive](https://drive.google.com/)
+  - [x] [123pan](https://www.123pan.com/)
+  - [x] FTP / SFTP
+  - [x] [PikPak](https://www.mypikpak.com/)
+  - [x] [S3](https://aws.amazon.com/s3/)
+  - [x] [Seafile](https://seafile.com/)
+  - [x] [UPYUN Storage Service](https://www.upyun.com/products/file-storage)
+  - [x] WebDav(Support OneDrive/SharePoint without API)
+  - [x] Teambition([China](https://www.teambition.com/ ),[International](https://us.teambition.com/ ))
+  - [x] [Mediatrack](https://www.mediatrack.cn/)
+  - [x] [139yun](https://yun.139.com/) (Personal, Family)
+  - [x] [YandexDisk](https://disk.yandex.com/)
+  - [x] [BaiduNetdisk](http://pan.baidu.com/)
+  - [x] [Terabox](https://www.terabox.com/main)
+  - [x] [UC](https://drive.uc.cn)
+  - [x] [Quark](https://pan.quark.cn)
+  - [x] [Thunder](https://pan.xunlei.com)
+  - [x] [Lanzou](https://www.lanzou.com/)
+  - [x] [Aliyundrive share](https://www.aliyundrive.com/)
+  - [x] [Google photo](https://photos.google.com/)
+  - [x] [Mega.nz](https://mega.nz)
+  - [x] [Baidu photo](https://photo.baidu.com/)
+  - [x] SMB
+  - [x] [115](https://115.com/)
+  - [X] Cloudreve
+  - [x] [Dropbox](https://www.dropbox.com/)
+- [x] デプロイが簡単で、すぐに使える
+- [x] ファイルプレビュー (PDF, マークダウン, コード, プレーンテキスト, ...)
+- [x] ギャラリーモードでの画像プレビュー
+- [x] ビデオとオーディオのプレビュー、歌詞と字幕のサポート
+- [x] Office ドキュメントのプレビュー (docx, pptx, xlsx, ...)
+- [x] `README.md` のプレビューレンダリング
+- [x] ファイルのパーマリンクコピーと直接ダウンロード
+- [x] ダークモード
+- [x] 国際化
+- [x] 保護されたルート (パスワード保護と認証)
+- [x] WebDav (詳細は https://alist.nn.ci/guide/webdav.html を参照)
+- [x] [Docker デプロイ](https://hub.docker.com/r/xhofe/alist)
+- [x] Cloudflare ワーカープロキシ
+- [x] ファイル/フォルダパッケージのダウンロード
+- [x] ウェブアップロード(訪問者にアップロードを許可できる), 削除, mkdir, 名前変更, 移動, コピー
+- [x] オフラインダウンロード
+- [x] 二つのストレージ間でファイルをコピー
+- [x] シングルスレッドのダウンロード/ストリーム向けのマルチスレッド ダウンロード アクセラレーション
+
+## ドキュメント
+
+<https://alist.nn.ci/>
+
+## デモ
+
+<https://al.nn.ci>
+
+## ディスカッション
+
+一般的なご質問は[ディスカッションフォーラム](https://github.com/Xhofe/alist/discussions)をご利用ください。**問題はバグレポートと機能リクエストのみです。**
+
+## スポンサー
+
+AList はオープンソースのソフトウェアです。もしあなたがこのプロジェクトを気に入ってくださり、続けて欲しいと思ってくださるなら、ぜひスポンサーになってくださるか、1口でも寄付をしてくださるようご検討ください!すべての愛とサポートに感謝します:
+https://alist.nn.ci/guide/sponsor.html
+
+### スペシャルスポンサー
+
+- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
+- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
+- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
+
+## コントリビューター
+
+これらの素晴らしい人々に感謝します:
+
+[](https://github.com/alist-org/alist/graphs/contributors)
+
+## ライセンス
+
+`AList` は AGPL-3.0 ライセンスの下でライセンスされたオープンソースソフトウェアです。
+
+## 免責事項
+
+- このプログラムはフリーでオープンソースのプロジェクトです。ネットワークディスク上でファイルを共有するように設計されており、golang のダウンロードや学習に便利です。利用にあたっては関連法規を遵守し、悪用しないようお願いします;
+- このプログラムは、公式インターフェースの動作を破壊することなく、公式 sdk/インターフェースを呼び出すことで実装されています;
+- このプログラムは、302リダイレクト/トラフィック転送のみを行い、いかなるユーザーデータも傍受、保存、改ざんしません;
+- このプログラムを使用する前に、アカウントの禁止、ダウンロード速度の制限など、対応するリスクを理解し、負担する必要があります;
+- もし侵害があれば、[メール](mailto:i@nn.ci)で私に連絡してください。
+
+---
+
+> [@Blog](https://nn.ci/) · [@GitHub](https://github.com/Xhofe) · [@TelegramGroup](https://t.me/alist_chat) · [@Discord](https://discord.gg/F4ymsH4xv2)
build.sh: 71 changes
@@ -89,18 +89,31 @@ BuildDocker() {
 }

 BuildRelease() {
   rm -rf .git/
   mkdir -p "build"
+  BuildWinArm64 ./build/alist-windows-arm64.exe
+  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
+  # why? Because some target platforms seem to have issues with upx compression
+  upx -9 ./alist-linux-amd64
+  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
+  upx -9 ./alist-windows-amd64-upx.exe
+  mv alist-* build
+}
+
+BuildReleaseLinuxMusl() {
+  rm -rf .git/
+  mkdir -p "build"
   muslflags="--extldflags '-static -fpic' $ldflags"
   BASE="https://musl.nn.ci/"
-  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross arm-linux-musleabihf-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
+  FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
   for i in "${FILES[@]}"; do
     url="${BASE}${i}.tgz"
     curl -L -o "${i}.tgz" "${url}"
     sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
+    rm -f "${i}.tgz"
   done
-  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-arm linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
-  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc arm-linux-musleabihf-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
+  OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
+  CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
   for i in "${!OS_ARCHES[@]}"; do
     os_arch=${OS_ARCHES[$i]}
     cgo_cc=${CGO_ARGS[$i]}
@@ -111,13 +124,39 @@ BuildRelease() {
     export CGO_ENABLED=1
     go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
   done
-  BuildWinArm64 ./build/alist-windows-arm64.exe
-  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
-  # why? Because some target platforms seem to have issues with upx compression
-  upx -9 ./alist-linux-amd64
-  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
-  upx -9 ./alist-windows-amd64-upx.exe
-  mv alist-* build
 }

+BuildReleaseLinuxMuslArm() {
+  rm -rf .git/
+  mkdir -p "build"
+  muslflags="--extldflags '-static -fpic' $ldflags"
+  BASE="https://musl.nn.ci/"
+  # FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armeb-linux-musleabi-cross armeb-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
+  FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
+  for i in "${FILES[@]}"; do
+    url="${BASE}${i}.tgz"
+    curl -L -o "${i}.tgz" "${url}"
+    sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
+    rm -f "${i}.tgz"
+  done
+  # OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armeb linux-musleabihf-armeb linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
+  # CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armeb-linux-musleabi-gcc armeb-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
+  # GOARMS=('' '' '' '' '' '' '5' '5' '6' '6' '7' '7' '7')
+  OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
+  CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
+  GOARMS=('' '' '' '' '5' '5' '6' '6' '7' '7' '7')
+  for i in "${!OS_ARCHES[@]}"; do
+    os_arch=${OS_ARCHES[$i]}
+    cgo_cc=${CGO_ARGS[$i]}
+    arm=${GOARMS[$i]}
+    echo building for ${os_arch}
+    export GOOS=linux
+    export GOARCH=arm
+    export CC=${cgo_cc}
+    export CGO_ENABLED=1
+    export GOARM=${arm}
+    go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
+  done
+}

@@ -139,8 +178,8 @@ MakeRelease() {
     rm -f alist.exe
   done
   cd compress
-  find . -type f -print0 | xargs -0 md5sum >md5.txt
-  cat md5.txt
+  find . -type f -print0 | xargs -0 md5sum >"$1"
+  cat "$1"
   cd ../..
 }

@@ -155,9 +194,15 @@
 elif [ "$1" = "release" ]; then
   FetchWebRelease
   if [ "$2" = "docker" ]; then
     BuildDocker
+  elif [ "$2" = "linux_musl_arm" ]; then
+    BuildReleaseLinuxMuslArm
+    MakeRelease "md5-linux-musl-arm.txt"
+  elif [ "$2" = "linux_musl" ]; then
+    BuildReleaseLinuxMusl
+    MakeRelease "md5-linux-musl.txt"
   else
     BuildRelease
-    MakeRelease
+    MakeRelease "md5.txt"
   fi
 else
   echo -e "Parameter error"
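Taken together, the build.sh changes split the musl builds out of BuildRelease into their own functions and route them through new script arguments, with MakeRelease now taking the checksum file name as its first argument so each flavor writes its own md5 list instead of a shared md5.txt. A minimal sketch of the entry points this diff wires up (arguments exactly as they appear in the script above; your local xgo/upx/musl toolchain still has to be installed separately):

```sh
# Default release artifacts (xgo cross-build + upx), checksums in md5.txt
bash build.sh release

# Static musl targets only, checksums in md5-linux-musl.txt
bash build.sh release linux_musl

# musl ARM variants only, checksums in md5-linux-musl-arm.txt
bash build.sh release linux_musl_arm

# Binaries for the docker image (no checksum archive step)
bash build.sh release docker
```

The two new release_linux_musl workflows above invoke exactly the `linux_musl` and `linux_musl_arm` branches.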
cmd/admin.go: 72 changes
@@ -4,30 +4,90 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/spf13/cobra"
 )

-// PasswordCmd represents the password command
-var PasswordCmd = &cobra.Command{
+// AdminCmd represents the password command
+var AdminCmd = &cobra.Command{
 	Use:     "admin",
 	Aliases: []string{"password"},
-	Short:   "Show admin user's info",
+	Short:   "Show admin user's info and some operations about admin user's password",
 	Run: func(cmd *cobra.Command, args []string) {
 		Init()
+		defer Release()
 		admin, err := op.GetAdmin()
 		if err != nil {
 			utils.Log.Errorf("failed get admin user: %+v", err)
 		} else {
-			utils.Log.Infof("admin user's info: \nusername: %s\npassword: %s", admin.Username, admin.Password)
+			utils.Log.Infof("Admin user's username: %s", admin.Username)
+			utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
+			utils.Log.Infof("You can reset the password with a random string by running [alist admin random]")
+			utils.Log.Infof("You can also set a new password by running [alist admin set NEW_PASSWORD]")
 		}
 	},
 }

-func init() {
-	RootCmd.AddCommand(PasswordCmd)
+var RandomPasswordCmd = &cobra.Command{
+	Use:   "random",
+	Short: "Reset admin user's password to a random string",
+	Run: func(cmd *cobra.Command, args []string) {
+		newPwd := random.String(8)
+		setAdminPassword(newPwd)
+	},
+}
+
+var SetPasswordCmd = &cobra.Command{
+	Use:   "set",
+	Short: "Set admin user's password",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) == 0 {
+			utils.Log.Errorf("Please enter the new password")
+			return
+		}
+		setAdminPassword(args[0])
+	},
+}
+
+var ShowTokenCmd = &cobra.Command{
+	Use:   "token",
+	Short: "Show admin token",
+	Run: func(cmd *cobra.Command, args []string) {
+		Init()
+		defer Release()
+		token := setting.GetStr(conf.Token)
+		utils.Log.Infof("Admin token: %s", token)
+	},
+}
+
+func setAdminPassword(pwd string) {
+	Init()
+	defer Release()
+	admin, err := op.GetAdmin()
+	if err != nil {
+		utils.Log.Errorf("failed get admin user: %+v", err)
+		return
+	}
+	admin.SetPassword(pwd)
+	if err := op.UpdateUser(admin); err != nil {
+		utils.Log.Errorf("failed update admin user: %+v", err)
+		return
+	}
+	utils.Log.Infof("admin user has been updated:")
+	utils.Log.Infof("username: %s", admin.Username)
+	utils.Log.Infof("password: %s", pwd)
+	DelAdminCacheOnline()
+}
+
+func init() {
+	RootCmd.AddCommand(AdminCmd)
+	AdminCmd.AddCommand(RandomPasswordCmd)
+	AdminCmd.AddCommand(SetPasswordCmd)
+	AdminCmd.AddCommand(ShowTokenCmd)
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
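In short, the old `password` command becomes `admin` (with `password` kept as an alias) plus three subcommands. A usage sketch based only on the cobra definitions above:

```sh
# Show the admin username and a hint that the password is now stored hashed
alist admin

# Reset the admin password to a random 8-character string
alist admin random

# Set an explicit admin password
alist admin set NEW_PASSWORD

# Print the admin token
alist admin token
```

Since the stored password is only a hash, the plaintext can no longer be recovered; `random` and `set` are the supported recovery paths, and both go through setAdminPassword, which calls DelAdminCacheOnline() so a running instance drops its cached admin user.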
@@ -15,6 +15,7 @@ var Cancel2FACmd = &cobra.Command{
 	Short: "Delete 2FA of admin user",
 	Run: func(cmd *cobra.Command, args []string) {
 		Init()
+		defer Release()
 		admin, err := op.GetAdmin()
 		if err != nil {
 			utils.Log.Errorf("failed to get admin user: %+v", err)
@@ -24,6 +25,7 @@ var Cancel2FACmd = &cobra.Command{
 				utils.Log.Errorf("failed to cancel 2FA: %+v", err)
 			} else {
 				utils.Log.Info("2FA canceled")
+				DelAdminCacheOnline()
 			}
 		}
 	},
@@ -7,6 +7,7 @@ import (

 	"github.com/alist-org/alist/v3/internal/bootstrap"
 	"github.com/alist-org/alist/v3/internal/bootstrap/data"
+	"github.com/alist-org/alist/v3/internal/db"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )
@@ -19,6 +20,10 @@ func Init() {
 	bootstrap.InitIndex()
 }

+func Release() {
+	db.Close()
+}
+
 var pid = -1
 var pidFile string
@@ -100,7 +100,7 @@ the address is defined in config file`,
 		signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
 		<-quit
 		utils.Log.Println("Shutdown server...")
-
+		Release()
 		ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 		defer cancel()
 		var wg sync.WaitGroup
cmd/storage.go: 153 changes
@@ -4,8 +4,14 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+	"os"
+	"strconv"
+
 	"github.com/alist-org/alist/v3/internal/db"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/charmbracelet/bubbles/table"
+	tea "github.com/charmbracelet/bubbletea"
+	"github.com/charmbracelet/lipgloss"
 	"github.com/spf13/cobra"
 )

@@ -15,31 +21,136 @@ var storageCmd = &cobra.Command{
 	Short: "Manage storage",
 }

-func init() {
-	var mountPath string
-	var disable = &cobra.Command{
-		Use:   "disable",
-		Short: "Disable a storage",
-		Run: func(cmd *cobra.Command, args []string) {
-			Init()
-			storage, err := db.GetStorageByMountPath(mountPath)
-			if err != nil {
-				utils.Log.Errorf("failed to query storage: %+v", err)
-			} else {
-				storage.Disabled = true
-				err = db.UpdateStorage(storage)
-				if err != nil {
-					utils.Log.Errorf("failed to update storage: %+v", err)
-				} else {
-					utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
-				}
-			}
-		},
-	}
-	disable.Flags().StringVarP(&mountPath, "mount-path", "m", "", "The mountPath of storage")
-	RootCmd.AddCommand(storageCmd)
-	storageCmd.AddCommand(disable)
-}
+var disableStorageCmd = &cobra.Command{
+	Use:   "disable",
+	Short: "Disable a storage",
+	Run: func(cmd *cobra.Command, args []string) {
+		if len(args) < 1 {
+			utils.Log.Errorf("mount path is required")
+			return
+		}
+		mountPath := args[0]
+		Init()
+		defer Release()
+		storage, err := db.GetStorageByMountPath(mountPath)
+		if err != nil {
+			utils.Log.Errorf("failed to query storage: %+v", err)
+		} else {
+			storage.Disabled = true
+			err = db.UpdateStorage(storage)
+			if err != nil {
+				utils.Log.Errorf("failed to update storage: %+v", err)
+			} else {
+				utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
+			}
+		}
+	},
+}
+
+var baseStyle = lipgloss.NewStyle().
+	BorderStyle(lipgloss.NormalBorder()).
+	BorderForeground(lipgloss.Color("240"))
+
+type model struct {
+	table table.Model
+}
+
+func (m model) Init() tea.Cmd { return nil }
+
+func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+	var cmd tea.Cmd
+	switch msg := msg.(type) {
+	case tea.KeyMsg:
+		switch msg.String() {
+		case "esc":
+			if m.table.Focused() {
+				m.table.Blur()
+			} else {
+				m.table.Focus()
+			}
+		case "q", "ctrl+c":
+			return m, tea.Quit
+			//case "enter":
+			//	return m, tea.Batch(
+			//		tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
+			//	)
+		}
+	}
+	m.table, cmd = m.table.Update(msg)
+	return m, cmd
+}
+
+func (m model) View() string {
+	return baseStyle.Render(m.table.View()) + "\n"
+}
+
+var storageTableHeight int
+var listStorageCmd = &cobra.Command{
+	Use:   "list",
+	Short: "List all storages",
+	Run: func(cmd *cobra.Command, args []string) {
+		Init()
+		defer Release()
+		storages, _, err := db.GetStorages(1, -1)
+		if err != nil {
+			utils.Log.Errorf("failed to query storages: %+v", err)
+		} else {
+			utils.Log.Infof("Found %d storages", len(storages))
+			columns := []table.Column{
+				{Title: "ID", Width: 4},
+				{Title: "Driver", Width: 16},
+				{Title: "Mount Path", Width: 30},
+				{Title: "Enabled", Width: 7},
+			}
+
+			var rows []table.Row
+			for i := range storages {
+				storage := storages[i]
+				enabled := "true"
+				if storage.Disabled {
+					enabled = "false"
+				}
+				rows = append(rows, table.Row{
+					strconv.Itoa(int(storage.ID)),
+					storage.Driver,
+					storage.MountPath,
+					enabled,
+				})
+			}
+			t := table.New(
+				table.WithColumns(columns),
+				table.WithRows(rows),
+				table.WithFocused(true),
+				table.WithHeight(storageTableHeight),
+			)
+
+			s := table.DefaultStyles()
+			s.Header = s.Header.
+				BorderStyle(lipgloss.NormalBorder()).
+				BorderForeground(lipgloss.Color("240")).
+				BorderBottom(true).
+				Bold(false)
+			s.Selected = s.Selected.
+				Foreground(lipgloss.Color("229")).
+				Background(lipgloss.Color("57")).
+				Bold(false)
+			t.SetStyles(s)
+
+			m := model{t}
+			if _, err := tea.NewProgram(m).Run(); err != nil {
+				utils.Log.Errorf("failed to run program: %+v", err)
+				os.Exit(1)
+			}
+		}
+	},
+}
+
+func init() {
+	RootCmd.AddCommand(storageCmd)
+	storageCmd.AddCommand(disableStorageCmd)
+	storageCmd.AddCommand(listStorageCmd)
+	storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
 	// Here you will define your flags and configuration settings.

 	// Cobra supports Persistent Flags which will work for this command
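Note the interface change: `storage disable` now takes the mount path as a positional argument rather than the old `--mount-path`/`-m` flag, and the new `list` subcommand renders an interactive bubbletea table. A usage sketch (the mount path shown is a placeholder):

```sh
# Disable the storage mounted at /my-storage (positional argument, not a flag)
alist storage disable /my-storage

# List all storages in an interactive table, 20 rows high (-H / --height, default 10)
alist storage list -H 20
```

In the table view, `q` or `ctrl+c` quits and `esc` toggles focus, per the Update handler above.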
cmd/user.go (new file): 52 additions
@@ -0,0 +1,52 @@
+package cmd
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	"github.com/alist-org/alist/v3/internal/conf"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/setting"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/go-resty/resty/v2"
+)
+
+func DelAdminCacheOnline() {
+	admin, err := op.GetAdmin()
+	if err != nil {
+		utils.Log.Errorf("[del_admin_cache] get admin error: %+v", err)
+		return
+	}
+	DelUserCacheOnline(admin.Username)
+}
+
+func DelUserCacheOnline(username string) {
+	client := resty.New().SetTimeout(1 * time.Second).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
+	token := setting.GetStr(conf.Token)
+	port := conf.Conf.Scheme.HttpPort
+	u := fmt.Sprintf("http://localhost:%d/api/admin/user/del_cache", port)
+	if port == -1 {
+		if conf.Conf.Scheme.HttpsPort == -1 {
+			utils.Log.Warnf("[del_user_cache] no open port")
+			return
+		}
+		u = fmt.Sprintf("https://localhost:%d/api/admin/user/del_cache", conf.Conf.Scheme.HttpsPort)
+	}
+	res, err := client.R().SetHeader("Authorization", token).SetQueryParam("username", username).Post(u)
+	if err != nil {
+		utils.Log.Warnf("[del_user_cache_online] failed: %+v", err)
+		return
+	}
+	if res.StatusCode() != 200 {
+		utils.Log.Warnf("[del_user_cache_online] failed: %+v", res.String())
+		return
+	}
+	code := utils.Json.Get(res.Body(), "code").ToInt()
+	msg := utils.Json.Get(res.Body(), "message").ToString()
+	if code != 200 {
+		utils.Log.Errorf("[del_user_cache_online] error: %s", msg)
+		return
+	}
+	utils.Log.Debugf("[del_user_cache_online] del user [%s] cache success", username)
+}
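DelUserCacheOnline talks to the locally running instance over its own HTTP API, which is how the offline CLI commands (admin set/random, cancel2fa) invalidate the in-memory user cache of a server that is already up. The equivalent request, sketched with curl; the port and token values are placeholders, since the code derives them from `conf.Conf.Scheme` and the stored admin token:

```sh
# Assumption: instance listening on HTTP port 5244, $TOKEN holding the admin token
curl -X POST "http://localhost:5244/api/admin/user/del_cache?username=admin" \
  -H "Authorization: $TOKEN"
```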
(drivers/115 driver file)
@@ -2,13 +2,13 @@ package _115

 import (
 	"context"
-	"os"
+	"strings"

 	driver115 "github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/pkg/errors"
 )

 type Pan115 struct {
@@ -38,17 +38,14 @@ func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
 	if err != nil && !errors.Is(err, driver115.ErrNotExist) {
 		return nil, err
 	}
-	return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
-		return src, nil
+	return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
+		return &src, nil
 	})
 }

 func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	downloadInfo, err := d.client.
-		SetUserAgent(driver115.UA115Browser).
-		Download(file.(driver115.File).PickCode)
-	// recover for upload
-	d.client.SetUserAgent(driver115.UA115Desktop)
+		DownloadWithUA(file.(*FileObj).PickCode, driver115.UA115Browser)
 	if err != nil {
 		return nil, err
 	}
@@ -83,15 +80,67 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
+	var (
+		fastInfo *driver115.UploadInitResp
+		dirID    = dstDir.GetID()
+	)
+
+	if ok, err := d.client.UploadAvailable(); err != nil || !ok {
+		return err
+	}
+	if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
+		return driver115.ErrUploadTooLarge
+	}
+	//if digest, err = d.client.GetDigestResult(stream); err != nil {
+	//	return err
+	//}
+
+	const PreHashSize int64 = 128 * utils.KB
+	hashSize := PreHashSize
+	if stream.GetSize() < PreHashSize {
+		hashSize = stream.GetSize()
+	}
+	reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
 	if err != nil {
 		return err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
+	preHash, err := utils.HashReader(utils.SHA1, reader)
+	if err != nil {
+		return err
+	}
+	preHash = strings.ToUpper(preHash)
+	fullHash := stream.GetHash().GetHash(utils.SHA1)
+	if len(fullHash) <= 0 {
+		tmpF, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		fullHash, err = utils.HashFile(utils.SHA1, tmpF)
+		if err != nil {
+			return err
+		}
+	}
+	fullHash = strings.ToUpper(fullHash)
+
+	// rapid-upload
+	// note that 115 add timeout for rapid-upload,
+	// and "sig invalid" err is thrown even when the hash is correct after timeout.
+	if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
+		return err
+	}
+	if matched, err := fastInfo.Ok(); err != nil {
+		return err
+	} else if matched {
+		return nil
+	}
+
+	// rapid upload failed, fall back to a real upload
+	if stream.GetSize() <= utils.KB { // files no larger than 1KB are uploaded in normal mode
+		return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+	}
+	// multipart upload
+	return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
 }

 var _ driver.Driver = (*Pan115)(nil)
@@ -3,6 +3,20 @@ package _115
 import (
 	"github.com/SheltonZhu/115driver/pkg/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"time"
 )

-var _ model.Obj = (*driver.File)(nil)
+var _ model.Obj = (*FileObj)(nil)
+
+type FileObj struct {
+	driver.File
+}
+
+func (f *FileObj) CreateTime() time.Time {
+	return f.File.CreateTime
+}
+
+func (f *FileObj) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.SHA1, f.Sha1)
+}
@ -1,10 +1,26 @@
|
||||
package _115
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/orzogc/fake115uploader/cipher"
|
||||
"io"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/SheltonZhu/115driver/pkg/driver"
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
|
||||
"github.com/alist-org/alist/v3/internal/conf"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@ -14,9 +30,11 @@ func (d *Pan115) login() error {
|
||||
var err error
|
||||
opts := []driver.Option{
|
||||
driver.UA(UserAgent),
|
||||
func(c *driver.Pan115Client) {
|
||||
c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
|
||||
},
|
||||
}
|
||||
d.client = driver.New(opts...)
|
||||
d.client.SetHttpClient(base.HttpClient)
|
||||
cr := &driver.Credential{}
|
||||
if d.Addition.QRCodeToken != "" {
|
||||
s := &driver.QRCodeSession{
|
||||
@ -38,8 +56,8 @@ func (d *Pan115) login() error {
|
||||
return d.client.LoginCheck()
|
||||
}
|
||||
|
||||
func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
|
||||
res := make([]driver.File, 0)
|
||||
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
|
||||
res := make([]FileObj, 0)
|
||||
if d.PageSize <= 0 {
|
||||
d.PageSize = driver.FileListLimit
|
||||
}
|
||||
@ -48,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
|
||||
return nil, err
|
||||
}
|
||||
for _, file := range *files {
|
||||
res = append(res, file)
|
||||
res = append(res, FileObj{file})
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
const (
|
||||
appVer = "2.0.3.6"
|
||||
)
|
||||
|
||||
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
|
||||
var (
|
||||
ecdhCipher *cipher.EcdhCipher
|
||||
encrypted []byte
|
||||
decrypted []byte
|
||||
encodedToken string
|
||||
err error
|
||||
target = "U_1_" + dirID
|
||||
bodyBytes []byte
|
||||
result = driver115.UploadInitResp{}
|
||||
fileSizeStr = strconv.FormatInt(fileSize, 10)
|
||||
)
|
||||
if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
userID := strconv.FormatInt(d.client.UserID, 10)
|
||||
form := url.Values{}
|
||||
form.Set("appid", "0")
|
||||
form.Set("appversion", appVer)
|
||||
form.Set("userid", userID)
|
||||
form.Set("filename", fileName)
|
||||
form.Set("filesize", fileSizeStr)
|
||||
form.Set("fileid", fileID)
|
||||
form.Set("target", target)
|
||||
form.Set("sig", d.client.GenerateSignature(fileID, target))
|
||||
|
||||
signKey, signVal := "", ""
|
||||
for retry := true; retry; {
|
||||
t := driver115.Now()
|
||||
|
||||
if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params := map[string]string{
|
||||
"k_ec": encodedToken,
|
||||
}
|
||||
|
||||
form.Set("t", t.String())
|
||||
form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
|
||||
if signKey != "" && signVal != "" {
|
||||
form.Set("sign_key", signKey)
|
||||
form.Set("sign_val", signVal)
|
||||
}
|
||||
if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := d.client.NewRequest().
|
||||
SetQueryParams(params).
|
||||
SetBody(encrypted).
|
||||
SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
|
||||
SetDoNotParseResponse(true)
|
||||
resp, err := req.Post(driver115.ApiUploadInit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data := resp.RawBody()
|
||||
defer data.Close()
|
||||
if bodyBytes, err = io.ReadAll(data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result.Status == 7 {
|
||||
// Update signKey & signVal
|
||||
signKey = result.SignKey
|
||||
signVal, err = UploadDigestRange(stream, result.SignCheck)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
retry = false
|
||||
}
|
||||
result.SHA1 = fileID
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
|
||||
var start, end int64
|
||||
if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
length := end - start + 1
|
||||
reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
|
||||
hashStr, err := utils.HashReader(utils.SHA1, reader)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
result = strings.ToUpper(hashStr)
|
||||
return
|
||||
}
|
||||
|
||||
// UploadByMultipart upload by mutipart blocks
|
||||
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
|
||||
var (
|
||||
chunks []oss.FileChunk
|
||||
parts []oss.UploadPart
|
||||
imur oss.InitiateMultipartUploadResult
|
||||
ossClient *oss.Client
|
||||
bucket *oss.Bucket
|
||||
ossToken *driver115.UploadOSSTokenResp
|
||||
err error
|
||||
)
|
||||
|
||||
tmpF, err := stream.CacheFullInTempFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := driver115.DefalutUploadMultipartOptions()
|
||||
if len(opts) > 0 {
|
||||
for _, f := range opts {
|
||||
f(options)
|
||||
}
|
||||
}
|
||||
|
||||
if ossToken, err = d.client.GetOSSToken(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ossToken一小时后就会失效,所以每50分钟重新获取一次
|
||||
ticker := time.NewTicker(options.TokenRefreshTime)
|
||||
defer ticker.Stop()
|
||||
// 设置超时
|
||||
timeout := time.NewTimer(options.Timeout)
|
||||
|
||||
if chunks, err = SplitFile(fileSize); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if imur, err = bucket.InitiateMultipartUpload(params.Object,
|
||||
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
|
||||
oss.UserAgentHeader(driver115.OSSUserAgent),
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(chunks))
|
||||
|
||||
chunksCh := make(chan oss.FileChunk)
|
||||
errCh := make(chan error)
|
||||
UploadedPartsCh := make(chan oss.UploadPart)
|
||||
quit := make(chan struct{})
|
||||
|
||||
// producer
|
||||
go chunksProducer(chunksCh, chunks)
|
||||
go func() {
|
||||
wg.Wait()
|
||||
quit <- struct{}{}
|
||||
}()
|
||||
|
||||
// consumers
|
||||
for i := 0; i < options.ThreadsNum; i++ {
|
||||
go func(threadId int) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
errCh <- fmt.Errorf("Recovered in %v", r)
|
||||
}
|
||||
}()
|
||||
for chunk := range chunksCh {
|
||||
var part oss.UploadPart // 出现错误就继续尝试,共尝试3次
|
||||
for retry := 0; retry < 3; retry++ {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken
|
||||
errCh <- errors.Wrap(err, "刷新token时出现错误")
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
buf := make([]byte, chunk.Size)
|
||||
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
|
||||
continue
|
||||
}
|
||||
|
||||
b := bytes.NewBuffer(buf)
|
||||
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
|
||||
}
|
||||
UploadedPartsCh <- part
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
go func() {
|
||||
for part := range UploadedPartsCh {
|
||||
parts = append(parts, part)
|
||||
wg.Done()
|
||||
}
|
||||
}()
|
||||
LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// 到时重新获取ossToken
|
||||
if ossToken, err = d.client.GetOSSToken(); err != nil {
|
||||
return err
|
||||
}
|
||||
case <-quit:
|
||||
break LOOP
|
||||
case <-errCh:
|
||||
return err
|
||||
case <-timeout.C:
|
||||
return fmt.Errorf("time out")
|
||||
}
|
||||
}
|
||||
|
||||
// EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的
|
||||
if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
|
||||
// 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的
|
||||
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return d.checkUploadStatus(dirID, params.SHA1)
|
||||
}
|
||||
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
|
||||
for _, chunk := range chunks {
|
||||
ch <- chunk
|
||||
}
|
||||
}
|
||||
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
|
||||
// 验证上传是否成功
|
||||
req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
|
||||
opts := []driver115.GetFileOptions{
|
||||
driver115.WithOrder(driver115.FileOrderByTime),
|
||||
driver115.WithShowDirEnable(false),
|
||||
driver115.WithAsc(false),
|
||||
driver115.WithLimit(500),
|
||||
}
|
||||
fResp, err := driver115.GetFiles(req, dirID, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, fileInfo := range fResp.Files {
|
||||
if fileInfo.Sha1 == sha1 {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return driver115.ErrUploadFailed
|
||||
}
|
||||
|
||||
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
|
||||
for i := int64(1); i < 10; i++ {
|
||||
if fileSize < i*utils.GB { // 文件大小小于iGB时分为i*1000片
|
||||
if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if fileSize > 9*utils.GB { // 文件大小大于9GB时分为10000片
|
||||
if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
// 单个分片大小不能小于100KB
|
||||
if chunks[0].Size < 100*utils.KB {
|
||||
if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SplitFileByPartNum splits big file into parts by the num of parts.
// Split the file with specified parts count, returns the split result when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
	if chunkNum <= 0 || chunkNum > 10000 {
		return nil, errors.New("chunkNum invalid")
	}

	if int64(chunkNum) > fileSize {
		return nil, errors.New("oss: chunkNum invalid")
	}

	var chunks []oss.FileChunk
	var chunk = oss.FileChunk{}
	var chunkN = (int64)(chunkNum)
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * (fileSize / chunkN)
		if i == chunkN-1 {
			chunk.Size = fileSize/chunkN + fileSize%chunkN
		} else {
			chunk.Size = fileSize / chunkN
		}
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}

// SplitFileByPartSize splits big file into parts by the size of parts.
// Splits the file by the part size. Returns the FileChunk when error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
	if chunkSize <= 0 {
		return nil, errors.New("chunkSize invalid")
	}

	var chunkN = fileSize / chunkSize
	if chunkN >= 10000 {
		return nil, errors.New("Too many parts, please increase part size")
	}

	var chunks []oss.FileChunk
	var chunk = oss.FileChunk{}
	for i := int64(0); i < chunkN; i++ {
		chunk.Number = int(i + 1)
		chunk.Offset = i * chunkSize
		chunk.Size = chunkSize
		chunks = append(chunks, chunk)
	}

	if fileSize%chunkSize > 0 {
		chunk.Number = len(chunks) + 1
		chunk.Offset = int64(len(chunks)) * chunkSize
		chunk.Size = fileSize % chunkSize
		chunks = append(chunks, chunk)
	}

	return chunks, nil
}

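A quick way to sanity-check the sizing rules is to mirror the arithmetic in isolation. This sketch redefines a minimal FileChunk locally so it runs without the oss SDK; the numbers follow SplitFile's "< 4 GB, so 4000 parts" bucket:

package main

import "fmt"

// FileChunk mirrors the relevant fields of oss.FileChunk for this sketch.
type FileChunk struct {
	Number int
	Offset int64
	Size   int64
}

func splitByPartNum(fileSize int64, n int64) []FileChunk {
	chunks := make([]FileChunk, 0, n)
	for i := int64(0); i < n; i++ {
		c := FileChunk{Number: int(i + 1), Offset: i * (fileSize / n), Size: fileSize / n}
		if i == n-1 {
			c.Size += fileSize % n // the last part absorbs the remainder
		}
		chunks = append(chunks, c)
	}
	return chunks
}

func main() {
	const GB = 1 << 30
	// a 3.5 GB file falls in the "< 4 GB" bucket: 4000 parts of ~0.9 MB,
	// comfortably above the 100 KB minimum part size
	chunks := splitByPartNum(int64(3.5*GB), 4000)
	fmt.Println(len(chunks), chunks[0].Size, chunks[len(chunks)-1].Size)
}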
@@ -1,18 +1,11 @@
package _123

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/base64"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
@@ -24,6 +17,9 @@ import (
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
	"io"
	"net/http"
	"net/url"
)

type Pan123 struct {
@@ -45,6 +41,9 @@ func (d *Pan123) Init(ctx context.Context) error {
}

func (d *Pan123) Drop(ctx context.Context) error {
	_, _ = d.request(Logout, http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{})
	}, nil)
	return nil
}

@@ -180,40 +179,22 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	const DEFAULT int64 = 10485760
	var uploadFile io.Reader
	// const DEFAULT int64 = 10485760
	h := md5.New()
	if d.StreamUpload && stream.GetSize() > DEFAULT {
		// only hash the first 10 MiB
		buf := bytes.NewBuffer(make([]byte, 0, DEFAULT))
		if n, err := io.CopyN(io.MultiWriter(buf, h), stream, DEFAULT); err != io.EOF && n == 0 {
			return err
		}
		// mix in extra parameters to avoid MD5 collisions
		h.Write([]byte(stream.GetName()))
		num := make([]byte, 8)
		binary.BigEndian.PutUint64(num, uint64(stream.GetSize()))
		h.Write(num)
		// stitch the buffered prefix back onto the rest of the stream
		uploadFile = io.MultiReader(buf, stream)
	} else {
		// calculate the MD5 of the whole file
		tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
		if err != nil {
			return err
		}
		defer func() {
			_ = tempFile.Close()
			_ = os.Remove(tempFile.Name())
		}()
		if _, err = io.Copy(h, tempFile); err != nil {
			return err
		}
		_, err = tempFile.Seek(0, io.SeekStart)
		if err != nil {
			return err
		}
		uploadFile = tempFile
	// need to calculate md5 of the full content
	tempFile, err := stream.CacheFullInTempFile()
	if err != nil {
		return err
	}
	defer func() {
		_ = tempFile.Close()
	}()
	if _, err = io.Copy(h, tempFile); err != nil {
		return err
	}
	_, err = tempFile.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	etag := hex.EncodeToString(h.Sum(nil))
	data := base.Json{
@@ -237,7 +218,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
		return nil
	}
	if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
		err = d.newUpload(ctx, &resp, stream, uploadFile, up)
		err = d.newUpload(ctx, &resp, stream, tempFile, up)
		return err
	} else {
		cfg := &aws.Config{
@@ -254,7 +235,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
		input := &s3manager.UploadInput{
			Bucket: &resp.Data.Bucket,
			Key:    &resp.Data.Key,
			Body:   uploadFile,
			Body:   tempFile,
		}
		_, err = uploader.UploadWithContext(ctx, input)
	}
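The (now removed) stream-upload branch avoided caching the whole file: it hashed only the first 10 MiB, then mixed the file name and size into the digest so different large files sharing a prefix do not collide, and finally spliced the buffered prefix back in front of the unread remainder. A self-contained sketch of that idea; function and variable names are illustrative:

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// fastEtag hashes only the first prefixLen bytes plus the name and size,
// and returns a reader that replays the hashed prefix before the rest.
func fastEtag(r io.Reader, name string, size, prefixLen int64) (string, io.Reader, error) {
	h := md5.New()
	buf := bytes.NewBuffer(make([]byte, 0, prefixLen))
	if _, err := io.CopyN(io.MultiWriter(buf, h), r, prefixLen); err != nil && err != io.EOF {
		return "", nil, err
	}
	h.Write([]byte(name)) // disambiguate files sharing the same prefix
	var num [8]byte
	binary.BigEndian.PutUint64(num[:], uint64(size))
	h.Write(num[:])
	return hex.EncodeToString(h.Sum(nil)), io.MultiReader(buf, r), nil
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 64))
	etag, body, _ := fastEtag(src, "a.bin", 64, 16)
	rest, _ := io.ReadAll(body)
	fmt.Println(etag, len(rest)) // the reader still yields all 64 bytes
}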
@@ -11,7 +11,6 @@ type Addition struct {
	driver.RootID
	OrderBy        string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
	StreamUpload   bool   `json:"stream_upload"`
	AccessToken    string
}

@@ -1,7 +1,11 @@
package _123

import (
	"github.com/alist-org/alist/v3/pkg/utils"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
@@ -18,6 +22,14 @@ type File struct {
	DownloadUrl string `json:"DownloadUrl"`
}

func (f File) CreateTime() time.Time {
	return f.UpdateAt
}

func (f File) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}

func (f File) GetPath() string {
	return ""
}
@@ -42,7 +54,30 @@ func (f File) GetID() string {
	return strconv.FormatInt(f.FileId, 10)
}

func (f File) Thumb() string {
	if f.DownloadUrl == "" {
		return ""
	}
	du, err := url.Parse(f.DownloadUrl)
	if err != nil {
		return ""
	}
	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
	query := du.Query()
	query.Set("w", "70")
	query.Set("h", "70")
	if !query.Has("type") {
		query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
	}
	if !query.Has("trade_key") {
		query.Set("trade_key", "123pan-thumbnail")
	}
	du.RawQuery = query.Encode()
	return du.String()
}

var _ model.Obj = (*File)(nil)
var _ model.Thumb = (*File)(nil)

//func (f File) Thumb() string {
//
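Thumb derives a 70×70 thumbnail URL from the stored download URL by rewriting the _24_24 path suffix and forcing w/h query parameters. The same transformation on a made-up URL (host and parameters here are hypothetical):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// hypothetical download URL ending in the small-thumbnail suffix
	du, _ := url.Parse("https://example.com/thumb/abc_24_24?sign=xyz")
	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
	q := du.Query()
	q.Set("w", "70")
	q.Set("h", "70")
	du.RawQuery = q.Encode()
	fmt.Println(du) // https://example.com/thumb/abc_70_70?h=70&sign=xyz&w=70
}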
@@ -34,6 +34,25 @@ func (d *Pan123) getS3PreSignedUrls(ctx context.Context, upReq *UploadResp, star
	return &s3PreSignedUrls, nil
}

func (d *Pan123) getS3Auth(ctx context.Context, upReq *UploadResp, start, end int) (*S3PreSignedURLs, error) {
	data := base.Json{
		"StorageNode":     upReq.Data.StorageNode,
		"bucket":          upReq.Data.Bucket,
		"key":             upReq.Data.Key,
		"partNumberEnd":   end,
		"partNumberStart": start,
		"uploadId":        upReq.Data.UploadId,
	}
	var s3PreSignedUrls S3PreSignedURLs
	_, err := d.request(S3Auth, http.MethodPost, func(req *resty.Request) {
		req.SetBody(data).SetContext(ctx)
	}, &s3PreSignedUrls)
	if err != nil {
		return nil, err
	}
	return &s3PreSignedUrls, nil
}

func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.FileStreamer, isMultipart bool) error {
	data := base.Json{
		"StorageNode": upReq.Data.StorageNode,
@@ -51,11 +70,17 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
}

func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
	chunkSize := int64(1024 * 1024 * 5)
	chunkSize := int64(1024 * 1024 * 16)
	// fetch s3 pre signed urls
	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))
	// upload 10 chunks each batch
	batchSize := 10
	// only 1 batch is allowed
	isMultipart := chunkCount > 1
	batchSize := 1
	getS3UploadUrl := d.getS3Auth
	if isMultipart {
		batchSize = 10
		getS3UploadUrl = d.getS3PreSignedUrls
	}
	for i := 1; i <= chunkCount; i += batchSize {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
@@ -65,7 +90,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
		if end > chunkCount+1 {
			end = chunkCount + 1
		}
		s3PreSignedUrls, err := d.getS3PreSignedUrls(ctx, upReq, start, end)
		s3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, start, end)
		if err != nil {
			return err
		}
@@ -78,7 +103,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
			if j == chunkCount {
				curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
			}
			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false)
			err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
			if err != nil {
				return err
			}
@@ -89,7 +114,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
	return d.completeS3(ctx, upReq, file, chunkCount > 1)
}

func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool) error {
func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSignedUrls *S3PreSignedURLs, cur, end int, reader io.Reader, curSize int64, retry bool, getS3UploadUrl func(ctx context.Context, upReq *UploadResp, start int, end int) (*S3PreSignedURLs, error)) error {
	uploadUrl := s3PreSignedUrls.Data.PreSignedUrls[strconv.Itoa(cur)]
	if uploadUrl == "" {
		return fmt.Errorf("upload url is empty, s3PreSignedUrls: %+v", s3PreSignedUrls)
@@ -111,13 +136,13 @@ func (d *Pan123) uploadS3Chunk(ctx context.Context, upReq *UploadResp, s3PreSign
			return fmt.Errorf("upload s3 chunk %d failed, status code: %d", cur, res.StatusCode)
		}
		// refresh s3 pre signed urls
		newS3PreSignedUrls, err := d.getS3PreSignedUrls(ctx, upReq, cur, end)
		newS3PreSignedUrls, err := getS3UploadUrl(ctx, upReq, cur, end)
		if err != nil {
			return err
		}
		s3PreSignedUrls.Data.PreSignedUrls = newS3PreSignedUrls.Data.PreSignedUrls
		// retry
		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true)
		return d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, cur, end, reader, curSize, true, getS3UploadUrl)
	}
	if res.StatusCode != http.StatusOK {
		body, err := io.ReadAll(res.Body)
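newUpload picks the URL provider once (getS3Auth when the whole file fits in one part, getS3PreSignedUrls for multipart) and threads it through as a function value, so the retry path inside uploadS3Chunk refreshes URLs from the same endpoint. A stripped-down sketch of that strategy-by-function-value pattern; all names and the fake providers are illustrative:

package main

import "fmt"

type urls map[int]string

// provider abstracts "how do I get upload URLs for parts [start, end)".
type provider func(start, end int) (urls, error)

func single(start, end int) (urls, error) { return urls{start: "auth-url"}, nil }

func multi(start, end int) (urls, error) {
	u := urls{}
	for i := start; i < end; i++ {
		u[i] = fmt.Sprintf("presigned-%d", i)
	}
	return u, nil
}

func upload(chunkCount int) error {
	batchSize, getURLs := 1, provider(single)
	if chunkCount > 1 { // multipart: bigger batches, different endpoint
		batchSize, getURLs = 10, provider(multi)
	}
	for i := 1; i <= chunkCount; i += batchSize {
		end := i + batchSize
		if end > chunkCount+1 {
			end = chunkCount + 1
		}
		u, err := getURLs(i, end) // the retry path would call the same getURLs
		if err != nil {
			return err
		}
		fmt.Println("batch starting at", i, "->", len(u), "urls")
	}
	return nil
}

func main() { _ = upload(23) }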
@@ -15,10 +15,12 @@ import (
// do others that not defined in Driver interface

const (
	Api     = "https://www.123pan.com/api"
	AApi    = "https://www.123pan.com/a/api"
	BApi    = "https://www.123pan.com/b/api"
	MainApi = AApi
	MainApi = Api
	SignIn       = MainApi + "/user/sign_in"
	Logout       = MainApi + "/user/logout"
	UserInfo     = MainApi + "/user/info"
	FileList     = MainApi + "/file/list/new"
	DownloadInfo = MainApi + "/file/download_info"
@@ -32,6 +34,7 @@ const (
	S3Auth           = MainApi + "/file/s3_upload_object/auth"
	UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
	S3Complete       = MainApi + "/file/s3_complete_multipart_upload"
	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)

func (d *Pan123) login() error {
@@ -50,6 +53,14 @@ func (d *Pan123) login() error {
		}
	}
	res, err := base.RestyClient.R().
		SetHeaders(map[string]string{
			"origin":      "https://www.123pan.com",
			"referer":     "https://www.123pan.com/",
			"user-agent":  "Dart/2.19(dart:io)",
			"platform":    "android",
			"app-version": "36",
			//"user-agent": base.UserAgent,
		}).
		SetBody(body).Post(SignIn)
	if err != nil {
		return err
@@ -62,14 +73,30 @@ func (d *Pan123) login() error {
	return err
}

//func authKey(reqUrl string) (*string, error) {
//	reqURL, err := url.Parse(reqUrl)
//	if err != nil {
//		return nil, err
//	}
//
//	nowUnix := time.Now().Unix()
//	random := rand.Intn(0x989680)
//
//	p4 := fmt.Sprintf("%d|%d|%s|%s|%s|%s", nowUnix, random, reqURL.Path, "web", "3", AuthKeySalt)
//	authKey := fmt.Sprintf("%d-%d-%x", nowUnix, random, md5.Sum([]byte(p4)))
//	return &authKey, nil
//}

func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"origin":        "https://www.123pan.com",
		"referer":       "https://www.123pan.com/",
		"authorization": "Bearer " + d.AccessToken,
		"platform":      "web",
		"app-version":   "1.2",
		"user-agent":    "Dart/2.19(dart:io)",
		"platform":      "android",
		"app-version":   "36",
		//"user-agent": base.UserAgent,
	})
	if callback != nil {
		callback(req)
@@ -77,6 +104,11 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r
	if resp != nil {
		req.SetResult(resp)
	}
	//authKey, err := authKey(url)
	//if err != nil {
	//	return nil, err
	//}
	//req.SetQueryParam("auth-key", *authKey)
	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
77 drivers/123_link/driver.go Normal file
@@ -0,0 +1,77 @@
package _123Link

import (
	"context"
	stdpath "path"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
)

type Pan123Link struct {
	model.Storage
	Addition
	root *Node
}

func (d *Pan123Link) Config() driver.Config {
	return config
}

func (d *Pan123Link) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Pan123Link) Init(ctx context.Context) error {
	node, err := BuildTree(d.OriginURLs)
	if err != nil {
		return err
	}
	node.calSize()
	d.root = node
	return nil
}

func (d *Pan123Link) Drop(ctx context.Context) error {
	return nil
}

func (d *Pan123Link) Get(ctx context.Context, path string) (model.Obj, error) {
	node := GetNodeFromRootByPath(d.root, path)
	return nodeToObj(node, path)
}

func (d *Pan123Link) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	node := GetNodeFromRootByPath(d.root, dir.GetPath())
	if node == nil {
		return nil, errs.ObjectNotFound
	}
	if node.isFile() {
		return nil, errs.NotFolder
	}
	return utils.SliceConvert(node.Children, func(node *Node) (model.Obj, error) {
		return nodeToObj(node, stdpath.Join(dir.GetPath(), node.Name))
	})
}

func (d *Pan123Link) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	node := GetNodeFromRootByPath(d.root, file.GetPath())
	if node == nil {
		return nil, errs.ObjectNotFound
	}
	if node.isFile() {
		signUrl, err := SignURL(node.Url, d.PrivateKey, d.UID, time.Duration(d.ValidDuration)*time.Minute)
		if err != nil {
			return nil, err
		}
		return &model.Link{
			URL: signUrl,
		}, nil
	}
	return nil, errs.NotFile
}

var _ driver.Driver = (*Pan123Link)(nil)
23 drivers/123_link/meta.go Normal file
@@ -0,0 +1,23 @@
package _123Link

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	OriginURLs    string `json:"origin_urls" type:"text" required:"true" default:"https://vip.123pan.com/29/folder/file.mp3" help:"structure:FolderName:\n [FileSize:][Modified:]Url"`
	PrivateKey    string `json:"private_key"`
	UID           uint64 `json:"uid" type:"number"`
	ValidDuration int64  `json:"valid_duration" type:"number" default:"30" help:"minutes"`
}

var config = driver.Config{
	Name: "123PanLink",
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Pan123Link{}
	})
}
152 drivers/123_link/parse.go Normal file
@@ -0,0 +1,152 @@
package _123Link

import (
	"fmt"
	url2 "net/url"
	stdpath "path"
	"strconv"
	"strings"
	"time"
)

// build tree from text, text structure definition:
/**
 * FolderName:
 *   [FileSize:][Modified:]Url
 */
/**
 * For example:
 * folder1:
 *   name1:url1
 *   url2
 *   folder2:
 *     url3
 *     url4
 *   url5
 * folder3:
 *   url6
 *   url7
 * url8
 */
// if there are no name, use the last segment of url as name
func BuildTree(text string) (*Node, error) {
	lines := strings.Split(text, "\n")
	var root = &Node{Level: -1, Name: "root"}
	stack := []*Node{root}
	for _, line := range lines {
		// calculate indent
		indent := 0
		for i := 0; i < len(line); i++ {
			if line[i] != ' ' {
				break
			}
			indent++
		}
		// if indent is not a multiple of 2, it is an error
		if indent%2 != 0 {
			return nil, fmt.Errorf("the line '%s' is not a multiple of 2", line)
		}
		// calculate level
		level := indent / 2
		line = strings.TrimSpace(line[indent:])
		// if the line is empty, skip
		if line == "" {
			continue
		}
		// if level isn't greater than the level of the top of the stack
		// it is not the child of the top of the stack
		for level <= stack[len(stack)-1].Level {
			// pop the top of the stack
			stack = stack[:len(stack)-1]
		}
		// if the line is a folder
		if isFolder(line) {
			// create a new node
			node := &Node{
				Level: level,
				Name:  strings.TrimSuffix(line, ":"),
			}
			// add the node to the top of the stack
			stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
			// push the node to the stack
			stack = append(stack, node)
		} else {
			// if the line is a file
			// create a new node
			node, err := parseFileLine(line)
			if err != nil {
				return nil, err
			}
			node.Level = level
			// add the node to the top of the stack
			stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
		}
	}
	return root, nil
}

func isFolder(line string) bool {
	return strings.HasSuffix(line, ":")
}

// line definition:
// [FileSize:][Modified:]Url
func parseFileLine(line string) (*Node, error) {
	// if there is no url, it is an error
	if !strings.Contains(line, "http://") && !strings.Contains(line, "https://") {
		return nil, fmt.Errorf("invalid line: %s, because url is required for file", line)
	}
	index := strings.Index(line, "http://")
	if index == -1 {
		index = strings.Index(line, "https://")
	}
	url := line[index:]
	info := line[:index]
	node := &Node{
		Url: url,
	}
	name := stdpath.Base(url)
	unescape, err := url2.PathUnescape(name)
	if err == nil {
		name = unescape
	}
	node.Name = name
	if index > 0 {
		if !strings.HasSuffix(info, ":") {
			return nil, fmt.Errorf("invalid line: %s, because file info must end with ':'", line)
		}
		info = info[:len(info)-1]
		if info == "" {
			return nil, fmt.Errorf("invalid line: %s, because file name can't be empty", line)
		}
		infoParts := strings.Split(info, ":")
		size, err := strconv.ParseInt(infoParts[0], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid line: %s, because file size must be an integer", line)
		}
		node.Size = size
		if len(infoParts) > 1 {
			modified, err := strconv.ParseInt(infoParts[1], 10, 64)
			if err != nil {
				return nil, fmt.Errorf("invalid line: %s, because file modified must be an unix timestamp", line)
			}
			node.Modified = modified
		} else {
			node.Modified = time.Now().Unix()
		}
	}
	return node, nil
}

func splitPath(path string) []string {
	if path == "/" {
		return []string{"root"}
	}
	parts := strings.Split(path, "/")
	parts[0] = "root"
	return parts
}

func GetNodeFromRootByPath(root *Node, path string) *Node {
	return root.getByPath(splitPath(path))
}
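Under the two-spaces-per-level rule above, an origin-URLs text parses into a tree whose root children are the top-level folders and files. A hedged usage sketch, assuming this package is importable from the repository module as shown (note that parseFileLine only accepts the [FileSize:][Modified:] prefix, so named entries like name1:url1 from the doc comment would actually be rejected):

package main

import (
	"fmt"

	_123Link "github.com/alist-org/alist/v3/drivers/123_link"
)

func main() {
	// one folder with two files, plus one top-level file
	text := "folder1:\n  1024:https://example.com/a.bin\n  https://example.com/b.bin\nhttps://example.com/top.bin"
	root, err := _123Link.BuildTree(text)
	if err != nil {
		panic(err)
	}
	// prints: 2 folder1 top.bin
	fmt.Println(len(root.Children), root.Children[0].Name, root.Children[1].Name)
}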
66 drivers/123_link/types.go Normal file
@@ -0,0 +1,66 @@
package _123Link

import (
	"time"

	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
)

// Node is a node in the folder tree
type Node struct {
	Url      string
	Name     string
	Level    int
	Modified int64
	Size     int64
	Children []*Node
}

func (node *Node) getByPath(paths []string) *Node {
	if len(paths) == 0 || node == nil {
		return nil
	}
	if node.Name != paths[0] {
		return nil
	}
	if len(paths) == 1 {
		return node
	}
	for _, child := range node.Children {
		tmp := child.getByPath(paths[1:])
		if tmp != nil {
			return tmp
		}
	}
	return nil
}

func (node *Node) isFile() bool {
	return node.Url != ""
}

func (node *Node) calSize() int64 {
	if node.isFile() {
		return node.Size
	}
	var size int64 = 0
	for _, child := range node.Children {
		size += child.calSize()
	}
	node.Size = size
	return size
}

func nodeToObj(node *Node, path string) (model.Obj, error) {
	if node == nil {
		return nil, errs.ObjectNotFound
	}
	return &model.Object{
		Name:     node.Name,
		Size:     node.Size,
		Modified: time.Unix(node.Modified, 0),
		IsFolder: !node.isFile(),
		Path:     path,
	}, nil
}
30 drivers/123_link/util.go Normal file
@@ -0,0 +1,30 @@
package _123Link

import (
	"crypto/md5"
	"fmt"
	"math/rand"
	"net/url"
	"time"
)

func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
	if privateKey == "" {
		return originURL, nil
	}
	var (
		ts     = time.Now().Add(validDuration).Unix() // expiry timestamp
		rInt   = rand.Int()                           // random non-negative integer
		objURL *url.URL
	)
	objURL, err = url.Parse(originURL)
	if err != nil {
		return "", err
	}
	authKey := fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
		objURL.Path, ts, rInt, uid, privateKey))))
	v := objURL.Query()
	v.Add("auth_key", authKey)
	objURL.RawQuery = v.Encode()
	return objURL.String(), nil
}
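SignURL appends an auth_key of the form ts-rand-uid-md5(path-ts-rand-uid-key) and returns the URL untouched when no private key is configured. A usage sketch with a made-up key and UID (the URL shape follows the default OriginURLs hint):

package main

import (
	"fmt"
	"time"

	_123Link "github.com/alist-org/alist/v3/drivers/123_link"
)

func main() {
	// hypothetical private key and UID
	signed, err := _123Link.SignURL("https://vip.123pan.com/29/folder/file.mp3",
		"my-secret-key", 29, 30*time.Minute)
	if err != nil {
		panic(err)
	}
	fmt.Println(signed) // ...file.mp3?auth_key=<expiry>-<rand>-29-<md5>
}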
149 drivers/123_share/driver.go Normal file
@@ -0,0 +1,149 @@
package _123Share

import (
	"context"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

type Pan123Share struct {
	model.Storage
	Addition
}

func (d *Pan123Share) Config() driver.Config {
	return config
}

func (d *Pan123Share) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Pan123Share) Init(ctx context.Context) error {
	// TODO login / refresh token
	//op.MustSaveDriverStorage(d)
	return nil
}

func (d *Pan123Share) Drop(ctx context.Context) error {
	return nil
}

func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	// TODO return the files list, required
	files, err := d.getFiles(dir.GetID())
	if err != nil {
		return nil, err
	}
	return utils.SliceConvert(files, func(src File) (model.Obj, error) {
		return src, nil
	})
}

func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	// TODO return link of file, required
	if f, ok := file.(File); ok {
		//var resp DownResp
		var headers map[string]string
		if !utils.IsLocalIPAddr(args.IP) {
			headers = map[string]string{
				//"X-Real-IP": "1.1.1.1",
				"X-Forwarded-For": args.IP,
			}
		}
		data := base.Json{
			"shareKey":  d.ShareKey,
			"SharePwd":  d.SharePwd,
			"etag":      f.Etag,
			"fileId":    f.FileId,
			"s3keyFlag": f.S3KeyFlag,
			"size":      f.Size,
		}
		resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
			req.SetBody(data).SetHeaders(headers)
		}, nil)
		if err != nil {
			return nil, err
		}
		downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
		u, err := url.Parse(downloadUrl)
		if err != nil {
			return nil, err
		}
		nu := u.Query().Get("params")
		if nu != "" {
			du, _ := base64.StdEncoding.DecodeString(nu)
			u, err = url.Parse(string(du))
			if err != nil {
				return nil, err
			}
		}
		u_ := u.String()
		log.Debug("download url: ", u_)
		res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
		if err != nil {
			return nil, err
		}
		log.Debug(res.String())
		link := model.Link{
			URL: u_,
		}
		log.Debugln("res code: ", res.StatusCode())
		if res.StatusCode() == 302 {
			link.URL = res.Header().Get("location")
		} else if res.StatusCode() < 300 {
			link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
		}
		link.Header = http.Header{
			"Referer": []string{"https://www.123pan.com/"},
		}
		return &link, nil
	}
	return nil, fmt.Errorf("can't convert obj")
}

func (d *Pan123Share) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	// TODO create folder, optional
	return errs.NotSupport
}

func (d *Pan123Share) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO move obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	// TODO rename obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	// TODO copy obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Remove(ctx context.Context, obj model.Obj) error {
	// TODO remove obj, optional
	return errs.NotSupport
}

func (d *Pan123Share) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	// TODO upload file, optional
	return errs.NotSupport
}

//func (d *Pan123Share) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Pan123Share)(nil)
34 drivers/123_share/meta.go Normal file
@@ -0,0 +1,34 @@
package _123Share

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	ShareKey string `json:"sharekey" required:"true"`
	SharePwd string `json:"sharepassword" required:"true"`
	driver.RootID
	OrderBy        string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
}

var config = driver.Config{
	Name:              "123PanShare",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          true,
	NeedMs:            false,
	DefaultRoot:       "0",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Pan123Share{}
	})
}
99 drivers/123_share/types.go Normal file
@@ -0,0 +1,99 @@
package _123Share

import (
	"github.com/alist-org/alist/v3/pkg/utils"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
)

type File struct {
	FileName    string    `json:"FileName"`
	Size        int64     `json:"Size"`
	UpdateAt    time.Time `json:"UpdateAt"`
	FileId      int64     `json:"FileId"`
	Type        int       `json:"Type"`
	Etag        string    `json:"Etag"`
	S3KeyFlag   string    `json:"S3KeyFlag"`
	DownloadUrl string    `json:"DownloadUrl"`
}

func (f File) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}

func (f File) GetPath() string {
	return ""
}

func (f File) GetSize() int64 {
	return f.Size
}

func (f File) GetName() string {
	return f.FileName
}

func (f File) ModTime() time.Time {
	return f.UpdateAt
}
func (f File) CreateTime() time.Time {
	return f.UpdateAt
}

func (f File) IsDir() bool {
	return f.Type == 1
}

func (f File) GetID() string {
	return strconv.FormatInt(f.FileId, 10)
}

func (f File) Thumb() string {
	if f.DownloadUrl == "" {
		return ""
	}
	du, err := url.Parse(f.DownloadUrl)
	if err != nil {
		return ""
	}
	du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
	query := du.Query()
	query.Set("w", "70")
	query.Set("h", "70")
	if !query.Has("type") {
		query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
	}
	if !query.Has("trade_key") {
		query.Set("trade_key", "123pan-thumbnail")
	}
	du.RawQuery = query.Encode()
	return du.String()
}

var _ model.Obj = (*File)(nil)
var _ model.Thumb = (*File)(nil)

//func (f File) Thumb() string {
//
//}
//var _ model.Thumb = (*File)(nil)

type Files struct {
	//BaseResp
	Data struct {
		InfoList []File `json:"InfoList"`
		Next     string `json:"Next"`
	} `json:"data"`
}

//type DownResp struct {
//	//BaseResp
//	Data struct {
//		DownloadUrl string `json:"DownloadUrl"`
//	} `json:"data"`
//}
81 drivers/123_share/util.go Normal file
@@ -0,0 +1,81 @@
package _123Share

import (
	"errors"
	"net/http"
	"strconv"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	jsoniter "github.com/json-iterator/go"
)

const (
	Api          = "https://www.123pan.com/api"
	AApi         = "https://www.123pan.com/a/api"
	BApi         = "https://www.123pan.com/b/api"
	MainApi      = Api
	FileList     = MainApi + "/share/get"
	DownloadInfo = MainApi + "/share/download/info"
	//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)

func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"origin":      "https://www.123pan.com",
		"referer":     "https://www.123pan.com/",
		"user-agent":  "Dart/2.19(dart:io)",
		"platform":    "android",
		"app-version": "36",
	})
	if callback != nil {
		callback(req)
	}
	if resp != nil {
		req.SetResult(resp)
	}
	res, err := req.Execute(method, url)
	if err != nil {
		return nil, err
	}
	body := res.Body()
	code := utils.Json.Get(body, "code").ToInt()
	if code != 0 {
		return nil, errors.New(jsoniter.Get(body, "message").ToString())
	}
	return body, nil
}

func (d *Pan123Share) getFiles(parentId string) ([]File, error) {
	page := 1
	res := make([]File, 0)
	for {
		var resp Files
		query := map[string]string{
			"limit":          "100",
			"next":           "0",
			"orderBy":        d.OrderBy,
			"orderDirection": d.OrderDirection,
			"parentFileId":   parentId,
			"Page":           strconv.Itoa(page),
			"shareKey":       d.ShareKey,
			"SharePwd":       d.SharePwd,
		}
		_, err := d.request(FileList, http.MethodGet, func(req *resty.Request) {
			req.SetQueryParams(query)
		}, &resp)
		if err != nil {
			return nil, err
		}
		page++
		res = append(res, resp.Data.InfoList...)
		if len(resp.Data.InfoList) == 0 || resp.Data.Next == "-1" {
			break
		}
	}
	return res, nil
}

// do others that not defined in Driver interface
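getFiles accumulates pages until the server returns an empty page or Next == "-1". The same accumulate-until-sentinel loop in isolation; fetchPage and its three-page behaviour are stand-ins for the real endpoint:

package main

import "fmt"

type page struct {
	Items []string
	Next  string // "-1" marks the last page
}

// fetchPage simulates a paged listing endpoint with two non-empty pages.
func fetchPage(n int) (page, error) {
	if n >= 3 {
		return page{Next: "-1"}, nil
	}
	return page{Items: []string{fmt.Sprintf("file-%d", n)}, Next: "0"}, nil
}

func listAll() ([]string, error) {
	var all []string
	for n := 1; ; n++ {
		p, err := fetchPage(n)
		if err != nil {
			return nil, err
		}
		all = append(all, p.Items...)
		if len(p.Items) == 0 || p.Next == "-1" {
			break
		}
	}
	return all, nil
}

func main() {
	files, _ := listAll()
	fmt.Println(files) // [file-1 file-2]
}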
@@ -103,9 +103,9 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
	return err
}

func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if d.isFamily() {
		return errs.NotImplement
		return nil, errs.NotImplement
	}
	var contentInfoList []string
	var catalogInfoList []string
@@ -131,7 +131,10 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	}
	pathname := "/orchestration/personalCloud/batchOprTask/v1.0/createBatchOprTask"
	_, err := d.post(pathname, data, nil)
	return err
	if err != nil {
		return nil, err
	}
	return srcObj, nil
}

func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@@ -300,6 +303,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr

	var partSize = getPartSize(stream.GetSize())
	part := (stream.GetSize() + partSize - 1) / partSize
	if part == 0 {
		part = 1
	}
	for i := int64(0); i < part; i++ {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
@@ -331,13 +337,11 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
		if err != nil {
			return err
		}
		_ = res.Body.Close()
		log.Debugf("%+v", res)

		if res.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status code: %d", res.StatusCode)
		}

		res.Body.Close()
	}

	return nil
@@ -10,7 +10,7 @@ type Catalog struct {
	CatalogID   string `json:"catalogID"`
	CatalogName string `json:"catalogName"`
	//CatalogType int `json:"catalogType"`
	//CreateTime string `json:"createTime"`
	CreateTime string `json:"createTime"`
	UpdateTime string `json:"updateTime"`
	//IsShared bool `json:"isShared"`
	//CatalogLevel int `json:"catalogLevel"`
@@ -63,7 +63,7 @@ type Content struct {
	//ParentCatalogID string `json:"parentCatalogId"`
	//Channel string `json:"channel"`
	//GeoLocFlag string `json:"geoLocFlag"`
	//Digest string `json:"digest"`
	Digest string `json:"digest"`
	//Version string `json:"version"`
	//FileEtag string `json:"fileEtag"`
	//FileVersion string `json:"fileVersion"`
@@ -141,7 +141,7 @@ type CloudContent struct {
	//ContentSuffix string `json:"contentSuffix"`
	ContentSize int64 `json:"contentSize"`
	//ContentDesc string `json:"contentDesc"`
	//CreateTime string `json:"createTime"`
	CreateTime string `json:"createTime"`
	//Shottime interface{} `json:"shottime"`
	LastUpdateTime string `json:"lastUpdateTime"`
	ThumbnailURL   string `json:"thumbnailURL"`
@@ -165,7 +165,7 @@ type CloudCatalog struct {
	CatalogID   string `json:"catalogID"`
	CatalogName string `json:"catalogName"`
	//CloudID string `json:"cloudID"`
	//CreateTime string `json:"createTime"`
	CreateTime     string `json:"createTime"`
	LastUpdateTime string `json:"lastUpdateTime"`
	//Creator string `json:"creator"`
	//CreatorNickname string `json:"creatorNickname"`
@@ -42,13 +42,13 @@ func calSign(body, ts, randStr string) string {
	sort.Strings(strs)
	body = strings.Join(strs, "")
	body = base64.StdEncoding.EncodeToString([]byte(body))
	res := utils.GetMD5Encode(body) + utils.GetMD5Encode(ts+":"+randStr)
	res = strings.ToUpper(utils.GetMD5Encode(res))
	res := utils.GetMD5EncodeStr(body) + utils.GetMD5EncodeStr(ts+":"+randStr)
	res = strings.ToUpper(utils.GetMD5EncodeStr(res))
	return res
}

func getTime(t string) time.Time {
	stamp, _ := time.ParseInLocation("20060102150405", t, time.Local)
	stamp, _ := time.ParseInLocation("20060102150405", t, utils.CNLoc)
	return stamp
}

@@ -139,6 +139,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
			Name:     catalog.CatalogName,
			Size:     0,
			Modified: getTime(catalog.UpdateTime),
			Ctime:    getTime(catalog.CreateTime),
			IsFolder: true,
		}
		files = append(files, &f)
@@ -150,6 +151,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
				Name:     content.ContentName,
				Size:     content.ContentSize,
				Modified: getTime(content.UpdateTime),
				HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
			},
			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
			//Thumbnail: content.BigthumbnailURL,
@@ -202,6 +204,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
			Size:     0,
			IsFolder: true,
			Modified: getTime(catalog.LastUpdateTime),
			Ctime:    getTime(catalog.CreateTime),
		}
		files = append(files, &f)
	}
@@ -212,6 +215,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
				Name:     content.ContentName,
				Size:     content.ContentSize,
				Modified: getTime(content.LastUpdateTime),
				Ctime:    getTime(content.CreateTime),
			},
			Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
			//Thumbnail: content.BigthumbnailURL,
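For reference, the signing steps visible in this hunk can be reproduced standalone: sort the body's characters, base64 the result, then chain MD5 digests and uppercase. GetMD5EncodeStr is replaced by a local helper here, and whatever preprocessing happens before the sort (not shown in the hunk) is omitted:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
)

func md5Hex(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}

// calSign mirrors the hunk above: sort the body's characters, base64 it,
// then MD5(body) + MD5(ts:randStr), MD5 the concatenation, uppercase.
func calSign(body, ts, randStr string) string {
	strs := strings.Split(body, "")
	sort.Strings(strs)
	body = strings.Join(strs, "")
	body = base64.StdEncoding.EncodeToString([]byte(body))
	res := md5Hex(body) + md5Hex(ts+":"+randStr)
	return strings.ToUpper(md5Hex(res))
}

func main() {
	fmt.Println(calSign(`{"a":1}`, "1690000000000", "16807"))
}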
@@ -385,7 +385,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
	sliceMd5 := fileMd5
	if file.GetSize() > DEFAULT {
		sliceMd5 = utils.GetMD5Encode(strings.Join(md5s, "\n"))
		sliceMd5 = utils.GetMD5EncodeStr(strings.Join(md5s, "\n"))
	}
	res, err = d.uploadRequest("/person/commitMultiUploadFile", map[string]string{
		"uploadFileId": uploadFileId,
@@ -3,10 +3,13 @@ package _189pc
import (
	"context"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
@@ -22,10 +25,17 @@ type Cloud189PC struct {

	loginParam *LoginParam
	tokenInfo  *AppSessionResp

	uploadThread int

	storageConfig driver.Config
}

func (y *Cloud189PC) Config() driver.Config {
	return config
	if y.storageConfig.Name == "" {
		y.storageConfig = config
	}
	return y.storageConfig
}

func (y *Cloud189PC) GetAddition() driver.Additional {
@@ -33,6 +43,9 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
}

func (y *Cloud189PC) Init(ctx context.Context) (err error) {
	// stay compatible with the old upload API
	y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")

	// normalize personal-cloud vs family-cloud parameters
	if y.isFamily() && y.RootFolderID == "-11" {
		y.RootFolderID = ""
@@ -42,6 +55,12 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
		y.FamilyID = ""
	}

	// clamp the number of upload threads
	y.uploadThread, _ = strconv.Atoi(y.UploadThread)
	if y.uploadThread < 1 || y.uploadThread > 32 {
		y.uploadThread, y.UploadThread = 3, "3"
	}

	// initialize the request client
	if y.client == nil {
		y.client = base.NewRestyClient().SetHeaders(map[string]string{
@@ -51,7 +70,7 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
	}

	// avoid logging in repeatedly
	identity := utils.GetMD5Encode(y.Username + y.Password)
	identity := utils.GetMD5EncodeStr(y.Username + y.Password)
	if !y.isLogin() || y.identity != identity {
		y.identity = identity
		if err = y.login(); err != nil {
@@ -107,10 +126,11 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr

	// follow the redirect to get the real link
	downloadUrl.URL = strings.Replace(strings.ReplaceAll(downloadUrl.URL, "&amp;", "&"), "http://", "https://", 1)
	res, err := base.NoRedirectClient.R().SetContext(ctx).Get(downloadUrl.URL)
	res, err := base.NoRedirectClient.R().SetContext(ctx).SetDoNotParseResponse(true).Get(downloadUrl.URL)
	if err != nil {
		return nil, err
	}
	defer res.RawBody().Close()
	if res.StatusCode() == 302 {
		downloadUrl.URL = res.Header().Get("location")
	}
@@ -135,13 +155,14 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
	return like, nil
}

func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	fullUrl := API_URL
	if y.isFamily() {
		fullUrl += "/family/file"
	}
	fullUrl += "/createFolder.action"

	var newFolder Cloud189Folder
	_, err := y.post(fullUrl, func(req *resty.Request) {
		req.SetContext(ctx)
		req.SetQueryParams(map[string]string{
@@ -158,11 +179,15 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
			"parentFolderId": parentDir.GetID(),
		})
		}
	}, nil)
	return err
	}, &newFolder)
	if err != nil {
		return nil, err
	}
	return &newFolder, nil
}

func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var resp CreateBatchTaskResp
	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
		req.SetContext(ctx)
		req.SetFormData(map[string]string{
@@ -182,11 +207,17 @@ func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
			"familyId": y.FamilyID,
		})
		}
	}, nil)
	return err
	}, &resp)
	if err != nil {
		return nil, err
	}
	if err = y.WaitBatchTask("MOVE", resp.TaskID, time.Millisecond*400); err != nil {
		return nil, err
	}
	return srcObj, nil
}

func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	queryParam := make(map[string]string)
	fullUrl := API_URL
	method := http.MethodPost
@@ -195,23 +226,34 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
		method = http.MethodGet
		queryParam["familyId"] = y.FamilyID
	}
	if srcObj.IsDir() {
		fullUrl += "/renameFolder.action"
		queryParam["folderId"] = srcObj.GetID()
		queryParam["destFolderName"] = newName
	} else {

	var newObj model.Obj
	switch f := srcObj.(type) {
	case *Cloud189File:
		fullUrl += "/renameFile.action"
		queryParam["fileId"] = srcObj.GetID()
		queryParam["destFileName"] = newName
		newObj = &Cloud189File{Icon: f.Icon} // reuse the preview icon
	case *Cloud189Folder:
		fullUrl += "/renameFolder.action"
		queryParam["folderId"] = srcObj.GetID()
		queryParam["destFolderName"] = newName
		newObj = &Cloud189Folder{}
	default:
		return nil, errs.NotSupport
	}

	_, err := y.request(fullUrl, method, func(req *resty.Request) {
		req.SetContext(ctx)
		req.SetQueryParams(queryParam)
	}, nil, nil)
	return err
		req.SetContext(ctx).SetQueryParams(queryParam)
	}, nil, newObj)
	if err != nil {
		return nil, err
	}
	return newObj, nil
}

func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
	var resp CreateBatchTaskResp
	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
		req.SetContext(ctx)
		req.SetFormData(map[string]string{
@@ -232,11 +274,15 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
			"familyId": y.FamilyID,
		})
		}
	}, nil)
	return err
	}, &resp)
	if err != nil {
		return err
	}
	return y.WaitBatchTask("COPY", resp.TaskID, time.Second)
}

func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
	var resp CreateBatchTaskResp
	_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
		req.SetContext(ctx)
		req.SetFormData(map[string]string{
@@ -256,19 +302,33 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
			"familyId": y.FamilyID,
		})
		}
	}, nil)
	return err
	}, &resp)
	if err != nil {
		return err
	}
	// batch tasks are rate-limited; firing them too fast makes deletion fail
	return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
}

func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// the rapid-upload probe is slow to respond, so it is opt-in
	if y.Addition.RapidUpload {
		if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
			return newObj, nil
		}
	}

	switch y.UploadMethod {
	case "stream":
		return y.CommonUpload(ctx, dstDir, stream, up)
	case "old":
		return y.OldUpload(ctx, dstDir, stream, up)
	case "rapid":
		return y.FastUpload(ctx, dstDir, stream, up)
	case "stream":
		if stream.GetSize() == 0 {
			return y.FastUpload(ctx, dstDir, stream, up)
		}
		fallthrough
	default:
		return y.CommonUpload(ctx, dstDir, stream, up)
		return y.StreamUpload(ctx, dstDir, stream, up)
	}
}

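The rewritten Put dispatches on UploadMethod: "old" and "rapid" map to their uploaders directly, while "stream" falls through to the default streaming path unless the stream is empty, in which case the rapid path can commit by hash alone. The fallthrough shape in isolation, with placeholder method names:

package main

import "fmt"

func dispatch(method string, size int64) string {
	switch method {
	case "old":
		return "OldUpload"
	case "rapid":
		return "FastUpload"
	case "stream":
		if size == 0 {
			return "FastUpload" // nothing to stream; commit by hash only
		}
		fallthrough // non-empty "stream" uses the default path
	default:
		return "StreamUpload"
	}
}

func main() {
	// prints: FastUpload StreamUpload OldUpload
	fmt.Println(dispatch("stream", 0), dispatch("stream", 42), dispatch("old", 42))
}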
@@ -10,6 +10,7 @@ import (
	"crypto/x509"
	"encoding/hex"
	"encoding/pem"
	"encoding/xml"
	"fmt"
	"math"
	"net/http"
@@ -83,6 +84,55 @@ func MustParseTime(str string) *time.Time {
	return &lastOpTime
}

type Time time.Time

func (t *Time) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
func (t *Time) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
	b, err := e.Token()
	if err != nil {
		return err
	}
	if b, ok := b.(xml.CharData); ok {
		if err = t.Unmarshal(b); err != nil {
			return err
		}
	}
	return e.Skip()
}
func (t *Time) Unmarshal(b []byte) error {
	bs := strings.Trim(string(b), "\"")
	var v time.Time
	var err error
	for _, f := range []string{"2006-01-02 15:04:05 -07", "Jan 2, 2006 15:04:05 PM -07"} {
		v, err = time.ParseInLocation(f, bs+" +08", time.Local)
		if err == nil {
			break
		}
	}
	*t = Time(v)
	return err
}

type String string

func (t *String) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
func (t *String) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
	b, err := e.Token()
	if err != nil {
		return err
	}
	if b, ok := b.(xml.CharData); ok {
		if err = t.Unmarshal(b); err != nil {
			return err
		}
	}
	return e.Skip()
}
func (s *String) Unmarshal(b []byte) error {
	*s = String(bytes.Trim(b, "\""))
	return nil
}

func toFamilyOrderBy(o string) string {
	switch o {
	case "filename":
@@ -110,9 +160,8 @@ func toDesc(o string) string {
func ParseHttpHeader(str string) map[string]string {
	header := make(map[string]string)
	for _, value := range strings.Split(str, "&") {
		i := strings.Index(value, "=")
		if i > 0 {
			header[strings.TrimSpace(value[0:i])] = strings.TrimSpace(value[i+1:])
		if k, v, found := strings.Cut(value, "="); found {
			header[k] = v
		}
	}
	return header
@@ -122,10 +171,6 @@ func MustString(str string, err error) string {
	return str
}

func MustToBytes(b []byte, err error) []byte {
	return b
}

func BoolToNumber(b bool) int {
	if b {
		return 1
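The ParseHttpHeader rewrite replaces the manual strings.Index slicing with strings.Cut (Go 1.18+), which yields both halves and a found flag in one call (note it also drops the old TrimSpace). A quick demonstration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	header := make(map[string]string)
	for _, kv := range strings.Split("Content-Type=text/plain&X-Request-Id=42", "&") {
		if k, v, found := strings.Cut(kv, "="); found {
			header[k] = v
		}
	}
	fmt.Println(header) // map[Content-Type:text/plain X-Request-Id:42]
}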
@@ -15,6 +15,8 @@ type Addition struct {
	Type         string `json:"type" type:"select" options:"personal,family" default:"personal"`
	FamilyID     string `json:"family_id"`
	UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
	RapidUpload  bool   `json:"rapid_upload"`
	NoUseOcr     bool   `json:"no_use_ocr"`
}

@ -3,6 +3,7 @@ package _189pc
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@ -151,8 +152,13 @@ type FamilyInfoResp struct {
|
||||
/*文件部分*/
|
||||
// 文件
|
||||
type Cloud189File struct {
|
||||
CreateDate string `json:"createDate"`
|
||||
FileCata int64 `json:"fileCata"`
|
||||
ID String `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
Md5 string `json:"md5"`
|
||||
|
||||
LastOpTime Time `json:"lastOpTime"`
|
||||
CreateDate Time `json:"createDate"`
|
||||
Icon struct {
|
||||
//iconOption 5
|
||||
SmallUrl string `json:"smallUrl"`
|
||||
@ -162,62 +168,60 @@ type Cloud189File struct {
|
||||
Max600 string `json:"max600"`
|
||||
MediumURL string `json:"mediumUrl"`
|
||||
} `json:"icon"`
|
||||
ID int64 `json:"id"`
|
||||
LastOpTime string `json:"lastOpTime"`
|
||||
Md5 string `json:"md5"`
|
||||
MediaType int `json:"mediaType"`
|
||||
Name string `json:"name"`
|
||||
Orientation int64 `json:"orientation"`
|
||||
Rev string `json:"rev"`
|
||||
Size int64 `json:"size"`
|
||||
StarLabel int64 `json:"starLabel"`
|
||||
|
||||
	parseTime *time.Time
	// Orientation int64  `json:"orientation"`
	// FileCata    int64  `json:"fileCata"`
	// MediaType   int    `json:"mediaType"`
	// Rev         string `json:"rev"`
	// StarLabel   int64  `json:"starLabel"`
}

func (c *Cloud189File) GetSize() int64  { return c.Size }
func (c *Cloud189File) GetName() string { return c.Name }
func (c *Cloud189File) ModTime() time.Time {
	if c.parseTime == nil {
		c.parseTime = MustParseTime(c.LastOpTime)
	}
	return *c.parseTime
}
func (c *Cloud189File) CreateTime() time.Time {
	return time.Time(c.CreateDate)
}
func (c *Cloud189File) IsDir() bool     { return false }
func (c *Cloud189File) GetID() string   { return fmt.Sprint(c.ID) }
func (c *Cloud189File) GetPath() string { return "" }
func (c *Cloud189File) Thumb() string   { return c.Icon.SmallUrl }

func (c *Cloud189File) GetHash() utils.HashInfo {
	return utils.NewHashInfo(utils.MD5, c.Md5)
}

func (c *Cloud189File) GetSize() int64     { return c.Size }
func (c *Cloud189File) GetName() string    { return c.Name }
func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
func (c *Cloud189File) IsDir() bool        { return false }
func (c *Cloud189File) GetID() string      { return string(c.ID) }
func (c *Cloud189File) GetPath() string    { return "" }
func (c *Cloud189File) Thumb() string      { return c.Icon.SmallUrl }

// Folder
type Cloud189Folder struct {
	ID       int64  `json:"id"`
	ID       String `json:"id"`
	ParentID int64  `json:"parentId"`
	Name     string `json:"name"`

	FileCata   int64 `json:"fileCata"`
	FileCount  int64 `json:"fileCount"`
	LastOpTime Time  `json:"lastOpTime"`
	CreateDate Time  `json:"createDate"`

	LastOpTime string `json:"lastOpTime"`
	CreateDate string `json:"createDate"`

	FileListSize int64  `json:"fileListSize"`
	Rev          string `json:"rev"`
	StarLabel    int64  `json:"starLabel"`

	parseTime *time.Time
	// FileListSize int64  `json:"fileListSize"`
	// FileCount    int64  `json:"fileCount"`
	// FileCata     int64  `json:"fileCata"`
	// Rev          string `json:"rev"`
	// StarLabel    int64  `json:"starLabel"`
}

func (c *Cloud189Folder) GetSize() int64  { return 0 }
func (c *Cloud189Folder) GetName() string { return c.Name }
func (c *Cloud189Folder) ModTime() time.Time {
	if c.parseTime == nil {
		c.parseTime = MustParseTime(c.LastOpTime)
	}
	return *c.parseTime
}
func (c *Cloud189Folder) CreateTime() time.Time {
	return time.Time(c.CreateDate)
}
func (c *Cloud189Folder) IsDir() bool     { return true }
func (c *Cloud189Folder) GetID() string   { return fmt.Sprint(c.ID) }
func (c *Cloud189Folder) GetPath() string { return "" }

func (c *Cloud189Folder) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}

func (c *Cloud189Folder) GetSize() int64     { return 0 }
func (c *Cloud189Folder) GetName() string    { return c.Name }
func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }
func (c *Cloud189Folder) IsDir() bool        { return true }
func (c *Cloud189Folder) GetID() string      { return string(c.ID) }
func (c *Cloud189Folder) GetPath() string    { return "" }

type Cloud189FilesResp struct {
	//ResCode    int    `json:"res_code"`
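
Note: the `ID String` fields above rely on a custom String type defined elsewhere in this driver, which tolerates the 189 API returning ids as either JSON numbers or JSON strings. A minimal sketch of what such a type might look like (the actual definition lives in the driver's types file and may differ):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// String accepts both `"123"` and `123` from JSON (illustration only).
type String string

func (s *String) UnmarshalJSON(b []byte) error {
	// Strip the quotes if the value arrived as a JSON string;
	// a bare number passes through unchanged.
	*s = String(strings.Trim(string(b), "\""))
	return nil
}

func main() {
	var v struct {
		ID String `json:"id"`
	}
	_ = json.Unmarshal([]byte(`{"id":123}`), &v)
	fmt.Println(v.ID) // 123
}
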
@@ -252,14 +256,25 @@ type InitMultiUploadResp struct {
	} `json:"data"`
}
type UploadUrlsResp struct {
	Code       string          `json:"code"`
	UploadUrls map[string]Part `json:"uploadUrls"`
	Code string                    `json:"code"`
	Data map[string]UploadUrlsData `json:"uploadUrls"`
}
type Part struct {
type UploadUrlsData struct {
	RequestURL    string `json:"requestURL"`
	RequestHeader string `json:"requestHeader"`
}

type UploadUrlInfo struct {
	PartNumber int
	Headers    map[string]string
	UploadUrlsData
}

type UploadProgress struct {
	UploadInfo  InitMultiUploadResp
	UploadParts []string
}
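
UploadProgress pairs the init response with the part descriptors still to be uploaded, so an interrupted multipart upload can be resumed; the FastUpload hunk later in this diff saves and restores it via base.SaveUploadProgress / base.GetUploadProgress. A minimal sketch of round-tripping such progress through JSON — the field shapes and the session+md5 key scheme here are assumptions for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the real UploadProgress above.
type uploadProgress struct {
	UploadFileID string   // stands in for InitMultiUploadResp
	UploadParts  []string // "partNumber-md5base64" entries not yet uploaded
}

func main() {
	p := uploadProgress{UploadFileID: "f1", UploadParts: []string{"1-abc=", "2-def="}}
	// Persist keyed by something stable, e.g. sessionKey+fileMd5 (assumed scheme).
	blob, _ := json.Marshal(p)

	var restored uploadProgress
	_ = json.Unmarshal(blob, &restored)
	fmt.Println(restored.UploadParts) // [1-abc= 2-def=]
}
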

/* Second upload method */
type CreateUploadFileResp struct {
	// Upload file request ID
@@ -284,15 +299,60 @@ func (r *GetUploadFileStatusResp) GetSize() int64 {
	return r.DataSize + r.Size
}

type CommitUploadFileResp struct {
type CommitMultiUploadFileResp struct {
	File struct {
		UserFileID String `json:"userFileId"`
		FileName   string `json:"fileName"`
		FileSize   int64  `json:"fileSize"`
		FileMd5    string `json:"fileMd5"`
		CreateDate Time   `json:"createDate"`
	} `json:"file"`
}

func (f *CommitMultiUploadFileResp) toFile() *Cloud189File {
	return &Cloud189File{
		ID:         f.File.UserFileID,
		Name:       f.File.FileName,
		Size:       f.File.FileSize,
		Md5:        f.File.FileMd5,
		LastOpTime: f.File.CreateDate,
		CreateDate: f.File.CreateDate,
	}
}

type OldCommitUploadFileResp struct {
	XMLName    xml.Name `xml:"file"`
	Id         string   `xml:"id"`
	ID         String   `xml:"id"`
	Name       string   `xml:"name"`
	Size       string   `xml:"size"`
	Size       int64    `xml:"size"`
	Md5        string   `xml:"md5"`
	CreateDate string   `xml:"createDate"`
	Rev        string   `xml:"rev"`
	UserId     string   `xml:"userId"`
	CreateDate Time     `xml:"createDate"`
}

func (f *OldCommitUploadFileResp) toFile() *Cloud189File {
	return &Cloud189File{
		ID:         f.ID,
		Name:       f.Name,
		Size:       f.Size,
		Md5:        f.Md5,
		CreateDate: f.CreateDate,
		LastOpTime: f.CreateDate,
	}
}

type CreateBatchTaskResp struct {
	TaskID string `json:"taskId"`
}

type BatchTaskStateResp struct {
	FailedCount         int     `json:"failedCount"`
	Process             int     `json:"process"`
	SkipCount           int     `json:"skipCount"`
	SubTaskCount        int     `json:"subTaskCount"`
	SuccessedCount      int     `json:"successedCount"`
	SuccessedFileIDList []int64 `json:"successedFileIdList"`
	TaskID              string  `json:"taskId"`
	TaskStatus          int     `json:"taskStatus"` // 1 initializing, 2 conflict, 3 running, 4 done
}

/* query encryption parameters */
@@ -13,8 +13,9 @@ import (
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"
@@ -24,6 +25,7 @@ import (
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/internal/setting"
	"github.com/alist-org/alist/v3/pkg/errgroup"
	"github.com/alist-org/alist/v3/pkg/utils"

	"github.com/avast/retry-go"
@@ -268,7 +270,7 @@ func (y *Cloud189PC) login() (err error) {
		"validateCode": y.VCode,
		"captchaToken": param.CaptchaToken,
		"returnUrl":    RETURN_URL,
		"mailSuffix":   "@189.cn",
		// "mailSuffix":   "@189.cn",
		"dynamicCheck": "FALSE",
		"clientType":   CLIENT_TYPE,
		"cb_SaveName":  "1",
@@ -434,15 +436,20 @@ func (y *Cloud189PC) refreshSession() (err error) {
}

// Normal upload
func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
	var DEFAULT = partSize(file.GetSize())
	var count = int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
// Zero-byte files cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	var sliceSize = partSize(file.GetSize())
	count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
	lastPartSize := file.GetSize() % sliceSize
	if file.GetSize() > 0 && lastPartSize == 0 {
		lastPartSize = sliceSize
	}

	params := Params{
		"parentFolderId": dstDir.GetID(),
		"fileName":       url.QueryEscape(file.GetName()),
		"fileSize":       fmt.Sprint(file.GetSize()),
		"sliceSize":      fmt.Sprint(DEFAULT),
		"sliceSize":      fmt.Sprint(sliceSize),
		"lazyCheck":      "1",
	}

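
The lastPartSize computation introduced above guards the exact-multiple case: a file size that divides evenly by sliceSize would otherwise yield a zero-byte final slice. A tiny worked example:

package main

import (
	"fmt"
	"math"
)

func main() {
	const sliceSize int64 = 10 << 20 // 10 MiB, chosen for illustration
	for _, size := range []int64{25 << 20, 20 << 20} {
		count := int(math.Ceil(float64(size) / float64(sliceSize)))
		last := size % sliceSize
		if size > 0 && last == 0 {
			last = sliceSize // exact multiple: last slice is a full slice, not 0
		}
		fmt.Println(count, last) // 3 5242880, then 2 10485760
	}
}
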
@@ -457,72 +464,71 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo

	// Initialize the upload
	var initMultiUpload InitMultiUploadResp
	_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
	_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, params, &initMultiUpload)
	if err != nil {
		return err
		return nil, err
	}

	threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))

	fileMd5 := md5.New()
	silceMd5 := md5.New()
	silceMd5Hexs := make([]string, 0, count)
	byteData := bytes.NewBuffer(make([]byte, DEFAULT))

	for i := 1; i <= count; i++ {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		if utils.IsCanceled(upCtx) {
			break
		}

		byteData := make([]byte, sliceSize)
		if i == count {
			byteData = byteData[:lastPartSize]
		}

		// Read the chunk
		byteData.Reset()
		silceMd5.Reset()
		_, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5, byteData), file, DEFAULT)
		if err != io.EOF && err != io.ErrUnexpectedEOF && err != nil {
			return err
		if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
			return nil, err
		}

		// Compute the chunk md5 and hex/base64-encode it
		md5Bytes := silceMd5.Sum(nil)
		silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
		silceMd5Base64 := base64.StdEncoding.EncodeToString(md5Bytes)
		partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

		// Get the upload URL
		var uploadUrl UploadUrlsResp
		_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
			func(req *resty.Request) {
				req.SetContext(ctx)
			}, Params{
				"partInfo":     fmt.Sprintf("%d-%s", i, silceMd5Base64),
				"uploadFileId": initMultiUpload.Data.UploadFileID,
			}, &uploadUrl)
		if err != nil {
			return err
		}
		threadG.Go(func(ctx context.Context) error {
			uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
			if err != nil {
				return err
			}

		// Start uploading
		uploadData := uploadUrl.UploadUrls[fmt.Sprint("partNumber_", i)]

		err = retry.Do(func() error {
			_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(byteData.Bytes()))
			return err
		},
			retry.Context(ctx),
			retry.Attempts(3),
			retry.Delay(time.Second),
			retry.MaxDelay(5*time.Second))
		if err != nil {
			return err
		}
		up(int(i * 100 / count))
			// step.4 upload the slice
			uploadUrl := uploadUrls[0]
			_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
			if err != nil {
				return err
			}
			up(int(threadG.Success()) * 100 / count)
			return nil
		})
	}
	if err = threadG.Wait(); err != nil {
		return nil, err
	}

	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
	sliceMd5Hex := fileMd5Hex
	if file.GetSize() > DEFAULT {
		sliceMd5Hex = strings.ToUpper(utils.GetMD5Encode(strings.Join(silceMd5Hexs, "\n")))
	if file.GetSize() > sliceSize {
		sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
	}

	// Commit the upload
	var resp CommitMultiUploadFileResp
	_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
		func(req *resty.Request) {
			req.SetContext(ctx)
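
The rewritten read loop above replaces io.CopyN-into-a-buffer with io.ReadFull over a TeeReader, so each slice is read from the stream exactly once while both the whole-file and per-slice md5 states are fed. The pattern in isolation, as a self-contained sketch:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("0123456789abcdef")
	fileMd5, sliceMd5 := md5.New(), md5.New()

	buf := make([]byte, 8) // one slice
	// Every byte read into buf is also written to both hash states.
	if _, err := io.ReadFull(io.TeeReader(src, io.MultiWriter(fileMd5, sliceMd5)), buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf), hex.EncodeToString(sliceMd5.Sum(nil)))
}
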
@@ -533,199 +539,240 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
			"lazyCheck":    "1",
			"isLog":        "0",
			"opertype":     "3",
		}, nil)
	return err
		}, &resp)
	if err != nil {
		return nil, err
	}
	return resp.toFile(), nil
}

func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
	fileMd5 := stream.GetHash().GetHash(utils.MD5)
	if len(fileMd5) < utils.MD5.Width {
		return nil, errors.New("invalid hash")
	}

	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
	if err != nil {
		return nil, err
	}

	if uploadInfo.FileDataExists != 1 {
		return nil, errors.New("rapid upload fail")
	}

	return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
}

// Fast (rapid) upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
	// The full file md5 is needed, so io.Seek must be supported
	tempFile, err := utils.CreateTempFile(file.GetReadCloser())
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	tempFile, err := file.CacheFullInTempFile()
	if err != nil {
		return err
		return nil, err
	}
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()

	var DEFAULT = partSize(file.GetSize())
	count := int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
	var sliceSize = partSize(file.GetSize())
	count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
	lastSliceSize := file.GetSize() % sliceSize
	if file.GetSize() > 0 && lastSliceSize == 0 {
		lastSliceSize = sliceSize
	}

	// Compute the required info up front
	//step.1 compute the required info up front
	byteSize := sliceSize
	fileMd5 := md5.New()
	silceMd5 := md5.New()
	silceMd5Hexs := make([]string, 0, count)
	silceMd5Base64s := make([]string, 0, count)
	partInfos := make([]string, 0, count)
	for i := 1; i <= count; i++ {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
			return nil, ctx.Err()
		}

		if i == count {
			byteSize = lastSliceSize
		}

		silceMd5.Reset()
		if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, DEFAULT); err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return err
		if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
			return nil, err
		}
		md5Byte := silceMd5.Sum(nil)
		silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
		silceMd5Base64s = append(silceMd5Base64s, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
	}
	if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
		return err
		partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
	}

	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
	sliceMd5Hex := fileMd5Hex
	if file.GetSize() > DEFAULT {
		sliceMd5Hex = strings.ToUpper(utils.GetMD5Encode(strings.Join(silceMd5Hexs, "\n")))
	}

	// Check whether rapid upload is supported
	params := Params{
		"parentFolderId": dstDir.GetID(),
		"fileName":       url.QueryEscape(file.GetName()),
		"fileSize":       fmt.Sprint(file.GetSize()),
		"fileMd5":        fileMd5Hex,
		"sliceSize":      fmt.Sprint(DEFAULT),
		"sliceMd5":       sliceMd5Hex,
	if file.GetSize() > sliceSize {
		sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
	}

	fullUrl := UPLOAD_URL
	if y.isFamily() {
		params.Set("familyId", y.FamilyID)
		fullUrl += "/family"
	} else {
		//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
		fullUrl += "/person"
	}

	var uploadInfo InitMultiUploadResp
	_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
		req.SetContext(ctx)
	}, params, &uploadInfo)
	if err != nil {
		return err
	}

	// The file does not exist in the drive yet; start uploading
	if uploadInfo.Data.FileDataExists != 1 {
		var uploadUrls UploadUrlsResp
		_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
			func(req *resty.Request) {
				req.SetContext(ctx)
			}, Params{
				"uploadFileId": uploadInfo.Data.UploadFileID,
				"partInfo":     strings.Join(silceMd5Base64s, ","),
			}, &uploadUrls)
	// Try to resume saved progress
	uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
	if !ok {
		//step.2 pre-upload
		params := Params{
			"parentFolderId": dstDir.GetID(),
			"fileName":       url.QueryEscape(file.GetName()),
			"fileSize":       fmt.Sprint(file.GetSize()),
			"fileMd5":        fileMd5Hex,
			"sliceSize":      fmt.Sprint(sliceSize),
			"sliceMd5":       sliceMd5Hex,
		}
		if y.isFamily() {
			params.Set("familyId", y.FamilyID)
		}
		var uploadInfo InitMultiUploadResp
		_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
			req.SetContext(ctx)
		}, params, &uploadInfo)
		if err != nil {
			return err
			return nil, err
		}

		buf := make([]byte, DEFAULT)
		for i := 1; i <= count; i++ {
			if utils.IsCanceled(ctx) {
				return ctx.Err()
			}

			n, err := io.ReadFull(tempFile, buf)
			if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
				return err
			}
			uploadData := uploadUrls.UploadUrls[fmt.Sprint("partNumber_", i)]
			err = retry.Do(func() error {
				_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(buf[:n]))
				return err
			},
				retry.Context(ctx),
				retry.Attempts(3),
				retry.Delay(time.Second),
				retry.MaxDelay(5*time.Second))
			if err != nil {
				return err
			}

			up(int(i * 100 / count))
		uploadProgress = &UploadProgress{
			UploadInfo:  uploadInfo,
			UploadParts: partInfos,
		}
	}

	// Commit
	uploadInfo := uploadProgress.UploadInfo.Data
	// The file does not exist in the drive yet; start uploading
	if uploadInfo.FileDataExists != 1 {
		threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
			retry.Attempts(3),
			retry.Delay(time.Second),
			retry.DelayType(retry.BackOffDelay))
		for i, uploadPart := range uploadProgress.UploadParts {
			if utils.IsCanceled(upCtx) {
				break
			}

			i, uploadPart := i, uploadPart
			threadG.Go(func(ctx context.Context) error {
				// step.3 get the upload URLs
				uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
				if err != nil {
					return err
				}
				uploadUrl := uploadUrls[0]

				byteSize, offset := sliceSize, int64(uploadUrl.PartNumber-1)*sliceSize
				if uploadUrl.PartNumber == count {
					byteSize = lastSliceSize
				}

				// step.4 upload the slice
				_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
				if err != nil {
					return err
				}

				up(int(threadG.Success()) * 100 / len(uploadUrls))
				uploadProgress.UploadParts[i] = ""
				return nil
			})
		}
		if err = threadG.Wait(); err != nil {
			if errors.Is(err, context.Canceled) {
				uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
				base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
			}
			return nil, err
		}
	}

	// step.5 commit
	var resp CommitMultiUploadFileResp
	_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
		func(req *resty.Request) {
			req.SetContext(ctx)
		}, Params{
			"uploadFileId": uploadInfo.Data.UploadFileID,
			"uploadFileId": uploadInfo.UploadFileID,
			"isLog":        "0",
			"opertype":     "3",
		}, nil)
	return err
		}, &resp)
	if err != nil {
		return nil, err
	}
	return resp.toFile(), nil
}

func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
	// The full file md5 is needed, so io.Seek must be supported
	tempFile, err := utils.CreateTempFile(file.GetReadCloser())
	if err != nil {
		return err
// Get the upload slice info
// The http body has a size limit; too many part infos will cause an error
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
	fullUrl := UPLOAD_URL
	if y.isFamily() {
		fullUrl += "/family"
	} else {
		fullUrl += "/person"
	}
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()

	// Compute the md5
	fileMd5 := md5.New()
	if _, err := io.Copy(fileMd5, tempFile); err != nil {
		return err
	var uploadUrlsResp UploadUrlsResp
	_, err := y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
		func(req *resty.Request) {
			req.SetContext(ctx)
		}, Params{
			"uploadFileId": uploadFileId,
			"partInfo":     strings.Join(partInfo, ","),
		}, &uploadUrlsResp)
	if err != nil {
		return nil, err
	}
	if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
		return err
	uploadUrls := uploadUrlsResp.Data

	if len(uploadUrls) != len(partInfo) {
		return nil, fmt.Errorf("uploadUrls get error, due to get length %d, real length %d", len(partInfo), len(uploadUrls))
	}

	uploadUrlInfos := make([]UploadUrlInfo, 0, len(uploadUrls))
	for k, uploadUrl := range uploadUrls {
		partNumber, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
		if err != nil {
			return nil, err
		}
		uploadUrlInfos = append(uploadUrlInfos, UploadUrlInfo{
			PartNumber:     partNumber,
			Headers:        ParseHttpHeader(uploadUrl.RequestHeader),
			UploadUrlsData: uploadUrl,
		})
	}
	sort.Slice(uploadUrlInfos, func(i, j int) bool {
		return uploadUrlInfos[i].PartNumber < uploadUrlInfos[j].PartNumber
	})
	return uploadUrlInfos, nil
}

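
Because getMultiUploadUrls takes a comma-joined partInfo list and, per the comment above, an oversized request body fails, a caller holding many parts would fetch URLs in batches. A hedged sketch, assuming it lives in the same package as GetMultiUploadUrls; the batch size of 100 is an assumption for illustration, not a documented limit:

// Sketch: fetch upload URLs in batches of at most batchSize parts.
func getUrlsBatched(ctx context.Context, y *Cloud189PC, uploadFileId string, parts []string) ([]UploadUrlInfo, error) {
	const batchSize = 100 // assumed safe request-body size
	var all []UploadUrlInfo
	for start := 0; start < len(parts); start += batchSize {
		end := start + batchSize
		if end > len(parts) {
			end = len(parts)
		}
		urls, err := y.GetMultiUploadUrls(ctx, uploadFileId, parts[start:end]...)
		if err != nil {
			return nil, err
		}
		all = append(all, urls...)
	}
	return all, nil
}
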
// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	tempFile, err := file.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}
	fileMd5, err := utils.HashFile(utils.MD5, tempFile)
	if err != nil {
		return nil, err
	}
	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))

	// Create the upload session
	var uploadInfo CreateUploadFileResp

	fullUrl := API_URL + "/createUploadFile.action"
	if y.isFamily() {
		fullUrl = API_URL + "/family/file/createFamilyFile.action"
	}
	_, err = y.post(fullUrl, func(req *resty.Request) {
		req.SetContext(ctx)
		if y.isFamily() {
			req.SetQueryParams(map[string]string{
				"familyId":     y.FamilyID,
				"fileMd5":      fileMd5Hex,
				"fileName":     file.GetName(),
				"fileSize":     fmt.Sprint(file.GetSize()),
				"parentId":     dstDir.GetID(),
				"resumePolicy": "1",
			})
		} else {
			req.SetFormData(map[string]string{
				"parentFolderId": dstDir.GetID(),
				"fileName":       file.GetName(),
				"size":           fmt.Sprint(file.GetSize()),
				"md5":            fileMd5Hex,
				"opertype":       "3",
				"flag":           "1",
				"resumePolicy":   "1",
				"isLog":          "0",
				// "baseFileId": "",
				// "lastWrite": "",
				// "localPath": strings.ReplaceAll(param.LocalPath, "\\", "/"),
				// "fileExt": "",
			})
		}
	}, &uploadInfo)

	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
	if err != nil {
		return err
		return nil, err
	}

	// The file does not exist in the drive yet; start uploading
	status := GetUploadFileStatusResp{CreateUploadFileResp: uploadInfo}
	for status.Size < file.GetSize() && status.FileDataExists != 1 {
	status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
	for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
			return nil, ctx.Err()
		}

		header := map[string]string{
@@ -742,7 +789,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model

		_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
		if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
			return err
			return nil, err
		}

		// Get the resume-point status
@@ -760,35 +807,80 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
		}
		}, &status)
		if err != nil {
			return err
			return nil, err
		}

		if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
			return err
			return nil, err
		}
		up(int(status.Size / file.GetSize()))
		up(int(status.GetSize()/file.GetSize()) * 100)
	}

	// Commit
	var resp CommitUploadFileResp
	_, err = y.post(status.FileCommitUrl, func(req *resty.Request) {
	return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
}

// Create the upload session
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
	var uploadInfo CreateUploadFileResp

	fullUrl := API_URL + "/createUploadFile.action"
	if y.isFamily() {
		fullUrl = API_URL + "/family/file/createFamilyFile.action"
	}
	_, err := y.post(fullUrl, func(req *resty.Request) {
		req.SetContext(ctx)
		if y.isFamily() {
			req.SetQueryParams(map[string]string{
				"familyId":     y.FamilyID,
				"parentId":     parentID,
				"fileMd5":      fileMd5,
				"fileName":     fileName,
				"fileSize":     fileSize,
				"resumePolicy": "1",
			})
		} else {
			req.SetFormData(map[string]string{
				"parentFolderId": parentID,
				"fileName":       fileName,
				"size":           fileSize,
				"md5":            fileMd5,
				"opertype":       "3",
				"flag":           "1",
				"resumePolicy":   "1",
				"isLog":          "0",
			})
		}
	}, &uploadInfo)

	if err != nil {
		return nil, err
	}
	return &uploadInfo, nil
}

// Commit the uploaded file
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
	var resp OldCommitUploadFileResp
	_, err := y.post(fileCommitUrl, func(req *resty.Request) {
		req.SetContext(ctx)
		if y.isFamily() {
			req.SetHeaders(map[string]string{
				"ResumePolicy": "1",
				"UploadFileId": fmt.Sprint(status.UploadFileId),
				"UploadFileId": fmt.Sprint(uploadFileID),
				"FamilyId":     fmt.Sprint(y.FamilyID),
			})
		} else {
			req.SetFormData(map[string]string{
				"opertype":     "3",
				"resumePolicy": "1",
				"uploadFileId": fmt.Sprint(status.UploadFileId),
				"uploadFileId": fmt.Sprint(uploadFileID),
				"isLog":        "0",
			})
		}
	}, &resp)
	return err
	if err != nil {
		return nil, err
	}
	return resp.toFile(), nil
}

func (y *Cloud189PC) isFamily() bool {
@@ -829,3 +921,33 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
	}
	return fmt.Sprint(infos[0].FamilyID), nil
}

func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
	var resp BatchTaskStateResp
	_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
		req.SetFormData(map[string]string{
			"type":   aType,
			"taskId": taskID,
		})
	}, &resp)
	if err != nil {
		return nil, err
	}
	return &resp, nil
}

func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
	for {
		state, err := y.CheckBatchTask(aType, taskID)
		if err != nil {
			return err
		}
		switch state.TaskStatus {
		case 2:
			return errors.New("there is a conflict with the target object")
		case 4:
			return nil
		}
		time.Sleep(t)
	}
}

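
WaitBatchTask polls CheckBatchTask until the task either completes (TaskStatus 4) or reports a conflict (TaskStatus 2), sleeping between polls. A hedged usage sketch, assuming it sits in the same package; the "DELETE" task type string and the poll interval are assumptions for illustration:

// Sketch: taskID would come from a CreateBatchTaskResp returned elsewhere.
func waitForDelete(y *Cloud189PC, taskID string) error {
	return y.WaitBatchTask("DELETE", taskID, 2*time.Second) // poll every 2s
}
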
@@ -3,6 +3,7 @@ package alist_v3

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path"
	"strconv"
@@ -93,8 +94,10 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
			Object: model.Object{
				Name:     f.Name,
				Modified: f.Modified,
				Ctime:    f.Created,
				Size:     f.Size,
				IsFolder: f.IsDir,
				HashInfo: utils.FromString(f.HashInfo),
			},
			Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
		}
@@ -176,7 +179,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
			SetHeader("Password", d.MetaPassword).
			SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
			SetContentLength(true).
			SetBody(stream.GetReadCloser())
			SetBody(io.ReadCloser(stream))
	})
	return err
}

@@ -18,9 +18,11 @@ type ObjResp struct {
	Size     int64     `json:"size"`
	IsDir    bool      `json:"is_dir"`
	Modified time.Time `json:"modified"`
	Created  time.Time `json:"created"`
	Sign     string    `json:"sign"`
	Thumb    string    `json:"thumb"`
	Type     int       `json:"type"`
	HashInfo string    `json:"hashinfo"`
}

type FsListResp struct {

@@ -7,6 +7,7 @@ import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"github.com/alist-org/alist/v3/internal/stream"
	"io"
	"math"
	"math/big"
@@ -67,7 +68,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
		return nil
	}
	// init deviceID
	deviceID := utils.GetSHA256Encode(d.UserID)
	deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
	// init privateKey
	privateKey, _ := NewPrivateKeyFromHex(deviceID)
	state := State{
@@ -163,14 +164,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
	return err
}

func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	file := model.FileStream{
		Obj:        stream,
		ReadCloser: stream,
		Mimetype:   stream.GetMimetype(),
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
	file := stream.FileStream{
		Obj:      streamer,
		Reader:   streamer,
		Mimetype: streamer.GetMimetype(),
	}
	const DEFAULT int64 = 10485760
	var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
	var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))

	partInfoList := make([]base.Json, 0, count)
	for i := 1; i <= count; i++ {
@@ -187,25 +188,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	}

	var localFile *os.File
	if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
		localFile, _ = fileStream.ReadCloser.(*os.File)
	if fileStream, ok := file.Reader.(*stream.FileStream); ok {
		localFile, _ = fileStream.Reader.(*os.File)
	}
	if d.RapidUpload {
		buf := bytes.NewBuffer(make([]byte, 0, 1024))
		io.CopyN(buf, file, 1024)
		reqBody["pre_hash"] = utils.GetSHA1Encode(buf.String())
		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
		if localFile != nil {
			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
				return err
			}
		} else {
			// Splice the head bytes back on
			file.ReadCloser = struct {
			file.Reader = struct {
				io.Reader
				io.Closer
			}{
				Reader: io.MultiReader(buf, file),
				Closer: file,
				Closer: &file,
			}
		}
	} else {
@@ -259,7 +260,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
		(t.file.slice(o.toNumber(), Math.min(o.plus(8).toNumber(), t.file.size)))
	*/
	buf := make([]byte, 8)
	r, _ := new(big.Int).SetString(utils.GetMD5Encode(d.AccessToken)[:16], 16)
	r, _ := new(big.Int).SetString(utils.GetMD5EncodeStr(d.AccessToken)[:16], 16)
	i := new(big.Int).SetInt64(file.GetSize())
	o := new(big.Int).SetInt64(0)
	if file.GetSize() > 0 {
@@ -281,7 +282,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
	if _, err = localFile.Seek(0, io.SeekStart); err != nil {
		return err
	}
	file.ReadCloser = localFile
	file.Reader = localFile
	}

	for i, partInfo := range resp.PartInfoList {

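
The pre_hash branch above consumes the first 1024 bytes of a non-seekable stream to hash them, then splices the buffered head back in front of the remaining body with io.MultiReader so the upload still sees the full file. The trick in isolation, as a self-contained sketch:

package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	body := io.Reader(strings.NewReader("head-bytes|rest-of-file"))

	// Peek at the head for hashing without losing it.
	head := bytes.NewBuffer(make([]byte, 0, 10))
	if _, err := io.CopyN(head, body, 10); err != nil {
		panic(err)
	}
	sum := sha1.Sum(head.Bytes())
	fmt.Println("pre_hash:", hex.EncodeToString(sum[:]))

	// Stitch the head back on; reading now yields the whole stream again.
	body = io.MultiReader(head, body)
	rest, _ := io.ReadAll(body)
	fmt.Println(string(rest)) // head-bytes|rest-of-file
}
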
@@ -2,12 +2,12 @@ package aliyundrive_open

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"time"

	"github.com/Xhofe/rateg"
	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
@@ -36,13 +36,25 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}

func (d *AliyundriveOpen) Init(ctx context.Context) error {
	if d.LIVPDownloadFormat == "" {
		d.LIVPDownloadFormat = "jpeg"
	}
	if d.DriveType == "" {
		d.DriveType = "default"
	}
	res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
	if err != nil {
		return err
	}
	d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
	d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
	d.limitLink = utils.LimitRateCtx(d.link, time.Second)
	d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
	d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
		Limit:  4,
		Bucket: 1,
	})
	d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
		Limit:  1,
		Bucket: 1,
	})
	return nil
}

@@ -75,6 +87,12 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
		return nil, err
	}
	url := utils.Json.Get(res, "url").ToString()
	if url == "" {
		if utils.Ext(file.GetName()) != "livp" {
			return nil, errors.New("get download url failed: " + string(res))
		}
		url = utils.Json.Get(res, "streamsUrl", d.LIVPDownloadFormat).ToString()
	}
	exp := time.Hour
	return &model.Link{
		URL: url,
@@ -89,7 +107,9 @@ func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.L
	return d.limitLink(ctx, file)
}

func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	nowTime, _ := getNowTime()
	newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
	_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
@@ -97,12 +117,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
			"name":            dirName,
			"type":            "folder",
			"check_name_mode": "refuse",
		})
	}).SetResult(&newDir)
		})
	return err
	if err != nil {
		return nil, err
	}
	return fileToObj(newDir), nil
}

func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	var resp MoveOrCopyResp
	_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
@@ -110,20 +134,36 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) er
			"to_parent_file_id": dstDir.GetID(),
			"check_name_mode":   "refuse", // optional:ignore,auto_rename,refuse
			//"new_name":          "newName", // The new name to use when a file of the same name exists
		})
	}).SetResult(&resp)
		})
	return err
	if err != nil {
		return nil, err
	}
	if resp.Exist {
		return nil, errors.New("existence of files with the same name")
	}

	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
		srcObj.ID = resp.FileID
		srcObj.Modified = time.Now()
		return srcObj, nil
	}
	return nil, nil
}

func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	var newFile File
	_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id": d.DriveId,
			"file_id":  srcObj.GetID(),
			"name":     newName,
		})
	}).SetResult(&newFile)
		})
	return err
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}

func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -152,75 +192,8 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
	return err
}

func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	// rapid_upload is not currently supported
	// 1. create
	// Part Size Unit: Bytes, Default: 20MB,
	// Maximum number of slices 10,000, ≈195.3125GB
	var partSize int64 = 20 * 1024 * 1024
	createData := base.Json{
		"drive_id":        d.DriveId,
		"parent_file_id":  dstDir.GetID(),
		"name":            stream.GetName(),
		"type":            "file",
		"check_name_mode": "ignore",
	}
	count := 1
	if stream.GetSize() > partSize {
		if stream.GetSize() > 1*1024*1024*1024*1024 { // file Size over 1TB
			partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
		} else if stream.GetSize() > 768*1024*1024*1024 { // over 768GB
			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
		} else if stream.GetSize() > 512*1024*1024*1024 { // over 512GB
			partSize = 82463373 // ≈ 78.6432MB
		} else if stream.GetSize() > 384*1024*1024*1024 { // over 384GB
			partSize = 54975582 // ≈ 52.4288MB
		} else if stream.GetSize() > 256*1024*1024*1024 { // over 256GB
			partSize = 41231687 // ≈ 39.3216MB
		} else if stream.GetSize() > 128*1024*1024*1024 { // over 128GB
			partSize = 27487791 // ≈ 26.2144MB
		}
		count = int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
		createData["part_info_list"] = makePartInfos(count)
	}
	var createResp CreateResp
	_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(createData).SetResult(&createResp)
	})
	if err != nil {
		return err
	}
	// 2. upload
	preTime := time.Now()
	for i := 1; i <= len(createResp.PartInfoList); i++ {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		err = d.uploadPart(ctx, i, count, utils.NewMultiReadable(io.LimitReader(stream, partSize)), &createResp, true)
		if err != nil {
			return err
		}
		if count > 0 {
			up(i * 100 / count)
		}
		// refresh upload url if 50 minutes passed
		if time.Since(preTime) > 50*time.Minute {
			createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
			if err != nil {
				return err
			}
			preTime = time.Now()
		}
	}
	// 3. complete
	_, err = d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id":  d.DriveId,
			"file_id":   createResp.FileId,
			"upload_id": createResp.UploadId,
		})
	})
	return err
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	return d.upload(ctx, dstDir, stream, up)
}

func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -248,3 +221,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
}

var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil)

@@ -6,16 +6,19 @@ import (
)

type Addition struct {
	DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
	driver.RootID
	RefreshToken   string `json:"refresh_token" required:"true"`
	OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
	OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
	OauthTokenURL  string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/ali_open/token"`
	ClientID       string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
	ClientSecret   string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
	RemoveWay      string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
	InternalUpload bool   `json:"internal_upload" help:"If you are using Aliyun ECS is located in Beijing, you can turn it on to boost the upload speed"`
	AccessToken    string
	RefreshToken       string `json:"refresh_token" required:"true"`
	OrderBy            string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
	OrderDirection     string `json:"order_direction" type:"select" options:"ASC,DESC"`
	OauthTokenURL      string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/ali_open/token"`
	ClientID           string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
	ClientSecret       string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
	RemoveWay          string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
	RapidUpload        bool   `json:"rapid_upload" help:"If you enable this option, the file will be uploaded to the server first, so the progress will be incorrect"`
	InternalUpload     bool   `json:"internal_upload" help:"If you are using Aliyun ECS is located in Beijing, you can turn it on to boost the upload speed"`
	LIVPDownloadFormat string `json:"livp_download_format" type:"select" options:"jpeg,mov" default:"jpeg"`
	AccessToken        string
}

var config = driver.Config{

@@ -1,6 +1,7 @@
package aliyundrive_open

import (
	"github.com/alist-org/alist/v3/pkg/utils"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
@@ -17,22 +18,28 @@ type Files struct {
}

type File struct {
	DriveId       string     `json:"drive_id"`
	FileId        string     `json:"file_id"`
	ParentFileId  string     `json:"parent_file_id"`
	Name          string     `json:"name"`
	Size          int64      `json:"size"`
	FileExtension string     `json:"file_extension"`
	ContentHash   string     `json:"content_hash"`
	Category      string     `json:"category"`
	Type          string     `json:"type"`
	Thumbnail     string     `json:"thumbnail"`
	Url           string     `json:"url"`
	CreatedAt     *time.Time `json:"created_at"`
	UpdatedAt     time.Time  `json:"updated_at"`
	DriveId       string    `json:"drive_id"`
	FileId        string    `json:"file_id"`
	ParentFileId  string    `json:"parent_file_id"`
	Name          string    `json:"name"`
	Size          int64     `json:"size"`
	FileExtension string    `json:"file_extension"`
	ContentHash   string    `json:"content_hash"`
	Category      string    `json:"category"`
	Type          string    `json:"type"`
	Thumbnail     string    `json:"thumbnail"`
	Url           string    `json:"url"`
	CreatedAt     time.Time `json:"created_at"`
	UpdatedAt     time.Time `json:"updated_at"`

	// create only
	FileName string `json:"file_name"`
}

func fileToObj(f File) *model.ObjThumb {
	if f.Name == "" {
		f.Name = f.FileName
	}
	return &model.ObjThumb{
		Object: model.Object{
			ID: f.FileId,
@@ -40,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
			Size:     f.Size,
			Modified: f.UpdatedAt,
			IsFolder: f.Type == "folder",
			Ctime:    f.CreatedAt,
			HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
		},
		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
	}
@@ -67,3 +76,9 @@ type CreateResp struct {
	RapidUpload  bool       `json:"rapid_upload"`
	PartInfoList []PartInfo `json:"part_info_list"`
}

type MoveOrCopyResp struct {
	Exist   bool   `json:"exist"`
	DriveID string `json:"drive_id"`
	FileID  string `json:"file_id"`
}

269	drivers/aliyundrive_open/upload.go (new file)
@@ -0,0 +1,269 @@
package aliyundrive_open

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"io"
	"math"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/avast/retry-go"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

func makePartInfos(size int) []base.Json {
	partInfoList := make([]base.Json, size)
	for i := 0; i < size; i++ {
		partInfoList[i] = base.Json{"part_number": 1 + i}
	}
	return partInfoList
}

func calPartSize(fileSize int64) int64 {
	var partSize int64 = 20 * utils.MB
	if fileSize > partSize {
		if fileSize > 1*utils.TB { // file size over 1TB
			partSize = 5 * utils.GB // file part size 5GB
		} else if fileSize > 768*utils.GB { // over 768GB
			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 parts
		} else if fileSize > 512*utils.GB { // over 512GB
			partSize = 82463373 // ≈ 78.6432MB
		} else if fileSize > 384*utils.GB { // over 384GB
			partSize = 54975582 // ≈ 52.4288MB
		} else if fileSize > 256*utils.GB { // over 256GB
			partSize = 41231687 // ≈ 39.3216MB
		} else if fileSize > 128*utils.GB { // over 128GB
			partSize = 27487791 // ≈ 26.2144MB
		}
	}
	return partSize
}

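
The tiered sizes in calPartSize keep the slice count under the API's 10,000-part cap while staying near the 20MB default. A quick check for a hypothetical 200GB file:

package main

import (
	"fmt"
	"math"
)

func main() {
	const GB = int64(1) << 30
	fileSize := 200 * GB        // falls in the over-128GB tier
	partSize := int64(27487791) // ≈ 26.2144MB, from calPartSize
	count := int(math.Ceil(float64(fileSize) / float64(partSize)))
	fmt.Println(count) // 7813 — comfortably below the 10,000-slice limit
}
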
func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
	partInfoList := makePartInfos(count)
	var resp CreateResp
	_, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id":       d.DriveId,
			"file_id":        fileId,
			"part_info_list": partInfoList,
			"upload_id":      uploadId,
		}).SetResult(&resp)
	})
	return resp.PartInfoList, err
}

func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
	uploadUrl := partInfo.UploadUrl
	if d.InternalUpload {
		uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
	}
	req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
	if err != nil {
		return err
	}
	res, err := base.HttpClient.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
		return fmt.Errorf("upload status: %d", res.StatusCode)
	}
	return nil
}

func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
	// 3. complete
	var newFile File
	_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
		req.SetBody(base.Json{
			"drive_id":  d.DriveId,
			"file_id":   fileId,
			"upload_id": uploadId,
		}).SetResult(&newFile)
	})
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}

type ProofRange struct {
	Start int64
	End   int64
}

func getProofRange(input string, size int64) (*ProofRange, error) {
	if size == 0 {
		return &ProofRange{}, nil
	}
	tmpStr := utils.GetMD5EncodeStr(input)[0:16]
	tmpInt, err := strconv.ParseUint(tmpStr, 16, 64)
	if err != nil {
		return nil, err
	}
	index := tmpInt % uint64(size)
	pr := &ProofRange{
		Start: int64(index),
		End:   int64(index) + 8,
	}
	if pr.End >= size {
		pr.End = size
	}
	return pr, nil
}

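
getProofRange derives an 8-byte window from the first 16 hex digits of md5(access_token), taken modulo the file size, so the server can demand proof that the client actually holds the file bytes. A worked sketch of the same arithmetic with a made-up token:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strconv"
)

func main() {
	token := "example-token" // hypothetical access token
	size := int64(1000)

	sum := md5.Sum([]byte(token))
	prefix := hex.EncodeToString(sum[:])[:16] // first 16 hex digits
	n, _ := strconv.ParseUint(prefix, 16, 64)
	start := int64(n % uint64(size))
	end := start + 8
	if end >= size {
		end = size
	}
	fmt.Println(start, end) // an 8-byte window somewhere inside [0, size)
}
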
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
	proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
	if err != nil {
		return "", err
	}
	length := proofRange.End - proofRange.Start
	buf := bytes.NewBuffer(make([]byte, 0, length))
	reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
	if err != nil {
		return "", err
	}
	_, err = io.CopyN(buf, reader, length)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}

func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// 1. create
	// Part Size Unit: Bytes, Default: 20MB,
	// Maximum number of slices 10,000, ≈195.3125GB
	var partSize = calPartSize(stream.GetSize())
	const dateFormat = "2006-01-02T15:04:05.000Z"
	mtimeStr := stream.ModTime().UTC().Format(dateFormat)
	ctimeStr := stream.CreateTime().UTC().Format(dateFormat)

	createData := base.Json{
		"drive_id":          d.DriveId,
		"parent_file_id":    dstDir.GetID(),
		"name":              stream.GetName(),
		"type":              "file",
		"check_name_mode":   "ignore",
		"local_modified_at": mtimeStr,
		"local_created_at":  ctimeStr,
	}
	count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
	createData["part_info_list"] = makePartInfos(count)
	// rapid upload
	rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
	if rapidUpload {
		log.Debugf("[aliyundrive_open] start cal pre_hash")
		// read 1024 bytes to calculate pre hash
		reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
		if err != nil {
			return nil, err
		}
		hash, err := utils.HashReader(utils.SHA1, reader)
		if err != nil {
			return nil, err
		}
		createData["size"] = stream.GetSize()
		createData["pre_hash"] = hash
	}
	var createResp CreateResp
	_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
		req.SetBody(createData).SetResult(&createResp)
	})
	var tmpF model.File
	if err != nil {
		if e.Code != "PreHashMatched" || !rapidUpload {
			return nil, err
		}
		log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")

		hi := stream.GetHash()
		hash := hi.GetHash(utils.SHA1)
		if len(hash) <= 0 {
			tmpF, err = stream.CacheFullInTempFile()
			if err != nil {
				return nil, err
			}
			hash, err = utils.HashFile(utils.SHA1, tmpF)
			if err != nil {
				return nil, err
			}
		}

		delete(createData, "pre_hash")
		createData["proof_version"] = "v1"
		createData["content_hash_name"] = "sha1"
		createData["content_hash"] = hash
		createData["proof_code"], err = d.calProofCode(stream)
		if err != nil {
			return nil, fmt.Errorf("cal proof code error: %s", err.Error())
		}
		_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
			req.SetBody(createData).SetResult(&createResp)
		})
		if err != nil {
			return nil, err
		}
	}

	if !createResp.RapidUpload {
		// 2. normal upload
		log.Debugf("[aliyundrive_open] normal upload")

		preTime := time.Now()
		var offset, length int64 = 0, partSize
		//var length
		for i := 0; i < len(createResp.PartInfoList); i++ {
			if utils.IsCanceled(ctx) {
				return nil, ctx.Err()
			}
			// refresh upload url if 50 minutes passed
			if time.Since(preTime) > 50*time.Minute {
				createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
				if err != nil {
					return nil, err
				}
				preTime = time.Now()
			}
			if remain := stream.GetSize() - offset; length > remain {
				length = remain
			}
			//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
			rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
			if err != nil {
				return nil, err
			}
			err = retry.Do(func() error {
				//rd.Reset()
				return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
			},
				retry.Attempts(3),
				retry.DelayType(retry.BackOffDelay),
				retry.Delay(time.Second))
			if err != nil {
				return nil, err
			}
			offset += partSize
		}
	} else {
		log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
	}

	log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
	// 3. complete
	return d.completeUpload(createResp.FileId, createResp.UploadId)
}
@@ -2,52 +2,102 @@ package aliyundrive_open

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

// do others that not defined in Driver interface

func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
	url := d.base + "/oauth/access_token"
	if d.OauthTokenURL != "" && d.ClientID == "" {
		url = d.OauthTokenURL
	}
	var resp base.TokenResp
	//var resp base.TokenResp
	var e ErrResp
	_, err := base.RestyClient.R().
		ForceContentType("application/json").
	res, err := base.RestyClient.R().
		//ForceContentType("application/json").
		SetBody(base.Json{
			"client_id":     d.ClientID,
			"client_secret": d.ClientSecret,
			"grant_type":    "refresh_token",
			"refresh_token": d.RefreshToken,
		}).
		SetResult(&resp).
		//SetResult(&resp).
		SetError(&e).
		Post(url)
	if err != nil {
		return "", "", err
	}
	log.Debugf("[ali_open] refresh token response: %s", res.String())
	if e.Code != "" {
		return "", "", fmt.Errorf("failed to refresh token: %s", e.Message)
	}
	refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
	if refresh == "" {
		return "", "", fmt.Errorf("failed to refresh token: refresh token is empty, resp: %s", res.String())
	}
	curSub, err := getSub(d.RefreshToken)
	if err != nil {
		return "", "", err
	}
	newSub, err := getSub(refresh)
	if err != nil {
		return "", "", err
	}
	if curSub != newSub {
		return "", "", errors.New("failed to refresh token: sub not match")
	}
	return refresh, access, nil
}

func getSub(token string) (string, error) {
	segments := strings.Split(token, ".")
	if len(segments) != 3 {
		return "", errors.New("not a jwt token because of invalid segments")
	}
	bs, err := base64.RawStdEncoding.DecodeString(segments[1])
	if err != nil {
		return "", errors.New("failed to decode jwt token")
	}
	return utils.Json.Get(bs, "sub").ToString(), nil
}

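
getSub splits the token on dots and base64-decodes the middle segment to read the `sub` claim, which lets _refreshToken reject a rotation that lands on a different account. A self-contained toy of that decode step, using a deliberately fabricated token:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Build a fake JWT-shaped token: header.payload.signature.
	// Only the payload matters for extracting "sub".
	payload := base64.RawStdEncoding.EncodeToString([]byte(`{"sub":"user-1"}`))
	token := "eyJh." + payload + ".sig"

	segments := strings.Split(token, ".")
	bs, _ := base64.RawStdEncoding.DecodeString(segments[1])
	fmt.Println(string(bs)) // {"sub":"user-1"}
}
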
func (d *AliyundriveOpen) refreshToken() error {
	refresh, access, err := d._refreshToken()
	for i := 0; i < 3; i++ {
		if err == nil {
			break
		} else {
			log.Errorf("[ali_open] failed to refresh token: %s", err)
		}
		refresh, access, err = d._refreshToken()
	}
	if err != nil {
		return err
	}
	if e.Code != "" {
		return fmt.Errorf("failed to refresh token: %s", e.Message)
	}
	if resp.RefreshToken == "" {
		return errors.New("failed to refresh token: refresh token is empty")
	}
	d.RefreshToken, d.AccessToken = resp.RefreshToken, resp.AccessToken
	log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
	d.RefreshToken, d.AccessToken = refresh, access
	op.MustSaveDriverStorage(d)
	return nil
}

func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
|
||||
b, err, _ := d.requestReturnErrResp(uri, method, callback, retry...)
|
||||
return b, err
|
||||
}
|
||||
|
||||
func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base.ReqCallback, retry ...bool) ([]byte, error, *ErrResp) {
|
||||
req := base.RestyClient.R()
|
||||
// TODO check whether access_token is expired
|
||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
||||
@ -61,20 +111,23 @@ func (d *AliyundriveOpen) request(uri, method string, callback base.ReqCallback,
|
||||
req.SetError(&e)
|
||||
res, err := req.Execute(method, d.base+uri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if res != nil {
|
||||
log.Errorf("[aliyundrive_open] request error: %s", res.String())
|
||||
}
|
||||
return nil, err, nil
|
||||
}
|
||||
isRetry := len(retry) > 0 && retry[0]
|
||||
if e.Code != "" {
|
||||
if !isRetry && (utils.SliceContains([]string{"AccessTokenInvalid", "AccessTokenExpired", "I400JD"}, e.Code) || d.AccessToken == "") {
|
||||
err = d.refreshToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, err, nil
|
||||
}
|
||||
return d.request(uri, method, callback, true)
|
||||
return d.requestReturnErrResp(uri, method, callback, true)
|
||||
}
|
||||
return nil, fmt.Errorf("%s:%s", e.Code, e.Message)
|
||||
return nil, fmt.Errorf("%s:%s", e.Code, e.Message), &e
|
||||
}
|
||||
return res.Body(), nil
|
||||
return res.Body(), nil, nil
|
||||
}
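
requestReturnErrResp exists so callers can branch on the provider's typed error payload instead of string-matching the wrapped Go error. A standalone sketch of that three-value pattern; ErrResp is mirrored from the diff, while call and the error code are illustrative stand-ins:

package main

import "fmt"

// ErrResp mirrors the driver's decoded API error payload.
type ErrResp struct {
    Code    string
    Message string
}

// call stands in for requestReturnErrResp: it returns the body, a
// wrapped Go error, and the typed API error when the server sent one.
func call(fail bool) ([]byte, error, *ErrResp) {
    if fail {
        e := &ErrResp{Code: "AccessTokenExpired", Message: "token expired"}
        return nil, fmt.Errorf("%s:%s", e.Code, e.Message), e
    }
    return []byte(`{"ok":true}`), nil, nil
}

func main() {
    _, err, errResp := call(true)
    switch {
    case err == nil:
        fmt.Println("success")
    case errResp != nil && errResp.Code == "AccessTokenExpired":
        fmt.Println("typed error: refresh the token and retry")
    default:
        fmt.Println("unrecoverable:", err)
    }
}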

func (d *AliyundriveOpen) list(ctx context.Context, data base.Json) (*Files, error) {
@@ -118,58 +171,8 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
    return res, nil
}

func makePartInfos(size int) []base.Json {
    partInfoList := make([]base.Json, size)
    for i := 0; i < size; i++ {
        partInfoList[i] = base.Json{"part_number": 1 + i}
    }
    return partInfoList
}

func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]PartInfo, error) {
    partInfoList := makePartInfos(count)
    var resp CreateResp
    _, err := d.request("/adrive/v1.0/openFile/getUploadUrl", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id":       d.DriveId,
            "file_id":        fileId,
            "part_info_list": partInfoList,
            "upload_id":      uploadId,
        }).SetResult(&resp)
    })
    return resp.PartInfoList, err
}

func (d *AliyundriveOpen) uploadPart(ctx context.Context, i, count int, reader *utils.MultiReadable, resp *CreateResp, retry bool) error {
    partInfo := resp.PartInfoList[i-1]
    uploadUrl := partInfo.UploadUrl
    if d.InternalUpload {
        uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
    }
    req, err := http.NewRequest("PUT", uploadUrl, reader)
    if err != nil {
        return err
    }
    req = req.WithContext(ctx)
    res, err := base.HttpClient.Do(req)
    if err != nil {
        if retry {
            reader.Reset()
            return d.uploadPart(ctx, i, count, reader, resp, false)
        }
        return err
    }
    res.Body.Close()
    if retry && res.StatusCode == http.StatusForbidden {
        resp.PartInfoList, err = d.getUploadUrl(count, resp.FileId, resp.UploadId)
        if err != nil {
            return err
        }
        reader.Reset()
        return d.uploadPart(ctx, i, count, reader, resp, false)
    }
    if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
        return fmt.Errorf("upload status: %d", res.StatusCode)
    }
    return nil
func getNowTime() (time.Time, string) {
    nowTime := time.Now()
    nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
    return nowTime, nowTimeStr
}
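
getNowTime formats the timestamp with Go's reference-time layout: the layout string is itself the fixed moment Mon Jan 2 15:04:05 MST 2006, and ".000" requests millisecond precision. A tiny standalone illustration using only the standard library:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.Date(2023, 8, 9, 10, 20, 30, 450000000, time.UTC)
    // Layouts are written against the reference time
    // "Mon Jan 2 15:04:05 MST 2006"; ".000" means milliseconds,
    // and the trailing "Z" in this layout is a literal character.
    fmt.Println(t.Format("2006-01-02T15:04:05.000Z")) // 2023-08-09T10:20:30.450Z
}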

@@ -6,6 +6,7 @@ import (
    "net/http"
    "time"

    "github.com/Xhofe/rateg"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
@@ -52,8 +53,14 @@ func (d *AliyundriveShare) Init(ctx context.Context) error {
            log.Errorf("%+v", err)
        }
    })
    d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
    d.limitLink = utils.LimitRateCtx(d.link, time.Second)
    d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
        Limit:  4,
        Bucket: 1,
    })
    d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
        Limit:  1,
        Bucket: 1,
    })
    return nil
}

@@ -44,6 +44,7 @@ func fileToObj(f File) *model.ObjThumb {
            Name:     f.Name,
            Size:     f.Size,
            Modified: f.UpdatedAt,
            Ctime:    f.CreatedAt,
            IsFolder: f.Type == "folder",
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},

@@ -3,6 +3,8 @@ package drivers
import (
    _ "github.com/alist-org/alist/v3/drivers/115"
    _ "github.com/alist-org/alist/v3/drivers/123"
    _ "github.com/alist-org/alist/v3/drivers/123_link"
    _ "github.com/alist-org/alist/v3/drivers/123_share"
    _ "github.com/alist-org/alist/v3/drivers/139"
    _ "github.com/alist-org/alist/v3/drivers/189"
    _ "github.com/alist-org/alist/v3/drivers/189pc"
@@ -16,6 +18,7 @@ import (
    _ "github.com/alist-org/alist/v3/drivers/baidu_photo"
    _ "github.com/alist-org/alist/v3/drivers/baidu_share"
    _ "github.com/alist-org/alist/v3/drivers/cloudreve"
    _ "github.com/alist-org/alist/v3/drivers/crypt"
    _ "github.com/alist-org/alist/v3/drivers/dropbox"
    _ "github.com/alist-org/alist/v3/drivers/ftp"
    _ "github.com/alist-org/alist/v3/drivers/google_drive"
@@ -43,6 +46,7 @@ import (
    _ "github.com/alist-org/alist/v3/drivers/uss"
    _ "github.com/alist-org/alist/v3/drivers/virtual"
    _ "github.com/alist-org/alist/v3/drivers/webdav"
    _ "github.com/alist-org/alist/v3/drivers/weiyun"
    _ "github.com/alist-org/alist/v3/drivers/wopan"
    _ "github.com/alist-org/alist/v3/drivers/yandex_disk"
)

@@ -1,30 +1,37 @@
package baidu_netdisk

import (
    "bytes"
    "context"
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "math"
    "os"
    "net/url"
    stdpath "path"
    "strconv"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    log "github.com/sirupsen/logrus"
)

type BaiduNetdisk struct {
    model.Storage
    Addition

    uploadThread int
}

const DefaultSliceSize int64 = 4 * utils.MB

func (d *BaiduNetdisk) Config() driver.Config {
    return config
}
@@ -34,6 +41,15 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}

func (d *BaiduNetdisk) Init(ctx context.Context) error {
    d.uploadThread, _ = strconv.Atoi(d.UploadThread)
    if d.uploadThread < 1 || d.uploadThread > 32 {
        d.uploadThread, d.UploadThread = 3, "3"
    }

    if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
        d.UploadAPI = "https://d.pcs.baidu.com"
    }

    res, err := d.get("/xpan/nas", map[string]string{
        "method": "uinfo",
    }, nil)
@@ -62,12 +78,16 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
    return d.linkOfficial(file, args)
}

func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
    return err
func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    var newDir File
    _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
    if err != nil {
        return nil, err
    }
    return fileToObj(newDir), nil
}

func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    data := []base.Json{
        {
            "path": srcObj.GetPath(),
@@ -76,10 +96,18 @@ func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error
        },
    }
    _, err := d.manage("move", data)
    return err
    if err != nil {
        return nil, err
    }
    if srcObj, ok := srcObj.(*model.ObjThumb); ok {
        srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
        srcObj.Modified = time.Now()
        return srcObj, nil
    }
    return nil, nil
}

func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    data := []base.Json{
        {
            "path": srcObj.GetPath(),
@@ -87,7 +115,17 @@ func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName str
        },
    }
    _, err := d.manage("rename", data)
    return err
    if err != nil {
        return nil, err
    }

    if srcObj, ok := srcObj.(*model.ObjThumb); ok {
        srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
        srcObj.Name = newName
        srcObj.Modified = time.Now()
        return srcObj, nil
    }
    return nil, nil
}

func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -108,126 +146,173 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
    return err
}

func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
    if err != nil {
        return err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()
    var Default int64 = 4 * 1024 * 1024
    defaultByteData := make([]byte, Default)
    count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
    var SliceSize int64 = 256 * 1024
    // cal md5
    h1 := md5.New()
    h2 := md5.New()
    block_list := make([]string, 0)
    content_md5 := ""
    slice_md5 := ""
    left := stream.GetSize()
    for i := 0; i < count; i++ {
        byteSize := Default
        var byteData []byte
        if left < Default {
            byteSize = left
            byteData = make([]byte, byteSize)
        } else {
            byteData = defaultByteData
        }
        left -= byteSize
        _, err = io.ReadFull(tempFile, byteData)
        if err != nil {
            return err
        }
        h1.Write(byteData)
        h2.Write(byteData)
        block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
        h2.Reset()
    }
    content_md5 = hex.EncodeToString(h1.Sum(nil))
    _, err = tempFile.Seek(0, io.SeekStart)
    if err != nil {
        return err
    }
    if stream.GetSize() <= SliceSize {
        slice_md5 = content_md5
    } else {
        sliceData := make([]byte, SliceSize)
        _, err = io.ReadFull(tempFile, sliceData)
        if err != nil {
            return err
        }
        h2.Write(sliceData)
        slice_md5 = hex.EncodeToString(h2.Sum(nil))
        _, err = tempFile.Seek(0, io.SeekStart)
        if err != nil {
            return err
        }
    }
func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
    contentMd5 := stream.GetHash().GetHash(utils.MD5)
    if len(contentMd5) < utils.MD5.Width {
        return nil, errors.New("invalid hash")
    }

    streamSize := stream.GetSize()
    rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
    path := encodeURIComponent(rawPath)
    block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ","))
    data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s&content-md5=%s&slice-md5=%s",
        path, stream.GetSize(),
        block_list_str,
        content_md5, slice_md5)
    mtime := stream.ModTime().Unix()
    ctime := stream.CreateTime().Unix()
    blockList, _ := utils.Json.MarshalToString([]string{contentMd5})

    data := fmt.Sprintf("path=%s&size=%d&isdir=0&rtype=3&block_list=%s&local_mtime=%d&local_ctime=%d",
        path, streamSize, blockList, mtime, ctime)
    params := map[string]string{
        "method": "precreate",
        "method": "create",
    }
    var precreateResp PrecreateResp
    _, err = d.post("/xpan/file", params, data, &precreateResp)
    log.Debugf("[baidu_netdisk] precreate data: %s", data)
    var newFile File
    _, err := d.post("/xpan/file", params, data, &newFile)
    if err != nil {
        return nil, err
    }
    return fileToObj(newFile), nil
}

func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // rapid upload
    if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
        return newObj, nil
    }

    tempFile, err := stream.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }

    streamSize := stream.GetSize()
    count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1))
    lastBlockSize := streamSize % DefaultSliceSize
    if streamSize > 0 && lastBlockSize == 0 {
        lastBlockSize = DefaultSliceSize
    }

    //cal md5 for first 256k data
    const SliceSize int64 = 256 * 1024
    // cal md5
    blockList := make([]string, 0, count)
    byteSize := DefaultSliceSize
    fileMd5H := md5.New()
    sliceMd5H := md5.New()
    sliceMd5H2 := md5.New()
    slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)

    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }
        if i == count {
            byteSize = lastBlockSize
        }
        _, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
        if err != nil && err != io.EOF {
            return nil, err
        }
        blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
        sliceMd5H.Reset()
    }
    contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
    sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
    blockListStr, _ := utils.Json.MarshalToString(blockList)

    rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
    path := encodeURIComponent(rawPath)
    mtime := stream.ModTime().Unix()
    ctime := stream.CreateTime().Unix()

    // step 1: precreate
    // try to fetch previously saved progress
    precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
    if !ok {
        data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s&local_mtime=%d&local_ctime=%d",
            path, streamSize, blockListStr, contentMd5, sliceMd5, mtime, ctime)
        params := map[string]string{
            "method": "precreate",
        }
        log.Debugf("[baidu_netdisk] precreate data: %s", data)
        _, err = d.post("/xpan/file", params, data, &precreateResp)
        if err != nil {
            return nil, err
        }
        log.Debugf("%+v", precreateResp)
        if precreateResp.ReturnType == 2 {
            //rapid upload, since got md5 match from baidu server
            if err != nil {
                return nil, err
            }
            return fileToObj(precreateResp.File), nil
        }
    }
    // step 2: upload the slices
    threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))
    for i, partseq := range precreateResp.BlockList {
        if utils.IsCanceled(upCtx) {
            break
        }

        i, partseq, offset, byteSize := i, partseq, int64(partseq)*DefaultSliceSize, DefaultSliceSize
        if partseq+1 == count {
            byteSize = lastBlockSize
        }
        threadG.Go(func(ctx context.Context) error {
            params := map[string]string{
                "method":       "upload",
                "access_token": d.AccessToken,
                "type":         "tmpfile",
                "path":         path,
                "uploadid":     precreateResp.Uploadid,
                "partseq":      strconv.Itoa(partseq),
            }
            err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
            if err != nil {
                return err
            }
            up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
            precreateResp.BlockList[i] = -1
            return nil
        })
    }
    if err = threadG.Wait(); err != nil {
        // if the user cancelled, save the upload progress
        if errors.Is(err, context.Canceled) {
            precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
            base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
        }
        return nil, err
    }

    // step 3: create the file
    var newFile File
    _, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
    if err != nil {
        return nil, err
    }
    return fileToObj(newFile), nil
}
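
The hashing loop above computes three digests in a single pass over the cached file: the whole-file MD5, one MD5 per 4 MiB block, and the MD5 of the first 256 KiB fed through a size-limited writer. A self-contained sketch of that multi-writer pattern using only the standard library; limitWriter is a hypothetical stand-in for the project's utils.LimitWriter:

package main

import (
    "bytes"
    "crypto/md5"
    "encoding/hex"
    "fmt"
    "io"
)

// limitWriter forwards at most n bytes to w and silently discards the
// rest, while always reporting full writes so io.MultiWriter continues.
type limitWriter struct {
    w io.Writer
    n int64
}

func (l *limitWriter) Write(p []byte) (int, error) {
    if l.n > 0 {
        k := int64(len(p))
        if k > l.n {
            k = l.n
        }
        if _, err := l.w.Write(p[:k]); err != nil {
            return 0, err
        }
        l.n -= k
    }
    return len(p), nil
}

func main() {
    data := bytes.Repeat([]byte("x"), 10<<20) // 10 MiB of sample data
    src := bytes.NewReader(data)
    const sliceSize = 4 << 20  // per-block size
    const headSize = 256 << 10 // first 256 KiB

    fileH, blockH, headH := md5.New(), md5.New(), md5.New()
    head := &limitWriter{w: headH, n: headSize}

    var blocks []string
    for {
        n, err := io.CopyN(io.MultiWriter(fileH, blockH, head), src, sliceSize)
        if n > 0 {
            blocks = append(blocks, hex.EncodeToString(blockH.Sum(nil)))
            blockH.Reset()
        }
        if err == io.EOF {
            break
        } else if err != nil {
            panic(err)
        }
    }
    fmt.Println("file md5:", hex.EncodeToString(fileH.Sum(nil)))
    fmt.Println("blocks:", len(blocks), "head md5:", hex.EncodeToString(headH.Sum(nil)))
}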

func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
    res, err := base.RestyClient.R().
        SetContext(ctx).
        SetQueryParams(params).
        SetFileReader("file", fileName, file).
        Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
    if err != nil {
        return err
    }
    log.Debugf("%+v", precreateResp)
    if precreateResp.ReturnType == 2 {
        return nil
    log.Debugln(res.RawResponse.Status + res.String())
    errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
    errNo := utils.Json.Get(res.Body(), "errno").ToInt()
    if errCode != 0 || errNo != 0 {
        return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
    }
    params = map[string]string{
        "method":       "upload",
        "access_token": d.AccessToken,
        "type":         "tmpfile",
        "path":         path,
        "uploadid":     precreateResp.Uploadid,
    }
    left = stream.GetSize()
    for i, partseq := range precreateResp.BlockList {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        byteSize := Default
        var byteData []byte
        if left < Default {
            byteSize = left
            byteData = make([]byte, byteSize)
        } else {
            byteData = defaultByteData
        }
        left -= byteSize
        _, err = io.ReadFull(tempFile, byteData)
        if err != nil {
            return err
        }
        u := "https://d.pcs.baidu.com/rest/2.0/pcs/superfile2"
        params["partseq"] = strconv.Itoa(partseq)
        res, err := base.RestyClient.R().
            SetContext(ctx).
            SetQueryParams(params).
            SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
            Post(u)
        if err != nil {
            return err
        }
        log.Debugln(res.String())
        if len(precreateResp.BlockList) > 0 {
            up(i * 100 / len(precreateResp.BlockList))
        }
    }
    _, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)
    return err
    return nil
}

var _ driver.Driver = (*BaiduNetdisk)(nil)
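
The closing `var _ driver.Driver = (*BaiduNetdisk)(nil)` is Go's zero-cost, compile-time assertion that the type still satisfies the interface, which matters in this commit because MakeDir, Move, Rename and Put all changed signature. A minimal illustration of the idiom with hypothetical names:

package main

import "fmt"

type Greeter interface {
    Greet() string
}

type English struct{}

func (English) Greet() string { return "hello" }

// Fails to compile the moment English stops satisfying Greeter,
// e.g. if Greet's signature drifts during a refactor.
var _ Greeter = (*English)(nil)

func main() { fmt.Println(English{}.Greet()) }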

@@ -15,6 +15,8 @@ type Addition struct {
    ClientSecret  string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
    CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
    AccessToken   string
    UploadThread  string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
    UploadAPI     string `json:"upload_api" default:"https://d.pcs.baidu.com"`
}

var config = driver.Config{

@@ -1,6 +1,8 @@
package baidu_netdisk

import (
    "github.com/alist-org/alist/v3/pkg/utils"
    "path"
    "strconv"
    "time"

@@ -17,10 +19,8 @@ type File struct {
    //OwnerType int `json:"owner_type"`
    //Category int `json:"category"`
    //RealCategory string `json:"real_category"`
    FsId        int64 `json:"fs_id"`
    ServerMtime int64 `json:"server_mtime"`
    FsId int64 `json:"fs_id"`
    //OperId int `json:"oper_id"`
    //ServerCtime int `json:"server_ctime"`
    Thumbs struct {
        //Icon string `json:"icon"`
        Url3 string `json:"url3"`
@@ -28,29 +28,50 @@ type File struct {
        //Url1 string `json:"url1"`
    } `json:"thumbs"`
    //Wpfile int `json:"wpfile"`
    //LocalMtime int `json:"local_mtime"`

    Size int64 `json:"size"`
    //ExtentTinyint7 int `json:"extent_tinyint7"`
    Path string `json:"path"`
    //Share int `json:"share"`
    //ServerAtime int `json:"server_atime"`
    //Pl int `json:"pl"`
    //LocalCtime int `json:"local_ctime"`
    ServerFilename string `json:"server_filename"`
    //Md5 string `json:"md5"`
    Md5 string `json:"md5"`
    //OwnerId int `json:"owner_id"`
    //Unlist int `json:"unlist"`
    Isdir int `json:"isdir"`

    // list resp
    ServerCtime int64 `json:"server_ctime"`
    ServerMtime int64 `json:"server_mtime"`
    LocalMtime  int64 `json:"local_mtime"`
    LocalCtime  int64 `json:"local_ctime"`
    //ServerAtime int64 `json:"server_atime"`

    // only create and precreate resp
    Ctime int64 `json:"ctime"`
    Mtime int64 `json:"mtime"`
}

func fileToObj(f File) *model.ObjThumb {
    if f.ServerFilename == "" {
        f.ServerFilename = path.Base(f.Path)
    }
    if f.LocalCtime == 0 {
        f.LocalCtime = f.Ctime
    }
    if f.LocalMtime == 0 {
        f.LocalMtime = f.Mtime
    }
    return &model.ObjThumb{
        Object: model.Object{
            ID:       strconv.FormatInt(f.FsId, 10),
            Path:     f.Path,
            Name:     f.ServerFilename,
            Size:     f.Size,
            Modified: time.Unix(f.ServerMtime, 0),
            Modified: time.Unix(f.LocalMtime, 0),
            Ctime:    time.Unix(f.LocalCtime, 0),
            IsFolder: f.Isdir == 1,
            HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
    }
@@ -154,10 +175,15 @@ type DownloadResp2 struct {
}

type PrecreateResp struct {
    Path       string `json:"path"`
    Uploadid   string `json:"uploadid"`
    ReturnType int    `json:"return_type"`
    BlockList  []int  `json:"block_list"`
    Errno      int    `json:"errno"`
    RequestId  int64  `json:"request_id"`
    Errno      int   `json:"errno"`
    RequestId  int64 `json:"request_id"`
    ReturnType int   `json:"return_type"`

    // return_type=1
    Path      string `json:"path"`
    Uploadid  string `json:"uploadid"`
    BlockList []int  `json:"block_list"`

    // return_type=2
    File File `json:"info"`
}
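
PrecreateResp is effectively a tagged union: return_type selects whether the uploadid/block_list fields (parts still to upload) or the embedded file info (rapid-upload hit) are meaningful. A hedged sketch of decoding and branching on it; the struct is mirrored from the diff, while the JSON payload is invented for illustration:

package main

import (
    "encoding/json"
    "fmt"
)

type PrecreateResp struct {
    Errno      int    `json:"errno"`
    ReturnType int    `json:"return_type"`
    Uploadid   string `json:"uploadid"`
    BlockList  []int  `json:"block_list"`
}

func main() {
    // return_type=1: the server wants these part numbers uploaded.
    payload := []byte(`{"errno":0,"return_type":1,"uploadid":"N1-abc","block_list":[0,1,2]}`)
    var resp PrecreateResp
    if err := json.Unmarshal(payload, &resp); err != nil {
        panic(err)
    }
    switch resp.ReturnType {
    case 1:
        fmt.Println("upload parts", resp.BlockList, "with uploadid", resp.Uploadid)
    case 2:
        fmt.Println("rapid upload: the server already has this content")
    }
}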

@@ -1,25 +1,29 @@
package baidu_netdisk

import (
    "errors"
    "fmt"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
)

// do others that are not defined in the Driver interface

func (d *BaiduNetdisk) refreshToken() error {
    err := d._refreshToken()
    if err != nil && err == errs.EmptyToken {
    if err != nil && errors.Is(err, errs.EmptyToken) {
        err = d._refreshToken()
    }
    return err
@@ -50,30 +54,40 @@ func (d *BaiduNetdisk) _refreshToken() error {
}

func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
    req := base.RestyClient.R()
    req.SetQueryParam("access_token", d.AccessToken)
    if callback != nil {
        callback(req)
    }
    if resp != nil {
        req.SetResult(resp)
    }
    res, err := req.Execute(method, furl)
    if err != nil {
        return nil, err
    }
    errno := utils.Json.Get(res.Body(), "errno").ToInt()
    if errno != 0 {
        if errno == -6 {
            err = d.refreshToken()
            if err != nil {
                return nil, err
            }
            return d.request(furl, method, callback, resp)
    var result []byte
    err := retry.Do(func() error {
        req := base.RestyClient.R()
        req.SetQueryParam("access_token", d.AccessToken)
        if callback != nil {
            callback(req)
        }
        return nil, fmt.Errorf("errno: %d, refer to https://pan.baidu.com/union/doc/", errno)
    }
    return res.Body(), nil
        if resp != nil {
            req.SetResult(resp)
        }
        res, err := req.Execute(method, furl)
        if err != nil {
            return err
        }
        log.Debugf("[baidu_netdisk] req: %s, resp: %s", furl, res.String())
        errno := utils.Json.Get(res.Body(), "errno").ToInt()
        if errno != 0 {
            if utils.SliceContains([]int{111, -6}, errno) {
                log.Info("refreshing baidu_netdisk token.")
                err2 := d.refreshToken()
                if err2 != nil {
                    return retry.Unrecoverable(err2)
                }
            }
            return fmt.Errorf("req: [%s], errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
        }
        result = res.Body()
        return nil
    },
        retry.LastErrorOnly(true),
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))
    return result, err
}
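
The rewritten request wraps the whole round-trip in avast/retry-go: transient failures are retried up to three times with exponential backoff, errno 111/-6 triggers a token refresh before the next attempt, and a failed refresh is wrapped in retry.Unrecoverable so the loop stops early. A minimal standalone sketch of that combination:

package main

import (
    "fmt"
    "time"

    "github.com/avast/retry-go"
)

func main() {
    attempts := 0
    err := retry.Do(
        func() error {
            attempts++
            if attempts < 3 {
                // transient: returned as-is, so retry.Do tries again
                return fmt.Errorf("transient failure #%d", attempts)
            }
            // a fatal condition would instead be returned as
            // retry.Unrecoverable(err) to stop retrying immediately
            return nil
        },
        retry.LastErrorOnly(true),
        retry.Attempts(3),
        retry.Delay(100*time.Millisecond),
        retry.DelayType(retry.BackOffDelay),
    )
    fmt.Println("attempts:", attempts, "err:", err) // attempts: 3 err: <nil>
}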

func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) {
@@ -170,28 +184,31 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
    }, nil
}

func (d *BaiduNetdisk) manage(opera string, filelist interface{}) ([]byte, error) {
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
    params := map[string]string{
        "method": "filemanager",
        "opera":  opera,
    }
    marshal, err := utils.Json.Marshal(filelist)
    if err != nil {
        return nil, err
    }
    data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
    marshal, _ := utils.Json.MarshalToString(filelist)
    data := fmt.Sprintf("async=0&filelist=%s&ondup=fail", marshal)
    return d.post("/xpan/file", params, data, nil)
}

func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
    params := map[string]string{
        "method": "create",
    }
    data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
    data := ""
    if mtime == 0 || ctime == 0 {
        data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
    } else {
        data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3&local_mtime=%d&local_ctime=%d", encodeURIComponent(path), size, isdir, mtime, ctime)
    }

    if uploadid != "" {
        data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
    }
    return d.post("/xpan/file", params, data, nil)
    return d.post("/xpan/file", params, data, resp)
}

func encodeURIComponent(str string) string {
@@ -4,18 +4,22 @@ import (
    "context"
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "math"
    "os"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
)

@@ -26,6 +30,8 @@ type BaiduPhoto struct {
    AccessToken string
    Uk          int64
    root        model.Obj

    uploadThread int
}

func (d *BaiduPhoto) Config() driver.Config {
@@ -37,6 +43,11 @@ func (d *BaiduPhoto) GetAddition() driver.Additional {
}

func (d *BaiduPhoto) Init(ctx context.Context) error {
    d.uploadThread, _ = strconv.Atoi(d.UploadThread)
    if d.uploadThread < 1 || d.uploadThread > 32 {
        d.uploadThread, d.UploadThread = 3, "3"
    }

    if err := d.refreshToken(); err != nil {
        return err
    }
@@ -126,7 +137,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
    case *File:
        return d.linkFile(ctx, file, args)
    case *AlbumFile:
        return d.linkAlbum(ctx, file, args)
        f, err := d.CopyAlbumFile(ctx, file)
        if err != nil {
            return nil, err
        }
        return d.linkFile(ctx, f, args)
        // linking the album file directly may fail to return a link
        //return d.linkAlbum(ctx, file, args)
    }
    return nil, errs.NotFile
}
@@ -169,9 +186,9 @@ func (d *BaiduPhoto) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.
}

func (d *BaiduPhoto) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    // only moving between albums is supported
    if file, ok := srcObj.(*AlbumFile); ok {
        if _, ok := dstDir.(*Album); ok {
        switch dstDir.(type) {
        case *Album, *Root: // albumfile -> root -> album or albumfile -> root
            newObj, err := d.Copy(ctx, srcObj, dstDir)
            if err != nil {
                return nil, err
@@ -205,45 +222,57 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // zero-size files are not supported
    if stream.GetSize() == 0 {
        return nil, fmt.Errorf("file size cannot be zero")
    }

    // TODO:
    // no rapid-upload method has been found yet

    // the full-file md5 is needed, so the stream must support io.Seek
    tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
    tempFile, err := stream.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    const DEFAULT int64 = 1 << 22
    const SliceSize int64 = 1 << 18

    // compute the required data
    const DEFAULT = 1 << 22
    const SliceSize = 1 << 18
    count := int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
    streamSize := stream.GetSize()
    count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
    lastBlockSize := streamSize % DEFAULT
    if lastBlockSize == 0 {
        lastBlockSize = DEFAULT
    }

    // step 1: compute the MD5s
    sliceMD5List := make([]string, 0, count)
    fileMd5 := md5.New()
    sliceMd5 := md5.New()
    sliceMd52 := md5.New()
    slicemd52Write := utils.LimitWriter(sliceMd52, SliceSize)
    byteSize := int64(DEFAULT)
    fileMd5H := md5.New()
    sliceMd5H := md5.New()
    sliceMd5H2 := md5.New()
    slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }

        _, err := io.CopyN(io.MultiWriter(fileMd5, sliceMd5, slicemd52Write), tempFile, DEFAULT)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
        if i == count {
            byteSize = lastBlockSize
        }
        _, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
        if err != nil && err != io.EOF {
            return nil, err
        }
        sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5.Sum(nil)))
        sliceMd5.Reset()
        sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
        sliceMd5H.Reset()
    }
    if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
        return nil, err
    }
    content_md5 := hex.EncodeToString(fileMd5.Sum(nil))
    slice_md5 := hex.EncodeToString(sliceMd52.Sum(nil))
    contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
    sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
    blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)

    // start the upload
    // step 2: precreate
    params := map[string]string{
        "autoinit": "1",
        "isdir":    "0",
@@ -251,46 +280,69 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
        "ctype": "11",
        "path":  fmt.Sprintf("/%s", stream.GetName()),
        "size":  fmt.Sprint(stream.GetSize()),
        "slice-md5":   slice_md5,
        "content-md5": content_md5,
        "block_list":  MustString(utils.Json.MarshalToString(sliceMD5List)),
        "slice-md5":   sliceMd5,
        "content-md5": contentMd5,
        "block_list":  blockListStr,
    }

    // precreate
    var precreateResp PrecreateResp
    _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
        r.SetContext(ctx)
        r.SetFormData(params)
    }, &precreateResp)
    if err != nil {
        return nil, err
    // try to fetch previously saved progress
    precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
    if !ok {
        _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
            r.SetContext(ctx)
            r.SetFormData(params)
        }, &precreateResp)
        if err != nil {
            return nil, err
        }
    }

    switch precreateResp.ReturnType {
    case 1: // upload the file
        uploadParams := map[string]string{
            "method":   "upload",
            "path":     params["path"],
            "uploadid": precreateResp.UploadID,
        }
    case 1: // step 3: upload the file slices
        threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
            retry.Attempts(3),
            retry.Delay(time.Second),
            retry.DelayType(retry.BackOffDelay))
        for i, partseq := range precreateResp.BlockList {
            if utils.IsCanceled(upCtx) {
                break
            }

        for i := 0; i < count; i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
            if partseq+1 == count {
                byteSize = lastBlockSize
            }
            uploadParams["partseq"] = fmt.Sprint(i)
            _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
                r.SetContext(ctx)
                r.SetQueryParams(uploadParams)
                r.SetFileReader("file", stream.GetName(), io.LimitReader(tempFile, DEFAULT))
            }, nil)
            if err != nil {
                return nil, err

            threadG.Go(func(ctx context.Context) error {
                uploadParams := map[string]string{
                    "method":   "upload",
                    "path":     params["path"],
                    "partseq":  fmt.Sprint(partseq),
                    "uploadid": precreateResp.UploadID,
                }

                _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
                    r.SetContext(ctx)
                    r.SetQueryParams(uploadParams)
                    r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
                }, nil)
                if err != nil {
                    return err
                }
                up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
                precreateResp.BlockList[i] = -1
                return nil
            })
        }
        if err = threadG.Wait(); err != nil {
            if errors.Is(err, context.Canceled) {
                precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
                base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
            }
            up(i * 100 / count)
            return nil, err
        }
        fallthrough
    case 2: // create the file
    case 2: // step 4: create the file
        params["uploadid"] = precreateResp.UploadID
        _, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
            r.SetContext(ctx)
@@ -300,7 +352,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
            return nil, err
        }
        fallthrough
    case 3: // add to the album
    case 3: // step 5: add to the album
        rootfile := precreateResp.Data.toFile()
        if album, ok := dstDir.(*Album); ok {
            return d.AddAlbumFile(ctx, album, rootfile)
@@ -61,11 +61,18 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile {

func renameAlbum(album *Album, newName string) *Album {
    return &Album{
        AlbumID:    album.AlbumID,
        Tid:        album.Tid,
        JoinTime:   album.JoinTime,
        CreateTime: album.CreateTime,
        Title:      newName,
        Mtime:      time.Now().Unix(),
        AlbumID:      album.AlbumID,
        Tid:          album.Tid,
        JoinTime:     album.JoinTime,
        CreationTime: album.CreationTime,
        Title:        newName,
        Mtime:        time.Now().Unix(),
    }
}

func BoolToIntStr(b bool) string {
    if b {
        return "1"
    }
    return "0"
}

@@ -10,8 +10,10 @@ type Addition struct {
    ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
    AlbumID  string `json:"album_id"`
    //AlbumPassword string `json:"album_password"`
    DeleteOrigin bool   `json:"delete_origin"`
    ClientID     string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
    ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
    UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
}

var config = driver.Config{
@@ -4,6 +4,8 @@ import (
    "fmt"
    "time"

    "github.com/alist-org/alist/v3/pkg/utils"

    "github.com/alist-org/alist/v3/internal/model"
)

@@ -51,22 +53,17 @@ type (
    Ctime    int64    `json:"ctime"` // creation time (seconds)
    Mtime    int64    `json:"mtime"` // modification time (seconds)
    Thumburl []string `json:"thumburl"`

    parseTime *time.Time
    Md5 string `json:"md5"`
}
)

func (c *File) GetSize() int64  { return c.Size }
func (c *File) GetName() string { return getFileName(c.Path) }
func (c *File) ModTime() time.Time {
    if c.parseTime == nil {
        c.parseTime = toTime(c.Mtime)
    }
    return *c.parseTime
}
func (c *File) IsDir() bool     { return false }
func (c *File) GetID() string   { return "" }
func (c *File) GetPath() string { return "" }
func (c *File) GetSize() int64        { return c.Size }
func (c *File) GetName() string       { return getFileName(c.Path) }
func (c *File) CreateTime() time.Time { return time.Unix(c.Ctime, 0) }
func (c *File) ModTime() time.Time    { return time.Unix(c.Mtime, 0) }
func (c *File) IsDir() bool           { return false }
func (c *File) GetID() string         { return "" }
func (c *File) GetPath() string       { return "" }
func (c *File) Thumb() string {
    if len(c.Thumburl) > 0 {
        return c.Thumburl[0]
@@ -74,6 +71,10 @@ func (c *File) Thumb() string {
    return ""
}

func (c *File) GetHash() utils.HashInfo {
    return utils.NewHashInfo(utils.MD5, c.Md5)
}

/* album section */
type (
    AlbumListResp struct {
@@ -84,12 +85,12 @@ type (
    }

    Album struct {
        AlbumID    string `json:"album_id"`
        Tid        int64  `json:"tid"`
        Title      string `json:"title"`
        JoinTime   int64  `json:"join_time"`
        CreateTime int64  `json:"create_time"`
        Mtime      int64  `json:"mtime"`
        AlbumID      string `json:"album_id"`
        Tid          int64  `json:"tid"`
        Title        string `json:"title"`
        JoinTime     int64  `json:"join_time"`
        CreationTime int64  `json:"create_time"`
        Mtime        int64  `json:"mtime"`

        parseTime *time.Time
    }
@@ -109,17 +110,17 @@ type (
    }
)

func (a *Album) GetSize() int64  { return 0 }
func (a *Album) GetName() string { return a.Title }
func (a *Album) ModTime() time.Time {
    if a.parseTime == nil {
        a.parseTime = toTime(a.Mtime)
    }
    return *a.parseTime
func (a *Album) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}
func (a *Album) IsDir() bool     { return true }
func (a *Album) GetID() string   { return "" }
func (a *Album) GetPath() string { return "" }

func (a *Album) GetSize() int64        { return 0 }
func (a *Album) GetName() string       { return a.Title }
func (a *Album) CreateTime() time.Time { return time.Unix(a.CreationTime, 0) }
func (a *Album) ModTime() time.Time    { return time.Unix(a.Mtime, 0) }
func (a *Album) IsDir() bool           { return true }
func (a *Album) GetID() string         { return "" }
func (a *Album) GetPath() string       { return "" }

type (
    CopyFileResp struct {
@@ -160,9 +161,9 @@ type (
        CreateFileResp

        // returned when the file does not exist
        Path      string  `json:"path"`
        UploadID  string  `json:"uploadid"`
        Blocklist []int64 `json:"block_list"`
        Path      string `json:"path"`
        UploadID  string `json:"uploadid"`
        BlockList []int  `json:"block_list"`
    }
)

@@ -21,7 +21,7 @@ const (
    FILE_API_URL_V2 = API_URL + "/file/v2"
)

func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
    req := base.RestyClient.R().
        SetQueryParam("access_token", d.AccessToken)
    if callback != nil {
@@ -52,9 +52,17 @@ func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallba
    default:
        return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
    }
    return res.Body(), nil
    return res, nil
}

//func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
//	res, err := d.request(furl, method, callback, resp)
//	if err != nil {
//		return nil, err
//	}
//	return res.Body(), nil
//}

func (d *BaiduPhoto) refreshToken() error {
    u := "https://openapi.baidu.com/oauth/2.0/token"
    var resp base.TokenResp
@@ -79,11 +87,11 @@ func (d *BaiduPhoto) refreshToken() error {
    return nil
}

func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
    return d.Request(furl, http.MethodGet, callback, resp)
}

func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
    return d.Request(furl, http.MethodPost, callback, resp)
}

@@ -223,7 +231,7 @@ func (d *BaiduPhoto) DeleteAlbum(ctx context.Context, album *Album) error {
    r.SetFormData(map[string]string{
        "album_id": album.AlbumID,
        "tid":      fmt.Sprint(album.Tid),
        "delete_origin_image": "0", // delete the original image? 0 = keep, 1 = delete
        "delete_origin_image": BoolToIntStr(d.DeleteOrigin), // delete the original image? 0 = keep, 1 = delete
    })
}, nil)
return err
@@ -237,7 +245,7 @@ func (d *BaiduPhoto) DeleteAlbumFile(ctx context.Context, file *AlbumFile) error
    "album_id": fmt.Sprint(file.AlbumID),
    "tid":      fmt.Sprint(file.Tid),
    "list":     fmt.Sprintf(`[{"fsid":%d,"uk":%d}]`, file.Fsid, file.Uk),
    "del_origin": "0", // delete the original image? 0 = keep, 1 = delete
    "del_origin": BoolToIntStr(d.DeleteOrigin), // delete the original image? 0 = keep, 1 = delete
    })
}, nil)
return err
@@ -391,6 +399,49 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
    return link, nil
}

/*func (d *BaiduPhoto) linkStreamAlbum(ctx context.Context, file *AlbumFile) (*model.Link, error) {
    return &model.Link{
        Header: http.Header{},
        Writer: func(w io.Writer) error {
            res, err := d.Get(ALBUM_API_URL+"/streaming", func(r *resty.Request) {
                r.SetContext(ctx)
                r.SetQueryParams(map[string]string{
                    "fsid":     fmt.Sprint(file.Fsid),
                    "album_id": file.AlbumID,
                    "tid":      fmt.Sprint(file.Tid),
                    "uk":       fmt.Sprint(file.Uk),
                }).SetDoNotParseResponse(true)
            }, nil)
            if err != nil {
                return err
            }
            defer res.RawBody().Close()
            _, err = io.Copy(w, res.RawBody())
            return err
        },
    }, nil
}*/

/*func (d *BaiduPhoto) linkStream(ctx context.Context, file *File) (*model.Link, error) {
    return &model.Link{
        Header: http.Header{},
        Writer: func(w io.Writer) error {
            res, err := d.Get(FILE_API_URL_V1+"/streaming", func(r *resty.Request) {
                r.SetContext(ctx)
                r.SetQueryParams(map[string]string{
                    "fsid": fmt.Sprint(file.Fsid),
                }).SetDoNotParseResponse(true)
            }, nil)
            if err != nil {
                return err
            }
            defer res.RawBody().Close()
            _, err = io.Copy(w, res.RawBody())
            return err
        },
    }, nil
}*/

// get uk
func (d *BaiduPhoto) uInfo() (*UInfo, error) {
    var info UInfo

drivers/base/upload.go (new file, +31 lines)
@@ -0,0 +1,31 @@
package base

import (
    "fmt"
    "strings"
    "time"

    "github.com/Xhofe/go-cache"
    "github.com/alist-org/alist/v3/internal/driver"
)

// storage upload progress, for upload recovery
var UploadStateCache = cache.NewMemCache(cache.WithShards[any](32))

// Save upload progress for 20 minutes
func SaveUploadProgress(driver driver.Driver, state any, keys ...string) bool {
    return UploadStateCache.Set(
        fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")),
        state,
        cache.WithEx[any](time.Minute*20))
}

// An upload can only be resumed by one process at a time,
// so the progress is fetched and deleted in a single step.
func GetUploadProgress[T any](driver driver.Driver, keys ...string) (state T, ok bool) {
    v, ok := UploadStateCache.GetDel(fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")))
    if ok {
        state, ok = v.(T)
    }
    return
}
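
SaveUploadProgress/GetUploadProgress give a cancelled upload a 20-minute window to resume: progress is keyed by driver name plus caller-chosen strings (the Baidu drivers use the access token and content MD5), and the get-and-delete semantics ensure only one process can resume a given upload. A stripped-down standalone sketch of the idea with a plain map; the real code uses the sharded, TTL-capable Xhofe/go-cache:

package main

import (
    "fmt"
    "strings"
    "sync"
)

var (
    mu    sync.Mutex
    cache = map[string]any{}
)

// save stores progress under a composite key (no TTL here; the real
// cache expires entries after 20 minutes).
func save(state any, keys ...string) {
    mu.Lock()
    defer mu.Unlock()
    cache["upload-"+strings.Join(keys, "-")] = state
}

// get returns the saved state and deletes it in the same step, so
// exactly one process can resume a given upload.
func get[T any](keys ...string) (state T, ok bool) {
    mu.Lock()
    defer mu.Unlock()
    k := "upload-" + strings.Join(keys, "-")
    v, found := cache[k]
    if found {
        delete(cache, k)
        state, ok = v.(T)
    }
    return
}

type progress struct {
    Uploadid  string
    Remaining []int
}

func main() {
    save(&progress{Uploadid: "N1-abc", Remaining: []int{2, 3}}, "token", "md5")
    p, ok := get[*progress]("token", "md5")
    fmt.Println(ok, p.Uploadid, p.Remaining) // true N1-abc [2 3]
    _, ok = get[*progress]("token", "md5")
    fmt.Println(ok) // false: consumed by the first get
}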

@@ -1,30 +1 @@
package base

import (
    "io"
    "net/http"
    "strconv"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
)

func HandleRange(link *model.Link, file io.ReadSeekCloser, header http.Header, size int64) {
    if header.Get("Range") != "" {
        r, err := http_range.ParseRange(header.Get("Range"), size)
        if err == nil && len(r) > 0 {
            _, err := file.Seek(r[0].Start, io.SeekStart)
            if err == nil {
                link.Data = utils.NewLimitReadCloser(file, func() error {
                    return file.Close()
                }, r[0].Length)
                link.Status = http.StatusPartialContent
                link.Header = http.Header{
                    "Content-Range":  []string{r[0].ContentRange(size)},
                    "Content-Length": []string{strconv.FormatInt(r[0].Length, 10)},
                }
            }
        }
    }
}

@@ -115,7 +115,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    if stream.GetReadCloser() == http.NoBody {
    if io.ReadCloser(stream) == http.NoBody {
        return d.create(ctx, dstDir, stream)
    }
    var r DirectoryResp

@@ -13,6 +13,7 @@ type Addition struct {
    Username string `json:"username"`
    Password string `json:"password"`
    Cookie   string `json:"cookie"`
    CustomUA string `json:"custom_ua"`
}

var config = driver.Config{

@@ -22,15 +22,18 @@ const loginPath = "/user/session"

func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
    u := d.Address + "/api/v3" + path
    ua := d.CustomUA
    if ua == "" {
        ua = base.UserAgent
    }
    req := base.RestyClient.R()
    req.SetHeaders(map[string]string{
        "Cookie":     "cloudreve-session=" + d.Cookie,
        "Accept":     "application/json, text/plain, */*",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
        "User-Agent": ua,
    })

    var r Resp

    req.SetResult(&r)

    if callback != nil {
drivers/crypt/driver.go (new file, +398 lines)
@@ -0,0 +1,398 @@
package crypt

import (
    "context"
    "fmt"
    "github.com/alist-org/alist/v3/internal/stream"
    "io"
    stdpath "path"
    "regexp"
    "strings"

    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/fs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    rcCrypt "github.com/rclone/rclone/backend/crypt"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/obscure"
    log "github.com/sirupsen/logrus"
)

type Crypt struct {
    model.Storage
    Addition
    cipher        *rcCrypt.Cipher
    remoteStorage driver.Driver
}

const obfuscatedPrefix = "___Obfuscated___"

func (d *Crypt) Config() driver.Config {
    return config
}

func (d *Crypt) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *Crypt) Init(ctx context.Context) error {
    //obfuscate credentials if they were updated or just created
    err := d.updateObfusParm(&d.Password)
    if err != nil {
        return fmt.Errorf("failed to obfuscate password: %w", err)
    }
    err = d.updateObfusParm(&d.Salt)
    if err != nil {
        return fmt.Errorf("failed to obfuscate salt: %w", err)
    }

    isCryptExt := regexp.MustCompile(`^[.][A-Za-z0-9-_]{2,}$`).MatchString
    if !isCryptExt(d.EncryptedSuffix) {
        return fmt.Errorf("EncryptedSuffix is Illegal")
    }
    d.FileNameEncoding = utils.GetNoneEmpty(d.FileNameEncoding, "base64")
    d.EncryptedSuffix = utils.GetNoneEmpty(d.EncryptedSuffix, ".bin")

    op.MustSaveDriverStorage(d)

    //need remote storage exist
    storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
    if err != nil {
        return fmt.Errorf("can't find remote storage: %w", err)
    }
    d.remoteStorage = storage

    p, _ := strings.CutPrefix(d.Password, obfuscatedPrefix)
    p2, _ := strings.CutPrefix(d.Salt, obfuscatedPrefix)
    config := configmap.Simple{
        "password":                  p,
        "password2":                 p2,
        "filename_encryption":       d.FileNameEnc,
        "directory_name_encryption": d.DirNameEnc,
        "filename_encoding":         d.FileNameEncoding,
        "suffix":                    d.EncryptedSuffix,
        "pass_bad_blocks":           "",
    }
    c, err := rcCrypt.NewCipher(config)
    if err != nil {
        return fmt.Errorf("failed to create Cipher: %w", err)
    }
    d.cipher = c

    return nil
}

func (d *Crypt) updateObfusParm(str *string) error {
    temp := *str
    if !strings.HasPrefix(temp, obfuscatedPrefix) {
        temp, err := obscure.Obscure(temp)
        if err != nil {
            return err
        }
        temp = obfuscatedPrefix + temp
        *str = temp
    }
    return nil
}
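
updateObfusParm stores credentials in rclone's reversible obscured form and tags them with the ___Obfuscated___ prefix so Init can tell raw input from already-processed values. A small sketch of the round trip, assuming obscure.Reveal as the documented inverse of the obscure.Obscure call used above (obscuring is reversible encoding, not encryption, which is why a plain marker prefix suffices):

package main

import (
    "fmt"
    "strings"

    "github.com/rclone/rclone/fs/config/obscure"
)

const prefix = "___Obfuscated___"

func main() {
    secret := "my-password"

    obscured, err := obscure.Obscure(secret) // reversible encoding, not encryption
    if err != nil {
        panic(err)
    }
    stored := prefix + obscured // the prefix marks the value as already processed

    // Later: strip the marker and recover the original value.
    raw, _ := strings.CutPrefix(stored, prefix)
    revealed, err := obscure.Reveal(raw)
    if err != nil {
        panic(err)
    }
    fmt.Println(revealed == secret) // true
}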

func (d *Crypt) Drop(ctx context.Context) error {
    return nil
}

func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    path := dir.GetPath()
    //return d.list(ctx, d.RemotePath, path)
    //remoteFull

    objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
    // the obj must implement the model.SetPath interface
    // return objs, err
    if err != nil {
        return nil, err
    }

    var result []model.Obj
    for _, obj := range objs {
        if obj.IsDir() {
            name, err := d.cipher.DecryptDirName(obj.GetName())
            if err != nil {
                //filter illegal files
                continue
            }
            objRes := model.Object{
                Name:     name,
                Size:     0,
                Modified: obj.ModTime(),
                IsFolder: obj.IsDir(),
                Ctime:    obj.CreateTime(),
                // discarding hash as it's encrypted
            }
            result = append(result, &objRes)
        } else {
            thumb, ok := model.GetThumb(obj)
            size, err := d.cipher.DecryptedSize(obj.GetSize())
            if err != nil {
                //filter illegal files
                continue
            }
            name, err := d.cipher.DecryptFileName(obj.GetName())
            if err != nil {
                //filter illegal files
                continue
            }
            objRes := model.Object{
                Name:     name,
                Size:     size,
                Modified: obj.ModTime(),
                IsFolder: obj.IsDir(),
                Ctime:    obj.CreateTime(),
                // discarding hash as it's encrypted
            }
            if !ok {
                result = append(result, &objRes)
            } else {
                objWithThumb := model.ObjThumb{
                    Object: objRes,
                    Thumbnail: model.Thumbnail{
                        Thumbnail: thumb,
                    },
                }
                result = append(result, &objWithThumb)
            }
        }
    }

    return result, nil
}

func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
    if utils.PathEqual(path, "/") {
        return &model.Object{
            Name:     "Root",
            IsFolder: true,
            Path:     "/",
        }, nil
    }
    remoteFullPath := ""
    var remoteObj model.Obj
    var err, err2 error
    firstTryIsFolder, secondTry := guessPath(path)
    remoteFullPath = d.getPathForRemote(path, firstTryIsFolder)
    remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
    if err != nil {
        if errs.IsObjectNotFound(err) && secondTry {
            //try the opposite
            remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
            remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
            if err2 != nil {
                return nil, err2
            }
        } else {
            return nil, err
        }
    }
    var size int64 = 0
    name := ""
    if !remoteObj.IsDir() {
        size, err = d.cipher.DecryptedSize(remoteObj.GetSize())
        if err != nil {
            log.Warnf("DecryptedSize failed for %s, will use original size, err: %s", path, err)
            size = remoteObj.GetSize()
        }
        name, err = d.cipher.DecryptFileName(remoteObj.GetName())
        if err != nil {
            log.Warnf("DecryptFileName failed for %s, will use original name, err: %s", path, err)
            name = remoteObj.GetName()
        }
    } else {
        name, err = d.cipher.DecryptDirName(remoteObj.GetName())
        if err != nil {
            log.Warnf("DecryptDirName failed for %s, will use original name, err: %s", path, err)
            name = remoteObj.GetName()
        }
    }
    obj := &model.Object{
        Path:     path,
        Name:     name,
        Size:     size,
        Modified: remoteObj.ModTime(),
        IsFolder: remoteObj.IsDir(),
    }
    return obj, nil
    //return nil, errs.ObjectNotFound
}

func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
    if err != nil {
        return nil, fmt.Errorf("failed to convert path to remote path: %w", err)
    }
    remoteLink, remoteFile, err := op.Link(ctx, d.remoteStorage, dstDirActualPath, args)
    if err != nil {
        return nil, err
    }

    if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
        return nil, fmt.Errorf("the remote storage driver needs to be enhanced to support encryption")
    }
    remoteFileSize := remoteFile.GetSize()
    remoteClosers := utils.EmptyClosers()
    rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
        length := underlyingLength
        if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
            length = -1
        }
        rrc := remoteLink.RangeReadCloser
        if len(remoteLink.URL) > 0 {

            rangedRemoteLink := &model.Link{
                URL:    remoteLink.URL,
                Header: remoteLink.Header,
            }
            var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
            if err != nil {
                return nil, err
            }
            rrc = converted
        }
        if rrc != nil {
            //remoteRangeReader, err :=
            remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
            remoteClosers.AddClosers(rrc.GetClosers())
            if err != nil {
                return nil, err
            }
            return remoteReader, nil
        }
        if remoteLink.MFile != nil {
            _, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
            if err != nil {
                return nil, err
            }
            //remoteClosers.Add(remoteLink.MFile)
            //keep reusing the same MFile and close it at the end
            remoteClosers.Add(remoteLink.MFile)
            return io.NopCloser(remoteLink.MFile), nil
        }

        return nil, errs.NotSupport

    }
    resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
        readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
        if err != nil {
            return nil, err
        }
        return readSeeker, nil
    }

    resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
    resultLink := &model.Link{
        Header:          remoteLink.Header,
        RangeReadCloser: resultRangeReadCloser,
        Expiration:      remoteLink.Expiration,
    }

    return resultLink, nil

}

func (d *Crypt) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    dstDirActualPath, err := d.getActualPathForRemote(parentDir.GetPath(), true)
    if err != nil {
        return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
dir := d.cipher.EncryptDirName(dirName)
|
||||
return op.MakeDir(ctx, d.remoteStorage, stdpath.Join(dstDirActualPath, dir))
|
||||
}
|
||||
|
||||
func (d *Crypt) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
return op.Move(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
|
||||
}
|
||||
|
||||
func (d *Crypt) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
||||
remoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
var newEncryptedName string
|
||||
if srcObj.IsDir() {
|
||||
newEncryptedName = d.cipher.EncryptDirName(newName)
|
||||
} else {
|
||||
newEncryptedName = d.cipher.EncryptFileName(newName)
|
||||
}
|
||||
return op.Rename(ctx, d.remoteStorage, remoteActualPath, newEncryptedName)
|
||||
}
|
||||
|
||||
func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
||||
srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
|
||||
|
||||
}
|
||||
|
||||
func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
|
||||
remoteActualPath, err := d.getActualPathForRemote(obj.GetPath(), obj.IsDir())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
return op.Remove(ctx, d.remoteStorage, remoteActualPath)
|
||||
}
|
||||
|
||||
func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
|
||||
dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert path to remote path: %w", err)
|
||||
}
|
||||
|
||||
// Encrypt the data into wrappedIn
|
||||
wrappedIn, err := d.cipher.EncryptData(streamer)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to EncryptData: %w", err)
|
||||
}
|
||||
|
||||
// doesn't support seekableStream, since rapid-upload is not working for encrypted data
|
||||
streamOut := &stream.FileStream{
|
||||
Obj: &model.Object{
|
||||
ID: streamer.GetID(),
|
||||
Path: streamer.GetPath(),
|
||||
Name: d.cipher.EncryptFileName(streamer.GetName()),
|
||||
Size: d.cipher.EncryptedSize(streamer.GetSize()),
|
||||
Modified: streamer.ModTime(),
|
||||
IsFolder: streamer.IsDir(),
|
||||
},
|
||||
Reader: wrappedIn,
|
||||
Mimetype: "application/octet-stream",
|
||||
WebPutAsTask: streamer.NeedStore(),
|
||||
Exist: streamer.GetExist(),
|
||||
}
|
||||
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
|
||||
// return nil, errs.NotSupport
|
||||
//}
|
||||
|
||||
var _ driver.Driver = (*Crypt)(nil)
|
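Aside: the Link path above layers a decrypting reader over a ciphertext range reader. A minimal standalone sketch of that composition, where a XOR "cipher" and the helper names (readerFunc, xorReader, rangeRead) are illustrative stand-ins for rclone's crypt cipher, not alist APIs:

package main

import (
	"bytes"
	"fmt"
	"io"
)

type readerFunc func([]byte) (int, error)

func (f readerFunc) Read(p []byte) (int, error) { return f(p) }

// xorReader "decrypts" by XOR-ing every byte with key as it streams through
func xorReader(r io.Reader, key byte) io.Reader {
	return readerFunc(func(p []byte) (int, error) {
		n, err := r.Read(p)
		for i := 0; i < n; i++ {
			p[i] ^= key
		}
		return n, err
	})
}

func main() {
	plain := []byte("hello, crypt driver")
	enc := make([]byte, len(plain))
	for i, b := range plain {
		enc[i] = b ^ 0x5a
	}
	// stands in for rangeReaderFunc: serve one byte range of the remote ciphertext
	rangeRead := func(off, length int64) io.Reader {
		return bytes.NewReader(enc[off : off+length])
	}
	// Link composes the two layers: range-read the ciphertext, decrypt on the fly
	dec, _ := io.ReadAll(xorReader(rangeRead(7, 5), 0x5a))
	fmt.Printf("%s\n", dec) // crypt
}

The real cipher is block-based, so DecryptDataSeek must translate a plaintext range into the enclosing ciphertext range before delegating; the toy XOR skips that step but keeps the same layering.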
42 drivers/crypt/meta.go Normal file
@ -0,0 +1,42 @@
package crypt

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	//driver.RootPath
	//driver.RootID
	// define other

	FileNameEnc string `json:"filename_encryption" type:"select" required:"true" options:"off,standard,obfuscate" default:"off"`
	DirNameEnc  string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
	RemotePath  string `json:"remote_path" required:"true" help:"This is where the encrypted data is stored"`

	Password string `json:"password" required:"true" confidential:"true" help:"the main password"`
	Salt     string `json:"salt" confidential:"true" help:"If you don't know what a salt is, treat it as a second password. Optional but recommended"`
	EncryptedSuffix  string `json:"encrypted_suffix" required:"true" default:".bin" help:"for advanced users only! encrypted files will have this suffix"`
	FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced users only!"`
}

var config = driver.Config{
	Name:              "Crypt",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         true,
	NoCache:           true,
	NoUpload:          false,
	NeedMs:            false,
	DefaultRoot:       "/",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Crypt{}
	})
}
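Aside: the init/RegisterDriver pattern lets every driver package self-register a constructor at import time. A toy sketch of the idea — alist's real op.RegisterDriver derives the name from the driver's Config, whereas this simplified registry keys by an explicit string:

package main

import "fmt"

type Driver interface{ Name() string }

var registry = map[string]func() Driver{}

// Register plays the role of op.RegisterDriver: driver packages call it
// from init() so the core can construct drivers by name later
func Register(name string, ctor func() Driver) { registry[name] = ctor }

type Crypt struct{}

func (*Crypt) Name() string { return "Crypt" }

func init() { Register("Crypt", func() Driver { return &Crypt{} }) }

func main() {
	d := registry["Crypt"]()
	fmt.Println(d.Name()) // Crypt
}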
1 drivers/crypt/types.go Normal file
@ -0,0 +1 @@
package crypt
44 drivers/crypt/util.go Normal file
@ -0,0 +1,44 @@
package crypt

import (
	stdpath "path"
	"path/filepath"
	"strings"

	"github.com/alist-org/alist/v3/internal/op"
)

// gives the best guess based on the path
func guessPath(path string) (isFolder, secondTry bool) {
	if strings.HasSuffix(path, "/") {
		//confirmed a folder
		return true, false
	}
	lastSlash := strings.LastIndex(path, "/")
	if strings.Index(path[lastSlash:], ".") < 0 {
		//no dot: try folder first, then file
		return true, true
	}
	return false, true
}

func (d *Crypt) getPathForRemote(path string, isFolder bool) (remoteFullPath string) {
	if isFolder && !strings.HasSuffix(path, "/") {
		path = path + "/"
	}
	dir, fileName := filepath.Split(path)

	remoteDir := d.cipher.EncryptDirName(dir)
	remoteFileName := ""
	if len(strings.TrimSpace(fileName)) > 0 {
		remoteFileName = d.cipher.EncryptFileName(fileName)
	}
	return stdpath.Join(d.RemotePath, remoteDir, remoteFileName)
}

// the actual path is for internal use only; any link for the user should come from remoteFullPath
func (d *Crypt) getActualPathForRemote(path string, isFolder bool) (string, error) {
	_, remoteActualPath, err := op.GetStorageAndActualPath(d.getPathForRemote(path, isFolder))
	return remoteActualPath, err
}
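Aside: a quick check of guessPath's heuristic (a standalone copy of the function above so it can be run directly):

package main

import (
	"fmt"
	"strings"
)

// copy of guessPath from drivers/crypt/util.go
func guessPath(path string) (isFolder, secondTry bool) {
	if strings.HasSuffix(path, "/") {
		return true, false
	}
	lastSlash := strings.LastIndex(path, "/")
	if strings.Index(path[lastSlash:], ".") < 0 {
		return true, true
	}
	return false, true
}

func main() {
	for _, p := range []string{"/docs/", "/docs/readme", "/docs/readme.md"} {
		isFolder, secondTry := guessPath(p)
		fmt.Printf("%-16s isFolder=%-5v secondTry=%v\n", p, isFolder, secondTry)
	}
	// /docs/           isFolder=true  secondTry=false
	// /docs/readme     isFolder=true  secondTry=true
	// /docs/readme.md  isFolder=false secondTry=true
}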
@ -4,7 +4,6 @@ import (
	"context"
	stdpath "path"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
@ -65,11 +64,10 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
		return nil, err
	}

	r := NewFTPFileReader(d.conn, file.GetPath())
	r := NewFileReader(d.conn, file.GetPath(), file.GetSize())
	link := &model.Link{
		Data: r,
		MFile: r,
	}
	base.HandleRange(link, r, args.Header, file.GetSize())
	return link, nil
}
@ -4,6 +4,7 @@ import (
	"io"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/jlaffaye/ftp"
@ -30,43 +31,59 @@ func (d *FTP) login() error {
	return nil
}

// An FTP file reader that implements io.ReadSeekCloser for seeking.
type FTPFileReader struct {
	conn   *ftp.ServerConn
	resp   *ftp.Response
	offset int64
	mu     sync.Mutex
	path   string
// FileReader is an FTP file reader that supports seeking via ReadAt.
type FileReader struct {
	conn         *ftp.ServerConn
	resp         *ftp.Response
	offset       atomic.Int64
	readAtOffset int64
	mu           sync.Mutex
	path         string
	size         int64
}

func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
	return &FTPFileReader{
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
	return &FileReader{
		conn: conn,
		path: path,
		size: size,
	}
}

func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
func (r *FileReader) Read(buf []byte) (n int, err error) {
	n, err = r.ReadAt(buf, r.offset.Load())
	r.offset.Add(int64(n))
	return
}

func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
	if off < 0 {
		return -1, os.ErrInvalid
	}
	r.mu.Lock()
	defer r.mu.Unlock()

	if off != r.readAtOffset {
		//have to restart the connection to correct the offset
		_ = r.resp.Close()
		r.resp = nil
	}

	if r.resp == nil {
		r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
		r.readAtOffset = off
		if err != nil {
			return 0, err
		}
	}

	n, err = r.resp.Read(buf)
	r.offset += int64(n)
	r.readAtOffset += int64(n)
	return
}

func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	oldOffset := r.offset
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
	oldOffset := r.offset.Load()
	var newOffset int64
	switch whence {
	case io.SeekStart:
@ -74,11 +91,7 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
	case io.SeekCurrent:
		newOffset = oldOffset + offset
	case io.SeekEnd:
		size, err := r.conn.FileSize(r.path)
		if err != nil {
			return oldOffset, err
		}
		newOffset = offset + int64(size)
		return r.size, nil
	default:
		return -1, os.ErrInvalid
	}
@ -91,17 +104,11 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
		// offset not changed, so return directly
		return oldOffset, nil
	}
	r.offset = newOffset

	if r.resp != nil {
		// close the existing ftp data connection, otherwise the next read will be blocked
		_ = r.resp.Close() // we do not care whether it returns an error
		r.resp = nil
	}
	r.offset.Store(newOffset)
	return newOffset, nil
}

func (r *FTPFileReader) Close() error {
func (r *FileReader) Close() error {
	if r.resp != nil {
		return r.resp.Close()
	}
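Aside: the refactor above makes Read a thin wrapper over ReadAt with an atomic offset, so Seek never races with a concurrent Read on a plain int64. A minimal sketch of that pattern (the atReader type is illustrative, not an alist API):

package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// atReader mirrors the FTP FileReader layering: Read delegates to ReadAt
// and advances an atomic offset afterwards
type atReader struct {
	ra     io.ReaderAt
	offset atomic.Int64
}

func (r *atReader) Read(p []byte) (n int, err error) {
	n, err = r.ra.ReadAt(p, r.offset.Load())
	r.offset.Add(int64(n))
	return
}

func main() {
	r := &atReader{ra: strings.NewReader("stateless reads over ReadAt")}
	b := make([]byte, 9)
	r.Read(b)
	fmt.Printf("%s | next offset: %d\n", b, r.offset.Load()) // stateless | next offset: 9
}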
@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	obj := stream.GetOld()
	obj := stream.GetExist()
	var (
		e   Error
		url string
@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
	putUrl := res.Header().Get("location")
	if stream.GetSize() < d.ChunkSize*1024*1024 {
		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
		}, nil)
	} else {
		err = d.chunkUpload(ctx, stream, putUrl)
@ -5,7 +5,7 @@ import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"io/ioutil"
	"net/http"
	"os"
@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {

func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
	var defaultChunkSize = d.ChunkSize * 1024 * 1024
	var finish int64 = 0
	for finish < stream.GetSize() {
	var offset int64 = 0
	for offset < stream.GetSize() {
		if utils.IsCanceled(ctx) {
			return ctx.Err()
		}
		chunkSize := stream.GetSize() - finish
		chunkSize := stream.GetSize() - offset
		if chunkSize > defaultChunkSize {
			chunkSize = defaultChunkSize
		}
		_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
		if err != nil {
			return err
		}
		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
			req.SetHeaders(map[string]string{
				"Content-Length": strconv.FormatInt(chunkSize, 10),
				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
			}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
			}).SetBody(reader).SetContext(ctx)
		}, nil)
		if err != nil {
			return err
		}
		finish += chunkSize
		offset += chunkSize
	}
	return nil
}
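Aside: the Content-Range math in chunkUpload is easy to get off by one. A standalone check of the exact headers the loop above would emit for a 10 MiB file with 4 MiB chunks:

package main

import "fmt"

func main() {
	const size, chunk = int64(10 << 20), int64(4 << 20)
	for off := int64(0); off < size; {
		n := size - off
		if n > chunk {
			n = chunk
		}
		// same formula as the driver: inclusive end, total size after the slash
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", off, off+n-1, size)
		off += n
	}
	// Content-Range: bytes 0-4194303/10485760
	// Content-Range: bytes 4194304-8388607/10485760
	// Content-Range: bytes 8388608-10485759/10485760
}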
@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
	}

	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
		req.SetBody(stream.GetReadCloser()).SetContext(ctx)
		req.SetBody(stream).SetContext(ctx)
	}, nil, postHeaders)

	if err != nil {
@ -2,9 +2,7 @@ package lanzou

import (
	"context"
	"fmt"
	"net/http"
	"regexp"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
@ -19,6 +17,8 @@ type LanZou struct {
	model.Storage
	uid string
	vei string

	flag int32
}

func (d *LanZou) Config() driver.Config {
@ -30,16 +30,18 @@ func (d *LanZou) GetAddition() driver.Additional {
}

func (d *LanZou) Init(ctx context.Context) (err error) {
	if d.IsCookie() {
	switch d.Type {
	case "account":
		_, err := d.Login()
		if err != nil {
			return err
		}
		fallthrough
	case "cookie":
		if d.RootFolderID == "" {
			d.RootFolderID = "-1"
		}
		ylogin := regexp.MustCompile("ylogin=(.*?);").FindStringSubmatch(d.Cookie)
		if len(ylogin) < 2 {
			return fmt.Errorf("cookie does not contain ylogin")
		}
		d.uid = ylogin[1]
		d.vei, err = d.getVei()
		d.vei, d.uid, err = d.getVeiAndUid()
	}
	return
}
@ -51,7 +53,7 @@ func (d *LanZou) Drop(ctx context.Context) error {

// the size and time retrieved here are not accurate
func (d *LanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if d.IsCookie() {
	if d.IsCookie() || d.IsAccount() {
		return d.GetAllFiles(dir.GetID())
	} else {
		return d.GetFileOrFolderByShareUrl(dir.GetID(), d.SharePassword)
@ -119,7 +121,7 @@ func (d *LanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}

func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	if d.IsCookie() {
	if d.IsCookie() || d.IsAccount() {
		data, err := d.doupload(func(req *resty.Request) {
			req.SetContext(ctx)
			req.SetFormData(map[string]string{
@ -137,11 +139,11 @@ func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
			FolID: utils.Json.Get(data, "text").ToString(),
		}, nil
	}
	return nil, errs.NotImplement
	return nil, errs.NotSupport
}

func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	if d.IsCookie() {
	if d.IsCookie() || d.IsAccount() {
		if !srcObj.IsDir() {
			_, err := d.doupload(func(req *resty.Request) {
				req.SetContext(ctx)
@ -157,11 +159,11 @@ func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
			return srcObj, nil
		}
	}
	return nil, errs.NotImplement
	return nil, errs.NotSupport
}

func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	if d.IsCookie() {
	if d.IsCookie() || d.IsAccount() {
		if !srcObj.IsDir() {
			_, err := d.doupload(func(req *resty.Request) {
				req.SetContext(ctx)
@ -179,11 +181,11 @@ func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (
			return srcObj, nil
		}
	}
	return nil, errs.NotImplement
	return nil, errs.NotSupport
}

func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
	if d.IsCookie() {
	if d.IsCookie() || d.IsAccount() {
		_, err := d.doupload(func(req *resty.Request) {
			req.SetContext(ctx)
			if obj.IsDir() {
@ -200,13 +202,13 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
		}, nil)
		return err
	}
	return errs.NotImplement
	return errs.NotSupport
}

func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	if d.IsCookie() {
	if d.IsCookie() || d.IsAccount() {
		var resp RespText[[]FileOrFolder]
		_, err := d._post(d.BaseUrl+"/fileup.php", func(req *resty.Request) {
		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
			req.SetFormData(map[string]string{
				"task": "1",
				"vie":  "2",
@ -221,5 +223,5 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
		}
		return &resp.Text[0], nil
	}
	return nil, errs.NotImplement
	return nil, errs.NotSupport
}
@ -3,6 +3,7 @@ package lanzou
import (
	"bytes"
	"fmt"
	"net/http"
	"regexp"
	"strconv"
	"strings"
@ -117,13 +118,102 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // split key-value pairs

// look up a JS variable by key
func findJSVarFunc(key, data string) string {
	values := regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
	var values []string
	if key != "sasign" {
		values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
	} else {
		matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1)
		if len(matches) == 3 {
			values = matches[1]
		} else {
			if len(matches) > 0 {
				values = matches[0]
			}
		}
	}
	if len(values) == 0 {
		return ""
	}
	return values[1]
}

var findFunction = regexp.MustCompile(`(?ims)^function[^{]+`)
var findFunctionAll = regexp.MustCompile(`(?is)function[^{]+`)

// find the positions of all functions
func findJSFunctionIndex(data string, all bool) [][2]int {
	findFunction := findFunction
	if all {
		findFunction = findFunctionAll
	}

	indexs := findFunction.FindAllStringIndex(data, -1)
	fIndexs := make([][2]int, 0, len(indexs))

	for _, index := range indexs {
		if len(index) != 2 {
			continue
		}
		count, data := 0, data[index[1]:]
		for ii, v := range data {
			if v == ' ' && count == 0 {
				continue
			}
			if v == '{' {
				count++
			}

			if v == '}' {
				count--
			}
			if count == 0 {
				fIndexs = append(fIndexs, [2]int{index[0], index[1] + ii + 1})
				break
			}
		}
	}
	return fIndexs
}

// remove global JS functions
func removeJSGlobalFunction(html string) string {
	indexs := findJSFunctionIndex(html, false)
	block := make([]string, len(indexs))
	for i, next := len(indexs)-1, len(html); i >= 0; i-- {
		index := indexs[i]
		block[i] = html[index[1]:next]
		next = index[0]
	}
	return strings.Join(block, "")
}

// get a function by name
func getJSFunctionByName(html string, name string) (string, error) {
	indexs := findJSFunctionIndex(html, true)
	for _, index := range indexs {
		data := html[index[0]:index[1]]
		if regexp.MustCompile(`function\s+` + name + `[()\s]+{`).MatchString(data) {
			return data, nil
		}
	}
	return "", fmt.Errorf("function %s not found", name)
}

// parse the JSON in the html, picking the longest data block
func htmlJsonToMap2(html string) (map[string]string, error) {
	datas := findDataReg.FindAllStringSubmatch(html, -1)
	var sData string
	for _, data := range datas {
		if len(datas) > 0 && len(data[1]) > len(sData) {
			sData = data[1]
		}
	}
	if sData == "" {
		return nil, fmt.Errorf("data not found")
	}
	return jsonToMap(sData, html), nil
}

// parse the JSON in the html
func htmlJsonToMap(html string) (map[string]string, error) {
	datas := findDataReg.FindStringSubmatch(html)
@ -190,3 +280,14 @@ func GetExpirationTime(url string) (etime time.Duration) {
	etime = time.Duration(timestamp-time.Now().Unix()) * time.Second
	return
}

func CookieToString(cookies []*http.Cookie) string {
	if cookies == nil {
		return ""
	}
	cookieStrings := make([]string, len(cookies))
	for i, cookie := range cookies {
		cookieStrings[i] = cookie.Name + "=" + cookie.Value
	}
	return strings.Join(cookieStrings, ";")
}
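Aside: CookieToString flattens a response's cookies into a single Cookie header value for later requests. A standalone copy for a quick check:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// copy of CookieToString from drivers/lanzou/help.go
func CookieToString(cookies []*http.Cookie) string {
	if cookies == nil {
		return ""
	}
	cookieStrings := make([]string, len(cookies))
	for i, cookie := range cookies {
		cookieStrings[i] = cookie.Name + "=" + cookie.Value
	}
	return strings.Join(cookieStrings, ";")
}

func main() {
	fmt.Println(CookieToString([]*http.Cookie{
		{Name: "ylogin", Value: "12345"},
		{Name: "phpdisk_info", Value: "abc"},
	}))
	// ylogin=12345;phpdisk_info=abc
}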
@ -6,8 +6,13 @@ import (
)

type Addition struct {
	Type   string `json:"type" type:"select" options:"cookie,url" default:"cookie"`
	Cookie string `json:"cookie" required:"true" help:"valid for about 15 days, ignore if shareUrl is used"`
	Type string `json:"type" type:"select" options:"account,cookie,url" default:"cookie"`

	Account  string `json:"account"`
	Password string `json:"password"`

	Cookie string `json:"cookie" help:"valid for about 15 days, ignore if shareUrl is used"`

	driver.RootID
	SharePassword string `json:"share_password"`
	BaseUrl       string `json:"baseUrl" required:"true" default:"https://pc.woozooo.com" help:"basic URL for file operation"`
@ -19,6 +24,10 @@ func (a *Addition) IsCookie() bool {
	return a.Type == "cookie"
}

func (a *Addition) IsAccount() bool {
	return a.Type == "account"
}

var config = driver.Config{
	Name:      "Lanzou",
	LocalSort: true,
@ -3,11 +3,14 @@ package lanzou
import (
	"errors"
	"fmt"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
	"time"
)

var ErrFileShareCancel = errors.New("file sharing cancellation")
var ErrFileNotExist = errors.New("file does not exist")
var ErrCookieExpiration = errors.New("cookie expiration")

type RespText[T any] struct {
	Text T `json:"text"`
@ -17,6 +20,9 @@ type RespInfo[T any] struct {
	Info T `json:"info"`
}

var _ model.Obj = (*FileOrFolder)(nil)
var _ model.Obj = (*FileOrFolderByShareUrl)(nil)

type FileOrFolder struct {
	Name string `json:"name"`
	//Onof string `json:"onof"` // whether an extraction code exists
@ -48,6 +54,14 @@ type FileOrFolder struct {
	shareInfo *FileShare `json:"-"`
}

func (f *FileOrFolder) CreateTime() time.Time {
	return f.ModTime()
}

func (f *FileOrFolder) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}

func (f *FileOrFolder) GetID() string {
	if f.IsDir() {
		return f.FolID
@ -129,6 +143,14 @@ type FileOrFolderByShareUrl struct {
	repairFlag bool `json:"-"`
}

func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
	return f.ModTime()
}

func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}

func (f *FileOrFolderByShareUrl) GetID() string   { return f.ID }
func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
func (f *FileOrFolderByShareUrl) GetPath() string { return "" }
@ -5,13 +5,16 @@ import (
	"fmt"
	"net/http"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
@ -37,7 +40,24 @@ func (d *LanZou) get(url string, callback base.ReqCallback) ([]byte, error) {
}

func (d *LanZou) post(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	return d._post(url, callback, resp, false)
	data, err := d._post(url, callback, resp, false)
	if err == ErrCookieExpiration && d.IsAccount() {
		if atomic.CompareAndSwapInt32(&d.flag, 0, 1) {
			_, err2 := d.Login()
			atomic.SwapInt32(&d.flag, 0)
			if err2 != nil {
				err = errors.Join(err, err2)
				d.Status = err.Error()
				op.MustSaveDriverStorage(d)
				return data, err
			}
		}
		for atomic.LoadInt32(&d.flag) != 0 {
			runtime.Gosched()
		}
		return d._post(url, callback, resp, false)
	}
	return data, err
}

func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{}, up bool) ([]byte, error) {
@ -49,10 +69,12 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
		}
		return false
	})
		callback(req)
		if callback != nil {
			callback(req)
		}
	}, up)
	if err != nil {
		return nil, err
		return data, err
	}
	switch utils.Json.Get(data, "zt").ToInt() {
	case 1, 2, 4:
@ -61,12 +83,14 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
			utils.Json.Unmarshal(data, resp)
		}
		return data, nil
	case 9: // login expired
		return data, ErrCookieExpiration
	default:
		info := utils.Json.Get(data, "inf").ToString()
		if info == "" {
			info = utils.Json.Get(data, "info").ToString()
		}
		return nil, fmt.Errorf(info)
		return data, fmt.Errorf(info)
	}
}

@ -101,6 +125,28 @@ func (d *LanZou) request(url string, method string, callback base.ReqCallback, u
	return res.Body(), err
}

func (d *LanZou) Login() ([]*http.Cookie, error) {
	resp, err := base.NewRestyClient().SetRedirectPolicy(resty.NoRedirectPolicy()).
		R().SetFormData(map[string]string{
		"task":         "3",
		"uid":          d.Account,
		"pwd":          d.Password,
		"setSessionId": "",
		"setSig":       "",
		"setScene":     "",
		"setTocen":     "",
		"formhash":     "",
	}).Post("https://up.woozooo.com/mlogin.php")
	if err != nil {
		return nil, err
	}
	if utils.Json.Get(resp.Body(), "zt").ToInt() != 1 {
		return nil, fmt.Errorf("login err: %s", resp.Body())
	}
	d.Cookie = CookieToString(resp.Cookies())
	return resp.Cookies(), nil
}

/*
get data via cookie
*/
@ -212,7 +258,7 @@ var sizeFindReg = regexp.MustCompile(`(?i)大小\W*([0-9.]+\s*[bkm]+)`)
var timeFindReg = regexp.MustCompile(`\d+\s*[秒天分小][钟时]?前|[昨前]天|\d{4}-\d{2}-\d{2}`)

// find the IDs and names of subfolders in a shared folder
var findSubFolaerReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
var findSubFolderReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)

// get the download page link
var findDownPageParamReg = regexp.MustCompile(`<iframe.*?src="(.+?)"`)
@ -300,7 +346,11 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (

	// password required
	if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
		param, err := htmlFormToMap(sharePageData)
		sharePageData, err := getJSFunctionByName(sharePageData, "down_p")
		if err != nil {
			return nil, err
		}
		param, err := htmlJsonToMap(sharePageData)
		if err != nil {
			return nil, err
		}
@ -325,7 +375,6 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
		return nil, err
	}
	nextPageData := RemoveNotes(string(data))

	param, err = htmlJsonToMap(nextPageData)
	if err != nil {
		return nil, err
@ -406,7 +455,7 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr

	files := make([]FileOrFolderByShareUrl, 0)
	// vip: fetch subfolders
	floders := findSubFolaerReg.FindAllStringSubmatch(sharePageData, -1)
	floders := findSubFolderReg.FindAllStringSubmatch(sharePageData, -1)
	for _, floder := range floders {
		if len(floder) == 3 {
			files = append(files, FileOrFolderByShareUrl{
@ -427,10 +476,10 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
		if err != nil {
			return nil, err
		}
		/*// files inside the folder are not encrypted either
		// files inside the folder are encrypted
		for i := 0; i < len(resp.Text); i++ {
			resp.Text[i].Pwd = pwd
		}*/
		}
		if len(resp.Text) == 0 {
			break
		}
@ -451,21 +500,32 @@ func (d *LanZou) getFileRealInfo(downURL string) (*int64, *time.Time) {
	return &size, &time
}

func (d *LanZou) getVei() (string, error) {
	resp, err := d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
func (d *LanZou) getVeiAndUid() (vei string, uid string, err error) {
	var resp []byte
	resp, err = d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
		req.SetQueryParams(map[string]string{
			"item":   "files",
			"action": "index",
			"u":      d.uid,
		})
	})
	if err != nil {
		return "", err
		return
	}
	// uid
	uids := regexp.MustCompile(`uid=([^'"&;]+)`).FindStringSubmatch(string(resp))
	if len(uids) < 2 {
		err = fmt.Errorf("uid variable not found")
		return
	}
	uid = uids[1]

	// vei
	html := RemoveNotes(string(resp))
	data, err := htmlJsonToMap(html)
	if err != nil {
		return "", err
		return
	}
	return data["vei"], nil
	vei = data["vei"]

	return
}
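Aside: the new LanZou.post above uses a single-flight CAS flag so that, when many concurrent requests hit a cookie expiry, only one goroutine re-logs in while the rest spin on the flag and then retry. A reduced sketch of that pattern (the expired gate and names are illustrative):

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

var flag int32
var expired int32 = 1 // session starts expired

func relogin() {
	fmt.Println("re-login")
	atomic.StoreInt32(&expired, 0)
}

func request(id int, wg *sync.WaitGroup) {
	defer wg.Done()
	if atomic.LoadInt32(&expired) == 1 { // stand-in for ErrCookieExpiration
		if atomic.CompareAndSwapInt32(&flag, 0, 1) { // winner refreshes the session
			if atomic.LoadInt32(&expired) == 1 {
				relogin()
			}
			atomic.SwapInt32(&flag, 0)
		}
		for atomic.LoadInt32(&flag) != 0 { // losers yield until the winner is done
			runtime.Gosched()
		}
	}
	fmt.Printf("request %d retried with fresh session\n", id)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go request(i, &wg)
	}
	wg.Wait()
}

Spinning with runtime.Gosched keeps the sketch faithful to the driver code; a sync.Once or singleflight.Group would be the more conventional choice in new code.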
@ -1,16 +1,18 @@
package local

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"net/http"
	"os"
	stdpath "path"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/internal/conf"
	"github.com/alist-org/alist/v3/internal/driver"
@ -19,6 +21,8 @@ import (
	"github.com/alist-org/alist/v3/internal/sign"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/alist-org/alist/v3/server/common"
	"github.com/djherbis/times"
	log "github.com/sirupsen/logrus"
	_ "golang.org/x/image/webp"
)
@ -80,36 +84,63 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
		if !d.ShowHidden && strings.HasPrefix(f.Name(), ".") {
			continue
		}
		thumb := ""
		if d.Thumbnail {
			typeName := utils.GetFileType(f.Name())
			if typeName == conf.IMAGE || typeName == conf.VIDEO {
				thumb = common.GetApiUrl(nil) + stdpath.Join("/d", args.ReqPath, f.Name())
				thumb = utils.EncodePath(thumb, true)
				thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(args.ReqPath, f.Name()))
			}
		}
		isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
		var size int64
		if !isFolder {
			size = f.Size()
		}
		file := model.ObjThumb{
			Object: model.Object{
				Path:     filepath.Join(dir.GetPath(), f.Name()),
				Name:     f.Name(),
				Modified: f.ModTime(),
				Size:     size,
				IsFolder: isFolder,
			},
			Thumbnail: model.Thumbnail{
				Thumbnail: thumb,
			},
		}
		files = append(files, &file)
		file := d.FileInfoToObj(f, args.ReqPath, fullPath)
		files = append(files, file)
	}
	return files, nil
}

func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) model.Obj {
	thumb := ""
	if d.Thumbnail {
		typeName := utils.GetFileType(f.Name())
		if typeName == conf.IMAGE || typeName == conf.VIDEO {
			thumb = common.GetApiUrl(nil) + stdpath.Join("/d", reqPath, f.Name())
			thumb = utils.EncodePath(thumb, true)
			thumb += "?type=thumb&sign=" + sign.Sign(stdpath.Join(reqPath, f.Name()))
		}
	}
	isFolder := f.IsDir() || isSymlinkDir(f, fullPath)
	var size int64
	if !isFolder {
		size = f.Size()
	}
	var ctime time.Time
	t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
	if err == nil {
		if t.HasBirthTime() {
			ctime = t.BirthTime()
		}
	}

	file := model.ObjThumb{
		Object: model.Object{
			Path:     filepath.Join(fullPath, f.Name()),
			Name:     f.Name(),
			Modified: f.ModTime(),
			Size:     size,
			IsFolder: isFolder,
			Ctime:    ctime,
		},
		Thumbnail: model.Thumbnail{
			Thumbnail: thumb,
		},
	}
	return &file
}

func (d *Local) GetMeta(ctx context.Context, path string) (model.Obj, error) {
	f, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	file := d.FileInfoToObj(f, path, path)
	//h := "123123"
	//if s, ok := f.(model.SetHash); ok && file.GetHash() == ("","") {
	//	s.SetHash(h,"SHA1")
	//}
	return file, nil
}
func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
	path = filepath.Join(d.GetRootPath(), path)
@ -125,10 +156,18 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
	if isFolder {
		size = 0
	}
	var ctime time.Time
	t, err := times.Stat(path)
	if err == nil {
		if t.HasBirthTime() {
			ctime = t.BirthTime()
		}
	}
	file := model.Object{
		Path:     path,
		Name:     f.Name(),
		Modified: f.ModTime(),
		Ctime:    ctime,
		Size:     size,
		IsFolder: isFolder,
	}
@ -147,13 +186,21 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
		"Content-Type": []string{"image/png"},
	}
	if thumbPath != nil {
		link.FilePath = thumbPath
		open, err := os.Open(*thumbPath)
		if err != nil {
			return nil, err
		}
		link.MFile = open
	} else {
		link.Data = io.NopCloser(buf)
		link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
		link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
		//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
	}
	} else {
		link.FilePath = &fullPath
		open, err := os.Open(fullPath)
		if err != nil {
			return nil, err
		}
		link.MFile = open
	}
	return &link, nil
}
@ -237,6 +284,10 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
	if err != nil {
		return err
	}
	err = os.Chtimes(fullPath, stream.ModTime(), stream.ModTime())
	if err != nil {
		log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
	}
	return nil
}
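Aside: the new Ctime field above comes from github.com/djherbis/times, which exposes creation ("birth") time on filesystems that record it. A minimal standalone check of that API, assuming go.mod exists in the working directory:

package main

import (
	"fmt"

	"github.com/djherbis/times"
)

func main() {
	t, err := times.Stat("go.mod")
	if err != nil {
		panic(err)
	}
	// birth time is platform-dependent, hence the HasBirthTime guard,
	// mirroring how FileInfoToObj leaves Ctime at its zero value otherwise
	if t.HasBirthTime() {
		fmt.Println("created:", t.BirthTime())
	} else {
		fmt.Println("filesystem does not record birth time")
	}
}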
@ -64,7 +64,7 @@ func readDir(dirname string) ([]fs.FileInfo, error) {
func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
	fullPath := file.GetPath()
	thumbPrefix := "alist_thumb_"
	thumbName := thumbPrefix + utils.GetMD5Encode(fullPath) + ".png"
	thumbName := thumbPrefix + utils.GetMD5EncodeStr(fullPath) + ".png"
	if d.ThumbCacheFolder != "" {
		// skip if the file is a thumbnail
		if strings.HasPrefix(file.GetName(), thumbPrefix) {
@ -91,7 +91,7 @@ func (d *Local) getThumb(file model.Obj) (*bytes.Buffer, *string, error) {
		srcBuf = imgBuf
	}

	image, err := imaging.Decode(srcBuf)
	image, err := imaging.Decode(srcBuf, imaging.AutoOrientation(true))
	if err != nil {
		return nil, nil, err
	}
@ -7,7 +7,6 @@ import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"
	"time"

@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
	if err != nil {
		return err
	}
	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
	tempFile, err := stream.CacheFullInTempFile()
	if err != nil {
		return err
	}
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()
	uploader := s3manager.NewUploader(s)
	input := &s3manager.UploadInput{
@ -4,7 +4,10 @@ import (
	"context"
	"errors"
	"fmt"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/rclone/rclone/lib/readers"
	"io"
	"time"

	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
@ -39,7 +42,7 @@ func (d *Mega) Drop(ctx context.Context) error {

func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	if node, ok := dir.(*MegaNode); ok {
		nodes, err := d.c.FS.GetChildren(node.Node)
		nodes, err := d.c.FS.GetChildren(node.n)
		if err != nil {
			return nil, err
		}
@ -53,7 +56,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
		return res, nil
	}
	log.Errorf("can't convert: %+v", dir)
	return nil, fmt.Errorf("unable to convert dir to mega node")
	return nil, fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
@ -64,77 +67,67 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {

func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if node, ok := file.(*MegaNode); ok {
		//link, err := d.c.Link(node.Node, true)

		//down, err := d.c.NewDownload(n.Node)
		//if err != nil {
		//	return nil, err
		//	return nil, fmt.Errorf("open download file failed: %w", err)
		//}
		//return &model.Link{URL: link}, nil
		down, err := d.c.NewDownload(node.Node)
		if err != nil {
			return nil, err
		}
		//u := down.GetResourceUrl()
		//u = strings.Replace(u, "http", "https", 1)
		//return &model.Link{URL: u}, nil
		r, w := io.Pipe()
		go func() {
			defer func() {
				_ = recover()
			}()
			log.Debugf("chunk size: %d", down.Chunks())
			var (
				chunk []byte
				err   error
			)
			for id := 0; id < down.Chunks(); id++ {
				chunk, err = down.DownloadChunk(id)
				if err != nil {
					log.Errorf("mega down: %+v", err)
					break
				}
				log.Debugf("id: %d, len: %d", id, len(chunk))
				//_, _, err = down.ChunkLocation(id)
				//if err != nil {
				//	log.Errorf("mega down: %+v", err)
				//	return
				//}
				//_, err = c.Write(chunk)
				if _, err = w.Write(chunk); err != nil {
					break
				}

		size := file.GetSize()
		var finalClosers utils.Closers
		resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
			length := httpRange.Length
			if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
				length = -1
			}
			err = w.CloseWithError(err)
			var down *mega.Download
			err := utils.Retry(3, time.Second, func() (err error) {
				down, err = d.c.NewDownload(node.n)
				return err
			})
			if err != nil {
				log.Errorf("mega down: %+v", err)
				return nil, fmt.Errorf("open download file failed: %w", err)
			}
		}()
		return &model.Link{Data: r}, nil
			oo := &openObject{
				ctx:  ctx,
				d:    down,
				skip: httpRange.Start,
			}
			finalClosers.Add(oo)

			return readers.NewLimitedReadCloser(oo, length), nil
		}
		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
		resultLink := &model.Link{
			RangeReadCloser: resultRangeReadCloser,
		}
		return resultLink, nil
	}
	return nil, fmt.Errorf("unable to convert dir to mega node")
	return nil, fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	if parentNode, ok := parentDir.(*MegaNode); ok {
		_, err := d.c.CreateDir(dirName, parentNode.Node)
		_, err := d.c.CreateDir(dirName, parentNode.n)
		return err
	}
	return fmt.Errorf("unable to convert dir to mega node")
	return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	if srcNode, ok := srcObj.(*MegaNode); ok {
		if dstNode, ok := dstDir.(*MegaNode); ok {
			return d.c.Move(srcNode.Node, dstNode.Node)
			return d.c.Move(srcNode.n, dstNode.n)
		}
	}
	return fmt.Errorf("unable to convert dir to mega node")
	return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	if srcNode, ok := srcObj.(*MegaNode); ok {
		return d.c.Rename(srcNode.Node, newName)
		return d.c.Rename(srcNode.n, newName)
	}
	return fmt.Errorf("unable to convert dir to mega node")
	return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -143,14 +136,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
	if node, ok := obj.(*MegaNode); ok {
		return d.c.Delete(node.Node, false)
		return d.c.Delete(node.n, false)
	}
	return fmt.Errorf("unable to convert dir to mega node")
	return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	if dstNode, ok := dstDir.(*MegaNode); ok {
		u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
		u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
		if err != nil {
			return err
		}
@ -182,7 +175,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
	_, err = u.Finish()
	return err
}
	return fmt.Errorf("unable to convert dir to mega node")
	return fmt.Errorf("unable to convert dir to mega n")
}

//func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@ -1,6 +1,7 @@
package mega

import (
	"github.com/alist-org/alist/v3/pkg/utils"
	"time"

	"github.com/alist-org/alist/v3/internal/model"
@ -8,29 +9,36 @@ import (
)

type MegaNode struct {
	*mega.Node
	n *mega.Node
}

//func (m *MegaNode) GetSize() int64 {
//	//TODO implement me
//	panic("implement me")
//}
//
//func (m *MegaNode) GetName() string {
//	//TODO implement me
//	panic("implement me")
//}
func (m *MegaNode) GetSize() int64 {
	return m.n.GetSize()
}

func (m *MegaNode) GetName() string {
	return m.n.GetName()
}

func (m *MegaNode) CreateTime() time.Time {
	return m.n.GetTimeStamp()
}

func (m *MegaNode) GetHash() utils.HashInfo {
	//Mega uses md5, but the original file hash can't be retrieved since the file is encrypted in the cloud
	return utils.HashInfo{}
}

func (m *MegaNode) ModTime() time.Time {
	return m.GetTimeStamp()
	return m.n.GetTimeStamp()
}

func (m *MegaNode) IsDir() bool {
	return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
	return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
}

func (m *MegaNode) GetID() string {
	return m.GetHash()
	return m.n.GetHash()
}

func (m *MegaNode) GetPath() string {
@ -1,3 +1,92 @@
package mega

import (
	"context"
	"fmt"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/t3rm1n4l/go-mega"
	"io"
	"sync"
	"time"
)

// helpers that are not defined in the Driver interface

// openObject represents a download in progress
type openObject struct {
	ctx    context.Context
	mu     sync.Mutex
	d      *mega.Download
	id     int
	skip   int64
	chunk  []byte
	closed bool
}

// get the next chunk
func (oo *openObject) getChunk(ctx context.Context) (err error) {
	if oo.id >= oo.d.Chunks() {
		return io.EOF
	}
	var chunk []byte
	err = utils.Retry(3, time.Second, func() (err error) {
		chunk, err = oo.d.DownloadChunk(oo.id)
		return err
	})
	if err != nil {
		return err
	}
	oo.id++
	oo.chunk = chunk
	return nil
}

// Read reads up to len(p) bytes into p.
func (oo *openObject) Read(p []byte) (n int, err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return 0, fmt.Errorf("read on closed file")
	}
	// Skip data at the start if requested
	for oo.skip > 0 {
		_, size, err := oo.d.ChunkLocation(oo.id)
		if err != nil {
			return 0, err
		}
		if oo.skip < int64(size) {
			break
		}
		oo.id++
		oo.skip -= int64(size)
	}
	if len(oo.chunk) == 0 {
		err = oo.getChunk(oo.ctx)
		if err != nil {
			return 0, err
		}
		if oo.skip > 0 {
			oo.chunk = oo.chunk[oo.skip:]
			oo.skip = 0
		}
	}
	n = copy(p, oo.chunk)
	oo.chunk = oo.chunk[n:]
	return n, nil
}

// Close closes the file - MAC errors are reported here
func (oo *openObject) Close() (err error) {
	oo.mu.Lock()
	defer oo.mu.Unlock()
	if oo.closed {
		return nil
	}
	err = utils.Retry(3, 500*time.Millisecond, func() (err error) {
		return oo.d.Finish()
	})
	if err != nil {
		return fmt.Errorf("failed to finish download: %w", err)
	}
	oo.closed = true
	return nil
}
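Aside: both getChunk and Close lean on utils.Retry, alist's small fixed-attempt retry helper. A toy stand-in with the same shape, to show how the calls above behave (the retry function here is illustrative, not the alist implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry runs fn up to n times, sleeping between attempts, and returns
// the last error if all attempts fail
func retry(n int, sleep time.Duration, fn func() error) (err error) {
	for i := 0; i < n; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(sleep)
	}
	return err
}

func main() {
	attempts := 0
	err := retry(3, 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}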
@ -6,16 +6,19 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/errgroup"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
"github.com/foxxorcat/mopan-sdk-go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type MoPan struct {
|
||||
@ -23,7 +26,8 @@ type MoPan struct {
|
||||
Addition
|
||||
client *mopan.MoClient
|
||||
|
||||
userID string
|
||||
userID string
|
||||
uploadThread int
|
||||
}
|
||||
|
||||
func (d *MoPan) Config() driver.Config {
|
||||
@ -35,6 +39,10 @@ func (d *MoPan) GetAddition() driver.Additional {
|
||||
}
|
||||
|
||||
func (d *MoPan) Init(ctx context.Context) error {
|
||||
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
|
||||
if d.uploadThread < 1 || d.uploadThread > 32 {
|
||||
d.uploadThread, d.UploadThread = 3, "3"
|
||||
}
|
||||
login := func() error {
|
||||
data, err := d.client.Login(d.Phone, d.Password)
|
||||
if err != nil {
|
||||
@ -47,9 +55,19 @@ func (d *MoPan) Init(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
d.userID = info.UserID
|
||||
log.Debugf("[mopan] Phone: %s UserCloudStorageRelations: %+v", d.Phone, data.UserCloudStorageRelations)
|
||||
cloudCircleApp, _ := d.client.QueryAllCloudCircleApp()
|
||||
log.Debugf("[mopan] Phone: %s CloudCircleApp: %+v", d.Phone, cloudCircleApp)
|
||||
if d.RootFolderID == "" {
|
||||
for _, userCloudStorage := range data.UserCloudStorageRelations {
|
||||
if userCloudStorage.Path == "/文件" {
|
||||
d.RootFolderID = userCloudStorage.FolderID
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
d.client = mopan.NewMoClient().
|
||||
d.client = mopan.NewMoClientWithRestyClient(base.NewRestyClient()).
|
||||
SetRestyClient(base.RestyClient).
|
||||
SetOnAuthorizationExpired(func(_ error) error {
|
||||
err := login()
|
||||
@ -87,6 +105,7 @@ func (d *MoPan) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
||||
break
|
||||
}
|
||||
|
||||
log.Debugf("[mopan] Phone: %s folder: %+v", d.Phone, data.FileListAO.FolderList)
|
||||
files = append(files, utils.MustSliceConvert(data.FileListAO.FolderList, folderToObj)...)
|
||||
files = append(files, utils.MustSliceConvert(data.FileListAO.FileList, fileToObj)...)
|
||||
}
|
||||
@ -99,6 +118,15 @@ func (d *MoPan) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data.DownloadUrl = strings.Replace(strings.ReplaceAll(data.DownloadUrl, "&", "&"), "http://", "https://", 1)
|
||||
res, err := base.NoRedirectClient.R().SetContext(ctx).Head(data.DownloadUrl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode() == 302 {
|
||||
data.DownloadUrl = res.Header().Get("location")
|
||||
}
|
||||
|
||||
return &model.Link{
|
||||
URL: data.DownloadUrl,
|
||||
}, nil
|
||||
@@ -212,68 +240,88 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	file, err := utils.CreateTempFile(stream)
+	file, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = file.Close()
-		_ = os.Remove(file.Name())
-	}()

-	initUpdload, err := d.client.InitMultiUpload(ctx, mopan.UpdloadFileParam{
+	// step.1
+	uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{
 		ParentFolderId: dstDir.GetID(),
 		FileName:       stream.GetName(),
 		FileSize:       stream.GetSize(),
 		File:           file,
-	}, mopan.WarpParamOption(
-		mopan.ParamOptionShareFile(d.CloudID),
-	))
+	})
 	if err != nil {
 		return nil, err
 	}

-	if !initUpdload.FileDataExists {
-		parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfo)
+	// try to resume progress from a previous interrupted upload
+	initUpdload, ok := base.GetUploadProgress[*mopan.InitMultiUploadData](d, d.client.Authorization, uploadPartData.FileMd5)
+	if !ok {
+		// step.2
+		initUpdload, err = d.client.InitMultiUpload(ctx, *uploadPartData, mopan.WarpParamOption(
+			mopan.ParamOptionShareFile(d.CloudID),
+		))
 		if err != nil {
 			return nil, err
 		}
-		d.client.CloudDiskStartBusiness()
 	}

+	if !initUpdload.FileDataExists {
+		utils.Log.Error(d.client.CloudDiskStartBusiness())
+
+		threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+			retry.Attempts(3),
+			retry.Delay(time.Second),
+			retry.DelayType(retry.BackOffDelay))
+
+		// step.3
+		parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
+		if err != nil {
+			return nil, err
+		}
+
 		for i, part := range parts {
-			if utils.IsCanceled(ctx) {
-				return nil, ctx.Err()
+			if utils.IsCanceled(upCtx) {
+				break
 			}
+			i, part, byteSize := i, part, initUpdload.PartSize
+			if part.PartNumber == uploadPartData.PartTotal {
+				byteSize = initUpdload.LastPartSize
+			}

-			err := retry.Do(func() error {
-				if _, err := file.Seek(int64(part.PartNumber-1)*int64(initUpdload.PartSize), io.SeekStart); err != nil {
-					return retry.Unrecoverable(err)
-				}
-
-				req, err := part.NewRequest(ctx, io.LimitReader(file, int64(initUpdload.PartSize)))
+			// step.4
+			threadG.Go(func(ctx context.Context) error {
+				req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
 				if err != nil {
 					return err
 				}

 				resp, err := base.HttpClient.Do(req)
 				if err != nil {
 					return err
 				}

 				resp.Body.Close()
 				if resp.StatusCode != http.StatusOK {
 					return fmt.Errorf("upload err,code=%d", resp.StatusCode)
 				}
+				up(100 * int(threadG.Success()) / len(parts))
+				initUpdload.PartInfos[i] = ""
 				return nil
-			},
-				retry.Context(ctx),
-				retry.Attempts(3),
-				retry.Delay(time.Second),
-				retry.MaxDelay(5*time.Second))
-			if err != nil {
-				return nil, err
-			}
-			up(100 * (i + 1) / len(parts))
+			})
 		}
+		if err = threadG.Wait(); err != nil {
+			if errors.Is(err, context.Canceled) {
+				initUpdload.PartInfos = utils.SliceFilter(initUpdload.PartInfos, func(s string) bool { return s != "" })
+				base.SaveUploadProgress(d, initUpdload, d.client.Authorization, uploadPartData.FileMd5)
+			}
+			return nil, err
+		}
 	}
+	// step.5
 	uFile, err := d.client.CommitMultiUploadFile(initUpdload.UploadFileID, nil)
 	if err != nil {
 		return nil, err
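The rewritten `Put` replaces the shared `Seek`+`LimitReader` reader with one `io.NewSectionReader` per part, so `d.uploadThread` workers can read disjoint windows of the cached file concurrently, and it blanks finished entries in `PartInfos` so an interrupted upload can resume via `base.SaveUploadProgress`. A compressed sketch of the fan-out using `golang.org/x/sync/errgroup` in place of alist's internal `errgroup` wrapper (an approximation, not the driver's code):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"os"

	"golang.org/x/sync/errgroup"
)

// uploadParts fans out per-part uploads, each reading a disjoint window of
// the temp file through io.NewSectionReader, capped at `threads` workers.
func uploadParts(ctx context.Context, f *os.File, size, partSize int64, threads int,
	uploadPart func(ctx context.Context, r io.Reader, n int64) error) error {

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(threads)

	for off := int64(0); off < size; off += partSize {
		off := off
		n := partSize
		if off+n > size {
			n = size - off // last part may be short, like LastPartSize above
		}
		g.Go(func() error {
			return uploadPart(ctx, io.NewSectionReader(f, off, n), n)
		})
	}
	return g.Wait()
}

func main() {
	f, _ := os.CreateTemp("", "demo")
	defer os.Remove(f.Name())
	f.WriteString("0123456789abcdef")

	err := uploadParts(context.Background(), f, 16, 5, 3,
		func(_ context.Context, r io.Reader, n int64) error {
			b := make([]byte, n)
			io.ReadFull(r, b)
			fmt.Printf("part %q\n", b) // stand-in for the signed PUT request
			return nil
		})
	fmt.Println("err:", err)
}
```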
@@ -9,7 +9,7 @@ type Addition struct {
 	Phone    string `json:"phone" required:"true"`
 	Password string `json:"password" required:"true"`

-	RootFolderID string `json:"root_folder_id" default:"-11" required:"true" help:"be careful when using the -11 value, some operations may cause system errors"`
+	RootFolderID string `json:"root_folder_id" default:""`

 	CloudID string `json:"cloud_id"`
@@ -17,6 +17,8 @@ type Addition struct {
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

 	DeviceInfo string `json:"device_info"`
+
+	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 }

 func (a *Addition) GetRootId() string {
@@ -24,7 +26,7 @@ func (a *Addition) GetRootId() string {
 }

 var config = driver.Config{
-	Name:        "MoPan",
+	Name: "MoPan",
 	// DefaultRoot: "root, / or other",
 	CheckStatus: true,
 	Alert:       "warning|This network disk may store your password in clear text. Please set your password carefully",
@@ -4,6 +4,7 @@ import (
 	"time"

 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/foxxorcat/mopan-sdk-go"
 )
@@ -14,6 +15,8 @@ func fileToObj(f mopan.File) model.Obj {
 			Name:     f.Name,
 			Size:     int64(f.Size),
 			Modified: time.Time(f.LastOpTime),
+			Ctime:    time.Time(f.CreateDate),
+			HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
 		},
 		Thumbnail: model.Thumbnail{
 			Thumbnail: f.Icon.SmallURL,
@@ -26,6 +29,7 @@ func folderToObj(f mopan.Folder) model.Obj {
 		ID:       string(f.ID),
 		Name:     f.Name,
 		Modified: time.Time(f.LastOpTime),
+		Ctime:    time.Time(f.CreateDate),
 		IsFolder: true,
 	}
 }
@@ -37,6 +41,7 @@ func CloneObj(o model.Obj, newID, newName string) model.Obj {
 			Name:     newName,
 			IsFolder: true,
 			Modified: o.ModTime(),
+			Ctime:    o.CreateTime(),
 		}
 	}
@@ -50,6 +55,8 @@ func CloneObj(o model.Obj, newID, newName string) model.Obj {
 			Name:     newName,
 			Size:     o.GetSize(),
 			Modified: o.ModTime(),
+			Ctime:    o.CreateTime(),
+			HashInfo: o.GetHash(),
 		},
 		Thumbnail: model.Thumbnail{
 			Thumbnail: thumb,
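These hunks only add fields (`Ctime`, `HashInfo`) to the converted objects, but they feed the slice-conversion helper used in `List` above. An illustrative reimplementation of that shape, with toy types standing in for `mopan.File` and `model.Obj`:

```go
package main

import "fmt"

// sliceConvert maps []T to []R with fn — the same shape as
// utils.MustSliceConvert used in List (illustrative reimplementation).
func sliceConvert[T, R any](in []T, fn func(T) R) []R {
	out := make([]R, 0, len(in))
	for _, v := range in {
		out = append(out, fn(v))
	}
	return out
}

type apiFile struct{ Name, Md5 string }
type obj struct{ Name, Hash string }

func main() {
	files := []apiFile{{"a.txt", "d41d8"}, {"b.bin", "9e107"}}
	objs := sliceConvert(files, func(f apiFile) obj {
		return obj{Name: f.Name, Hash: f.Md5} // hash carried like HashInfo above
	})
	fmt.Println(objs)
}
```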
@@ -196,7 +196,8 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
 		if err != nil {
 			return err
 		}
-		if res.StatusCode != 201 && res.StatusCode != 202 {
+		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
+		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
 			data, _ := io.ReadAll(res.Body)
 			res.Body.Close()
 			return errors.New(string(data))
@@ -187,7 +187,8 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
 		if err != nil {
 			return err
 		}
-		if res.StatusCode != 201 && res.StatusCode != 202 {
+		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
+		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
 			data, _ := io.ReadAll(res.Body)
 			res.Body.Close()
 			return errors.New(string(data))
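Both OneDrive hunks accept `200` in addition to `201`/`202` because, per the linked createUploadSession documentation, the final fragment of an upload session may answer `200 OK` (existing item replaced) or `201 Created`, while intermediate fragments answer `202 Accepted`. The same check expressed as a tiny helper (illustrative only, not the driver's code):

```go
package main

import (
	"fmt"
	"net/http"
)

// uploadSessionOK reports whether an upload-session PUT succeeded:
// 202 for intermediate fragments, 200/201 for the final one.
func uploadSessionOK(code int) bool {
	switch code {
	case http.StatusOK, http.StatusCreated, http.StatusAccepted:
		return true
	}
	return false
}

func main() {
	for _, c := range []int{200, 201, 202, 409} {
		fmt.Println(c, uploadSessionOK(c))
	}
}
```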
@@ -3,15 +3,14 @@ package pikpak
 import (
 	"context"
 	"fmt"
-	"io"
 	"net/http"
-	"os"
 	"strings"

 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -124,23 +123,20 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	// cal gcid
-	sha1Str, err := getGcid(tempFile, stream.GetSize())
-	if err != nil {
-		return err
-	}
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
+	hi := stream.GetHash()
+	sha1Str := hi.GetHash(hash_extend.GCID)
+	if len(sha1Str) < hash_extend.GCID.Width {
+		tFile, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+
+		sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+		if err != nil {
+			return err
+		}
 	}

 	var resp UploadTaskData
 	res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
|
||||
input := &s3manager.UploadInput{
|
||||
Bucket: ¶ms.Bucket,
|
||||
Key: ¶ms.Key,
|
||||
Body: tempFile,
|
||||
Body: stream,
|
||||
}
|
||||
_, err = uploader.UploadWithContext(ctx, input)
|
||||
return err
|
||||
|
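The PikPak rewrite stops unconditionally spooling the stream to a temp file: it reuses a GCID hash already attached to the stream, caches-and-hashes only when that hash is missing or too short, and then hands the stream itself to the S3 uploader. The compute-only-if-missing shape, sketched with SHA-1 standing in for the repo's GCID helper (assumption: any hash illustrates the flow):

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// ensureHash returns a previously known hash if present, otherwise reads
// the content once to compute it — mirroring the GetHash / HashFile
// fallback in the PikPak Put above.
func ensureHash(known string, r io.Reader) (string, error) {
	if len(known) == sha1.Size*2 { // already have a full-width hex hash
		return known, nil
	}
	h := sha1.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// No cached hash: content is read and hashed.
	got, _ := ensureHash("", strings.NewReader("hello"))
	fmt.Println(got)

	// Cached hash: the reader is never touched.
	got, _ = ensureHash(got, strings.NewReader("ignored"))
	fmt.Println(got)
}
```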
Some files were not shown because too many files have changed in this diff.