Compare commits

...

87 Commits

Author SHA1 Message Date
3b90f591b5 Squashed commit of the following:
commit 65c5ec0c34
Author: itsHenry <2671230065@qq.com>
Date:   Sat Nov 4 13:35:09 2023 +0800

    feat(cloudreve): folder size count and switch (#5457 close #5395)

commit a6325967d0
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Mon Oct 30 15:11:20 2023 +0800

    fix(deps): update module github.com/charmbracelet/lipgloss to v0.9.1 (#5234)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit 4dff49470a
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Mon Oct 30 15:10:36 2023 +0800

    fix(deps): update golang.org/x/exp digest to 7918f67 (#5366)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit cc86d6f3d1
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Sun Oct 29 14:45:55 2023 +0800

    fix(deps): update module golang.org/x/net to v0.17.0 [security] (#5370)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit c0f9c8ebaf
Author: Andy Hsu <i@nn.ci>
Date:   Thu Oct 26 19:21:09 2023 +0800

    feat: add ignore direct link params (close #5434)
2023-11-05 22:28:31 +08:00
5657b12b20 Merge branch 'main' into refactor/offline-download 2023-10-20 21:17:32 +08:00
aba8bc0ec2 fix: adapt update progress type 2023-10-20 21:15:37 +08:00
ce6e486666 Squashed commit of the following:
commit 4fc0a77565
Author: Andy Hsu <i@nn.ci>
Date:   Fri Oct 20 21:06:25 2023 +0800

    fix(baidu_netdisk): upload file > 4GB (close #5392)

commit aaffaee2b5
Author: gmugu <94156510@qq.com>
Date:   Thu Oct 19 19:17:53 2023 +0800

    perf(webdav): support request with cookies (#5391)

commit 8ef8023c20
Author: NewbieOrange <NewbieOrange@users.noreply.github.com>
Date:   Thu Oct 19 19:17:09 2023 +0800

    fix(aliyundrive_open): upload progress for normal upload (#5398)

commit cdfbe6dcf2
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Wed Oct 18 16:27:07 2023 +0800

    fix: hash gcid empty file (#5394)

commit 94d028743a
Author: Andy Hsu <i@nn.ci>
Date:   Sat Oct 14 13:17:51 2023 +0800

    ci: remove `pr-welcome` label when close issue [skip ci]

commit 7f7335435c
Author: itsHenry <2671230065@qq.com>
Date:   Sat Oct 14 13:12:46 2023 +0800

    feat(cloudreve): support thumbnail (#5373 close #5348)

    * feat(cloudreve): support thumbnail

    * chore: remove unnecessary code

commit b9e192b29c
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Thu Oct 12 20:57:12 2023 +0800

    fix(115): limit request rate (#5367 close #5275)

    * fix(115):limit request rate

    * chore(115): fix unit of `limit_rate`

    ---------

    Co-authored-by: Andy Hsu <i@nn.ci>

commit 69a98eaef6
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Wed Oct 11 22:01:55 2023 +0800

    fix(deps): update module github.com/aliyun/aliyun-oss-go-sdk to v2.2.9+incompatible (#5141)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit 1ebc96a4e5
Author: Andy Hsu <i@nn.ci>
Date:   Tue Oct 10 18:32:00 2023 +0800

    fix(wopan): fatal error concurrent map writes (close #5352)

commit 66e2324cac
Author: Andy Hsu <i@nn.ci>
Date:   Tue Oct 10 18:23:11 2023 +0800

    chore(deps): upgrade dependencies

commit 7600dc28df
Author: Andy Hsu <i@nn.ci>
Date:   Tue Oct 10 18:13:58 2023 +0800

    fix(aliyundrive_open): change default api to raw server (close #5358)

commit 8ef89ad0a4
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Tue Oct 10 18:08:27 2023 +0800

    fix(baidu_netdisk): hash and `error 2` (#5356)

    * fix(baidu):hash and error:2

    * fix:invalid memory address

commit 35d672217d
Author: jeffmingup <1960588251@qq.com>
Date:   Sun Oct 8 19:29:45 2023 +0800

    fix(onedrive_app): incorrect api on `_accessToken` (#5346)

commit 1a283bb272
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Fri Oct 6 16:04:39 2023 +0800

    feat(google_drive): add `hash_info`, `ctime`, `thumbnail` (#5334)

commit a008f54f4d
Author: nkh0472 <67589323+nkh0472@users.noreply.github.com>
Date:   Thu Oct 5 13:10:51 2023 +0800

    docs: minor language improvements (#5329) [skip ci]
2023-10-20 21:14:15 +08:00
4fc0a77565 fix(baidu_netdisk): upload file > 4GB (close #5392) 2023-10-20 21:06:25 +08:00
aaffaee2b5 perf(webdav): support request with cookies (#5391) 2023-10-19 19:17:53 +08:00
8ef8023c20 fix(aliyundrive_open): upload progress for normal upload (#5398) 2023-10-19 19:17:09 +08:00
cdfbe6dcf2 fix: hash gcid empty file (#5394) 2023-10-18 16:27:07 +08:00
94d028743a ci: remove pr-welcome label when close issue [skip ci] 2023-10-14 13:17:51 +08:00
7f7335435c feat(cloudreve): support thumbnail (#5373 close #5348)
* feat(cloudreve): support thumbnail

* chore: remove unnecessary code
2023-10-14 13:12:46 +08:00
b9e192b29c fix(115): limit request rate (#5367 close #5275)
* fix(115):limit request rate

* chore(115): fix unit of `limit_rate`

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-10-12 20:57:12 +08:00
69a98eaef6 fix(deps): update module github.com/aliyun/aliyun-oss-go-sdk to v2.2.9+incompatible (#5141)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-11 22:01:55 +08:00
1ebc96a4e5 fix(wopan): fatal error concurrent map writes (close #5352) 2023-10-10 18:32:00 +08:00
66e2324cac chore(deps): upgrade dependencies 2023-10-10 18:23:11 +08:00
7600dc28df fix(aliyundrive_open): change default api to raw server (close #5358) 2023-10-10 18:13:58 +08:00
8ef89ad0a4 fix(baidu_netdisk): hash and error 2 (#5356)
* fix(baidu):hash and error:2

* fix:invalid memory address
2023-10-10 18:08:27 +08:00
35d672217d fix(onedrive_app): incorrect api on _accessToken (#5346) 2023-10-08 19:29:45 +08:00
9fb9efb704 chore: fix typo 2023-10-06 22:32:05 +08:00
1a283bb272 feat(google_drive): add hash_info, ctime, thumbnail (#5334) 2023-10-06 16:04:39 +08:00
1490da8b53 wip: adapt qBittorrent 2023-10-06 16:02:29 +08:00
12dfb60a66 wip: use tool manager 2023-10-05 22:13:02 +08:00
0380d7fff9 wip: use items in offline_download 2023-10-05 13:38:35 +08:00
a008f54f4d docs: minor language improvements (#5329) [skip ci] 2023-10-05 13:10:51 +08:00
0acb2d6073 wip: adapt aria2 2023-10-04 22:23:45 +08:00
ea9a3432ab refactor: change type of percentage to float64 2023-10-04 20:59:11 +08:00
7db3975b18 wip: refactor offline download (#5331)
* base tool

* working: aria2
2023-10-04 16:27:08 +08:00
3d7f79cba8 docs: change domain of contributors image [skip ci] 2023-10-03 17:34:24 +08:00
9ff83a7950 feat: add header to meta (ref #5317) 2023-10-02 16:43:29 +08:00
e719a1a456 feat(sso): custom username key for OIDC (close #5169) 2023-10-02 14:42:40 +08:00
40a6fcbdff ci: do not stale issue with working or pr-welcome label [skip ci] 2023-10-02 14:13:11 +08:00
0fd51646f6 feat(onedrive): custom host for download link (close #5310) 2023-10-02 14:07:47 +08:00
e8958019d9 fix(115): allow use proxy directly (close #5324) 2023-10-02 14:00:13 +08:00
e1ef690784 fix(terabox): encode parameters for filemanager api (#5308) 2023-10-01 16:58:29 +08:00
4024050dd0 chore: fix typo (#5316) 2023-10-01 16:58:00 +08:00
eb918658f0 fix(deps): update module github.com/ipfs/go-ipfs-api to v0.7.0 (#5247)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-30 22:58:19 +08:00
fb13dae136 feat(crypt): optional pre-generated thumbnails (#5284) 2023-09-27 13:57:10 +08:00
6b67a36d63 fix(terabox): auto refresh JsToken (close #5277) 2023-09-25 16:38:05 +08:00
a64dd4885e fix(139): fixed time zone (close #5263) 2023-09-22 16:54:16 +08:00
0f03a747d8 ci: cancel previous workflow run 2023-09-22 16:53:07 +08:00
30977cdc6d feat: sso compatibility mode (#5260) 2023-09-22 16:45:51 +08:00
106cf720c1 fix(baidu_netdisk): retry logic in request (close #5262) 2023-09-22 16:27:44 +08:00
882112ed1c feat: add hash_info field to /fs/get (close #5259) 2023-09-22 15:20:04 +08:00
2a6ab77295 fix(115): data race in Link (#5253) 2023-09-21 13:39:07 +08:00
f0981a0c8d chore(virtual): implement the driver interface with result 2023-09-20 09:02:56 +08:00
57eea4db17 fix(deps): update module github.com/go-resty/resty/v2 to v2.8.0 (#5244)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-20 08:51:34 +08:00
234852ca61 fix(deps): update module github.com/pkg/sftp to v1.13.6 (#5041)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-19 20:02:42 +08:00
809105b67e fix(deps): update module github.com/blevesearch/bleve/v2 to v2.3.10 (#5232)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-17 15:57:29 +08:00
02e8c31506 fix(deps): update golang.org/x/exp digest to 9212866 (#5205)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-16 23:21:42 +08:00
19b39a5c04 fix(onedrive): overwrite upload big file (close #5217 in #5218)
See https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
2023-09-14 13:38:07 +08:00
28e2731594 fix: clear cache recursively on deleting the folder (close #5209) 2023-09-13 16:06:17 +08:00
b1a279cbcc feat(139): implement MoveResult interface (close #5130) 2023-09-13 15:56:13 +08:00
352a6a741a feat(webdav): support copy directly without task (close #5206) 2023-09-13 15:45:57 +08:00
109015567a fix(deps): update module golang.org/x/oauth2 to v0.12.0 (#5058)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-12 12:52:48 +08:00
9e0fa77ca2 feat: add 123 link driver (close #4924) 2023-09-10 16:50:10 +08:00
335b11c698 chore: implement the driver interface with obj return [skip ci] 2023-09-08 15:25:49 +08:00
8e433355e6 fix(terabox): missing JsToken field on request (close #5189) 2023-09-08 15:18:56 +08:00
3504f017b9 fix(upload): memory leak on form upload as task (close #5185) 2023-09-07 15:51:52 +08:00
cd2f8077fa chore: enable all pprof handle on debug 2023-09-07 14:56:50 +08:00
d5b68a91d2 fix(webdav): optimize HEAD request (close #5182) 2023-09-06 16:32:51 +08:00
623c7dcea5 fix(189pc): get real link after redirect 2023-09-06 16:02:28 +08:00
ecbd6d86cd fix(lanzou): sub file in share folder need pwd (#5184) 2023-09-06 14:48:12 +08:00
7200344ace feat: adapt hash feature for some drivers (#5180)
* feat(pikpak,thunder): adaptation gcid hash

* chore(weiyun): add note

* feat(baidu_netdisk): adaptation rapid

* feat(baidu_photo): adaptation hash

* feat(189pc): adaptation rapid

* feat(mopan):adaptation ctime

* feat(139):adaptation hash and ctime

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-09-06 14:46:35 +08:00
b313ac4daa fix(crypt): fix 139cloud hack (#5178)
(cherry picked from commit 18bf64af47e58cc69cdd2e598de9c19538a7bf78)
2023-09-06 14:12:01 +08:00
f2f312b43a fix: http response body not close on status >= 400 (close #5163) 2023-09-05 15:46:16 +08:00
6f6d20e1ba fix: force_https not take effect on noRoute (close #5167) 2023-09-05 13:05:46 +08:00
3231c3d930 perf(db): release database before exit 2023-09-05 13:04:27 +08:00
b604e21c69 feat(webdav): support http chunked request (close #5161 in #5162)
But we do not recommend not adding the content-length header when putting files
2023-09-05 13:03:29 +08:00
3c66db9845 ci: split release actions 2023-09-03 22:57:18 +08:00
f6ab1f7f61 perf(ftp): non use SIZE FTP command (close #5150) 2023-09-03 18:47:32 +08:00
8e40465e86 fix(aliyundrive_open): date format on uploading (#5151)
(cherry picked from commit 88f815979ac91caa8bc425a2ff9a18bbd8a2e736)
2023-09-03 18:12:05 +08:00
37dffd0fce feat(crypt): customize filename_encoding (#5148)
close #5109
close #5080
2023-09-03 18:06:44 +08:00
e7c0d94b44 fix: form upload when ticked As A Task (#5145) 2023-09-03 15:40:40 +08:00
8102142007 fix(deps): update github.com/orzogc/fake115uploader digest to 58f9eb7 (#5133)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-02 14:50:06 +08:00
7c6dec5d47 fix(deps): update module 115driver to v1.0.16 (close #5117 in #5120)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-01 14:31:47 +08:00
dd10c0c5d0 chore(aliyundrive_open): print resp content on refresh token (close #5129) 2023-08-31 18:43:25 +08:00
34fadecc2c fix(ftp): dead lock on Read (close #5128) 2023-08-31 15:10:47 +08:00
cb8867fcc1 fix(deps): update module github.com/google/uuid to v1.3.1 (#5066)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-30 19:30:41 +08:00
092ed06833 feat(uss): add AntiTheftChainToken field (#5115)
* feat(uss): add AntiTheftChainToken; fix link func

* feat(uss): optimize _upt generation
2023-08-30 15:16:26 +08:00
6308f1c35d fix: updateTime, createTime and HashInfo (#5111) 2023-08-29 13:31:24 +08:00
ce10c9f120 fix: temp file not close and incorrect WebPutAsTask 2023-08-28 18:18:02 +08:00
6c4736fc8f fix: allow no Last-Modified on upload api 2023-08-28 16:42:03 +08:00
b301b791c7 fix(local): set create and modified time for new file (close #4938) 2023-08-27 23:05:13 +08:00
19d34e2eb8 feat: receive lastModified from upload api 2023-08-27 23:03:09 +08:00
a3748af772 feat: misc improvements about upload/copy/hash (#5045)
general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: cross-storage rapid-upload support
general: enhance upload to avoid local temp file if possible
general: replace readseekcloser with File interface to speed upstream operations
feat(aliyun_open): same as above
feat(crypt): add hack for 139cloud

Close #4934 
Close #4819 

baidu_netdisk needs to improve the upload code to support rapid-upload
2023-08-27 21:14:23 +08:00
9b765ef696 chore: remove README.md executable permission (close #5097 in #5100) 2023-08-27 14:35:03 +08:00
8f493cccc4 fix(mopan): parameter error (#5091) 2023-08-25 14:10:05 +08:00
31a033dff1 fix(lanzou): download cannot find data (#5088) 2023-08-24 21:56:20 +08:00
176 changed files with 4690 additions and 1354 deletions

.github/stale.yml vendored
View File

@ -6,6 +6,8 @@ daysUntilClose: 20
 exemptLabels:
   - accepted
   - security
+  - working
+  - pr-welcome
 # Label to use when marking an issue as stale
 staleLabel: stale
 # Comment to post when marking an issue as stale. Set to `false` to disable

View File

@ -11,6 +11,10 @@ on:
       - 'cmd/lang.go'
   workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 jobs:
   auto_lang:
     strategy:

View File

@ -6,6 +6,10 @@ on:
   pull_request:
     branches: [ 'main' ]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 jobs:
   build:
     strategy:

View File

@ -4,6 +4,10 @@ on:
   push:
     branches: [ main ]
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 jobs:
   build_docker:
     name: Build docker

View File

@ -14,4 +14,4 @@ jobs:
           actions: 'remove-labels'
           token: ${{ secrets.GITHUB_TOKEN }}
           issue-number: ${{ github.event.issue.number }}
-          labels: 'working'
+          labels: 'working,pr-welcome'

View File

@ -0,0 +1,34 @@
name: release_linux_musl

on:
  release:
    types: [ published ]

jobs:
  release_linux_musl:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.20' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release linux_musl

      - name: Upload assets
        uses: softprops/action-gh-release@v1
        with:
          files: build/compress/*

View File

@ -5,7 +5,7 @@ on:
     types: [ published ]

 jobs:
-  release_arm:
+  release_linux_musl_arm:
     strategy:
       matrix:
         platform: [ ubuntu-latest ]

README.md Executable file → Normal file
View File

@ -43,7 +43,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]

 ## Features

-- [x] Multiple storage
+- [x] Multiple storages
   - [x] Local storage
   - [x] [Aliyundrive](https://www.aliyundrive.com/)
   - [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
@ -86,7 +86,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
 - [x] Protected routes (password protection and authentication)
 - [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
 - [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
-- [x] Cloudflare workers proxy
+- [x] Cloudflare Workers proxy
 - [x] File/Folder package download
 - [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
 - [x] Offline download
@ -103,7 +103,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]

 ## Discussion

-Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature request only.**
+Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**

 ## Sponsor
@ -120,14 +120,14 @@ https://alist.nn.ci/guide/sponsor.html

 Thanks goes to these wonderful people:

-[![Contributors](http://contributors.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
+[![Contributors](http://contrib.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)

 ## License

 The `AList` is open-source software licensed under the AGPL-3.0 license.

 ## Disclaimer

-- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
+- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning Golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
 - This program is implemented by calling the official sdk/interface, without destroying the official interface behavior;
 - This program only does 302 redirect/traffic forwarding, and does not intercept, store, or tamper with any user data;
 - Before using this program, you should understand and bear the corresponding risks, including but not limited to account ban, download speed limit, etc., which is none of this program's business;

View File

@ -118,7 +118,7 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我

 Thanks goes to these wonderful people:

-[![Contributors](http://contributors.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
+[![Contributors](http://contrib.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)

 ## 许可

View File

@ -120,7 +120,7 @@ https://alist.nn.ci/guide/sponsor.html

 これらの素晴らしい人々に感謝します:

-[![Contributors](http://contributors.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
+[![Contributors](http://contrib.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)

 ## ライセンス

View File

@ -89,6 +89,18 @@ BuildDocker() {
 }

 BuildRelease() {
+  rm -rf .git/
+  mkdir -p "build"
+  BuildWinArm64 ./build/alist-windows-arm64.exe
+  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
+  # why? Because some target platforms seem to have issues with upx compression
+  upx -9 ./alist-linux-amd64
+  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
+  upx -9 ./alist-windows-amd64-upx.exe
+  mv alist-* build
+}
+
+BuildReleaseLinuxMusl() {
   rm -rf .git/
   mkdir -p "build"
   muslflags="--extldflags '-static -fpic' $ldflags"
@ -112,13 +124,6 @@ BuildRelease() {
     export CGO_ENABLED=1
     go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
   done
-  BuildWinArm64 ./build/alist-windows-arm64.exe
-  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
-  # why? Because some target platforms seem to have issues with upx compression
-  upx -9 ./alist-linux-amd64
-  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
-  upx -9 ./alist-windows-amd64-upx.exe
-  mv alist-* build
 }

 BuildReleaseLinuxMuslArm() {
@ -192,6 +197,9 @@ elif [ "$1" = "release" ]; then
   elif [ "$2" = "linux_musl_arm" ]; then
     BuildReleaseLinuxMuslArm
     MakeRelease "md5-linux-musl-arm.txt"
+  elif [ "$2" = "linux_musl" ]; then
+    BuildReleaseLinuxMusl
+    MakeRelease "md5-linux-musl.txt"
   else
     BuildRelease
     MakeRelease "md5.txt"

View File

@ -19,6 +19,7 @@ var AdminCmd = &cobra.Command{
     Short: "Show admin user's info and some operations about admin user's password",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         admin, err := op.GetAdmin()
         if err != nil {
             utils.Log.Errorf("failed get admin user: %+v", err)
@ -57,6 +58,7 @@ var ShowTokenCmd = &cobra.Command{
     Short: "Show admin token",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         token := setting.GetStr(conf.Token)
         utils.Log.Infof("Admin token: %s", token)
     },
@ -64,6 +66,7 @@ var ShowTokenCmd = &cobra.Command{

 func setAdminPassword(pwd string) {
     Init()
+    defer Release()
     admin, err := op.GetAdmin()
     if err != nil {
         utils.Log.Errorf("failed get admin user: %+v", err)

View File

@ -15,6 +15,7 @@ var Cancel2FACmd = &cobra.Command{
     Short: "Delete 2FA of admin user",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         admin, err := op.GetAdmin()
         if err != nil {
             utils.Log.Errorf("failed to get admin user: %+v", err)

View File

@ -7,6 +7,7 @@ import (
     "github.com/alist-org/alist/v3/internal/bootstrap"
     "github.com/alist-org/alist/v3/internal/bootstrap/data"
+    "github.com/alist-org/alist/v3/internal/db"
     "github.com/alist-org/alist/v3/pkg/utils"
     log "github.com/sirupsen/logrus"
 )
@ -19,6 +20,10 @@ func Init() {
     bootstrap.InitIndex()
 }

+func Release() {
+    db.Close()
+}
+
 var pid = -1
 var pidFile string

View File

@ -5,6 +5,8 @@ import (
     "os"

     "github.com/alist-org/alist/v3/cmd/flags"
+    _ "github.com/alist-org/alist/v3/drivers"
+    _ "github.com/alist-org/alist/v3/internal/offline_download"
     "github.com/spf13/cobra"
 )

View File

@ -13,7 +13,6 @@ import (
     "time"

     "github.com/alist-org/alist/v3/cmd/flags"
-    _ "github.com/alist-org/alist/v3/drivers"
     "github.com/alist-org/alist/v3/internal/bootstrap"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/pkg/utils"
@ -35,8 +34,7 @@ the address is defined in config file`,
         utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
         time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
     }
-    bootstrap.InitAria2()
-    bootstrap.InitQbittorrent()
+    bootstrap.InitOfflineDownloadTools()
     bootstrap.LoadStorages()
     if !flags.Debug && !flags.Dev {
         gin.SetMode(gin.ReleaseMode)
@ -100,7 +98,7 @@ the address is defined in config file`,
     signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
     <-quit
     utils.Log.Println("Shutdown server...")
+    Release()
     ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
     defer cancel()
     var wg sync.WaitGroup

View File

@ -31,6 +31,7 @@ var disableStorageCmd = &cobra.Command{
         }
         mountPath := args[0]
         Init()
+        defer Release()
         storage, err := db.GetStorageByMountPath(mountPath)
         if err != nil {
             utils.Log.Errorf("failed to query storage: %+v", err)
@ -89,6 +90,7 @@ var listStorageCmd = &cobra.Command{
     Short: "List all storages",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         storages, _, err := db.GetStorages(1, -1)
         if err != nil {
             utils.Log.Errorf("failed to query storages: %+v", err)

View File

@ -2,19 +2,22 @@ package _115

 import (
     "context"
-    "os"
+    "strings"

     driver115 "github.com/SheltonZhu/115driver/pkg/driver"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/http_range"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/pkg/errors"
+    "golang.org/x/time/rate"
 )

 type Pan115 struct {
     model.Storage
     Addition
     client  *driver115.Pan115Client
+    limiter *rate.Limiter
 }

 func (d *Pan115) Config() driver.Config {
@ -26,29 +29,42 @@ func (d *Pan115) GetAddition() driver.Additional {
 }

 func (d *Pan115) Init(ctx context.Context) error {
+    if d.LimitRate > 0 {
+        d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
+    }
     return d.login()
 }

+func (d *Pan115) WaitLimit(ctx context.Context) error {
+    if d.limiter != nil {
+        return d.limiter.Wait(ctx)
+    }
+    return nil
+}
+
 func (d *Pan115) Drop(ctx context.Context) error {
     return nil
 }

 func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
     files, err := d.getFiles(dir.GetID())
     if err != nil && !errors.Is(err, driver115.ErrNotExist) {
         return nil, err
     }
-    return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
-        return src, nil
+    return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
+        return &src, nil
     })
 }

 func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
     downloadInfo, err := d.client.
-        SetUserAgent(driver115.UA115Browser).
-        Download(file.(driver115.File).PickCode)
-    // recover for upload
-    d.client.SetUserAgent(driver115.UA115Desktop)
+        DownloadWithUA(file.(*FileObj).PickCode, driver115.UA115Browser)
     if err != nil {
         return nil, err
     }
@ -60,6 +76,9 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }

 func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
         return err
     }
@ -67,31 +86,99 @@ func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 }

 func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Move(dstDir.GetID(), srcObj.GetID())
 }

 func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Rename(srcObj.GetID(), newName)
 }

 func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Copy(dstDir.GetID(), srcObj.GetID())
 }

 func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Delete(obj.GetID())
 }

 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+    var (
+        fastInfo *driver115.UploadInitResp
+        dirID    = dstDir.GetID()
+    )
+
+    if ok, err := d.client.UploadAvailable(); err != nil || !ok {
+        return err
+    }
+    if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
+        return driver115.ErrUploadTooLarge
+    }
+    //if digest, err = d.client.GetDigestResult(stream); err != nil {
+    //    return err
+    //}
+
+    const PreHashSize int64 = 128 * utils.KB
+    hashSize := PreHashSize
+    if stream.GetSize() < PreHashSize {
+        hashSize = stream.GetSize()
+    }
+    reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
     if err != nil {
         return err
     }
-    defer func() {
-        _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
-    }()
-    return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
+    preHash, err := utils.HashReader(utils.SHA1, reader)
+    if err != nil {
+        return err
+    }
+    preHash = strings.ToUpper(preHash)
+    fullHash := stream.GetHash().GetHash(utils.SHA1)
+    if len(fullHash) <= 0 {
+        tmpF, err := stream.CacheFullInTempFile()
+        if err != nil {
+            return err
+        }
+        fullHash, err = utils.HashFile(utils.SHA1, tmpF)
+        if err != nil {
+            return err
+        }
+    }
+    fullHash = strings.ToUpper(fullHash)
+
+    // rapid-upload
+    // note that 115 adds a timeout for rapid-upload,
+    // and a "sig invalid" err is thrown even when the hash is correct after the timeout.
+    if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
+        return err
+    }
+    if matched, err := fastInfo.Ok(); err != nil {
+        return err
+    } else if matched {
+        return nil
+    }
+
+    // rapid-upload failed, fall back to a real upload
+    if stream.GetSize() <= utils.KB { // files no larger than 1KB are uploaded in normal mode
+        return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+    }
+    // multipart upload
+    return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
 }

 var _ driver.Driver = (*Pan115)(nil)
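For context on the rewritten Put above: before committing to a full SHA-1, it hashes only the first 128 KB of the stream as a cheap pre-hash for 115's rapid-upload negotiation. A minimal standalone sketch of just that step (assuming a plain *os.File in place of alist's model.FileStreamer; the file name is hypothetical):

package main

import (
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "io"
    "os"
    "strings"
)

const preHashSize int64 = 128 * 1024 // 128 KB, mirroring PreHashSize in Put

// preHash hashes at most the first 128 KB; shorter files are hashed in full.
func preHash(f *os.File) (string, error) {
    h := sha1.New()
    if _, err := io.Copy(h, io.LimitReader(f, preHashSize)); err != nil {
        return "", err
    }
    // 115 expects upper-case hex digests, hence strings.ToUpper in Put above
    return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
    f, err := os.Open("example.bin") // hypothetical input file
    if err != nil {
        panic(err)
    }
    defer f.Close()
    digest, err := preHash(f)
    if err != nil {
        panic(err)
    }
    fmt.Println(digest)
}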

View File

@ -6,17 +6,18 @@ import (
 )

 type Addition struct {
     Cookie      string  `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
     QRCodeToken string  `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
     PageSize    int64   `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
+    LimitRate   float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
     driver.RootID
 }

 var config = driver.Config{
     Name:              "115 Cloud",
     DefaultRoot:       "0",
     OnlyProxy:         true,
-    OnlyLocal:         true,
+    //OnlyLocal:         true,
     NoOverwriteUpload: true,
 }
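The new limit_rate option above is consumed in Init via rate.NewLimiter(rate.Limit(d.LimitRate), 1), and every driver method then calls WaitLimit before touching the API. A self-contained sketch of that gating pattern, with the rate hardcoded for illustration:

package main

import (
    "context"
    "fmt"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    // rate.Limit(2) allows 2 events per second with a burst of 1,
    // the same shape as the limiter built in Init above.
    limiter := rate.NewLimiter(rate.Limit(2), 1)
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    start := time.Now()
    for i := 0; i < 5; i++ {
        // Wait blocks until a token is available or the context is done,
        // which is exactly what WaitLimit propagates to the caller.
        if err := limiter.Wait(ctx); err != nil {
            fmt.Println("canceled:", err)
            return
        }
        fmt.Printf("request %d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
    }
}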

View File

@ -3,6 +3,20 @@ package _115

 import (
     "github.com/SheltonZhu/115driver/pkg/driver"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "time"
 )

-var _ model.Obj = (*driver.File)(nil)
+var _ model.Obj = (*FileObj)(nil)
+
+type FileObj struct {
+    driver.File
+}
+
+func (f *FileObj) CreateTime() time.Time {
+    return f.File.CreateTime
+}
+
+func (f *FileObj) GetHash() utils.HashInfo {
+    return utils.NewHashInfo(utils.SHA1, f.Sha1)
+}
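The FileObj wrapper above relies on Go struct embedding: the third-party driver.File is embedded, its fields and methods are promoted, and only what model.Obj additionally needs is defined on the wrapper. A generic sketch of the pattern with stand-in types (not the real driver115 or model APIs):

package main

import (
    "fmt"
    "time"
)

// sdkFile stands in for the embedded third-party type (driver115.File).
type sdkFile struct {
    Name       string
    Sha1       string
    CreateTime time.Time
}

// obj stands in for the local interface (model.Obj) the wrapper must satisfy.
type obj interface {
    CreateTime() time.Time
    HashSHA1() string
}

type fileObj struct {
    sdkFile // embedding: Name, Sha1, ... are promoted onto fileObj
}

// Without this explicit method, the selector f.CreateTime would resolve to
// the promoted *field* of the same name, so the interface would not be
// satisfied; that is why the wrapper above defines CreateTime itself.
func (f *fileObj) CreateTime() time.Time { return f.sdkFile.CreateTime }
func (f *fileObj) HashSHA1() string      { return f.sdkFile.Sha1 }

var _ obj = (*fileObj)(nil) // compile-time interface check, as in the diff

func main() {
    f := &fileObj{sdkFile{Name: "a.txt", Sha1: "DA39A3EE", CreateTime: time.Now()}}
    fmt.Println(f.Name, f.HashSHA1())
}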

View File

@ -1,10 +1,25 @@
 package _115

 import (
+    "bytes"
     "crypto/tls"
+    "encoding/json"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/http_range"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "github.com/aliyun/aliyun-oss-go-sdk/oss"
+    "github.com/orzogc/fake115uploader/cipher"
+    "io"
+    "net/url"
+    "path/filepath"
+    "strconv"
+    "strings"
+    "sync"
+    "time"

     "github.com/SheltonZhu/115driver/pkg/driver"
+    driver115 "github.com/SheltonZhu/115driver/pkg/driver"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/pkg/errors"
 )
@ -41,8 +56,8 @@ func (d *Pan115) login() error {
     return d.client.LoginCheck()
 }

-func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
-    res := make([]driver.File, 0)
+func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
+    res := make([]FileObj, 0)
     if d.PageSize <= 0 {
         d.PageSize = driver.FileListLimit
     }
@ -51,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
         return nil, err
     }
     for _, file := range *files {
-        res = append(res, file)
+        res = append(res, FileObj{file})
     }
     return res, nil
 }
const (
    appVer = "2.0.3.6"
)

func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
    var (
        ecdhCipher   *cipher.EcdhCipher
        encrypted    []byte
        decrypted    []byte
        encodedToken string
        err          error
        target       = "U_1_" + dirID
        bodyBytes    []byte
        result       = driver115.UploadInitResp{}
        fileSizeStr  = strconv.FormatInt(fileSize, 10)
    )
    if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
        return nil, err
    }

    userID := strconv.FormatInt(d.client.UserID, 10)
    form := url.Values{}
    form.Set("appid", "0")
    form.Set("appversion", appVer)
    form.Set("userid", userID)
    form.Set("filename", fileName)
    form.Set("filesize", fileSizeStr)
    form.Set("fileid", fileID)
    form.Set("target", target)
    form.Set("sig", d.client.GenerateSignature(fileID, target))

    signKey, signVal := "", ""
    for retry := true; retry; {
        t := driver115.Now()

        if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
            return nil, err
        }

        params := map[string]string{
            "k_ec": encodedToken,
        }

        form.Set("t", t.String())
        form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
        if signKey != "" && signVal != "" {
            form.Set("sign_key", signKey)
            form.Set("sign_val", signVal)
        }

        if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
            return nil, err
        }

        req := d.client.NewRequest().
            SetQueryParams(params).
            SetBody(encrypted).
            SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
            SetDoNotParseResponse(true)
        resp, err := req.Post(driver115.ApiUploadInit)
        if err != nil {
            return nil, err
        }
        data := resp.RawBody()
        defer data.Close()
        if bodyBytes, err = io.ReadAll(data); err != nil {
            return nil, err
        }
        if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
            return nil, err
        }
        if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
            return nil, err
        }
        if result.Status == 7 {
            // update signKey & signVal
            signKey = result.SignKey
            signVal, err = UploadDigestRange(stream, result.SignCheck)
            if err != nil {
                return nil, err
            }
        } else {
            retry = false
        }
        result.SHA1 = fileID
    }

    return &result, nil
}

func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
    var start, end int64
    if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
        return
    }

    length := end - start + 1
    reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
    if err != nil {
        return "", err
    }
    hashStr, err := utils.HashReader(utils.SHA1, reader)
    if err != nil {
        return "", err
    }
    result = strings.ToUpper(hashStr)
    return
}

// UploadByMultipart uploads the file in multipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
    var (
        chunks    []oss.FileChunk
        parts     []oss.UploadPart
        imur      oss.InitiateMultipartUploadResult
        ossClient *oss.Client
        bucket    *oss.Bucket
        ossToken  *driver115.UploadOSSTokenResp
        err       error
    )

    tmpF, err := stream.CacheFullInTempFile()
    if err != nil {
        return err
    }

    options := driver115.DefalutUploadMultipartOptions()
    if len(opts) > 0 {
        for _, f := range opts {
            f(options)
        }
    }

    if ossToken, err = d.client.GetOSSToken(); err != nil {
        return err
    }

    if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
        return err
    }

    if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
        return err
    }

    // the ossToken expires after one hour, so refresh it every 50 minutes
    ticker := time.NewTicker(options.TokenRefreshTime)
    defer ticker.Stop()
    // set a timeout
    timeout := time.NewTimer(options.Timeout)

    if chunks, err = SplitFile(fileSize); err != nil {
        return err
    }

    if imur, err = bucket.InitiateMultipartUpload(params.Object,
        oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
        oss.UserAgentHeader(driver115.OSSUserAgent),
    ); err != nil {
        return err
    }

    wg := sync.WaitGroup{}
    wg.Add(len(chunks))

    chunksCh := make(chan oss.FileChunk)
    errCh := make(chan error)
    UploadedPartsCh := make(chan oss.UploadPart)
    quit := make(chan struct{})

    // producer
    go chunksProducer(chunksCh, chunks)
    go func() {
        wg.Wait()
        quit <- struct{}{}
    }()

    // consumers
    for i := 0; i < options.ThreadsNum; i++ {
        go func(threadId int) {
            defer func() {
                if r := recover(); r != nil {
                    errCh <- fmt.Errorf("Recovered in %v", r)
                }
            }()
            for chunk := range chunksCh {
                var part oss.UploadPart // retry on error, up to 3 attempts in total
                for retry := 0; retry < 3; retry++ {
                    select {
                    case <-ticker.C:
                        if ossToken, err = d.client.GetOSSToken(); err != nil { // refresh the ossToken when the ticker fires
                            errCh <- errors.Wrap(err, "刷新token时出现错误")
                        }
                    default:
                    }

                    buf := make([]byte, chunk.Size)
                    if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
                        continue
                    }

                    b := bytes.NewBuffer(buf)
                    if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
                        break
                    }
                }
                if err != nil {
                    errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误%v", stream.GetName(), chunk.Number, err))
                }
                UploadedPartsCh <- part
            }
        }(i)
    }

    go func() {
        for part := range UploadedPartsCh {
            parts = append(parts, part)
            wg.Done()
        }
    }()

LOOP:
    for {
        select {
        case <-ticker.C:
            // refresh the ossToken when the ticker fires
            if ossToken, err = d.client.GetOSSToken(); err != nil {
                return err
            }
        case <-quit:
            break LOOP
        case <-errCh:
            return err
        case <-timeout.C:
            return fmt.Errorf("time out")
        }
    }

    // the EOF error comes from the xml Unmarshal: the response is actually JSON, so the upload in fact succeeded
    if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
        // when the file name contains either of the characters '&' or '<', parsing the response xml fails, but the upload actually succeeded
        if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
            return err
        }
    }
    return d.checkUploadStatus(dirID, params.SHA1)
}

func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
    for _, chunk := range chunks {
        ch <- chunk
    }
}

func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
    // verify that the upload succeeded
    req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
    opts := []driver115.GetFileOptions{
        driver115.WithOrder(driver115.FileOrderByTime),
        driver115.WithShowDirEnable(false),
        driver115.WithAsc(false),
        driver115.WithLimit(500),
    }
    fResp, err := driver115.GetFiles(req, dirID, opts...)
    if err != nil {
        return err
    }
    for _, fileInfo := range fResp.Files {
        if fileInfo.Sha1 == sha1 {
            return nil
        }
    }
    return driver115.ErrUploadFailed
}

func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
    for i := int64(1); i < 10; i++ {
        if fileSize < i*utils.GB { // when the file is smaller than i GB, split it into i*1000 parts
            if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
                return
            }
            break
        }
    }
    if fileSize > 9*utils.GB { // when the file is larger than 9 GB, split it into 10000 parts
        if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
            return
        }
    }
    // a single part must not be smaller than 100KB
    if chunks[0].Size < 100*utils.KB {
        if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
            return
        }
    }
    return
}

// SplitFileByPartNum splits a big file into parts by the number of parts.
// Splits the file with the specified parts count; returns the split result when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
    if chunkNum <= 0 || chunkNum > 10000 {
        return nil, errors.New("chunkNum invalid")
    }
    if int64(chunkNum) > fileSize {
        return nil, errors.New("oss: chunkNum invalid")
    }
    var chunks []oss.FileChunk
    var chunk = oss.FileChunk{}
    var chunkN = (int64)(chunkNum)
    for i := int64(0); i < chunkN; i++ {
        chunk.Number = int(i + 1)
        chunk.Offset = i * (fileSize / chunkN)
        if i == chunkN-1 {
            chunk.Size = fileSize/chunkN + fileSize%chunkN
        } else {
            chunk.Size = fileSize / chunkN
        }
        chunks = append(chunks, chunk)
    }
    return chunks, nil
}

// SplitFileByPartSize splits a big file into parts by the size of parts.
// Splits the file by the given part size; returns the FileChunk slice when error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
    if chunkSize <= 0 {
        return nil, errors.New("chunkSize invalid")
    }
    var chunkN = fileSize / chunkSize
    if chunkN >= 10000 {
        return nil, errors.New("Too many parts, please increase part size")
    }
    var chunks []oss.FileChunk
    var chunk = oss.FileChunk{}
    for i := int64(0); i < chunkN; i++ {
        chunk.Number = int(i + 1)
        chunk.Offset = i * chunkSize
        chunk.Size = chunkSize
        chunks = append(chunks, chunk)
    }
    if fileSize%chunkSize > 0 {
        chunk.Number = len(chunks) + 1
        chunk.Offset = int64(len(chunks)) * chunkSize
        chunk.Size = fileSize % chunkSize
        chunks = append(chunks, chunk)
    }
    return chunks, nil
}
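A quick standalone check (not repo code) of the splitting arithmetic used by SplitFileByPartNum above: every offset comes from integer division and the final chunk absorbs the remainder, so the chunk sizes always sum back to the original file size.

package main

import "fmt"

type fileChunk struct {
    Number int
    Offset int64
    Size   int64
}

// splitByPartNum reproduces the arithmetic of SplitFileByPartNum above
// with a local stand-in for oss.FileChunk.
func splitByPartNum(fileSize int64, chunkNum int) []fileChunk {
    n := int64(chunkNum)
    chunks := make([]fileChunk, 0, chunkNum)
    for i := int64(0); i < n; i++ {
        size := fileSize / n
        if i == n-1 {
            size += fileSize % n // the last chunk takes the remainder
        }
        chunks = append(chunks, fileChunk{Number: int(i + 1), Offset: i * (fileSize / n), Size: size})
    }
    return chunks
}

func main() {
    const fileSize int64 = 3*1024*1024*1024 + 123 // just over 3 GB, so SplitFile would pick 4000 parts
    chunks := splitByPartNum(fileSize, 4000)
    var total int64
    for _, c := range chunks {
        total += c.Size
    }
    fmt.Println(len(chunks), total == fileSize) // 4000 true
}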

View File

@ -6,11 +6,6 @@ import (
     "encoding/base64"
     "encoding/hex"
     "fmt"
-    "io"
-    "net/http"
-    "net/url"
-    "os"
-
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"
@ -22,6 +17,9 @@ import (
     "github.com/aws/aws-sdk-go/service/s3/s3manager"
     "github.com/go-resty/resty/v2"
     log "github.com/sirupsen/logrus"
+    "io"
+    "net/http"
+    "net/url"
 )

 type Pan123 struct {
@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     // const DEFAULT int64 = 10485760
     h := md5.New()
     // need to calculate md5 of the full content
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+    tempFile, err := stream.CacheFullInTempFile()
     if err != nil {
         return err
     }
     defer func() {
         _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
     }()
     if _, err = io.Copy(h, tempFile); err != nil {
         return err

View File

@ -1,6 +1,7 @@
 package _123

 import (
+    "github.com/alist-org/alist/v3/pkg/utils"
     "net/url"
     "path"
     "strconv"
@ -21,6 +22,14 @@ type File struct {
     DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) CreateTime() time.Time {
+    return f.UpdateAt
+}
+
+func (f File) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
     return ""
 }

View File

@ -107,7 +107,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
             if err != nil {
                 return err
             }
-            up(j * 100 / chunkCount)
+            up(float64(j) * 100 / float64(chunkCount))
         }
     }
     // complete s3 upload
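The cast above matters because j * 100 / chunkCount is integer arithmetic in Go: the reported progress truncates toward zero and stays at 0 until j exceeds chunkCount/100. A tiny demonstration (not repo code):

package main

import "fmt"

func main() {
    j, chunkCount := 1, 7
    fmt.Println(j * 100 / chunkCount)                   // 14 (int division, truncated)
    fmt.Println(float64(j) * 100 / float64(chunkCount)) // 14.285714285714286
    fmt.Println(1 * 100 / 200)                          // 0: early chunks of a large upload would report no progress at all
}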

View File

@ -0,0 +1,77 @@
package _123Link

import (
    "context"
    stdpath "path"
    "time"

    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
)

type Pan123Link struct {
    model.Storage
    Addition
    root *Node
}

func (d *Pan123Link) Config() driver.Config {
    return config
}

func (d *Pan123Link) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *Pan123Link) Init(ctx context.Context) error {
    node, err := BuildTree(d.OriginURLs)
    if err != nil {
        return err
    }
    node.calSize()
    d.root = node
    return nil
}

func (d *Pan123Link) Drop(ctx context.Context) error {
    return nil
}

func (d *Pan123Link) Get(ctx context.Context, path string) (model.Obj, error) {
    node := GetNodeFromRootByPath(d.root, path)
    return nodeToObj(node, path)
}

func (d *Pan123Link) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    node := GetNodeFromRootByPath(d.root, dir.GetPath())
    if node == nil {
        return nil, errs.ObjectNotFound
    }
    if node.isFile() {
        return nil, errs.NotFolder
    }
    return utils.SliceConvert(node.Children, func(node *Node) (model.Obj, error) {
        return nodeToObj(node, stdpath.Join(dir.GetPath(), node.Name))
    })
}

func (d *Pan123Link) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    node := GetNodeFromRootByPath(d.root, file.GetPath())
    if node == nil {
        return nil, errs.ObjectNotFound
    }
    if node.isFile() {
        signUrl, err := SignURL(node.Url, d.PrivateKey, d.UID, time.Duration(d.ValidDuration)*time.Minute)
        if err != nil {
            return nil, err
        }
        return &model.Link{
            URL: signUrl,
        }, nil
    }
    return nil, errs.NotFile
}

var _ driver.Driver = (*Pan123Link)(nil)

drivers/123_link/meta.go Normal file
View File

@ -0,0 +1,23 @@
package _123Link

import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
    OriginURLs    string `json:"origin_urls" type:"text" required:"true" default:"https://vip.123pan.com/29/folder/file.mp3" help:"structure:FolderName:\n  [FileSize:][Modified:]Url"`
    PrivateKey    string `json:"private_key"`
    UID           uint64 `json:"uid" type:"number"`
    ValidDuration int64  `json:"valid_duration" type:"number" default:"30" help:"minutes"`
}

var config = driver.Config{
    Name: "123PanLink",
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Pan123Link{}
    })
}

drivers/123_link/parse.go Normal file
View File

@ -0,0 +1,152 @@
package _123Link

import (
    "fmt"
    url2 "net/url"
    stdpath "path"
    "strconv"
    "strings"
    "time"
)

// build tree from text, text structure definition:
/**
 * FolderName:
 *   [FileSize:][Modified:]Url
 */
/**
 * For example:
 * folder1:
 *   name1:url1
 *   url2
 * folder2:
 *   url3
 *   url4
 * url5
 * folder3:
 *   url6
 *   url7
 * url8
 */
// if there is no name, use the last segment of the url as the name
func BuildTree(text string) (*Node, error) {
    lines := strings.Split(text, "\n")
    var root = &Node{Level: -1, Name: "root"}
    stack := []*Node{root}
    for _, line := range lines {
        // calculate indent
        indent := 0
        for i := 0; i < len(line); i++ {
            if line[i] != ' ' {
                break
            }
            indent++
        }
        // if indent is not a multiple of 2, it is an error
        if indent%2 != 0 {
            return nil, fmt.Errorf("the line '%s' is not a multiple of 2", line)
        }
        // calculate level
        level := indent / 2
        line = strings.TrimSpace(line[indent:])
        // if the line is empty, skip
        if line == "" {
            continue
        }
        // if level isn't greater than the level of the top of the stack
        // it is not the child of the top of the stack
        for level <= stack[len(stack)-1].Level {
            // pop the top of the stack
            stack = stack[:len(stack)-1]
        }
        // if the line is a folder
        if isFolder(line) {
            // create a new node
            node := &Node{
                Level: level,
                Name:  strings.TrimSuffix(line, ":"),
            }
            // add the node to the top of the stack
            stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
            // push the node to the stack
            stack = append(stack, node)
        } else {
            // if the line is a file
            // create a new node
            node, err := parseFileLine(line)
            if err != nil {
                return nil, err
            }
            node.Level = level
            // add the node to the top of the stack
            stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
        }
    }
    return root, nil
}

func isFolder(line string) bool {
    return strings.HasSuffix(line, ":")
}

// line definition:
// [FileSize:][Modified:]Url
func parseFileLine(line string) (*Node, error) {
    // if there is no url, it is an error
    if !strings.Contains(line, "http://") && !strings.Contains(line, "https://") {
        return nil, fmt.Errorf("invalid line: %s, because url is required for file", line)
    }
    index := strings.Index(line, "http://")
    if index == -1 {
        index = strings.Index(line, "https://")
    }
    url := line[index:]
    info := line[:index]
    node := &Node{
        Url: url,
    }
    name := stdpath.Base(url)
    unescape, err := url2.PathUnescape(name)
    if err == nil {
        name = unescape
    }
    node.Name = name
    if index > 0 {
        if !strings.HasSuffix(info, ":") {
            return nil, fmt.Errorf("invalid line: %s, because file info must end with ':'", line)
        }
        info = info[:len(info)-1]
        if info == "" {
            return nil, fmt.Errorf("invalid line: %s, because file name can't be empty", line)
        }
        infoParts := strings.Split(info, ":")
        size, err := strconv.ParseInt(infoParts[0], 10, 64)
        if err != nil {
            return nil, fmt.Errorf("invalid line: %s, because file size must be an integer", line)
        }
        node.Size = size
        if len(infoParts) > 1 {
            modified, err := strconv.ParseInt(infoParts[1], 10, 64)
            if err != nil {
                return nil, fmt.Errorf("invalid line: %s, because file modified must be an unix timestamp", line)
            }
            node.Modified = modified
        } else {
            node.Modified = time.Now().Unix()
        }
    }
    return node, nil
}

func splitPath(path string) []string {
    if path == "/" {
        return []string{"root"}
    }
    parts := strings.Split(path, "/")
    parts[0] = "root"
    return parts
}

func GetNodeFromRootByPath(root *Node, path string) *Node {
    return root.getByPath(splitPath(path))
}

drivers/123_link/types.go Normal file
View File

@ -0,0 +1,66 @@
package _123Link

import (
    "time"

    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
)

// Node is a node in the folder tree
type Node struct {
    Url      string
    Name     string
    Level    int
    Modified int64
    Size     int64
    Children []*Node
}

func (node *Node) getByPath(paths []string) *Node {
    if len(paths) == 0 || node == nil {
        return nil
    }
    if node.Name != paths[0] {
        return nil
    }
    if len(paths) == 1 {
        return node
    }
    for _, child := range node.Children {
        tmp := child.getByPath(paths[1:])
        if tmp != nil {
            return tmp
        }
    }
    return nil
}

func (node *Node) isFile() bool {
    return node.Url != ""
}

func (node *Node) calSize() int64 {
    if node.isFile() {
        return node.Size
    }
    var size int64 = 0
    for _, child := range node.Children {
        size += child.calSize()
    }
    node.Size = size
    return size
}

func nodeToObj(node *Node, path string) (model.Obj, error) {
    if node == nil {
        return nil, errs.ObjectNotFound
    }
    return &model.Object{
        Name:     node.Name,
        Size:     node.Size,
        Modified: time.Unix(node.Modified, 0),
        IsFolder: !node.isFile(),
        Path:     path,
    }, nil
}
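A hypothetical usage sketch for the new package (the import path is assumed from the file layout above): build the tree from origin_urls text, then resolve a path the way Get and Link do. Names are taken from the last URL segment when no size prefix supplies one, and calSize is invoked for you in Init.

package main

import (
    "fmt"

    _123Link "github.com/alist-org/alist/v3/drivers/123_link" // import path assumed
)

func main() {
    // two files under one folder: one with an explicit size prefix, one bare url
    text := "folder1:\n" +
        "  1024:https://vip.123pan.com/29/file2.mp3\n" +
        "  https://vip.123pan.com/29/song.mp3\n"
    root, err := _123Link.BuildTree(text)
    if err != nil {
        panic(err)
    }
    node := _123Link.GetNodeFromRootByPath(root, "/folder1/file2.mp3")
    if node != nil {
        fmt.Println(node.Name, node.Size) // file2.mp3 1024
    }
}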

drivers/123_link/util.go Normal file
View File

@ -0,0 +1,30 @@
package _123Link

import (
    "crypto/md5"
    "fmt"
    "math/rand"
    "net/url"
    "time"
)

func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
    if privateKey == "" {
        return originURL, nil
    }
    var (
        ts     = time.Now().Add(validDuration).Unix() // expiry timestamp
        rInt   = rand.Int()                           // random positive integer
        objURL *url.URL
    )
    objURL, err = url.Parse(originURL)
    if err != nil {
        return "", err
    }
    authKey := fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
        objURL.Path, ts, rInt, uid, privateKey))))
    v := objURL.Query()
    v.Add("auth_key", authKey)
    objURL.RawQuery = v.Encode()
    return objURL.String(), nil
}
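And a usage sketch for SignURL (the key and uid are made up): the signed URL gains an auth_key query parameter of the form ts-rand-uid-md5(path-ts-rand-uid-key), presumably verified server-side before the file is served.

package main

import (
    "fmt"
    "time"

    _123Link "github.com/alist-org/alist/v3/drivers/123_link" // import path assumed
)

func main() {
    signed, err := _123Link.SignURL(
        "https://vip.123pan.com/29/folder/file.mp3",
        "my-private-key", // hypothetical secret
        29,               // hypothetical uid
        30*time.Minute,   // matches the valid_duration default above
    )
    if err != nil {
        panic(err)
    }
    fmt.Println(signed) // the original URL plus ?auth_key=...
}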

View File

@ -1,6 +1,7 @@
 package _123Share

 import (
+    "github.com/alist-org/alist/v3/pkg/utils"
     "net/url"
     "path"
     "strconv"
@ -21,6 +22,10 @@ type File struct {
     DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
     return ""
 }
@ -36,6 +41,9 @@ func (f File) GetName() string {
 func (f File) ModTime() time.Time {
     return f.UpdateAt
 }
+func (f File) CreateTime() time.Time {
+    return f.UpdateAt
+}

 func (f File) IsDir() bool {
     return f.Type == 1


@@ -103,9 +103,9 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 	return err
 }
-func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	if d.isFamily() {
-		return errs.NotImplement
+		return nil, errs.NotImplement
 	}
 	var contentInfoList []string
 	var catalogInfoList []string
@@ -131,7 +131,10 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 	}
 	pathname := "/orchestration/personalCloud/batchOprTask/v1.0/createBatchOprTask"
 	_, err := d.post(pathname, data, nil)
-	return err
+	if err != nil {
+		return nil, err
+	}
+	return srcObj, nil
 }
 func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) error {


@@ -10,7 +10,7 @@ type Catalog struct {
 	CatalogID   string `json:"catalogID"`
 	CatalogName string `json:"catalogName"`
 	//CatalogType int `json:"catalogType"`
-	//CreateTime string `json:"createTime"`
+	CreateTime string `json:"createTime"`
 	UpdateTime string `json:"updateTime"`
 	//IsShared bool `json:"isShared"`
 	//CatalogLevel int `json:"catalogLevel"`
@@ -63,7 +63,7 @@ type Content struct {
 	//ParentCatalogID string `json:"parentCatalogId"`
 	//Channel string `json:"channel"`
 	//GeoLocFlag string `json:"geoLocFlag"`
-	//Digest string `json:"digest"`
+	Digest string `json:"digest"`
 	//Version string `json:"version"`
 	//FileEtag string `json:"fileEtag"`
 	//FileVersion string `json:"fileVersion"`
@@ -141,7 +141,7 @@ type CloudContent struct {
 	//ContentSuffix string `json:"contentSuffix"`
 	ContentSize int64 `json:"contentSize"`
 	//ContentDesc string `json:"contentDesc"`
-	//CreateTime string `json:"createTime"`
+	CreateTime string `json:"createTime"`
 	//Shottime interface{} `json:"shottime"`
 	LastUpdateTime string `json:"lastUpdateTime"`
 	ThumbnailURL   string `json:"thumbnailURL"`
@@ -165,7 +165,7 @@ type CloudCatalog struct {
 	CatalogID   string `json:"catalogID"`
 	CatalogName string `json:"catalogName"`
 	//CloudID string `json:"cloudID"`
-	//CreateTime string `json:"createTime"`
+	CreateTime string `json:"createTime"`
 	LastUpdateTime string `json:"lastUpdateTime"`
 	//Creator string `json:"creator"`
 	//CreatorNickname string `json:"creatorNickname"`


@@ -48,7 +48,7 @@ func calSign(body, ts, randStr string) string {
 }
 func getTime(t string) time.Time {
-	stamp, _ := time.ParseInLocation("20060102150405", t, time.Local)
+	stamp, _ := time.ParseInLocation("20060102150405", t, utils.CNLoc)
 	return stamp
 }
@@ -139,6 +139,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
 				Name:     catalog.CatalogName,
 				Size:     0,
 				Modified: getTime(catalog.UpdateTime),
+				Ctime:    getTime(catalog.CreateTime),
 				IsFolder: true,
 			}
 			files = append(files, &f)
@@ -150,6 +151,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
 					Name:     content.ContentName,
 					Size:     content.ContentSize,
 					Modified: getTime(content.UpdateTime),
+					HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
 				},
 				Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
 				//Thumbnail: content.BigthumbnailURL,
@@ -202,6 +204,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
 				Size:     0,
 				IsFolder: true,
 				Modified: getTime(catalog.LastUpdateTime),
+				Ctime:    getTime(catalog.CreateTime),
 			}
 			files = append(files, &f)
 		}
@@ -212,6 +215,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
 					Name:     content.ContentName,
 					Size:     content.ContentSize,
 					Modified: getTime(content.LastUpdateTime),
+					Ctime:    getTime(content.CreateTime),
 				},
 				Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
 				//Thumbnail: content.BigthumbnailURL,


@@ -380,7 +380,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 		if err != nil {
 			return err
 		}
-		up(int(i * 100 / count))
+		up(float64(i) * 100 / float64(count))
 	}
 	fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
 	sliceMd5 := fileMd5


@@ -27,10 +27,15 @@ type Cloud189PC struct {
 	tokenInfo *AppSessionResp
 	uploadThread int
+	storageConfig driver.Config
 }
 func (y *Cloud189PC) Config() driver.Config {
-	return config
+	if y.storageConfig.Name == "" {
+		y.storageConfig = config
+	}
+	return y.storageConfig
 }
 func (y *Cloud189PC) GetAddition() driver.Additional {
@@ -38,6 +43,9 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
 }
 func (y *Cloud189PC) Init(ctx context.Context) (err error) {
+	// keep compatibility with the legacy upload API
+	y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
 	// handle personal-cloud vs family-cloud parameters
 	if y.isFamily() && y.RootFolderID == "-11" {
 		y.RootFolderID = ""
@@ -118,10 +126,11 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
 	// follow the redirect to get the real link
 	downloadUrl.URL = strings.Replace(strings.ReplaceAll(downloadUrl.URL, "&amp;", "&"), "http://", "https://", 1)
-	res, err := base.NoRedirectClient.R().SetContext(ctx).Get(downloadUrl.URL)
+	res, err := base.NoRedirectClient.R().SetContext(ctx).SetDoNotParseResponse(true).Get(downloadUrl.URL)
 	if err != nil {
 		return nil, err
 	}
+	defer res.RawBody().Close()
 	if res.StatusCode() == 302 {
 		downloadUrl.URL = res.Header().Get("location")
 	}
@@ -302,6 +311,13 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
 }
 func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	// the rapid-upload probe responds slowly, so it is opt-in
+	if y.Addition.RapidUpload {
+		if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
+			return newObj, nil
+		}
+	}
 	switch y.UploadMethod {
 	case "old":
 		return y.OldUpload(ctx, dstDir, stream, up)


@@ -16,6 +16,7 @@ type Addition struct {
 	FamilyID     string `json:"family_id"`
 	UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
 	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
+	RapidUpload  bool   `json:"rapid_upload"`
 	NoUseOcr     bool   `json:"no_use_ocr"`
 }


@@ -3,6 +3,7 @@ package _189pc
 import (
 	"encoding/xml"
 	"fmt"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"sort"
 	"strings"
 	"time"
@@ -175,6 +176,14 @@ type Cloud189File struct {
 	// StarLabel int64 `json:"starLabel"`
 }
+func (c *Cloud189File) CreateTime() time.Time {
+	return time.Time(c.CreateDate)
+}
+func (c *Cloud189File) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.MD5, c.Md5)
+}
 func (c *Cloud189File) GetSize() int64     { return c.Size }
 func (c *Cloud189File) GetName() string    { return c.Name }
 func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
@@ -199,6 +208,14 @@ type Cloud189Folder struct {
 	// StarLabel int64 `json:"starLabel"`
 }
+func (c *Cloud189Folder) CreateTime() time.Time {
+	return time.Time(c.CreateDate)
+}
+func (c *Cloud189Folder) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
 func (c *Cloud189Folder) GetSize() int64     { return 0 }
 func (c *Cloud189Folder) GetName() string    { return c.Name }
 func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }


@@ -13,7 +13,6 @@ import (
 	"net/http"
 	"net/http/cookiejar"
 	"net/url"
-	"os"
 	"regexp"
 	"sort"
 	"strconv"
@@ -514,7 +513,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 			if err != nil {
 				return err
 			}
-			up(int(threadG.Success()) * 100 / count)
+			up(float64(threadG.Success()) * 100 / float64(count))
 			return nil
 		})
 	}
@@ -547,17 +546,30 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 	return resp.toFile(), nil
 }
-// rapid upload
-func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	// needs the full file md5, so the stream must support io.Seek
-	tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
+func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
+	fileMd5 := stream.GetHash().GetHash(utils.MD5)
+	if len(fileMd5) < utils.MD5.Width {
+		return nil, errors.New("invalid hash")
+	}
+	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
+	if err != nil {
+		return nil, err
+	}
+	if uploadInfo.FileDataExists != 1 {
+		return nil, errors.New("rapid upload fail")
+	}
+	return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
+}
+// rapid upload
+func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+	tempFile, err := file.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
 	var sliceSize = partSize(file.GetSize())
 	count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
@@ -664,7 +676,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 				return err
 			}
-			up(int(threadG.Success()) * 100 / len(uploadUrls))
+			up(float64(threadG.Success()) * 100 / float64(len(uploadUrls)))
 			uploadProgress.UploadParts[i] = ""
 			return nil
 		})
@@ -741,69 +753,24 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string
 // legacy upload; the family cloud does not support overwriting
 func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	// needs the full file md5, so the stream must support io.Seek
-	tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
+	tempFile, err := file.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	// calculate md5
-	fileMd5 := md5.New()
-	if _, err := io.Copy(fileMd5, tempFile); err != nil {
+	fileMd5, err := utils.HashFile(utils.MD5, tempFile)
+	if err != nil {
 		return nil, err
 	}
-	if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
-		return nil, err
-	}
-	fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
 	// create the upload session
-	var uploadInfo CreateUploadFileResp
-	fullUrl := API_URL + "/createUploadFile.action"
-	if y.isFamily() {
-		fullUrl = API_URL + "/family/file/createFamilyFile.action"
-	}
-	_, err = y.post(fullUrl, func(req *resty.Request) {
-		req.SetContext(ctx)
-		if y.isFamily() {
-			req.SetQueryParams(map[string]string{
-				"familyId":     y.FamilyID,
-				"fileMd5":      fileMd5Hex,
-				"fileName":     file.GetName(),
-				"fileSize":     fmt.Sprint(file.GetSize()),
-				"parentId":     dstDir.GetID(),
-				"resumePolicy": "1",
-			})
-		} else {
-			req.SetFormData(map[string]string{
-				"parentFolderId": dstDir.GetID(),
-				"fileName":       file.GetName(),
-				"size":           fmt.Sprint(file.GetSize()),
-				"md5":            fileMd5Hex,
-				"opertype":       "3",
-				"flag":           "1",
-				"resumePolicy":   "1",
-				"isLog":          "0",
-				// "baseFileId": "",
-				// "lastWrite":"",
-				// "localPath": strings.ReplaceAll(param.LocalPath, "\\", "/"),
-				// "fileExt": "",
-			})
-		}
-	}, &uploadInfo)
+	uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
 	if err != nil {
 		return nil, err
 	}
 	// the file is not on the drive yet; start uploading
-	status := GetUploadFileStatusResp{CreateUploadFileResp: uploadInfo}
-	for status.Size < file.GetSize() && status.FileDataExists != 1 {
+	status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
+	for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
 		if utils.IsCanceled(ctx) {
 			return nil, ctx.Err()
 		}
@@ -842,28 +809,70 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 		if err != nil {
 			return nil, err
 		}
 		if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
 			return nil, err
 		}
-		up(int(status.Size / file.GetSize()))
+		up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
 	}
-	// commit
+	return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
+}
+// create the upload session
+func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
+	var uploadInfo CreateUploadFileResp
+	fullUrl := API_URL + "/createUploadFile.action"
+	if y.isFamily() {
+		fullUrl = API_URL + "/family/file/createFamilyFile.action"
+	}
+	_, err := y.post(fullUrl, func(req *resty.Request) {
+		req.SetContext(ctx)
+		if y.isFamily() {
+			req.SetQueryParams(map[string]string{
+				"familyId":     y.FamilyID,
+				"parentId":     parentID,
+				"fileMd5":      fileMd5,
+				"fileName":     fileName,
+				"fileSize":     fileSize,
+				"resumePolicy": "1",
+			})
+		} else {
+			req.SetFormData(map[string]string{
+				"parentFolderId": parentID,
+				"fileName":       fileName,
+				"size":           fileSize,
+				"md5":            fileMd5,
+				"opertype":       "3",
+				"flag":           "1",
+				"resumePolicy":   "1",
+				"isLog":          "0",
+			})
+		}
+	}, &uploadInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &uploadInfo, nil
+}
+// commit the uploaded file
+func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
 	var resp OldCommitUploadFileResp
-	_, err = y.post(status.FileCommitUrl, func(req *resty.Request) {
+	_, err := y.post(fileCommitUrl, func(req *resty.Request) {
 		req.SetContext(ctx)
 		if y.isFamily() {
 			req.SetHeaders(map[string]string{
 				"ResumePolicy": "1",
-				"UploadFileId": fmt.Sprint(status.UploadFileId),
+				"UploadFileId": fmt.Sprint(uploadFileID),
 				"FamilyId":     fmt.Sprint(y.FamilyID),
 			})
 		} else {
 			req.SetFormData(map[string]string{
 				"opertype":     "3",
 				"resumePolicy": "1",
-				"uploadFileId": fmt.Sprint(status.UploadFileId),
+				"uploadFileId": fmt.Sprint(uploadFileID),
 				"isLog":        "0",
 			})
 		}
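Taken together, this refactor splits the old monolithic OldUpload into OldUploadCreate and OldUploadCommit so that RapidUpload can reuse both and skip pushing bytes entirely. A condensed, hedged outline of the resulting flow (names taken from the diff; error handling abbreviated):

	// Sketch of the legacy-upload flow after the refactor.
	info, err := y.OldUploadCreate(ctx, dstDir.GetID(), md5Hex, name, size) // 1. open a session
	if err != nil {
		return nil, err
	}
	if info.FileDataExists == 1 { // 2. server already holds the content
		return y.OldUploadCommit(ctx, info.FileCommitUrl, info.UploadFileId)
	}
	// 3. otherwise push slices until the reported size catches up, then commit.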


@@ -3,6 +3,7 @@ package alist_v3
 import (
 	"context"
 	"fmt"
+	"io"
 	"net/http"
 	"path"
 	"strconv"
@@ -93,8 +94,10 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
 			Object: model.Object{
 				Name:     f.Name,
 				Modified: f.Modified,
+				Ctime:    f.Created,
 				Size:     f.Size,
 				IsFolder: f.IsDir,
+				HashInfo: utils.FromString(f.HashInfo),
 			},
 			Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
 		}
@@ -176,7 +179,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 			SetHeader("Password", d.MetaPassword).
 			SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
 			SetContentLength(true).
-			SetBody(stream.GetReadCloser())
+			SetBody(io.ReadCloser(stream))
 	})
 	return err
 }


@@ -18,9 +18,11 @@ type ObjResp struct {
 	Size     int64     `json:"size"`
 	IsDir    bool      `json:"is_dir"`
 	Modified time.Time `json:"modified"`
+	Created  time.Time `json:"created"`
 	Sign     string    `json:"sign"`
 	Thumb    string    `json:"thumb"`
 	Type     int       `json:"type"`
+	HashInfo string    `json:"hashinfo"`
 }
 type FsListResp struct {


@@ -14,6 +14,8 @@ import (
 	"os"
 	"time"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -67,7 +69,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
 		return nil
 	}
 	// init deviceID
-	deviceID := utils.GetSHA256Encode([]byte(d.UserID))
+	deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
 	// init privateKey
 	privateKey, _ := NewPrivateKeyFromHex(deviceID)
 	state := State{
@@ -163,14 +165,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }
-func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	file := model.FileStream{
-		Obj:        stream,
-		ReadCloser: stream,
-		Mimetype:   stream.GetMimetype(),
+func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
+	file := stream.FileStream{
+		Obj:      streamer,
+		Reader:   streamer,
+		Mimetype: streamer.GetMimetype(),
 	}
 	const DEFAULT int64 = 10485760
-	var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
+	var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
 	partInfoList := make([]base.Json, 0, count)
 	for i := 1; i <= count; i++ {
@@ -187,25 +189,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	}
 	var localFile *os.File
-	if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
-		localFile, _ = fileStream.ReadCloser.(*os.File)
+	if fileStream, ok := file.Reader.(*stream.FileStream); ok {
+		localFile, _ = fileStream.Reader.(*os.File)
 	}
 	if d.RapidUpload {
 		buf := bytes.NewBuffer(make([]byte, 0, 1024))
 		io.CopyN(buf, file, 1024)
-		reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
+		reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
 		if localFile != nil {
 			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
 				return err
 			}
 		} else {
 			// splice the consumed head back onto the stream
-			file.ReadCloser = struct {
+			file.Reader = struct {
 				io.Reader
 				io.Closer
 			}{
 				Reader: io.MultiReader(buf, file),
-				Closer: file,
+				Closer: &file,
 			}
 		}
 	} else {
@@ -281,7 +283,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 		if _, err = localFile.Seek(0, io.SeekStart); err != nil {
 			return err
 		}
-		file.ReadCloser = localFile
+		file.Reader = localFile
 	}
 	for i, partInfo := range resp.PartInfoList {
@@ -303,7 +305,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 		}
 		res.Body.Close()
 		if count > 0 {
-			up(i * 100 / count)
+			up(float64(i) * 100 / float64(count))
 		}
 	}
 	var resp2 base.Json


@@ -11,7 +11,7 @@ type Addition struct {
 	RefreshToken   string `json:"refresh_token" required:"true"`
 	OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
 	OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
-	OauthTokenURL  string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/ali_open/token"`
+	OauthTokenURL  string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
 	ClientID       string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
 	ClientSecret   string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
 	RemoveWay      string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`


@@ -1,6 +1,7 @@
 package aliyundrive_open
 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"time"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -46,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
 			Size:     f.Size,
 			Modified: f.UpdatedAt,
 			IsFolder: f.Type == "folder",
+			Ctime:    f.CreatedAt,
+			HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
 	}


@@ -3,14 +3,11 @@ package aliyundrive_open
 import (
 	"bytes"
 	"context"
-	"crypto/sha1"
 	"encoding/base64"
-	"encoding/hex"
 	"fmt"
 	"io"
 	"math"
 	"net/http"
-	"os"
 	"strconv"
 	"strings"
 	"time"
@@ -18,6 +15,7 @@ import (
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/go-resty/resty/v2"
@@ -33,19 +31,19 @@ func makePartInfos(size int) []base.Json {
 }
 func calPartSize(fileSize int64) int64 {
-	var partSize int64 = 20 * 1024 * 1024
+	var partSize int64 = 20 * utils.MB
 	if fileSize > partSize {
-		if fileSize > 1*1024*1024*1024*1024 { // file size over 1TB
-			partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
-		} else if fileSize > 768*1024*1024*1024 { // over 768GB
+		if fileSize > 1*utils.TB { // file size over 1TB
+			partSize = 5 * utils.GB // file part size 5GB
+		} else if fileSize > 768*utils.GB { // over 768GB
 			partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 parts
-		} else if fileSize > 512*1024*1024*1024 { // over 512GB
+		} else if fileSize > 512*utils.GB { // over 512GB
 			partSize = 82463373 // ≈ 78.6432MB
-		} else if fileSize > 384*1024*1024*1024 { // over 384GB
+		} else if fileSize > 384*utils.GB { // over 384GB
 			partSize = 54975582 // ≈ 52.4288MB
-		} else if fileSize > 256*1024*1024*1024 { // over 256GB
+		} else if fileSize > 256*utils.GB { // over 256GB
 			partSize = 41231687 // ≈ 39.3216MB
-		} else if fileSize > 128*1024*1024*1024 { // over 128GB
+		} else if fileSize > 128*utils.GB { // over 128GB
 			partSize = 27487791 // ≈ 26.2144MB
 		}
 	}
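The tier constants above are chosen to keep the part count at or below the 10,000-slice cap. A hypothetical sanity check (not part of the commit; assumes fmt and math are imported):

	// A 1 TiB file falls in the >768GB tier.
	size := int64(1) * utils.TB                          // 1099511627776 bytes
	ps := calPartSize(size)                              // 109951163 bytes ≈ 104.86 MB
	parts := int(math.Ceil(float64(size) / float64(ps))) // exactly 10000
	fmt.Println(ps, parts)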
@@ -127,17 +125,22 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
 	return pr, nil
 }
-func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
-	proofRange, err := getProofRange(d.AccessToken, fileSize)
+func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
+	proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
 	if err != nil {
 		return "", err
 	}
-	buf := make([]byte, proofRange.End-proofRange.Start)
-	_, err = file.ReadAt(buf, proofRange.Start)
+	length := proofRange.End - proofRange.Start
+	buf := bytes.NewBuffer(make([]byte, 0, length))
+	reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
 	if err != nil {
 		return "", err
 	}
-	return base64.StdEncoding.EncodeToString(buf), nil
+	_, err = io.CopyN(buf, reader, length)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
 }
 func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
@@ -145,70 +148,67 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 	// Part Size Unit: Bytes, Default: 20MB,
 	// Maximum number of slices 10,000, ≈195.3125GB
 	var partSize = calPartSize(stream.GetSize())
+	const dateFormat = "2006-01-02T15:04:05.000Z"
+	mtimeStr := stream.ModTime().UTC().Format(dateFormat)
+	ctimeStr := stream.CreateTime().UTC().Format(dateFormat)
 	createData := base.Json{
 		"drive_id":        d.DriveId,
 		"parent_file_id":  dstDir.GetID(),
 		"name":            stream.GetName(),
 		"type":            "file",
 		"check_name_mode": "ignore",
+		"local_modified_at": mtimeStr,
+		"local_created_at":  ctimeStr,
 	}
 	count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
 	createData["part_info_list"] = makePartInfos(count)
 	// rapid upload
-	rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
+	rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
 	if rapidUpload {
 		log.Debugf("[aliyundrive_open] start cal pre_hash")
 		// read 1024 bytes to calculate pre hash
-		buf := bytes.NewBuffer(make([]byte, 0, 1024))
-		_, err := io.CopyN(buf, stream, 1024)
+		reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
+		if err != nil {
+			return nil, err
+		}
+		hash, err := utils.HashReader(utils.SHA1, reader)
 		if err != nil {
 			return nil, err
 		}
 		createData["size"] = stream.GetSize()
-		createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
-		// if support seek, seek to start
-		if localFile, ok := stream.(io.Seeker); ok {
-			if _, err := localFile.Seek(0, io.SeekStart); err != nil {
-				return nil, err
-			}
-		} else {
-			// Put spliced head back to stream
-			stream.SetReadCloser(struct {
-				io.Reader
-				io.Closer
-			}{
-				Reader: io.MultiReader(buf, stream.GetReadCloser()),
-				Closer: stream.GetReadCloser(),
-			})
-		}
+		createData["pre_hash"] = hash
 	}
 	var createResp CreateResp
 	_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(createData).SetResult(&createResp)
 	})
+	var tmpF model.File
 	if err != nil {
 		if e.Code != "PreHashMatched" || !rapidUpload {
 			return nil, err
 		}
 		log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
-		// convert to local file
-		file, err := utils.CreateTempFile(stream, stream.GetSize())
-		if err != nil {
-			return nil, err
-		}
-		_ = stream.GetReadCloser().Close()
-		stream.SetReadCloser(file)
-		// calculate full hash
-		h := sha1.New()
-		_, err = io.Copy(h, file)
-		if err != nil {
-			return nil, err
+		hi := stream.GetHash()
+		hash := hi.GetHash(utils.SHA1)
+		if len(hash) <= 0 {
+			tmpF, err = stream.CacheFullInTempFile()
+			if err != nil {
+				return nil, err
+			}
+			hash, err = utils.HashFile(utils.SHA1, tmpF)
+			if err != nil {
+				return nil, err
+			}
 		}
 		delete(createData, "pre_hash")
 		createData["proof_version"] = "v1"
 		createData["content_hash_name"] = "sha1"
-		createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
-		createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
+		createData["content_hash"] = hash
+		createData["proof_code"], err = d.calProofCode(stream)
 		if err != nil {
 			return nil, fmt.Errorf("cal proof code error: %s", err.Error())
 		}
@@ -218,17 +218,15 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 		if err != nil {
 			return nil, err
 		}
-		// seek to start
-		if _, err = file.Seek(0, io.SeekStart); err != nil {
-			return nil, err
-		}
 	}
 	if !createResp.RapidUpload {
-		// 2. upload
+		// 2. normal upload
 		log.Debugf("[aliyundive_open] normal upload")
 		preTime := time.Now()
+		var offset, length int64 = 0, partSize
+		//var length
 		for i := 0; i < len(createResp.PartInfoList); i++ {
 			if utils.IsCanceled(ctx) {
 				return nil, ctx.Err()
@@ -241,9 +239,16 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 			}
 				preTime = time.Now()
 			}
-			rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+			if remain := stream.GetSize() - offset; length > remain {
+				length = remain
+			}
+			//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
+			rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
+			if err != nil {
+				return nil, err
+			}
 			err = retry.Do(func() error {
-				rd.Reset()
+				//rd.Reset()
 				return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
 			},
 				retry.Attempts(3),
@@ -252,6 +257,8 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
 			if err != nil {
 				return nil, err
 			}
+			offset += partSize
+			up(float64(i*100) / float64(count))
 		}
 	} else {
 		log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
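The new loop replaces a rewindable LimitReader with explicit range reads, so a failed part can be retried without rewinding the whole stream. A condensed, hypothetical sketch of the offset arithmetic (simplified from the loop above):

	// Fixed-size windows over the stream; the final window may be short.
	var offset, length int64 = 0, partSize
	for i := 0; i < count; i++ {
		if remain := stream.GetSize() - offset; length > remain {
			length = remain
		}
		rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
		if err != nil {
			return nil, err
		}
		if err = d.uploadPart(ctx, rd, createResp.PartInfoList[i]); err != nil {
			return nil, err
		}
		offset += partSize // advance by a full window even after a short read
	}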


@@ -26,7 +26,7 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
 	//var resp base.TokenResp
 	var e ErrResp
 	res, err := base.RestyClient.R().
-		ForceContentType("application/json").
+		//ForceContentType("application/json").
 		SetBody(base.Json{
 			"client_id":     d.ClientID,
 			"client_secret": d.ClientSecret,
@@ -45,7 +45,7 @@ func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
 	}
 	refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
 	if refresh == "" {
-		return "", "", errors.New("failed to refresh token: refresh token is empty")
+		return "", "", fmt.Errorf("failed to refresh token: refresh token is empty, resp: %s", res.String())
 	}
 	curSub, err := getSub(d.RefreshToken)
 	if err != nil {
@@ -86,7 +86,7 @@ func (d *AliyundriveOpen) refreshToken() error {
 	if err != nil {
 		return err
 	}
-	log.Infof("[ali_open] toekn exchange: %s -> %s", d.RefreshToken, refresh)
+	log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
 	d.RefreshToken, d.AccessToken = refresh, access
 	op.MustSaveDriverStorage(d)
 	return nil


@@ -44,6 +44,7 @@ func fileToObj(f File) *model.ObjThumb {
 			Name:     f.Name,
 			Size:     f.Size,
 			Modified: f.UpdatedAt,
+			Ctime:    f.CreatedAt,
 			IsFolder: f.Type == "folder",
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},


@@ -3,6 +3,7 @@ package drivers
 import (
 	_ "github.com/alist-org/alist/v3/drivers/115"
 	_ "github.com/alist-org/alist/v3/drivers/123"
+	_ "github.com/alist-org/alist/v3/drivers/123_link"
 	_ "github.com/alist-org/alist/v3/drivers/123_share"
 	_ "github.com/alist-org/alist/v3/drivers/139"
 	_ "github.com/alist-org/alist/v3/drivers/189"


@@ -5,11 +5,9 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"errors"
-	"fmt"
 	"io"
 	"math"
 	"net/url"
-	"os"
 	stdpath "path"
 	"strconv"
 	"time"
@@ -29,10 +27,9 @@ type BaiduNetdisk struct {
 	Addition
 	uploadThread int
+	vipType      int // vip type: 0 regular user (4G/4M), 1 vip (10G/16M), 2 svip (20G/32M)
 }
-const DefaultSliceSize int64 = 4 * 1024 * 1024
 func (d *BaiduNetdisk) Config() driver.Config {
 	return config
 }
@@ -55,7 +52,11 @@ func (d *BaiduNetdisk) Init(ctx context.Context) error {
 		"method": "uinfo",
 	}, nil)
 	log.Debugf("[baidu] get uinfo: %s", string(res))
-	return err
+	if err != nil {
+		return err
+	}
+	d.vipType = utils.Json.Get(res, "vip_type").ToInt()
+	return nil
 }
 func (d *BaiduNetdisk) Drop(ctx context.Context) error {
@@ -81,7 +82,7 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
 func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
 	var newDir File
-	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir)
+	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
 	if err != nil {
 		return nil, err
 	}
@@ -147,28 +148,50 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }
+func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
+	contentMd5 := stream.GetHash().GetHash(utils.MD5)
+	if len(contentMd5) < utils.MD5.Width {
+		return nil, errors.New("invalid hash")
+	}
+	streamSize := stream.GetSize()
+	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
+	mtime := stream.ModTime().Unix()
+	ctime := stream.CreateTime().Unix()
+	blockList, _ := utils.Json.MarshalToString([]string{contentMd5})
+	var newFile File
+	_, err := d.create(path, streamSize, 0, "", blockList, &newFile, mtime, ctime)
+	if err != nil {
+		return nil, err
+	}
+	return fileToObj(newFile), nil
+}
 func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	// rapid upload
+	if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
+		return newObj, nil
+	}
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
 	streamSize := stream.GetSize()
-	count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1))
-	lastBlockSize := streamSize % DefaultSliceSize
+	sliceSize := d.getSliceSize()
+	count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
+	lastBlockSize := streamSize % sliceSize
 	if streamSize > 0 && lastBlockSize == 0 {
-		lastBlockSize = DefaultSliceSize
+		lastBlockSize = sliceSize
 	}
 	//cal md5 for first 256k data
 	const SliceSize int64 = 256 * 1024
 	// cal md5
 	blockList := make([]string, 0, count)
-	byteSize := DefaultSliceSize
+	byteSize := sliceSize
 	fileMd5H := md5.New()
 	sliceMd5H := md5.New()
 	sliceMd5H2 := md5.New()
@@ -191,23 +214,31 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
 	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
 	blockListStr, _ := utils.Json.MarshalToString(blockList)
-	rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
-	path := encodeURIComponent(rawPath)
+	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
+	mtime := stream.ModTime().Unix()
+	ctime := stream.CreateTime().Unix()
 	// step.1 precreate
 	// try to resume previous progress
 	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
 	if !ok {
-		data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s",
-			path, streamSize,
-			blockListStr,
-			contentMd5, sliceMd5)
 		params := map[string]string{
 			"method": "precreate",
 		}
-		log.Debugf("[baidu_netdisk] precreate data: %s", data)
-		_, err = d.post("/xpan/file", params, data, &precreateResp)
+		form := map[string]string{
+			"path":        path,
+			"size":        strconv.FormatInt(streamSize, 10),
+			"isdir":       "0",
+			"autoinit":    "1",
+			"rtype":       "3",
+			"block_list":  blockListStr,
+			"content-md5": contentMd5,
+			"slice-md5":   sliceMd5,
+		}
+		joinTime(form, ctime, mtime)
+		log.Debugf("[baidu_netdisk] precreate data: %s", form)
+		_, err = d.postForm("/xpan/file", params, form, &precreateResp)
 		if err != nil {
 			return nil, err
 		}
@@ -230,7 +261,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 			break
 		}
-		i, partseq, offset, byteSize := i, partseq, int64(partseq)*DefaultSliceSize, DefaultSliceSize
+		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
 		if partseq+1 == count {
 			byteSize = lastBlockSize
 		}
@@ -247,7 +278,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 			if err != nil {
 				return err
 			}
-			up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
+			up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
 			precreateResp.BlockList[i] = -1
 			return nil
 		})
@@ -263,12 +294,13 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 	// step.3 create the file
 	var newFile File
-	_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile)
+	_, err = d.create(path, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
 	if err != nil {
 		return nil, err
 	}
 	return fileToObj(newFile), nil
 }
 func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
 	res, err := base.RestyClient.R().
 		SetContext(ctx).


@@ -40,11 +40,11 @@ type File struct {
 	Isdir int `json:"isdir"`
 	// list resp
-	//ServerCtime int64 `json:"server_ctime"`
+	ServerCtime int64 `json:"server_ctime"`
 	ServerMtime int64 `json:"server_mtime"`
-	//ServerAtime int64 `json:"server_atime"`
-	//LocalCtime int64 `json:"local_ctime"`
-	//LocalMtime int64 `json:"local_mtime"`
+	LocalMtime  int64 `json:"local_mtime"`
+	LocalCtime  int64 `json:"local_ctime"`
+	//ServerAtime int64 `json:"server_atime"`
 	// only create and precreate resp
 	Ctime int64 `json:"ctime"`
@@ -55,8 +55,11 @@ func fileToObj(f File) *model.ObjThumb {
 	if f.ServerFilename == "" {
 		f.ServerFilename = path.Base(f.Path)
 	}
-	if f.ServerMtime == 0 {
-		f.ServerMtime = int64(f.Mtime)
+	if f.LocalCtime == 0 {
+		f.LocalCtime = f.Ctime
+	}
+	if f.LocalMtime == 0 {
+		f.LocalMtime = f.Mtime
 	}
 	return &model.ObjThumb{
 		Object: model.Object{
@@ -64,8 +67,12 @@ func fileToObj(f File) *model.ObjThumb {
 			Path: f.Path,
 			Name: f.ServerFilename,
 			Size: f.Size,
-			Modified: time.Unix(f.ServerMtime, 0),
+			Modified: time.Unix(f.LocalMtime, 0),
+			Ctime:    time.Unix(f.LocalCtime, 0),
 			IsFolder: f.Isdir == 1,
+			// the MD5 returned directly by the API is wrong
+			// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
 		},
 		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
 	}


@@ -1,11 +1,10 @@
 package baidu_netdisk
 import (
+	"errors"
 	"fmt"
 	"net/http"
-	"net/url"
 	"strconv"
-	"strings"
 	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -22,7 +21,7 @@ import (
 func (d *BaiduNetdisk) refreshToken() error {
 	err := d._refreshToken()
-	if err != nil && err == errs.EmptyToken {
+	if err != nil && errors.Is(err, errs.EmptyToken) {
 		err = d._refreshToken()
 	}
 	return err
@@ -74,21 +73,16 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
 				log.Info("refreshing baidu_netdisk token.")
 				err2 := d.refreshToken()
 				if err2 != nil {
-					return err2
+					return retry.Unrecoverable(err2)
 				}
 			}
-			return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
+			err2 := fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
+			if !utils.SliceContains([]int{2}, errno) {
+				err2 = retry.Unrecoverable(err2)
+			}
+			return err2
 		}
 		result = res.Body()
 		return nil
 	},
 		retry.LastErrorOnly(true),
-		retry.Attempts(5),
+		retry.Attempts(3),
 		retry.Delay(time.Second),
 		retry.DelayType(retry.BackOffDelay))
 	return result, err
@@ -100,10 +94,10 @@ func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp inter
 	}, resp)
 }
-func (d *BaiduNetdisk) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) {
+func (d *BaiduNetdisk) postForm(pathname string, params map[string]string, form map[string]string, resp interface{}) ([]byte, error) {
 	return d.request("https://pan.baidu.com/rest/2.0"+pathname, http.MethodPost, func(req *resty.Request) {
 		req.SetQueryParams(params)
-		req.SetBody(data)
+		req.SetFormData(form)
 	}, resp)
 }
@@ -158,6 +152,9 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
 	//if res.StatusCode() == 302 {
 	u = res.Header().Get("location")
 	//}
+	updateObjMd5(file, "pan.baidu.com", u)
 	return &model.Link{
 		URL: u,
 		Header: http.Header{
@@ -180,6 +177,9 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
 	if err != nil {
 		return nil, err
 	}
+	updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
 	return &model.Link{
 		URL: resp.Info[0].Dlink,
 		Header: http.Header{
@@ -194,23 +194,73 @@ func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
 		"opera": opera,
 	}
 	marshal, _ := utils.Json.MarshalToString(filelist)
-	data := fmt.Sprintf("async=0&filelist=%s&ondup=fail", marshal)
-	return d.post("/xpan/file", params, data, nil)
+	return d.postForm("/xpan/file", params, map[string]string{
+		"async":    "0",
+		"filelist": marshal,
+		"ondup":    "fail",
+	}, nil)
 }
-func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any) ([]byte, error) {
+func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
 	params := map[string]string{
 		"method": "create",
 	}
-	data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
-	if uploadid != "" {
-		data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
+	form := map[string]string{
+		"path":  path,
+		"size":  strconv.FormatInt(size, 10),
+		"isdir": strconv.Itoa(isdir),
+		"rtype": "3",
 	}
-	return d.post("/xpan/file", params, data, resp)
+	if mtime != 0 && ctime != 0 {
+		joinTime(form, ctime, mtime)
+	}
+	if uploadid != "" {
+		form["uploadid"] = uploadid
+	}
+	if block_list != "" {
+		form["block_list"] = block_list
+	}
+	return d.postForm("/xpan/file", params, form, resp)
 }
-func encodeURIComponent(str string) string {
-	r := url.QueryEscape(str)
-	r = strings.ReplaceAll(r, "+", "%20")
-	return r
+func joinTime(form map[string]string, ctime, mtime int64) {
+	form["local_mtime"] = strconv.FormatInt(mtime, 10)
+	form["local_ctime"] = strconv.FormatInt(ctime, 10)
 }
+func updateObjMd5(obj model.Obj, userAgent, u string) {
+	object := model.GetRawObject(obj)
+	if object != nil {
+		req, _ := http.NewRequest(http.MethodHead, u, nil)
+		req.Header.Add("User-Agent", userAgent)
+		resp, _ := base.HttpClient.Do(req)
+		if resp != nil {
+			contentMd5 := resp.Header.Get("Content-Md5")
+			object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
+		}
+	}
+}
+const (
+	DefaultSliceSize int64 = 4 * utils.MB
+	VipSliceSize           = 16 * utils.MB
+	SVipSliceSize          = 32 * utils.MB
+)
+func (d *BaiduNetdisk) getSliceSize() int64 {
+	switch d.vipType {
+	case 1:
+		return VipSliceSize
+	case 2:
+		return SVipSliceSize
+	default:
+		return DefaultSliceSize
+	}
+}
+// func encodeURIComponent(str string) string {
+// 	r := url.QueryEscape(str)
+// 	r = strings.ReplaceAll(r, "+", "%20")
+// 	return r
+// }
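A hypothetical illustration of what the slice-size tiers mean for a 1 GiB upload (not part of the commit; assumes fmt and math are imported):

	// Slice counts for a 1 GiB file by vip_type.
	for vip, want := range map[int]int{0: 256, 1: 64, 2: 32} {
		d := &BaiduNetdisk{vipType: vip}
		ss := d.getSliceSize()
		got := int(math.Ceil(float64(1*utils.GB) / float64(ss)))
		fmt.Println(vip, ss, got == want) // 4MB→256, 16MB→64, 32MB→32 slices
	}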


@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"io"
 	"math"
-	"os"
 	"regexp"
 	"strconv"
 	"strings"
@@ -228,15 +227,14 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		return nil, fmt.Errorf("file size cannot be zero")
 	}
+	// TODO:
+	// no rapid-upload method has been found yet
 	// needs the full file md5, so the stream must support io.Seek
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
 	const DEFAULT int64 = 1 << 22
 	const SliceSize int64 = 1 << 18
@@ -331,7 +329,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 			if err != nil {
 				return err
 			}
-			up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
+			up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
 			precreateResp.BlockList[i] = -1
 			return nil
 		})


@@ -61,12 +61,12 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile {
 func renameAlbum(album *Album, newName string) *Album {
 	return &Album{
 		AlbumID:  album.AlbumID,
 		Tid:      album.Tid,
 		JoinTime: album.JoinTime,
-		CreateTime: album.CreateTime,
+		CreationTime: album.CreationTime,
 		Title: newName,
 		Mtime: time.Now().Unix(),
 	}
 }


@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"time"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/internal/model"
 )
@@ -51,22 +53,17 @@ type (
 		Ctime    int64    `json:"ctime"` // create time (s)
 		Mtime    int64    `json:"mtime"` // modify time (s)
 		Thumburl []string `json:"thumburl"`
+		Md5      string   `json:"md5"`
-
-		parseTime *time.Time
 	}
 )
 func (c *File) GetSize() int64  { return c.Size }
 func (c *File) GetName() string { return getFileName(c.Path) }
-func (c *File) ModTime() time.Time {
-	if c.parseTime == nil {
-		c.parseTime = toTime(c.Mtime)
-	}
-	return *c.parseTime
-}
-func (c *File) IsDir() bool     { return false }
-func (c *File) GetID() string   { return "" }
-func (c *File) GetPath() string { return "" }
+func (c *File) CreateTime() time.Time { return time.Unix(c.Ctime, 0) }
+func (c *File) ModTime() time.Time    { return time.Unix(c.Mtime, 0) }
+func (c *File) IsDir() bool           { return false }
+func (c *File) GetID() string         { return "" }
+func (c *File) GetPath() string       { return "" }
 func (c *File) Thumb() string {
 	if len(c.Thumburl) > 0 {
 		return c.Thumburl[0]
@@ -74,6 +71,10 @@ func (c *File) Thumb() string {
 	return ""
 }
+func (c *File) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(utils.MD5, c.Md5)
+}
 /* album section */
 type (
 	AlbumListResp struct {
@@ -84,12 +85,12 @@ type (
 	}
 	Album struct {
 		AlbumID  string `json:"album_id"`
 		Tid      int64  `json:"tid"`
 		Title    string `json:"title"`
 		JoinTime int64  `json:"join_time"`
-		CreateTime int64 `json:"create_time"`
+		CreationTime int64 `json:"create_time"`
 		Mtime    int64  `json:"mtime"`
 		parseTime *time.Time
 	}
@@ -109,17 +110,17 @@ type (
 	}
 )
+func (a *Album) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
 func (a *Album) GetSize() int64  { return 0 }
 func (a *Album) GetName() string { return a.Title }
-func (a *Album) ModTime() time.Time {
-	if a.parseTime == nil {
-		a.parseTime = toTime(a.Mtime)
-	}
-	return *a.parseTime
-}
-func (a *Album) IsDir() bool     { return true }
-func (a *Album) GetID() string   { return "" }
-func (a *Album) GetPath() string { return "" }
+func (a *Album) CreateTime() time.Time { return time.Unix(a.CreationTime, 0) }
+func (a *Album) ModTime() time.Time    { return time.Unix(a.Mtime, 0) }
+func (a *Album) IsDir() bool           { return true }
+func (a *Album) GetID() string         { return "" }
+func (a *Album) GetPath() string       { return "" }
 type (
 	CopyFileResp struct {


@@ -49,7 +49,19 @@ func (d *Cloudreve) List(ctx context.Context, dir model.Obj, args model.ListArgs
 	}
 	return utils.SliceConvert(r.Objects, func(src Object) (model.Obj, error) {
-		return objectToObj(src), nil
+		thumb, err := d.GetThumb(src)
+		if err != nil {
+			return nil, err
+		}
+		if src.Type == "dir" && d.EnableThumbAndFolderSize {
+			var dprop DirectoryProp
+			err = d.request(http.MethodGet, "/object/property/"+src.Id+"?is_folder=true", nil, &dprop)
+			if err != nil {
+				return nil, err
+			}
+			src.Size = dprop.Size
+		}
+		return objectToObj(src, thumb), nil
 	})
 }
@@ -115,7 +127,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
 }
 func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	if stream.GetReadCloser() == http.NoBody {
+	if io.ReadCloser(stream) == http.NoBody {
 		return d.create(ctx, dstDir, stream)
 	}
 	var r DirectoryResp

View File

@@ -9,11 +9,12 @@ type Addition struct {
 	// Usually one of two
 	driver.RootPath
 	// define other
-	Address  string `json:"address" required:"true"`
-	Username string `json:"username"`
-	Password string `json:"password"`
-	Cookie   string `json:"cookie"`
-	CustomUA string `json:"custom_ua"`
+	Address                  string `json:"address" required:"true"`
+	Username                 string `json:"username"`
+	Password                 string `json:"password"`
+	Cookie                   string `json:"cookie"`
+	CustomUA                 string `json:"custom_ua"`
+	EnableThumbAndFolderSize bool   `json:"enable_thumb_and_folder_size"`
 }
 
 var config = driver.Config{

@@ -44,13 +44,20 @@ type Object struct {
 	SourceEnabled bool   `json:"source_enabled"`
 }
 
-func objectToObj(f Object) *model.Object {
-	return &model.Object{
-		ID:       f.Id,
-		Name:     f.Name,
-		Size:     int64(f.Size),
-		Modified: f.Date,
-		IsFolder: f.Type == "dir",
+type DirectoryProp struct {
+	Size int `json:"size"`
+}
+
+func objectToObj(f Object, t model.Thumbnail) *model.ObjThumb {
+	return &model.ObjThumb{
+		Object: model.Object{
+			ID:       f.Id,
+			Name:     f.Name,
+			Size:     int64(f.Size),
+			Modified: f.Date,
+			IsFolder: f.Type == "dir",
+		},
+		Thumbnail: t,
 	}
 }

@@ -149,3 +149,26 @@ func convertSrc(obj model.Obj) map[string]interface{} {
 	m["items"] = items
 	return m
 }
+
+func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
+	if !d.Addition.EnableThumbAndFolderSize {
+		return model.Thumbnail{}, nil
+	}
+	ua := d.CustomUA
+	if ua == "" {
+		ua = base.UserAgent
+	}
+	req := base.NoRedirectClient.R()
+	req.SetHeaders(map[string]string{
+		"Cookie":     "cloudreve-session=" + d.Cookie,
+		"Accept":     "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
+		"User-Agent": ua,
+	})
+	resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
+	if err != nil {
+		return model.Thumbnail{}, err
+	}
+	return model.Thumbnail{
+		Thumbnail: resp.Header().Get("Location"),
+	}, nil
+}
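GetThumb works because Cloudreve answers /api/v3/file/thumb/{id} with a 302 pointing at the actual image, and base.NoRedirectClient stops at that response so the Location header can be read instead of the image bytes being downloaded. The same trick with the standard library alone, as a hedged standalone sketch (the URL is hypothetical):

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	// Returning ErrUseLastResponse makes the client stop at the 302
    	// instead of following it, so the Location header stays readable.
    	client := &http.Client{
    		CheckRedirect: func(req *http.Request, via []*http.Request) error {
    			return http.ErrUseLastResponse
    		},
    	}
    	resp, err := client.Get("https://cloudreve.example.com/api/v3/file/thumb/some-id")
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	fmt.Println(resp.Header.Get("Location")) // the thumbnail's real URL
    }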


@@ -3,8 +3,8 @@ package crypt
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
-	"net/http"
 	stdpath "path"
 	"regexp"
 	"strings"
@@ -13,10 +13,10 @@ import (
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/fs"
 	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/alist-org/alist/v3/server/common"
 	rcCrypt "github.com/rclone/rclone/backend/crypt"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/obscure"
@@ -55,6 +55,8 @@ func (d *Crypt) Init(ctx context.Context) error {
 	if !isCryptExt(d.EncryptedSuffix) {
 		return fmt.Errorf("EncryptedSuffix is Illegal")
 	}
+	d.FileNameEncoding = utils.GetNoneEmpty(d.FileNameEncoding, "base64")
+	d.EncryptedSuffix = utils.GetNoneEmpty(d.EncryptedSuffix, ".bin")
 	op.MustSaveDriverStorage(d)
@@ -72,7 +74,7 @@ func (d *Crypt) Init(ctx context.Context) error {
 		"password2":                 p2,
 		"filename_encryption":       d.FileNameEnc,
 		"directory_name_encryption": d.DirNameEnc,
-		"filename_encoding":         "base64",
+		"filename_encoding":         d.FileNameEncoding,
 		"suffix":                    d.EncryptedSuffix,
 		"pass_bad_blocks":           "",
 	}
@@ -82,7 +84,6 @@ func (d *Crypt) Init(ctx context.Context) error {
 	}
 	d.cipher = c
-	//c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil)
 	return nil
 }
@@ -128,6 +129,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 				Size:     0,
 				Modified: obj.ModTime(),
 				IsFolder: obj.IsDir(),
+				Ctime:    obj.CreateTime(),
+				// discarding hash as it's encrypted
 			}
 			result = append(result, &objRes)
 		} else {
@@ -147,8 +150,13 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 				Size:     size,
 				Modified: obj.ModTime(),
 				IsFolder: obj.IsDir(),
+				Ctime:    obj.CreateTime(),
+				// discarding hash as it's encrypted
 			}
-			if !ok {
+			if d.Thumbnail && thumb == "" {
+				thumb = utils.EncodePath(common.GetApiUrl(nil)+stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
+			}
+			if !ok && !d.Thumbnail {
 				result = append(result, &objRes)
 			} else {
 				objWithThumb := model.ObjThumb{
@@ -232,70 +240,53 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 		return nil, err
 	}
-	if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 {
+	if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
 		return nil, fmt.Errorf("the remote storage driver needs to be enhanced to support encryption")
 	}
 	remoteFileSize := remoteFile.GetSize()
-	remoteClosers := utils.NewClosers()
+	remoteClosers := utils.EmptyClosers()
 	rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
 		length := underlyingLength
 		if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
 			length = -1
 		}
-		if remoteLink.RangeReadCloser.RangeReader != nil {
+		rrc := remoteLink.RangeReadCloser
+		if len(remoteLink.URL) > 0 {
+			rangedRemoteLink := &model.Link{
+				URL:    remoteLink.URL,
+				Header: remoteLink.Header,
+			}
+			var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
+			if err != nil {
+				return nil, err
+			}
+			rrc = converted
+		}
+		if rrc != nil {
 			//remoteRangeReader, err :=
-			remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length})
-			remoteClosers.Add(remoteLink.RangeReadCloser.Closers)
+			remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
+			remoteClosers.AddClosers(rrc.GetClosers())
 			if err != nil {
 				return nil, err
 			}
 			return remoteReader, nil
 		}
-		if remoteLink.ReadSeekCloser != nil {
-			_, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart)
+		if remoteLink.MFile != nil {
+			_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
 			if err != nil {
 				return nil, err
 			}
-			//remoteClosers.Add(remoteLink.ReadSeekCloser)
-			//keep reuse same ReadSeekCloser and close at last.
-			return io.NopCloser(remoteLink.ReadSeekCloser), nil
+			//remoteClosers.Add(remoteLink.MFile)
+			//keep reusing the same MFile and close it at last.
+			remoteClosers.Add(remoteLink.MFile)
+			return io.NopCloser(remoteLink.MFile), nil
 		}
-		if len(remoteLink.URL) > 0 {
-			rangedRemoteLink := &model.Link{
-				URL:    remoteLink.URL,
-				Header: remoteLink.Header,
-			}
-			response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length)
-			//remoteClosers.Add(response.Body)
-			if err != nil {
-				return nil, fmt.Errorf("remote storage http request failure, status: %d err: %s", response.StatusCode, err)
-			}
-			if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent {
-				return response.Body, nil
-			} else if response.StatusCode == http.StatusOK {
-				log.Warnf("remote http server not supporting range request, expect low performance!")
-				readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length)
-				if err != nil {
-					return nil, err
-				}
-				return readCloser, nil
-			}
-			return response.Body, nil
-		}
-		//if remoteLink.Data != nil {
-		//	log.Warnf("remote storage not supporting range request, expect low performance!")
-		//	readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length)
-		//	remoteCloser = remoteLink.Data
-		//	if err != nil {
-		//		return nil, err
-		//	}
-		//	return readCloser, nil
-		//}
 		return nil, errs.NotSupport
 	}
-	resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+	resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 		readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
 		if err != nil {
 			return nil, err
@@ -306,7 +297,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 	resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
 	resultLink := &model.Link{
 		Header:          remoteLink.Header,
-		RangeReadCloser: *resultRangeReadCloser,
+		RangeReadCloser: resultRangeReadCloser,
 		Expiration:      remoteLink.Expiration,
 	}
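What Link assembles here, in miniature: a plaintext range request is satisfied by asking the cipher which ciphertext range it needs, fetching that range from the remote, and decrypting on the fly. A reduced sketch of the control flow, with simplified types standing in for the repo's model and http_range packages (names here are illustrative, not the actual definitions):

    // Simplified stand-ins for model.RangeReadCloser / http_range.Range.
    type Range struct{ Start, Length int64 }
    type RangeReader func(ctx context.Context, r Range) (io.ReadCloser, error)

    // wrapDecrypt turns a ciphertext range reader into a plaintext range reader.
    // decryptSeek plays the role of d.cipher.DecryptDataSeek: it knows the
    // ciphertext layout and pulls the bytes it needs through the inner reader.
    func wrapDecrypt(inner RangeReader, decryptSeek func(ctx context.Context, src RangeReader, off, n int64) (io.ReadCloser, error)) RangeReader {
    	return func(ctx context.Context, r Range) (io.ReadCloser, error) {
    		return decryptSeek(ctx, inner, r.Start, r.Length)
    	}
    }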
@@ -370,32 +361,32 @@ func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
 	return op.Remove(ctx, d.remoteStorage, remoteActualPath)
 }
 
-func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
 	dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
 	if err != nil {
 		return fmt.Errorf("failed to convert path to remote path: %w", err)
 	}
-	in := stream.GetReadCloser()
 	// Encrypt the data into wrappedIn
-	wrappedIn, err := d.cipher.EncryptData(in)
+	wrappedIn, err := d.cipher.EncryptData(streamer)
 	if err != nil {
 		return fmt.Errorf("failed to EncryptData: %w", err)
 	}
-	streamOut := &model.FileStream{
+	// doesn't support seekableStream, since rapid-upload does not work for encrypted data
+	streamOut := &stream.FileStream{
 		Obj: &model.Object{
-			ID:       stream.GetID(),
-			Path:     stream.GetPath(),
-			Name:     d.cipher.EncryptFileName(stream.GetName()),
-			Size:     d.cipher.EncryptedSize(stream.GetSize()),
-			Modified: stream.ModTime(),
-			IsFolder: stream.IsDir(),
+			ID:       streamer.GetID(),
+			Path:     streamer.GetPath(),
+			Name:     d.cipher.EncryptFileName(streamer.GetName()),
+			Size:     d.cipher.EncryptedSize(streamer.GetSize()),
+			Modified: streamer.ModTime(),
+			IsFolder: streamer.IsDir(),
 		},
-		ReadCloser:   io.NopCloser(wrappedIn),
+		Reader:       wrappedIn,
 		Mimetype:     "application/octet-stream",
-		WebPutAsTask: stream.NeedStore(),
-		Old:          stream.GetOld(),
+		WebPutAsTask: streamer.NeedStore(),
+		Exist:        streamer.GetExist(),
 	}
 	err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
 	if err != nil {

@@ -15,16 +15,13 @@ type Addition struct {
 	DirNameEnc string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
 	RemotePath string `json:"remote_path" required:"true" help:"This is where the encrypted data stores"`
 	Password   string `json:"password" required:"true" confidential:"true" help:"the main password"`
-	Salt            string `json:"salt" confidential:"true" help:"If you don't know what is salt, treat it as a second password'. Optional but recommended"`
-	EncryptedSuffix string `json:"encrypted_suffix" required:"true" default:".bin" help:"encrypted files will have this suffix"`
-}
-
-/*// inMemory contains decrypted confidential info and other temp data. will not persist these info anywhere
-type inMemory struct {
-	password string
-	salt     string
-}*/
+	Salt             string `json:"salt" confidential:"true" help:"If you don't know what salt is, treat it as a second password. Optional but recommended"`
+	EncryptedSuffix  string `json:"encrypted_suffix" required:"true" default:".bin" help:"for advanced users only! encrypted files will have this suffix"`
+	FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced users only!"`
+	Thumbnail        bool   `json:"thumbnail" required:"true" default:"false" help:"enable thumbnails, which are pre-generated under the .thumbnails folder"`
+}
 
 var config = driver.Config{
 	Name: "Crypt",

@@ -1,24 +1,13 @@
 package crypt
 
 import (
-	"net/http"
 	stdpath "path"
 	"path/filepath"
 	"strings"
 
-	"github.com/alist-org/alist/v3/internal/model"
-	"github.com/alist-org/alist/v3/internal/net"
 	"github.com/alist-org/alist/v3/internal/op"
-	"github.com/alist-org/alist/v3/pkg/http_range"
 )
 
-func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) {
-	header := net.ProcessHeader(http.Header{}, link.Header)
-	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
-
-	return net.RequestHttp("GET", header, link.URL)
-}
-
 // gives the best guess based on the path
 func guessPath(path string) (isFolder, secondTry bool) {
 	if strings.HasSuffix(path, "/") {

@@ -203,7 +203,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		_ = res.Body.Close()
 
 		if count > 0 {
-			up((i + 1) * 100 / count)
+			up(float64(i+1) * 100 / float64(count))
 		}
 
 		offset += byteSize
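This hunk and several later ones (Mega, MoPan, OneDrive, Quark, Teambition, S3) are one mechanical change: driver.UpdateProgress now takes a float64 percentage instead of an int, so progress no longer truncates to whole percents. Assuming the callback is defined roughly as below:

    // Assumed shape of the callback after this refactor.
    type UpdateProgress func(percentage float64)

    // With int math, (i+1)*100/count loses the fraction; with float64,
    // chunk 1 of 3 reports 33.33... instead of a truncated 33.
    func report(up UpdateProgress, done, total int) {
    	up(float64(done) * 100 / float64(total))
    }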


@@ -64,9 +64,9 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 		return nil, err
 	}
 
-	r := NewFTPFileReader(d.conn, file.GetPath())
+	r := NewFileReader(d.conn, file.GetPath(), file.GetSize())
 	link := &model.Link{
-		ReadSeekCloser: r,
+		MFile: r,
 	}
 	return link, nil
 }

@@ -4,6 +4,7 @@ import (
 	"io"
 	"os"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/jlaffaye/ftp"
@@ -30,43 +31,59 @@ func (d *FTP) login() error {
 	return nil
 }
 
-// An FTP file reader that implements io.ReadSeekCloser for seeking.
-type FTPFileReader struct {
-	conn   *ftp.ServerConn
-	resp   *ftp.Response
-	offset int64
-	mu     sync.Mutex
-	path   string
+// FileReader is an FTP file reader that supports Read, ReadAt, and Seek.
+type FileReader struct {
+	conn         *ftp.ServerConn
+	resp         *ftp.Response
+	offset       atomic.Int64
+	readAtOffset int64
+	mu           sync.Mutex
+	path         string
+	size         int64
 }
 
-func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
-	return &FTPFileReader{
+func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
+	return &FileReader{
 		conn: conn,
 		path: path,
+		size: size,
 	}
 }
 
-func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
+func (r *FileReader) Read(buf []byte) (n int, err error) {
+	n, err = r.ReadAt(buf, r.offset.Load())
+	r.offset.Add(int64(n))
+	return
+}
+
+func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return -1, os.ErrInvalid
+	}
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
+	if off != r.readAtOffset {
+		// have to restart the connection to correct the offset
+		_ = r.resp.Close()
+		r.resp = nil
+	}
+
 	if r.resp == nil {
-		r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
+		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
+		r.readAtOffset = off
 		if err != nil {
 			return 0, err
 		}
 	}
 
 	n, err = r.resp.Read(buf)
-	r.offset += int64(n)
+	r.readAtOffset += int64(n)
 	return
 }
 
-func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
-
-	oldOffset := r.offset
+func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
+	oldOffset := r.offset.Load()
 	var newOffset int64
 	switch whence {
 	case io.SeekStart:
@@ -74,11 +91,7 @@ func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
 	case io.SeekCurrent:
 		newOffset = oldOffset + offset
 	case io.SeekEnd:
-		size, err := r.conn.FileSize(r.path)
-		if err != nil {
-			return oldOffset, err
-		}
-		newOffset = offset + int64(size)
+		return r.size, nil
 	default:
 		return -1, os.ErrInvalid
 	}
@@ -91,17 +104,11 @@ func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
 		// offset not changed, so return directly
 		return oldOffset, nil
 	}
-	r.offset = newOffset
-
-	if r.resp != nil {
-		// close the existing ftp data connection, otherwise the next read will be blocked
-		_ = r.resp.Close() // we do not care about whether it returns an error
-		r.resp = nil
-	}
+	r.offset.Store(newOffset)
 	return newOffset, nil
 }
 
-func (r *FTPFileReader) Close() error {
+func (r *FileReader) Close() error {
 	if r.resp != nil {
 		return r.resp.Close()
 	}
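The rewrite makes the FTP reader a genuine io.ReaderAt: Read is now defined in terms of ReadAt plus an atomic cursor, and ReadAt restarts the FTP data connection whenever the requested offset differs from where the previous transfer stopped. One practical payoff of ReadAt semantics is composability with the standard library, sketched below (r would be the *FileReader built by NewFileReader above):

    // Any io.ReaderAt can serve independent windows of the same file.
    func window(r io.ReaderAt, off, n int64) io.Reader {
    	return io.NewSectionReader(r, off, n)
    }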


@@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	obj := stream.GetOld()
+	obj := stream.GetExist()
 	var (
 		e    Error
 		url  string
@@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	putUrl := res.Header().Get("location")
 	if stream.GetSize() < d.ChunkSize*1024*1024 {
 		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
-			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
+			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
 		}, nil)
 	} else {
 		err = d.chunkUpload(ctx, stream, putUrl)

@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -23,12 +24,17 @@ type File struct {
 	Name            string    `json:"name"`
 	MimeType        string    `json:"mimeType"`
 	ModifiedTime    time.Time `json:"modifiedTime"`
+	CreatedTime     time.Time `json:"createdTime"`
 	Size            string    `json:"size"`
 	ThumbnailLink   string    `json:"thumbnailLink"`
 	ShortcutDetails struct {
 		TargetId       string `json:"targetId"`
 		TargetMimeType string `json:"targetMimeType"`
 	} `json:"shortcutDetails"`
+
+	MD5Checksum    string `json:"md5Checksum"`
+	SHA1Checksum   string `json:"sha1Checksum"`
+	SHA256Checksum string `json:"sha256Checksum"`
 }
 
 func fileToObj(f File) *model.ObjThumb {
@@ -39,10 +45,18 @@ func fileToObj(f File) *model.ObjThumb {
 			ID:       f.Id,
 			Name:     f.Name,
 			Size:     size,
+			Ctime:    f.CreatedTime,
 			Modified: f.ModifiedTime,
 			IsFolder: f.MimeType == "application/vnd.google-apps.folder",
+			HashInfo: utils.NewHashInfoByMap(map[*utils.HashType]string{
+				utils.MD5:    f.MD5Checksum,
+				utils.SHA1:   f.SHA1Checksum,
+				utils.SHA256: f.SHA256Checksum,
+			}),
+		},
+		Thumbnail: model.Thumbnail{
+			Thumbnail: f.ThumbnailLink,
 		},
-		Thumbnail: model.Thumbnail{},
 	}
 	if f.MimeType == "application/vnd.google-apps.shortcut" {
 		obj.ID = f.ShortcutDetails.TargetId

@@ -5,7 +5,6 @@ import (
 	"crypto/x509"
 	"encoding/pem"
 	"fmt"
-	"io"
 	"io/ioutil"
 	"net/http"
 	"os"
@@ -13,6 +12,8 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/alist-org/alist/v3/pkg/http_range"
+
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
@@ -195,7 +196,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 		}
 		query := map[string]string{
 			"orderBy":  orderBy,
-			"fields":   "files(id,name,mimeType,size,modifiedTime,thumbnailLink,shortcutDetails),nextPageToken",
+			"fields":   "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
 			"pageSize": "1000",
 			"q":        fmt.Sprintf("'%s' in parents and trashed = false", id),
 			//"includeItemsFromAllDrives": "true",
@@ -216,25 +217,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 
 func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
 	var defaultChunkSize = d.ChunkSize * 1024 * 1024
-	var finish int64 = 0
-	for finish < stream.GetSize() {
+	var offset int64 = 0
+	for offset < stream.GetSize() {
 		if utils.IsCanceled(ctx) {
 			return ctx.Err()
 		}
-		chunkSize := stream.GetSize() - finish
+		chunkSize := stream.GetSize() - offset
 		if chunkSize > defaultChunkSize {
 			chunkSize = defaultChunkSize
 		}
-		_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
+		reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+		if err != nil {
+			return err
+		}
+		_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
 			req.SetHeaders(map[string]string{
 				"Content-Length": strconv.FormatInt(chunkSize, 10),
-				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
-			}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
+				"Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
+			}).SetBody(reader).SetContext(ctx)
 		}, nil)
 		if err != nil {
 			return err
 		}
-		finish += chunkSize
+		offset += chunkSize
 	}
 	return nil
 }
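The loop follows Google's resumable-upload convention: every PUT carries Content-Range: bytes <first>-<last>/<total> with an inclusive last byte, which is why the header uses offset+chunkSize-1. A worked example with made-up numbers, 10 MiB total and 4 MiB chunks:

    // Produces the three ranges a 10 MiB file needs at 4 MiB per chunk:
    //   bytes 0-4194303/10485760
    //   bytes 4194304-8388607/10485760
    //   bytes 8388608-10485759/10485760  (final, short chunk)
    func contentRanges(total, chunk int64) []string {
    	var out []string
    	for off := int64(0); off < total; off += chunk {
    		n := chunk
    		if total-off < n {
    			n = total - off
    		}
    		out = append(out, fmt.Sprintf("bytes %d-%d/%d", off, off+n-1, total))
    	}
    	return out
    }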


@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 	}
 
 	resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-		req.SetBody(stream.GetReadCloser()).SetContext(ctx)
+		req.SetBody(stream).SetContext(ctx)
 	}, nil, postHeaders)
 
 	if err != nil {

@@ -118,7 +118,19 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // split key-value pairs
 
 // look up a JS variable by key
 func findJSVarFunc(key, data string) string {
-	values := regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
+	var values []string
+	if key != "sasign" {
+		values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
+	} else {
+		matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1)
+		if len(matches) == 3 {
+			values = matches[1]
+		} else {
+			if len(matches) > 0 {
+				values = matches[0]
+			}
+		}
+	}
 	if len(values) == 0 {
 		return ""
 	}
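The sasign branch exists because the share page can define the same variable several times; FindAllStringSubmatch returns one []string per occurrence, with index 0 the full match and index 1 the capture group. So when exactly three definitions are found, matches[1] selects the capture of the second occurrence. A toy illustration (the page data is made up):

    // Toy demonstration of the indexing the sasign branch relies on.
    func demo() {
    	data := `var sasign = 'a';var sasign = 'b';var sasign = 'c';` // hypothetical page data
    	matches := regexp.MustCompile(`var sasign = '(.+?)';`).FindAllStringSubmatch(data, -1)
    	fmt.Println(matches[1][1]) // "b": the capture group of the second occurrence
    }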


@@ -3,6 +3,8 @@ package lanzou
 import (
 	"errors"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"time"
 )
@@ -18,6 +20,9 @@ type RespInfo[T any] struct {
 	Info T `json:"info"`
 }
+var _ model.Obj = (*FileOrFolder)(nil)
+var _ model.Obj = (*FileOrFolderByShareUrl)(nil)
+
 type FileOrFolder struct {
 	Name string `json:"name"`
 	//Onof string `json:"onof"` // whether an extraction code exists
@@ -49,6 +54,14 @@ type FileOrFolder struct {
 	shareInfo *FileShare `json:"-"`
 }
+func (f *FileOrFolder) CreateTime() time.Time {
+	return f.ModTime()
+}
+
+func (f *FileOrFolder) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (f *FileOrFolder) GetID() string {
 	if f.IsDir() {
 		return f.FolID
@@ -130,6 +143,14 @@ type FileOrFolderByShareUrl struct {
 	repairFlag bool `json:"-"`
 }
+func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
+	return f.ModTime()
+}
+
+func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
+	return utils.HashInfo{}
+}
+
 func (f *FileOrFolderByShareUrl) GetID() string   { return f.ID }
 func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
 func (f *FileOrFolderByShareUrl) GetPath() string { return "" }

@@ -258,7 +258,7 @@ var sizeFindReg = regexp.MustCompile(`(?i)大小\W*([0-9.]+\s*[bkm]+)`)
 var timeFindReg = regexp.MustCompile(`\d+\s*[秒天分小][钟时]?前|[昨前]天|\d{4}-\d{2}-\d{2}`)
 
 // find the IDs and names of subfolders in a shared folder
-var findSubFolaerReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
+var findSubFolderReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
 
 // get the download page link
 var findDownPageParamReg = regexp.MustCompile(`<iframe.*?src="(.+?)"`)
@@ -374,7 +374,7 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
 	if err != nil {
 		return nil, err
 	}
-	nextPageData := removeJSGlobalFunction(RemoveNotes(string(data)))
+	nextPageData := RemoveNotes(string(data))
 	param, err = htmlJsonToMap(nextPageData)
 	if err != nil {
 		return nil, err
@@ -455,7 +455,7 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
 	files := make([]FileOrFolderByShareUrl, 0)
 	// VIP: fetch subfolders
-	floders := findSubFolaerReg.FindAllStringSubmatch(sharePageData, -1)
+	floders := findSubFolderReg.FindAllStringSubmatch(sharePageData, -1)
 	for _, floder := range floders {
 		if len(floder) == 3 {
 			files = append(files, FileOrFolderByShareUrl{
@@ -476,10 +476,10 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
 		if err != nil {
 			return nil, err
 		}
-		/*// files inside the folder are also not password-protected
-		for i := 0; i < len(resp.Text); i++ {
-			resp.Text[i].Pwd = pwd
-		}*/
+		// files inside the folder are also password-protected
+		for i := 0; i < len(resp.Text); i++ {
+			resp.Text[i].Pwd = pwd
+		}
 		if len(resp.Text) == 0 {
 			break
 		}

@@ -5,7 +5,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io"
 	"io/fs"
 	"net/http"
 	"os"
@@ -13,6 +12,7 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/alist-org/alist/v3/internal/conf"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -21,6 +21,8 @@ import (
 	"github.com/alist-org/alist/v3/internal/sign"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
+	"github.com/djherbis/times"
+	log "github.com/sirupsen/logrus"
 	_ "golang.org/x/image/webp"
 )
@@ -102,6 +104,14 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
 	if !isFolder {
 		size = f.Size()
 	}
+	var ctime time.Time
+	t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
+	if err == nil {
+		if t.HasBirthTime() {
+			ctime = t.BirthTime()
+		}
+	}
+
 	file := model.ObjThumb{
 		Object: model.Object{
 			Path: filepath.Join(fullPath, f.Name()),
@@ -109,6 +119,7 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
 			Modified: f.ModTime(),
 			Size:     size,
 			IsFolder: isFolder,
+			Ctime:    ctime,
 		},
 		Thumbnail: model.Thumbnail{
 			Thumbnail: thumb,
@@ -145,10 +156,18 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
 	if isFolder {
 		size = 0
 	}
+	var ctime time.Time
+	t, err := times.Stat(path)
+	if err == nil {
+		if t.HasBirthTime() {
+			ctime = t.BirthTime()
+		}
+	}
 	file := model.Object{
 		Path:     path,
 		Name:     f.Name(),
 		Modified: f.ModTime(),
+		Ctime:    ctime,
 		Size:     size,
 		IsFolder: isFolder,
 	}
@@ -171,9 +190,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 			if err != nil {
 				return nil, err
 			}
-			link.ReadSeekCloser = open
+			link.MFile = open
 		} else {
-			link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes()))
+			link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
 			//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
 		}
 	} else {
@@ -181,15 +200,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 		if err != nil {
 			return nil, err
 		}
-		link.ReadSeekCloser = struct {
-			io.Reader
-			io.Seeker
-			io.Closer
-		}{
-			Reader: open,
-			Seeker: open,
-			Closer: open,
-		}
+		link.MFile = open
 	}
 	return &link, nil
 }
@@ -273,6 +284,10 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 	if err != nil {
 		return err
 	}
+	err = os.Chtimes(fullPath, stream.ModTime(), stream.ModTime())
+	if err != nil {
+		log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
+	}
 	return nil
 }

@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"os"
 	"strconv"
 	"time"
 
@@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	if err != nil {
 		return err
 	}
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}
 	defer func() {
 		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
 	}()
 	uploader := s3manager.NewUploader(s)
 	input := &s3manager.UploadInput{

@@ -4,11 +4,12 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/alist-org/alist/v3/pkg/http_range"
-	"github.com/rclone/rclone/lib/readers"
 	"io"
 	"time"
 
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/rclone/rclone/lib/readers"
+
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
@@ -42,7 +43,7 @@ func (d *Mega) Drop(ctx context.Context) error {
 
 func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
 	if node, ok := dir.(*MegaNode); ok {
-		nodes, err := d.c.FS.GetChildren(node.Node)
+		nodes, err := d.c.FS.GetChildren(node.n)
 		if err != nil {
 			return nil, err
 		}
@@ -56,7 +57,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
 		return res, nil
 	}
 	log.Errorf("can't convert: %+v", dir)
 	return nil, fmt.Errorf("unable to convert dir to mega node")
 }
 
 func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
@@ -68,21 +69,21 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
 
 func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	if node, ok := file.(*MegaNode); ok {
-		//down, err := d.c.NewDownload(node.Node)
+		//down, err := d.c.NewDownload(node.n)
 		//if err != nil {
 		//	return nil, fmt.Errorf("open download file failed: %w", err)
 		//}
 		size := file.GetSize()
 		var finalClosers utils.Closers
-		resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+		resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 			length := httpRange.Length
 			if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
 				length = -1
 			}
 			var down *mega.Download
 			err := utils.Retry(3, time.Second, func() (err error) {
-				down, err = d.c.NewDownload(node.Node)
+				down, err = d.c.NewDownload(node.n)
 				return err
 			})
 			if err != nil {
@@ -97,37 +98,37 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
 			return readers.NewLimitedReadCloser(oo, length), nil
 		}
-		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers}
+		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
 		resultLink := &model.Link{
-			RangeReadCloser: *resultRangeReadCloser,
+			RangeReadCloser: resultRangeReadCloser,
 		}
 		return resultLink, nil
 	}
 	return nil, fmt.Errorf("unable to convert file to mega node")
 }
 
 func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
 	if parentNode, ok := parentDir.(*MegaNode); ok {
-		_, err := d.c.CreateDir(dirName, parentNode.Node)
+		_, err := d.c.CreateDir(dirName, parentNode.n)
 		return err
 	}
 	return fmt.Errorf("unable to convert dir to mega node")
 }
 
 func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 	if srcNode, ok := srcObj.(*MegaNode); ok {
 		if dstNode, ok := dstDir.(*MegaNode); ok {
-			return d.c.Move(srcNode.Node, dstNode.Node)
+			return d.c.Move(srcNode.n, dstNode.n)
 		}
 	}
 	return fmt.Errorf("unable to convert dir to mega node")
 }
 
 func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
 	if srcNode, ok := srcObj.(*MegaNode); ok {
-		return d.c.Rename(srcNode.Node, newName)
+		return d.c.Rename(srcNode.n, newName)
 	}
 	return fmt.Errorf("unable to convert obj to mega node")
 }
 
 func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -136,14 +137,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 
 func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
 	if node, ok := obj.(*MegaNode); ok {
-		return d.c.Delete(node.Node, false)
+		return d.c.Delete(node.n, false)
 	}
 	return fmt.Errorf("unable to convert obj to mega node")
 }
 
 func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
 	if dstNode, ok := dstDir.(*MegaNode); ok {
-		u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
+		u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
 		if err != nil {
 			return err
 		}
@@ -169,13 +170,13 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 			if err != nil {
 				return err
 			}
-			up(id * 100 / u.Chunks())
+			up(float64(id) * 100 / float64(u.Chunks()))
 		}
 		_, err = u.Finish()
 		return err
 	}
 	return fmt.Errorf("unable to convert dir to mega node")
 }
 
 //func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {

@@ -1,6 +1,7 @@
 package mega
 
 import (
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
@@ -8,29 +9,36 @@ import (
 )
 
 type MegaNode struct {
-	*mega.Node
+	n *mega.Node
 }
 
-//func (m *MegaNode) GetSize() int64 {
-//	//TODO implement me
-//	panic("implement me")
-//}
-//
-//func (m *MegaNode) GetName() string {
-//	//TODO implement me
-//	panic("implement me")
-//}
+func (m *MegaNode) GetSize() int64 {
+	return m.n.GetSize()
+}
+
+func (m *MegaNode) GetName() string {
+	return m.n.GetName()
+}
+
+func (m *MegaNode) CreateTime() time.Time {
+	return m.n.GetTimeStamp()
+}
+
+func (m *MegaNode) GetHash() utils.HashInfo {
+	// Mega uses MD5, but the original file's hash can't be obtained, since the file is stored encrypted in the cloud
+	return utils.HashInfo{}
+}
 
 func (m *MegaNode) ModTime() time.Time {
-	return m.GetTimeStamp()
+	return m.n.GetTimeStamp()
 }
 
 func (m *MegaNode) IsDir() bool {
-	return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
+	return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
 }
 
 func (m *MegaNode) GetID() string {
-	return m.GetHash()
+	return m.n.GetHash()
 }
 
 func (m *MegaNode) GetPath() string {

@@ -6,8 +6,8 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"os"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/alist-org/alist/v3/drivers/base"
@@ -18,6 +18,7 @@ import (
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/avast/retry-go"
 	"github.com/foxxorcat/mopan-sdk-go"
+	log "github.com/sirupsen/logrus"
 )
 
 type MoPan struct {
@@ -54,6 +55,16 @@ func (d *MoPan) Init(ctx context.Context) error {
 			return err
 		}
 		d.userID = info.UserID
+		log.Debugf("[mopan] Phone: %s UserCloudStorageRelations: %+v", d.Phone, data.UserCloudStorageRelations)
+		cloudCircleApp, _ := d.client.QueryAllCloudCircleApp()
+		log.Debugf("[mopan] Phone: %s CloudCircleApp: %+v", d.Phone, cloudCircleApp)
+		if d.RootFolderID == "" {
+			for _, userCloudStorage := range data.UserCloudStorageRelations {
+				if userCloudStorage.Path == "/文件" {
+					d.RootFolderID = userCloudStorage.FolderID
+				}
+			}
+		}
 		return nil
 	}
 	d.client = mopan.NewMoClientWithRestyClient(base.NewRestyClient()).
@@ -94,6 +105,7 @@ func (d *MoPan) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
 			break
 		}
 
+		log.Debugf("[mopan] Phone: %s folder: %+v", d.Phone, data.FileListAO.FolderList)
 		files = append(files, utils.MustSliceConvert(data.FileListAO.FolderList, folderToObj)...)
 		files = append(files, utils.MustSliceConvert(data.FileListAO.FileList, fileToObj)...)
 	}
@@ -106,6 +118,15 @@ func (d *MoPan) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
 		return nil, err
 	}
 
+	data.DownloadUrl = strings.Replace(strings.ReplaceAll(data.DownloadUrl, "&amp;", "&"), "http://", "https://", 1)
+	res, err := base.NoRedirectClient.R().SetContext(ctx).Head(data.DownloadUrl)
+	if err != nil {
+		return nil, err
+	}
+	if res.StatusCode() == 302 {
+		data.DownloadUrl = res.Header().Get("location")
+	}
+
 	return &model.Link{
 		URL: data.DownloadUrl,
 	}, nil
@@ -219,13 +240,12 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-	file, err := utils.CreateTempFile(stream, stream.GetSize())
+	file, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
 	defer func() {
 		_ = file.Close()
-		_ = os.Remove(file.Name())
 	}()
 
 	// step.1
@@ -252,7 +272,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 	}
 
 	if !initUpdload.FileDataExists {
-		fmt.Println(d.client.CloudDiskStartBusiness())
+		utils.Log.Error(d.client.CloudDiskStartBusiness())
 
 		threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
 			retry.Attempts(3),
@@ -288,7 +308,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 			if resp.StatusCode != http.StatusOK {
 				return fmt.Errorf("upload err, code=%d", resp.StatusCode)
 			}
-			up(100 * int(threadG.Success()) / len(parts))
+			up(100 * float64(threadG.Success()) / float64(len(parts)))
 			initUpdload.PartInfos[i] = ""
 			return nil
 		})

@@ -9,7 +9,7 @@ type Addition struct {
 	Phone    string `json:"phone" required:"true"`
 	Password string `json:"password" required:"true"`
 
-	RootFolderID string `json:"root_folder_id" default:"-11" required:"true" help:"be careful when using the -11 value, some operations may cause system errors"`
+	RootFolderID string `json:"root_folder_id" default:""`
 
 	CloudID string `json:"cloud_id"`

@@ -4,6 +4,7 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/foxxorcat/mopan-sdk-go"
 )
 
@@ -14,6 +15,8 @@ func fileToObj(f mopan.File) model.Obj {
 		Name:     f.Name,
 		Size:     int64(f.Size),
 		Modified: time.Time(f.LastOpTime),
+		Ctime:    time.Time(f.CreateDate),
+		HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
 	},
 	Thumbnail: model.Thumbnail{
 		Thumbnail: f.Icon.SmallURL,
@@ -26,6 +29,7 @@ func folderToObj(f mopan.Folder) model.Obj {
 		ID:       string(f.ID),
 		Name:     f.Name,
 		Modified: time.Time(f.LastOpTime),
+		Ctime:    time.Time(f.CreateDate),
 		IsFolder: true,
 	}
 }
@@ -37,6 +41,7 @@ func CloneObj(o model.Obj, newID, newName string) model.Obj {
 		Name:     newName,
 		IsFolder: true,
 		Modified: o.ModTime(),
+		Ctime:    o.CreateTime(),
 	}
 }
@@ -50,6 +55,8 @@ func CloneObj(o model.Obj, newID, newName string) model.Obj {
 		Name:     newName,
 		Size:     o.GetSize(),
 		Modified: o.ModTime(),
+		Ctime:    o.CreateTime(),
+		HashInfo: o.GetHash(),
 	},
 	Thumbnail: model.Thumbnail{
 		Thumbnail: thumb,

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"net/url"
 	"path"
 
 	"github.com/alist-org/alist/v3/drivers/base"
@@ -57,8 +58,17 @@ func (d *Onedrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs
 	if f.File == nil {
 		return nil, errs.NotFile
 	}
+	u := f.Url
+	if d.CustomHost != "" {
+		_u, err := url.Parse(f.Url)
+		if err != nil {
+			return nil, err
+		}
+		_u.Host = d.CustomHost
+		u = _u.String()
+	}
 	return &model.Link{
-		URL: f.Url,
+		URL: u,
 	}, nil
 }
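The CustomHost rewrite (here and in the OnedriveAPP driver below) swaps only the authority of the pre-signed download URL; the path, query string, and signature parameters survive intact, which is what makes fronting downloads with a CDN or reverse proxy workable. A quick standalone check (the URL is made up):

    func demo() {
    	u, _ := url.Parse("https://public.example-cdn.com/drive/items/abc?tempauth=token123")
    	u.Host = "dl.my-proxy.example"
    	fmt.Println(u.String())
    	// https://dl.my-proxy.example/drive/items/abc?tempauth=token123
    }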


@@ -15,6 +15,7 @@ type Addition struct {
 	RefreshToken string `json:"refresh_token" required:"true"`
 	SiteId       string `json:"site_id"`
 	ChunkSize    int64  `json:"chunk_size" type:"number" default:"5"`
+	CustomHost   string `json:"custom_host" help:"Custom host for onedrive download link"`
 }
 
 var config = driver.Config{

@@ -196,13 +196,14 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
 		if err != nil {
 			return err
 		}
-		if res.StatusCode != 201 && res.StatusCode != 202 {
+		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
+		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
 			data, _ := io.ReadAll(res.Body)
 			res.Body.Close()
 			return errors.New(string(data))
 		}
 		res.Body.Close()
-		up(int(finish * 100 / stream.GetSize()))
+		up(float64(finish) * 100 / float64(stream.GetSize()))
 	}
 	return nil
 }

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"net/url"
 	"path"
 
 	"github.com/alist-org/alist/v3/drivers/base"
@@ -57,8 +58,17 @@ func (d *OnedriveAPP) Link(ctx context.Context, file model.Obj, args model.LinkA
 	if f.File == nil {
 		return nil, errs.NotFile
 	}
+	u := f.Url
+	if d.CustomHost != "" {
+		_u, err := url.Parse(f.Url)
+		if err != nil {
+			return nil, err
+		}
+		_u.Host = d.CustomHost
+		u = _u.String()
+	}
 	return &model.Link{
-		URL: f.Url,
+		URL: u,
 	}, nil
 }

@@ -13,6 +13,7 @@ type Addition struct {
 	TenantID   string `json:"tenant_id"`
 	Email      string `json:"email"`
 	ChunkSize  int64  `json:"chunk_size" type:"number" default:"5"`
+	CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
 }
 
 var config = driver.Config{

@@ -71,8 +71,8 @@ func (d *OnedriveAPP) _accessToken() error {
 		"grant_type":    "client_credentials",
 		"client_id":     d.ClientID,
 		"client_secret": d.ClientSecret,
-		"resource":      "https://graph.microsoft.com/",
-		"scope":         "https://graph.microsoft.com/.default",
+		"resource":      onedriveHostMap[d.Region].Api + "/",
+		"scope":         onedriveHostMap[d.Region].Api + "/.default",
 	}).Post(url)
 	if err != nil {
 		return err
@@ -187,13 +187,14 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
 		if err != nil {
 			return err
 		}
-		if res.StatusCode != 201 && res.StatusCode != 202 {
+		// https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
+		if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
 			data, _ := io.ReadAll(res.Body)
 			res.Body.Close()
 			return errors.New(string(data))
 		}
 		res.Body.Close()
-		up(int(finish * 100 / stream.GetSize()))
+		up(float64(finish) * 100 / float64(stream.GetSize()))
 	}
 	return nil
 }

@@ -3,15 +3,14 @@ package pikpak
 
 import (
 	"context"
 	"fmt"
-	"io"
 	"net/http"
-	"os"
 	"strings"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -124,23 +123,20 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	// cal gcid
-	sha1Str, err := getGcid(tempFile, stream.GetSize())
-	if err != nil {
-		return err
-	}
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
+	hi := stream.GetHash()
+	sha1Str := hi.GetHash(hash_extend.GCID)
+	if len(sha1Str) < hash_extend.GCID.Width {
+		tFile, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+		if err != nil {
+			return err
+		}
 	}
 	var resp UploadTaskData
 	res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
@@ -179,7 +175,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	input := &s3manager.UploadInput{
 		Bucket: &params.Bucket,
 		Key:    &params.Key,
-		Body:   tempFile,
+		Body:   stream,
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	return err

@@ -5,6 +5,8 @@ import (
 	"time"
 
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 )
 
 type RespErr struct {
@@ -21,7 +23,9 @@ type File struct {
 	Id             string    `json:"id"`
 	Kind           string    `json:"kind"`
 	Name           string    `json:"name"`
+	CreatedTime    time.Time `json:"created_time"`
 	ModifiedTime   time.Time `json:"modified_time"`
+	Hash           string    `json:"hash"`
 	Size           string    `json:"size"`
 	ThumbnailLink  string    `json:"thumbnail_link"`
 	WebContentLink string    `json:"web_content_link"`
@@ -35,8 +39,10 @@ func fileToObj(f File) *model.ObjThumb {
 			ID:       f.Id,
 			Name:     f.Name,
 			Size:     size,
+			Ctime:    f.CreatedTime,
 			Modified: f.ModifiedTime,
 			IsFolder: f.Kind == "drive#folder",
+			HashInfo: utils.NewHashInfo(hash_extend.GCID, f.Hash),
 		},
 		Thumbnail: model.Thumbnail{
 			Thumbnail: f.ThumbnailLink,

@@ -7,7 +7,6 @@ import (
 	"encoding/hex"
 	"io"
 	"net/http"
-	"os"
 	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -75,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
 		"User-Agent": []string{ua},
 	},
 	Concurrency: 2,
-	PartSize:    10 * 1024 * 1024,
+	PartSize:    10 * utils.MB,
 }, nil
}
@@ -136,13 +135,12 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}
 	defer func() {
 		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
 	}()
 	m := md5.New()
 	_, err = io.Copy(m, tempFile)
@@ -211,7 +209,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 	}
 	md5s = append(md5s, m)
 	partNumber++
-	up(int(100 * (total - left) / total))
+	up(100 * float64(total-left) / float64(total))
 }
 err = d.upCommit(pre, md5s)
 if err != nil {
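
Note: the up(...) change here is part of a wider switch of the driver.UpdateProgress callback from int to float64 percentages — the same edit recurs in the Teambition, Terabox and Trainbit diffs below — so per-chunk progress is no longer truncated. A tiny sketch, assuming the callback shape matches the func(float64) literal passed in the S3 diff below:

package main

import "fmt"

// UpdateProgress mirrors the new callback shape: float64 percentages.
type UpdateProgress func(percentage float64)

func main() {
	var up UpdateProgress = func(p float64) { fmt.Printf("%.2f%%\n", p) }
	total, left := int64(3), int64(2)
	up(100 * float64(total-left) / float64(total)) // 33.33%, not a truncated 33
}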

View File

@@ -10,6 +10,8 @@ import (
 	"strings"
 	"time"

+	"github.com/alist-org/alist/v3/internal/stream"
+
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -96,14 +98,14 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo
 func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
 	return d.Put(ctx, &model.Object{
 		Path: stdpath.Join(parentDir.GetPath(), dirName),
-	}, &model.FileStream{
+	}, &stream.FileStream{
 		Obj: &model.Object{
 			Name:     getPlaceholderName(d.Placeholder),
 			Modified: time.Now(),
 		},
-		ReadCloser: io.NopCloser(bytes.NewReader([]byte{})),
+		Reader: io.NopCloser(bytes.NewReader([]byte{})),
 		Mimetype: "application/octet-stream",
-	}, func(int) {})
+	}, func(float64) {})
 }

 func (d *S3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
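
Note: S3 has no real directories, so MakeDir fakes one by pushing a zero-byte placeholder object through the ordinary Put path. A sketch of the same idea written directly against s3manager — bucket, key and the ambient AWS credentials are placeholders of mine, not alist code:

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess)
	// A zero-byte body under the new prefix is the whole "directory".
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("new-dir/.placeholder"),
		Body:   bytes.NewReader(nil),
	})
	if err != nil {
		log.Fatal(err)
	}
}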

View File

@@ -56,7 +56,7 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
 	return nil, err
 }
 link := &model.Link{
-	ReadSeekCloser: remoteFile,
+	MFile: remoteFile,
 }
 return link, nil
}

View File

@@ -61,6 +61,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
 	Modified: f.ModTime(),
 	Size:     f.Size(),
 	IsFolder: f.IsDir(),
+	Ctime:    f.(*smb2.FileStat).CreationTime,
 },
}
files = append(files, &file)
@@ -79,7 +80,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 	return nil, err
 }
 link := &model.Link{
-	ReadSeekCloser: remoteFile,
+	MFile: remoteFile,
 }
 d.updateLastConnTime()
 return link, nil
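
Note: both the SFTP and SMB diffs swap model.Link.ReadSeekCloser for MFile, handing the open remote file straight to the caller. The likely payoff — my reading, not something the diff states — is seek-based HTTP Range support. A stdlib illustration of why a seekable handle matters; the path and port are placeholders:

package main

import (
	"net/http"
	"os"
	"time"
)

func main() {
	// http.ServeContent requires an io.ReadSeeker and answers Range
	// requests by seeking: no re-open, no full re-read.
	http.HandleFunc("/f", func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open("demo.bin")
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		defer f.Close()
		http.ServeContent(w, r, "demo.bin", time.Time{}, f)
	})
	_ = http.ListenAndServe(":8080", nil)
}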

View File

@@ -189,7 +189,7 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
 	if err != nil {
 		return nil, err
 	}
-	up(i * 100 / newChunk.Chunks)
+	up(float64(i) * 100 / float64(newChunk.Chunks))
}
_, err = base.RestyClient.R().SetHeader("Authorization", token).Post(
	fmt.Sprintf("https://%s.teambition.net/upload/chunk/%s",

View File

@@ -41,24 +41,24 @@ func (d *Template) Link(ctx context.Context, file model.Obj, args model.LinkArgs
 	return nil, errs.NotImplement
 }

-func (d *Template) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+func (d *Template) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
 	// TODO create folder, optional
-	return errs.NotImplement
+	return nil, errs.NotImplement
 }

-func (d *Template) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Template) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	// TODO move obj, optional
-	return errs.NotImplement
+	return nil, errs.NotImplement
 }

-func (d *Template) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (d *Template) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
 	// TODO rename obj, optional
-	return errs.NotImplement
+	return nil, errs.NotImplement
 }

-func (d *Template) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Template) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
 	// TODO copy obj, optional
-	return errs.NotImplement
+	return nil, errs.NotImplement
 }

 func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
@@ -66,9 +66,9 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
 	return errs.NotImplement
 }

-func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	// TODO upload file, optional
-	return errs.NotImplement
+	return nil, errs.NotImplement
 }

 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
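
Note: every mutating method in the template driver now returns (model.Obj, error) instead of a bare error — presumably so the op layer can cache the resulting object without a fresh re-list, though the diff itself does not say why. A trimmed, self-contained illustration of the new shape; Obj, MkdirNewObj and demo are stand-ins invented here, not alist's interfaces:

package main

import (
	"context"
	"fmt"
)

type Obj interface{ GetName() string }

type MkdirNewObj interface {
	MakeDir(ctx context.Context, parent Obj, name string) (Obj, error)
}

type dir struct{ name string }

func (d dir) GetName() string { return d.name }

type demo struct{}

// MakeDir returns the object it created, so the caller can use it directly.
func (demo) MakeDir(_ context.Context, _ Obj, name string) (Obj, error) {
	return dir{name}, nil
}

func main() {
	var d MkdirNewObj = demo{}
	o, _ := d.MakeDir(context.Background(), nil, "docs")
	fmt.Println(o.GetName())
}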

View File

@@ -1,4 +1,4 @@
-package terbox
+package terabox

 import (
 	"bytes"
@@ -6,16 +6,16 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
-	"github.com/alist-org/alist/v3/drivers/base"
-	"github.com/alist-org/alist/v3/pkg/utils"
-	log "github.com/sirupsen/logrus"
 	"io"
 	"math"
-	"os"
 	stdpath "path"
 	"strconv"
 	"strings"

+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	log "github.com/sirupsen/logrus"
+
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 )
@@ -23,6 +23,7 @@ import (
 type Terabox struct {
 	model.Storage
 	Addition
+	JsToken string
 }

 func (d *Terabox) Config() driver.Config {
@@ -116,14 +117,10 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+	tempFile, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
 	var Default int64 = 4 * 1024 * 1024
 	defaultByteData := make([]byte, Default)
 	count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
@@ -170,6 +167,9 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		return err
 	}
 	log.Debugf("%+v", precreateResp)
+	if precreateResp.Errno != 0 {
+		return fmt.Errorf("[terabox] failed to precreate file, errno: %d", precreateResp.Errno)
+	}
 	if precreateResp.ReturnType == 2 {
 		return nil
 	}
@@ -213,7 +213,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	}
 	log.Debugln(res.String())
 	if len(precreateResp.BlockList) > 0 {
-		up(i * 100 / len(precreateResp.BlockList))
+		up(float64(i) * 100 / float64(len(precreateResp.BlockList)))
 	}
 }
 _, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)

View File

@@ -1,4 +1,4 @@
-package terbox
+package terabox

 import (
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -7,7 +7,8 @@ import (
 type Addition struct {
 	driver.RootPath
 	Cookie string `json:"cookie" required:"true"`
+	//JsToken string `json:"js_token" type:"string" required:"true"`
 	DownloadAPI    string `json:"download_api" type:"select" options:"official,crack" default:"official"`
 	OrderBy        string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

View File

@@ -1,9 +1,10 @@
-package terbox
+package terabox

 import (
-	"github.com/alist-org/alist/v3/internal/model"
 	"strconv"
 	"time"
+
+	"github.com/alist-org/alist/v3/internal/model"
 )

 type File struct {

View File

@@ -1,10 +1,11 @@
-package terbox
+package terabox

 import (
 	"encoding/base64"
 	"fmt"
 	"net/http"
 	"net/url"
+	"regexp"
 	"strconv"
 	"strings"
 	"time"
@@ -15,7 +16,39 @@ import (
 	"github.com/go-resty/resty/v2"
 )

-func (d *Terabox) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
+func getStrBetween(raw, start, end string) string {
+	regexPattern := fmt.Sprintf(`%s(.*?)%s`, regexp.QuoteMeta(start), regexp.QuoteMeta(end))
+	regex := regexp.MustCompile(regexPattern)
+	matches := regex.FindStringSubmatch(raw)
+	if len(matches) < 2 {
+		return ""
+	}
+	mid := matches[1]
+	return mid
+}
+
+func (d *Terabox) resetJsToken() error {
+	u := "https://www.terabox.com/main"
+	res, err := base.RestyClient.R().SetHeaders(map[string]string{
+		"Cookie":           d.Cookie,
+		"Accept":           "application/json, text/plain, */*",
+		"Referer":          "https://www.terabox.com/",
+		"User-Agent":       base.UserAgent,
+		"X-Requested-With": "XMLHttpRequest",
+	}).Get(u)
+	if err != nil {
+		return err
+	}
+	html := res.String()
+	jsToken := getStrBetween(html, "`function%20fn%28a%29%7Bwindow.jsToken%20%3D%20a%7D%3Bfn%28%22", "%22%29`")
+	if jsToken == "" {
+		return fmt.Errorf("jsToken not found, html: %s", html)
+	}
+	d.JsToken = jsToken
+	return nil
+}
+
+func (d *Terabox) request(furl string, method string, callback base.ReqCallback, resp interface{}, noRetry ...bool) ([]byte, error) {
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie": d.Cookie,
@@ -24,10 +57,13 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback,
 		"User-Agent":       base.UserAgent,
 		"X-Requested-With": "XMLHttpRequest",
 	})
-	req.SetQueryParam("app_id", "250528")
-	req.SetQueryParam("web", "1")
-	req.SetQueryParam("channel", "dubox")
-	req.SetQueryParam("clienttype", "0")
+	req.SetQueryParams(map[string]string{
+		"app_id":     "250528",
+		"web":        "1",
+		"channel":    "dubox",
+		"clienttype": "0",
+		"jsToken":    d.JsToken,
+	})
 	if callback != nil {
 		callback(req)
 	}
@@ -38,6 +74,17 @@ func (d *Terabox) request(furl string, method string, callback base.ReqCallback,
 	if err != nil {
 		return nil, err
 	}
+	errno := utils.Json.Get(res.Body(), "errno").ToInt()
+	if errno == 4000023 {
+		// reget jsToken
+		err = d.resetJsToken()
+		if err != nil {
+			return nil, err
+		}
+		if !utils.IsBool(noRetry...) {
+			return d.request(furl, method, callback, resp, true)
+		}
+	}
 	return res.Body(), nil
 }
@@ -186,7 +233,7 @@ func (d *Terabox) manage(opera string, filelist interface{}) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
+	data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", encodeURIComponent(string(marshal)))
 	return d.post("/api/filemanager", params, data, nil)
 }
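
Note: the jsToken flow above is self-contained: the token is scraped out of a URL-encoded inline script on the terabox main page, attached as a query parameter to every API call, and on errno 4000023 it is refreshed and the request retried exactly once (the noRetry variadic stops the recursion). A runnable demo of the getStrBetween helper on a toy input of mine:

package main

import (
	"fmt"
	"regexp"
)

// Same technique as the diff: a non-greedy match between two
// QuoteMeta-escaped literal delimiters.
func getStrBetween(raw, start, end string) string {
	re := regexp.MustCompile(fmt.Sprintf(`%s(.*?)%s`, regexp.QuoteMeta(start), regexp.QuoteMeta(end)))
	m := re.FindStringSubmatch(raw)
	if len(m) < 2 {
		return ""
	}
	return m[1]
}

func main() {
	html := "prefix fn%28%22TOKEN123%22%29 suffix" // toy stand-in for the scraped page
	fmt.Println(getStrBetween(html, "fn%28%22", "%22%29")) // TOKEN123
}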

View File

@@ -3,9 +3,7 @@ package thunder
 import (
 	"context"
 	"fmt"
-	"io"
 	"net/http"
-	"os"
 	"strings"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -14,6 +12,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -333,25 +332,22 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
-	if err != nil {
-		return err
-	}
-	defer func() {
-		_ = tempFile.Close()
-		_ = os.Remove(tempFile.Name())
-	}()
-	gcid, err := getGcid(tempFile, stream.GetSize())
-	if err != nil {
-		return err
-	}
-	if _, err := tempFile.Seek(0, io.SeekStart); err != nil {
-		return err
-	}
+	hi := stream.GetHash()
+	gcid := hi.GetHash(hash_extend.GCID)
+	if len(gcid) < hash_extend.GCID.Width {
+		tFile, err := stream.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+		if err != nil {
+			return err
+		}
+	}

 	var resp UploadTaskResponse
-	_, err = xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
+	_, err := xc.Request(FILE_API_URL, http.MethodPost, func(r *resty.Request) {
 		r.SetContext(ctx)
 		r.SetBody(&base.Json{
 			"kind": FILE,
@@ -381,7 +377,7 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.
 		Bucket:  aws.String(param.Bucket),
 		Key:     aws.String(param.Key),
 		Expires: aws.Time(param.Expiration),
-		Body:    tempFile,
+		Body:    stream,
 	})
 	return err
 }

View File

@@ -4,6 +4,10 @@ import (
 	"fmt"
 	"strconv"
 	"time"
+
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 )

 type ErrResp struct {
@@ -84,6 +88,8 @@ type Link struct {
 	Type string `json:"type"`
 }

+var _ model.Obj = (*Files)(nil)
+
 type Files struct {
 	Kind string `json:"kind"`
 	ID   string `json:"id"`
@@ -100,39 +106,39 @@ type Files struct {
 	ModifiedTime  time.Time `json:"modified_time"`
 	IconLink      string    `json:"icon_link"`
 	ThumbnailLink string    `json:"thumbnail_link"`
-	//Md5Checksum string `json:"md5_checksum"`
-	//Hash string `json:"hash"`
-	Links map[string]Link `json:"links"`
-	Phase string `json:"phase"`
-	Audit struct {
-		Status  string `json:"status"`
-		Message string `json:"message"`
-		Title   string `json:"title"`
-	} `json:"audit"`
+	// Md5Checksum string `json:"md5_checksum"`
+	Hash string `json:"hash"`
+	// Links map[string]Link `json:"links"`
+	// Phase string `json:"phase"`
+	// Audit struct {
+	// 	Status  string `json:"status"`
+	// 	Message string `json:"message"`
+	// 	Title   string `json:"title"`
+	// } `json:"audit"`
 	Medias []struct {
-		Category  string `json:"category"`
-		IconLink  string `json:"icon_link"`
-		IsDefault bool   `json:"is_default"`
-		IsOrigin  bool   `json:"is_origin"`
-		IsVisible bool   `json:"is_visible"`
+		//Category  string `json:"category"`
+		//IconLink  string `json:"icon_link"`
+		//IsDefault bool   `json:"is_default"`
+		//IsOrigin  bool   `json:"is_origin"`
+		//IsVisible bool   `json:"is_visible"`
 		Link Link `json:"link"`
-		MediaID        string `json:"media_id"`
-		MediaName      string `json:"media_name"`
-		NeedMoreQuota  bool   `json:"need_more_quota"`
-		Priority       int    `json:"priority"`
-		RedirectLink   string `json:"redirect_link"`
-		ResolutionName string `json:"resolution_name"`
-		Video struct {
-			AudioCodec string `json:"audio_codec"`
-			BitRate    int    `json:"bit_rate"`
-			Duration   int    `json:"duration"`
-			FrameRate  int    `json:"frame_rate"`
-			Height     int    `json:"height"`
-			VideoCodec string `json:"video_codec"`
-			VideoType  string `json:"video_type"`
-			Width      int    `json:"width"`
-		} `json:"video"`
-		VipTypes []string `json:"vip_types"`
+		//MediaID        string `json:"media_id"`
+		//MediaName      string `json:"media_name"`
+		//NeedMoreQuota  bool   `json:"need_more_quota"`
+		//Priority       int    `json:"priority"`
+		//RedirectLink   string `json:"redirect_link"`
+		//ResolutionName string `json:"resolution_name"`
+		// Video struct {
+		// 	AudioCodec string `json:"audio_codec"`
+		// 	BitRate    int    `json:"bit_rate"`
+		// 	Duration   int    `json:"duration"`
+		// 	FrameRate  int    `json:"frame_rate"`
+		// 	Height     int    `json:"height"`
+		// 	VideoCodec string `json:"video_codec"`
+		// 	VideoType  string `json:"video_type"`
+		// 	Width      int    `json:"width"`
+		// } `json:"video"`
+		//VipTypes []string `json:"vip_types"`
 	} `json:"medias"`
 	Trashed    bool   `json:"trashed"`
 	DeleteTime string `json:"delete_time"`
@@ -146,13 +152,18 @@ type Files struct {
 	//Collection interface{} `json:"collection"`
 }

-func (c *Files) GetSize() int64     { size, _ := strconv.ParseInt(c.Size, 10, 64); return size }
-func (c *Files) GetName() string    { return c.Name }
-func (c *Files) ModTime() time.Time { return c.ModifiedTime }
-func (c *Files) IsDir() bool        { return c.Kind == FOLDER }
-func (c *Files) GetID() string      { return c.ID }
-func (c *Files) GetPath() string    { return "" }
-func (c *Files) Thumb() string      { return c.ThumbnailLink }
+func (c *Files) GetHash() utils.HashInfo {
+	return utils.NewHashInfo(hash_extend.GCID, c.Hash)
+}
+
+func (c *Files) GetSize() int64        { size, _ := strconv.ParseInt(c.Size, 10, 64); return size }
+func (c *Files) GetName() string       { return c.Name }
+func (c *Files) CreateTime() time.Time { return c.CreatedTime }
+func (c *Files) ModTime() time.Time    { return c.ModifiedTime }
+func (c *Files) IsDir() bool           { return c.Kind == FOLDER }
+func (c *Files) GetID() string         { return c.ID }
+func (c *Files) GetPath() string       { return "" }
+func (c *Files) Thumb() string         { return c.ThumbnailLink }

 /*
  * 上传
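
Note: the added var _ model.Obj = (*Files)(nil) is a compile-time interface assertion — the build now breaks if Files ever stops satisfying model.Obj, for instance by losing the new GetHash or CreateTime methods. The same pattern in miniature, with Named and File as stand-ins:

package main

import "fmt"

type Named interface{ GetName() string }

type File struct{ Name string }

func (f *File) GetName() string { return f.Name }

// Fails to compile if *File ever stops implementing Named.
var _ Named = (*File)(nil)

func main() {
	fmt.Println((&File{Name: "a.txt"}).GetName())
}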

View File

@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"math"
 	"net/http"
 	"net/url"
 	"strings"
@@ -128,7 +127,7 @@ func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	stream,
 	func(byteNum int) {
 		total += int64(byteNum)
-		up(int(math.Round(float64(total) / float64(stream.GetSize()) * 100)))
+		up(float64(total) / float64(stream.GetSize()) * 100)
 	},
}
req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader)

View File

@@ -80,7 +80,11 @@ func (d *USS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
 	downExp := time.Hour * time.Duration(d.SignURLExpire)
 	expireAt := time.Now().Add(downExp).Unix()
 	upd := url.QueryEscape(path.Base(file.GetPath()))
-	signStr := strings.Join([]string{d.OperatorPassword, fmt.Sprint(expireAt), fmt.Sprintf("/%s", key)}, "&")
+	tokenOrPassword := d.AntiTheftChainToken
+	if tokenOrPassword == "" {
+		tokenOrPassword = d.OperatorPassword
+	}
+	signStr := strings.Join([]string{tokenOrPassword, fmt.Sprint(expireAt), fmt.Sprintf("/%s", key)}, "&")
 	upt := utils.GetMD5EncodeStr(signStr)[12:20] + fmt.Sprint(expireAt)
 	link := fmt.Sprintf("%s?_upd=%s&_upt=%s", u, upd, upt)
 	return &model.Link{URL: link}, nil
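
Note: per the diff, the _upt signature is md5("<token-or-password>&<expire>&/<key>"), keeping hex digits [12:20] of the digest and appending the unix expiry; the new AntiTheftChainToken is preferred, with the operator password only as a fallback. A stdlib re-derivation of the same token — the secret and key values are placeholders:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
	"time"
)

// upt reproduces the signing shown above.
func upt(secret, key string, expireAt int64) string {
	signStr := strings.Join([]string{secret, fmt.Sprint(expireAt), "/" + key}, "&")
	sum := md5.Sum([]byte(signStr))
	return hex.EncodeToString(sum[:])[12:20] + fmt.Sprint(expireAt)
}

func main() {
	expire := time.Now().Add(4 * time.Hour).Unix()
	fmt.Println(upt("anti-theft-token-or-operator-password", "dir/file.txt", expire))
}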

View File

@@ -7,10 +7,11 @@ import (
 type Addition struct {
 	driver.RootPath
 	Bucket           string `json:"bucket" required:"true"`
 	Endpoint         string `json:"endpoint" required:"true"`
 	OperatorName     string `json:"operator_name" required:"true"`
 	OperatorPassword string `json:"operator_password" required:"true"`
+	AntiTheftChainToken string `json:"anti_theft_chain_token" required:"false" default:""`
 	//CustomHost string `json:"custom_host"` //Endpoint与CustomHost作用相同去除
 	SignURLExpire int `json:"sign_url_expire" type:"number" default:"4"`
 }

Some files were not shown because too many files have changed in this diff.