Compare commits

181 Commits

Author SHA1 Message Date
d26887d211 fix: content-type conflicts with #5420 2023-11-24 19:22:19 +08:00
3f405de6a9 feat: customize allow origins, headers and methods 2023-11-24 19:18:34 +08:00
6100647310 fix: reflected XSS vulnerability plist api 2023-11-24 16:46:48 +08:00
34746e951c feat(offline_download): add simple http tool (close #4002) 2023-11-24 16:26:05 +08:00
b6134dc515 feat: allow keep files in offline download (close #4678) 2023-11-24 15:02:36 +08:00
d455a232ef fix(vtencent): hack for files reporting size 0 whose actual size is not 0
- allow using another proxy for vtencent and chaoxing
2023-11-23 22:35:07 +08:00
fe34d30d17 feat(crypt): add show hidden option (#5554) 2023-11-23 21:50:16 +08:00
0fbb986ba9 fix(aliyundrive_open): mitigation measures for 15-minute limit (#5560 close #5547)
* fix(aliyundrive_open): mitigation measures for AliOpen's 15-minute limit.

I conducted small-scale tests, which showed no significant negative impact. If the 15-minute issue still occurs, further measures will be needed; methods such as a local proxy can be attempted.

* chore(aliyundrive_open): change cache of the link to 1 minute

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-11-23 21:49:16 +08:00
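
The mitigation above pairs with the 1-minute link cache: links are re-fetched well before the provider's 15-minute window expires. A minimal sketch of such a TTL cache in Go (illustrative only; the `fetch` callback and field layout are assumptions, not alist's actual cache):

```go
package example

import (
	"sync"
	"time"
)

// cachedLink holds a download link and the time it was fetched.
type cachedLink struct {
	url     string
	fetched time.Time
}

// linkCache re-fetches a link once its entry is older than ttl
// (e.g. 1 minute, well under the provider's 15-minute validity).
type linkCache struct {
	mu    sync.Mutex
	ttl   time.Duration
	items map[string]cachedLink
	fetch func(path string) (string, error) // assumed fetcher, not alist's API
}

func newLinkCache(ttl time.Duration, fetch func(string) (string, error)) *linkCache {
	return &linkCache{ttl: ttl, items: map[string]cachedLink{}, fetch: fetch}
}

func (c *linkCache) Get(path string) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if it, ok := c.items[path]; ok && time.Since(it.fetched) < c.ttl {
		return it.url, nil // still fresh: reuse the cached link
	}
	url, err := c.fetch(path)
	if err != nil {
		return "", err
	}
	c.items[path] = cachedLink{url: url, fetched: time.Now()}
	return url, nil
}
```
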
1280070438 feat: add chaoxing and vtencent driver (#5526 close #3347)
* add chaoxing and vtencent

* add vtencent put file

* add sha1 to transfer files instantly

* simplified upload file code

* setting onlyproxy

* fix get files modifyDate bug
2023-11-23 21:40:16 +08:00
d7f66138eb docs: add sponsor VidHub [skip ci] 2023-11-22 15:09:39 +08:00
b2890f05ab feat: retry all failed task (close #5242) 2023-11-21 15:54:42 +08:00
7583c4d734 feat: customize workers and retry of task (close #5493 fix #5274) 2023-11-21 15:51:57 +08:00
11a30c5044 feat: refactor task module 2023-11-20 18:01:51 +08:00
de9647a5fa chore: remove useless code 2023-11-19 20:05:09 +08:00
8d5283604c ci: add short sha to artifact 2023-11-19 15:21:25 +08:00
867accafd1 fix(local): video file thumbnails not displaying on iOS Safari (#5420)
* perf(webdav): support for cookies on webdav drive

* fix(local): video file thumbnails not displaying on iOS Safari
2023-11-18 22:36:41 +08:00
6fc6751463 feat: support using external dist files (close #5531) 2023-11-18 19:56:22 +08:00
f904596cbc chore: remove refs to deprecated io/ioutil (#5519)
Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
2023-11-16 05:16:15 -06:00
3d51845f57 feat: invalidate old token after changing the password (close #5515) 2023-11-13 15:22:42 +08:00
a7421d8fc2 fix(deps): update module github.com/aws/aws-sdk-go to v1.46.7 (#5068)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-12 15:14:27 +08:00
55a14bc271 fix(mopan): 302 Redirect (#5505 close #5502)
* fix(mopan):302 Redirect

* fix(mopan): do not forget to close the body

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-11-12 15:13:55 +08:00
91f51f17d0 feat(webdav): add tls_insecure_skip_verify field (close #5490) 2023-11-10 15:38:23 +08:00
4355dae491 fix: incorrect content-type of apk files (close #5385) 2023-11-06 18:20:25 +08:00
da1c7a4c23 feat: add 115_share driver (#5481 close #5384)
This update introduces the ability to mount 115 share links.
Currently, only listing and downloading are supported. Note that a login and a share link are required for this feature to work.

 Close #5384
2023-11-06 16:58:57 +08:00
769281bd40 feat: refactor offline download (#5408 close #4108)
* wip: refactor offline download (#5331)

* base tool

* working: aria2

* refactor: change type of percentage to float64

* wip: adapt aria2

* wip: use items in offline_download

* wip: use tool manager

* wip: adapt qBittorrent

* chore: fix typo

* Squashed commit of the following:

commit 4fc0a77565
Author: Andy Hsu <i@nn.ci>
Date:   Fri Oct 20 21:06:25 2023 +0800

    fix(baidu_netdisk): upload file > 4GB (close #5392)

commit aaffaee2b5
Author: gmugu <94156510@qq.com>
Date:   Thu Oct 19 19:17:53 2023 +0800

    perf(webdav): support request with cookies (#5391)

commit 8ef8023c20
Author: NewbieOrange <NewbieOrange@users.noreply.github.com>
Date:   Thu Oct 19 19:17:09 2023 +0800

    fix(aliyundrive_open): upload progress for normal upload (#5398)

commit cdfbe6dcf2
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Wed Oct 18 16:27:07 2023 +0800

    fix: hash gcid empty file (#5394)

commit 94d028743a
Author: Andy Hsu <i@nn.ci>
Date:   Sat Oct 14 13:17:51 2023 +0800

    ci: remove `pr-welcome` label when close issue [skip ci]

commit 7f7335435c
Author: itsHenry <2671230065@qq.com>
Date:   Sat Oct 14 13:12:46 2023 +0800

    feat(cloudreve): support thumbnail (#5373 close #5348)

    * feat(cloudreve): support thumbnail

    * chore: remove unnecessary code

commit b9e192b29c
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Thu Oct 12 20:57:12 2023 +0800

    fix(115): limit request rate (#5367 close #5275)

    * fix(115):limit request rate

    * chore(115): fix unit of `limit_rate`

    ---------

    Co-authored-by: Andy Hsu <i@nn.ci>

commit 69a98eaef6
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Wed Oct 11 22:01:55 2023 +0800

    fix(deps): update module github.com/aliyun/aliyun-oss-go-sdk to v2.2.9+incompatible (#5141)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit 1ebc96a4e5
Author: Andy Hsu <i@nn.ci>
Date:   Tue Oct 10 18:32:00 2023 +0800

    fix(wopan): fatal error concurrent map writes (close #5352)

commit 66e2324cac
Author: Andy Hsu <i@nn.ci>
Date:   Tue Oct 10 18:23:11 2023 +0800

    chore(deps): upgrade dependencies

commit 7600dc28df
Author: Andy Hsu <i@nn.ci>
Date:   Tue Oct 10 18:13:58 2023 +0800

    fix(aliyundrive_open): change default api to raw server (close #5358)

commit 8ef89ad0a4
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Tue Oct 10 18:08:27 2023 +0800

    fix(baidu_netdisk): hash and `error 2` (#5356)

    * fix(baidu):hash and error:2

    * fix:invalid memory address

commit 35d672217d
Author: jeffmingup <1960588251@qq.com>
Date:   Sun Oct 8 19:29:45 2023 +0800

    fix(onedrive_app): incorrect api on `_accessToken` (#5346)

commit 1a283bb272
Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com>
Date:   Fri Oct 6 16:04:39 2023 +0800

    feat(google_drive): add `hash_info`, `ctime`, `thumbnail` (#5334)

commit a008f54f4d
Author: nkh0472 <67589323+nkh0472@users.noreply.github.com>
Date:   Thu Oct 5 13:10:51 2023 +0800

    docs: minor language improvements (#5329) [skip ci]

* fix: adapt update progress type

* Squashed commit of the following:

commit 65c5ec0c34
Author: itsHenry <2671230065@qq.com>
Date:   Sat Nov 4 13:35:09 2023 +0800

    feat(cloudreve): folder size count and switch (#5457 close #5395)

commit a6325967d0
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Mon Oct 30 15:11:20 2023 +0800

    fix(deps): update module github.com/charmbracelet/lipgloss to v0.9.1 (#5234)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit 4dff49470a
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Mon Oct 30 15:10:36 2023 +0800

    fix(deps): update golang.org/x/exp digest to 7918f67 (#5366)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit cc86d6f3d1
Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Date:   Sun Oct 29 14:45:55 2023 +0800

    fix(deps): update module golang.org/x/net to v0.17.0 [security] (#5370)

    Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

commit c0f9c8ebaf
Author: Andy Hsu <i@nn.ci>
Date:   Thu Oct 26 19:21:09 2023 +0800

    feat: add ignore direct link params (close #5434)
2023-11-06 16:56:55 +08:00
3bbdd4fa89 fix(115): fix driver package import and variable names (#5482) 2023-11-06 16:53:57 +08:00
68f440abdb fix(weiyun): unmarshal overflow (#5459) 2023-11-05 22:41:14 +08:00
65c5ec0c34 feat(cloudreve): folder size count and switch (#5457 close #5395) 2023-11-04 13:35:09 +08:00
a6325967d0 fix(deps): update module github.com/charmbracelet/lipgloss to v0.9.1 (#5234)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-30 15:11:20 +08:00
4dff49470a fix(deps): update golang.org/x/exp digest to 7918f67 (#5366)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-30 15:10:36 +08:00
cc86d6f3d1 fix(deps): update module golang.org/x/net to v0.17.0 [security] (#5370)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-29 14:45:55 +08:00
c0f9c8ebaf feat: add ignore direct link params (close #5434) 2023-10-26 19:21:09 +08:00
4fc0a77565 fix(baidu_netdisk): upload file > 4GB (close #5392) 2023-10-20 21:06:25 +08:00
aaffaee2b5 perf(webdav): support request with cookies (#5391) 2023-10-19 19:17:53 +08:00
8ef8023c20 fix(aliyundrive_open): upload progress for normal upload (#5398) 2023-10-19 19:17:09 +08:00
cdfbe6dcf2 fix: hash gcid empty file (#5394) 2023-10-18 16:27:07 +08:00
94d028743a ci: remove pr-welcome label when close issue [skip ci] 2023-10-14 13:17:51 +08:00
7f7335435c feat(cloudreve): support thumbnail (#5373 close #5348)
* feat(cloudreve): support thumbnail

* chore: remove unnecessary code
2023-10-14 13:12:46 +08:00
b9e192b29c fix(115): limit request rate (#5367 close #5275)
* fix(115):limit request rate

* chore(115): fix unit of `limit_rate`

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-10-12 20:57:12 +08:00
69a98eaef6 fix(deps): update module github.com/aliyun/aliyun-oss-go-sdk to v2.2.9+incompatible (#5141)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-11 22:01:55 +08:00
1ebc96a4e5 fix(wopan): fatal error concurrent map writes (close #5352) 2023-10-10 18:32:00 +08:00
66e2324cac chore(deps): upgrade dependencies 2023-10-10 18:23:11 +08:00
7600dc28df fix(aliyundrive_open): change default api to raw server (close #5358) 2023-10-10 18:13:58 +08:00
8ef89ad0a4 fix(baidu_netdisk): hash and error 2 (#5356)
* fix(baidu):hash and error:2

* fix:invalid memory address
2023-10-10 18:08:27 +08:00
35d672217d fix(onedrive_app): incorrect api on _accessToken (#5346) 2023-10-08 19:29:45 +08:00
1a283bb272 feat(google_drive): add hash_info, ctime, thumbnail (#5334) 2023-10-06 16:04:39 +08:00
a008f54f4d docs: minor language improvements (#5329) [skip ci] 2023-10-05 13:10:51 +08:00
3d7f79cba8 docs: change domain of contributors image [skip ci] 2023-10-03 17:34:24 +08:00
9ff83a7950 feat: add header to meta (ref #5317) 2023-10-02 16:43:29 +08:00
e719a1a456 feat(sso): custom username key for OIDC (close #5169) 2023-10-02 14:42:40 +08:00
40a6fcbdff ci: do not stale issue with working or pr-welcome label [skip ci] 2023-10-02 14:13:11 +08:00
0fd51646f6 feat(onedrive): custom host for download link (close #5310) 2023-10-02 14:07:47 +08:00
e8958019d9 fix(115): allow use proxy directly (close #5324) 2023-10-02 14:00:13 +08:00
e1ef690784 fix(terabox): encode parameters for filemanager api (#5308) 2023-10-01 16:58:29 +08:00
4024050dd0 chore: fix typo (#5316) 2023-10-01 16:58:00 +08:00
eb918658f0 fix(deps): update module github.com/ipfs/go-ipfs-api to v0.7.0 (#5247)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-30 22:58:19 +08:00
fb13dae136 feat(crypt): optional pre-generated thumbnails (#5284) 2023-09-27 13:57:10 +08:00
6b67a36d63 fix(terabox): auto refresh JsToken (close #5277) 2023-09-25 16:38:05 +08:00
a64dd4885e fix(139): fixed time zone (close #5263) 2023-09-22 16:54:16 +08:00
0f03a747d8 ci: cancel previous workflow run 2023-09-22 16:53:07 +08:00
30977cdc6d feat: sso compatibility mode (#5260) 2023-09-22 16:45:51 +08:00
106cf720c1 fix(baidu_netdisk): retry logic in request (close #5262) 2023-09-22 16:27:44 +08:00
882112ed1c feat: add hash_info field to /fs/get (close #5259) 2023-09-22 15:20:04 +08:00
2a6ab77295 fix(115): data race in Link (#5253) 2023-09-21 13:39:07 +08:00
f0981a0c8d chore(virtual): implement the driver interface with result 2023-09-20 09:02:56 +08:00
57eea4db17 fix(deps): update module github.com/go-resty/resty/v2 to v2.8.0 (#5244)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-20 08:51:34 +08:00
234852ca61 fix(deps): update module github.com/pkg/sftp to v1.13.6 (#5041)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-19 20:02:42 +08:00
809105b67e fix(deps): update module github.com/blevesearch/bleve/v2 to v2.3.10 (#5232)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-17 15:57:29 +08:00
02e8c31506 fix(deps): update golang.org/x/exp digest to 9212866 (#5205)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-16 23:21:42 +08:00
19b39a5c04 fix(onedrive): overwrite upload big file (close #5217 in #5218)
See https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
2023-09-14 13:38:07 +08:00
28e2731594 fix: clear cache recursively on deleting the folder (close #5209) 2023-09-13 16:06:17 +08:00
b1a279cbcc feat(139): implement MoveResult interface (close #5130) 2023-09-13 15:56:13 +08:00
352a6a741a feat(webdav): support copy directly without task (close #5206) 2023-09-13 15:45:57 +08:00
109015567a fix(deps): update module golang.org/x/oauth2 to v0.12.0 (#5058)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-12 12:52:48 +08:00
9e0fa77ca2 feat: add 123 link driver (close #4924) 2023-09-10 16:50:10 +08:00
335b11c698 chore: implement the driver interface with obj return [skip ci] 2023-09-08 15:25:49 +08:00
8e433355e6 fix(terabox): missing JsToken field on request (close #5189) 2023-09-08 15:18:56 +08:00
3504f017b9 fix(upload): memory leak on form upload as task (close #5185) 2023-09-07 15:51:52 +08:00
cd2f8077fa chore: enable all pprof handle on debug 2023-09-07 14:56:50 +08:00
d5b68a91d2 fix(webdav): optimize HEAD request (close #5182) 2023-09-06 16:32:51 +08:00
623c7dcea5 fix(189pc): get real link after redirect 2023-09-06 16:02:28 +08:00
ecbd6d86cd fix(lanzou): sub file in share folder need pwd (#5184) 2023-09-06 14:48:12 +08:00
7200344ace feat: adapt hash feature for some drivers (#5180)
* feat(pikpak,thunder): adaptation gcid hash

* chore(weiyun): add note

* feat(baidu_netdisk): adaptation rapid

* feat(baidu_photo): adaptation hash

* feat(189pc): adaptation rapid

* feat(mopan):adaptation ctime

* feat(139):adaptation hash and ctime

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-09-06 14:46:35 +08:00
b313ac4daa fix(crypt): fix 139cloud hack (#5178)
(cherry picked from commit 18bf64af47e58cc69cdd2e598de9c19538a7bf78)
2023-09-06 14:12:01 +08:00
f2f312b43a fix: http response body not close on status >= 400 (close #5163) 2023-09-05 15:46:16 +08:00
6f6d20e1ba fix: force_https not take effect on noRoute (close #5167) 2023-09-05 13:05:46 +08:00
3231c3d930 perf(db): release database before exit 2023-09-05 13:04:27 +08:00
b604e21c69 feat(webdav): support http chunked request (close #5161 in #5162)
However, we recommend still including the Content-Length header when putting files (see the sketch below).
2023-09-05 13:03:29 +08:00
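
For context on the client side: in Go's net/http, a request body of unknown length is sent with Transfer-Encoding: chunked, while setting ContentLength avoids that. A hedged sketch of the recommended PUT (illustrative only, not alist code):

```go
package example

import (
	"io"
	"net/http"
	"os"
)

// putFile uploads f with an explicit Content-Length, as the note above
// recommends. With an opaque reader and no ContentLength set, Go's
// http.Client would fall back to Transfer-Encoding: chunked instead.
func putFile(url string, f *os.File) error {
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPut, url, f)
	if err != nil {
		return err
	}
	req.ContentLength = fi.Size() // explicit length -> no chunked encoding
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}
```
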
3c66db9845 ci: split release actions 2023-09-03 22:57:18 +08:00
f6ab1f7f61 perf(ftp): do not use the SIZE FTP command (close #5150) 2023-09-03 18:47:32 +08:00
8e40465e86 fix(aliyundrive_open): date format on uploading (#5151)
(cherry picked from commit 88f815979ac91caa8bc425a2ff9a18bbd8a2e736)
2023-09-03 18:12:05 +08:00
37dffd0fce feat(crypt): customize filename_encoding (#5148)
close #5109
close #5080
2023-09-03 18:06:44 +08:00
e7c0d94b44 fix: form upload when ticked As A Task (#5145) 2023-09-03 15:40:40 +08:00
8102142007 fix(deps): update github.com/orzogc/fake115uploader digest to 58f9eb7 (#5133)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-02 14:50:06 +08:00
7c6dec5d47 fix(deps): update module 115driver to v1.0.16 (close #5117 in #5120)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-01 14:31:47 +08:00
dd10c0c5d0 chore(aliyundrive_open): print resp content on refresh token (close #5129) 2023-08-31 18:43:25 +08:00
34fadecc2c fix(ftp): dead lock on Read (close #5128) 2023-08-31 15:10:47 +08:00
cb8867fcc1 fix(deps): update module github.com/google/uuid to v1.3.1 (#5066)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-30 19:30:41 +08:00
092ed06833 feat(uss): add AntiTheftChainToken field (#5115)
* feat(uss): add AntiTheftChainToken; fix link func

* feat(uss): optimize _upt generation
2023-08-30 15:16:26 +08:00
6308f1c35d fix: updateTime, createTime and HashInfo (#5111) 2023-08-29 13:31:24 +08:00
ce10c9f120 fix: temp file not closed and incorrect WebPutAsTask 2023-08-28 18:18:02 +08:00
6c4736fc8f fix: allow no Last-Modified on upload api 2023-08-28 16:42:03 +08:00
b301b791c7 fix(local): set create and modified time for new file (close #4938) 2023-08-27 23:05:13 +08:00
19d34e2eb8 feat: receive lastModified from upload api 2023-08-27 23:03:09 +08:00
a3748af772 feat: misc improvements about upload/copy/hash (#5045)
general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: cross-storage rapid-upload support
general: enhance upload to avoid local temp file if possible
general: replace readseekcloser with File interface to speed up upstream operations (see the sketch after this entry)
feat(aliyun_open): same as above
feat(crypt): add hack for 139cloud

Close #4934 
Close #4819 

baidu_netdisk needs to improve the upload code to support rapid-upload
2023-08-27 21:14:23 +08:00
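
A rough illustration of the File-interface point above (the exact interface in alist may differ): a random-access handle lets hashers rewind and uploaders read chunks in place, where a one-shot stream often forces a local temp copy first.

```go
package example

import "io"

// File is a hypothetical random-access upload source: Read for plain
// streaming, ReaderAt/Seeker so a rapid-upload hasher can scan and then
// rewind (or chunks can be read concurrently), and Closer for cleanup.
type File interface {
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Closer
}
```
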
9b765ef696 chore: remove README.md executable permission (close #5097 in #5100) 2023-08-27 14:35:03 +08:00
8f493cccc4 fix(mopan): parameter error (#5091) 2023-08-25 14:10:05 +08:00
31a033dff1 fix(lanzou): download cannot find data (#5088) 2023-08-24 21:56:20 +08:00
8c3337b88b fix(deps): update module golang.org/x/image to v0.11.0 (#5044)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-21 15:01:11 +08:00
7238243664 fix(deps): update module golang.org/x/crypto to v0.12.0 [skip ci] (#5043)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-21 14:43:59 +08:00
ba2b15ab24 fix(deps): update module golang.org/x/net to v0.14.0 [skip ci] (#5051)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-21 14:43:07 +08:00
28dc8822b7 fix(deps): update module github.com/u2takey/ffmpeg-go to v0.5.0 [skip ci] (#5042)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-20 13:10:01 +08:00
358c5055e9 fix(lanzou): download cannot find file sign (close #5046 in #5048) 2023-08-20 13:08:57 +08:00
b6cd40e6d3 chore(deps): update actions-cool/issues-helper action to v3.5.2 [skip ci] (#5033)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-19 14:20:55 +08:00
7d96d8070d fix(deps): update github.com/winfsp/cgofuse digest to f87f5db [skip ci] (#4908)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-19 14:19:30 +08:00
d482fb5f26 fix(deps): update module github.com/aws/aws-sdk-go to v1.44.327 [skip ci] (#4395)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-19 14:18:08 +08:00
60402ce1fc fix(deps): update module github.com/deckarep/golang-set/v2 to v2.3.1 [skip ci] (#4925)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-19 14:15:22 +08:00
1e3950c847 fix: copy tasks using multi-thread downloader can't be canceled (#5028)
Related to #4981.
2023-08-19 14:06:59 +08:00
ed550594da fix(deps): update golang.org/x/exp digest to d852ddb (#4910)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-18 19:34:38 +08:00
3bbae29f93 feat(cloudreve): add custom user-agent (close #5020) 2023-08-17 19:41:21 +08:00
3b74f8cd9a fix(deps): update module github.com/sheltonzhu/115driver to v1.0.15 (#4926)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-17 15:33:46 +08:00
e9bdb91e01 fix: ignore salt on marshal model.User 2023-08-16 13:31:15 +08:00
1aa024ed6b feat: support webauthn login (#4945)
* feat: support webauthn login

* manually merge

* fix: clear user cache after updating authn

* decrease db size of Authn

* change authn type to text

* simplify code structure

---------

Co-authored-by: Andy Hsu <i@nn.ci>
2023-08-14 22:54:38 +08:00
13e8d36e1a fix(aliyundrive_open): use RawStdEncoding for base64 2023-08-13 20:52:38 +08:00
5606c23768 perf(copy): use multi-thread downloader (close #5000) 2023-08-13 15:31:49 +08:00
0b675d6c02 chore(deps): bump github.com/libp2p/go-libp2p to 0.27.8 (#4978)
Bumps [github.com/libp2p/go-libp2p](https://github.com/libp2p/go-libp2p) from 0.26.3 to 0.27.8.
- [Release notes](https://github.com/libp2p/go-libp2p/releases)
- [Changelog](https://github.com/libp2p/go-libp2p/blob/master/CHANGELOG.md)
- [Commits](https://github.com/libp2p/go-libp2p/compare/v0.26.3...v0.27.8)

---
updated-dependencies:
- dependency-name: github.com/libp2p/go-libp2p
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-11 22:57:42 +08:00
c1db3a36ad feat: upload progress recovery (#4987)
* feat(189pc):upload progress recovery

* fix:some err

* feat(baidu_netdisk,baidu_photo):upload progress recovery

* feat(mopan):upload progress recovery

* feat(baidu_netdisk):custom upload api
2023-08-11 14:23:30 +08:00
c59dbb4f9e fix(local): files get deleted when copied to other storage (close #4983) 2023-08-10 16:42:09 +08:00
df6b306fce perf(drivers): fs operations and cache (#4965)
* perf(baidu_photo):multi-thread upload

* perf(baidu_netdisk):multi-thread upload and cache optimization

* fix:LimitWriter

* fix(weiyun):only one login is allowed

* feat(189pc):multi threaded upload

* feat(baidu_netdisk):multi threaded upload

* feat(baidu_photo):multi threaded upload

* feat(weiyun):multi threaded upload

* perf(aliyundriver_open):optimize upload code and optimize cache

* fix(weiyun):invalid directory ID

* fix(baidu_netdisk):modified time

* fix(baidu_netdisk,baidu_photo):upload slice error

* perf(baidu_netdisk):cancel unnecessary retries

* fix(limitWriter): must return a non-nil error if it returns n < len(p) (see the sketch after this entry)

* fix(aliyundrive_open):Name and Filename only use one

* perf(mopan):multi-thread upload
2023-08-09 16:13:09 +08:00
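
The limitWriter bullet above cites the io.Writer contract: Write must return a non-nil error whenever it returns n < len(p). A minimal conforming sketch (not alist's actual implementation):

```go
package example

import "io"

// limitWriter writes at most limit bytes to w. Per the io.Writer contract,
// it must return a non-nil error whenever it reports n < len(p).
type limitWriter struct {
	w     io.Writer
	limit int64
}

func (lw *limitWriter) Write(p []byte) (int, error) {
	truncated := false
	if int64(len(p)) > lw.limit {
		p = p[:lw.limit]
		truncated = true
	}
	n, err := lw.w.Write(p)
	lw.limit -= int64(n)
	if err == nil && truncated {
		err = io.ErrShortWrite // n < len(original p): an error is mandatory
	}
	return n, err
}
```
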
9d45718e5f fix: model.Link marshal error (close #4971)
ignore unsupported fields of `model.Link`
2023-08-09 14:04:31 +08:00
b91ed7a78a fix(aliyundrive_open): retry refresh token if sub not match 2023-08-08 22:08:05 +08:00
95386d777b feat(aliyundrive_open): record token exchange 2023-08-08 20:38:13 +08:00
635809c376 feat(cmd): list all storages command (close #4960) 2023-08-08 16:15:45 +08:00
af6bb2a6aa docs: ignore network reason for bug report [skip ci] 2023-08-08 14:54:32 +08:00
a797494aa3 fix: missed update user's password 2023-08-07 18:51:54 +08:00
353dd7f796 ci: mark non-prerelease when upload assets 2023-08-07 16:23:36 +08:00
1c00d64952 feat: rehash password with a unique salt for each user 2023-08-07 15:46:19 +08:00
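Together with the sha256 commit below (75acbcc115), this suggests per-user salted SHA-256 hashing. A sketch under that assumption; the salt size and the "password-salt" layout are guesses for illustration, not alist's exact format:

```go
package example

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
)

// newSalt returns a random per-user salt, so identical passwords
// no longer hash to identical stored values across users.
func newSalt() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

// hashPassword derives the stored value; the "password-salt"
// concatenation here is an assumed layout.
func hashPassword(password, salt string) string {
	sum := sha256.Sum256([]byte(password + "-" + salt))
	return hex.EncodeToString(sum[:])
}
```
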
ff5cf3f4fa feat: allow use token to access WebDAV 2023-08-07 14:38:50 +08:00
5b6b2f427a feat(cmd): add show token command 2023-08-07 13:49:23 +08:00
7877184bee feat(baidu_netdisk): add retry to most operations (close #4863 in #4939) 2023-08-07 13:44:28 +08:00
e9cb37122e chore(cmd): change some output for admin command 2023-08-06 23:02:22 +08:00
a425392a2b feat(cmd): set or random new password for admin 2023-08-06 22:34:02 +08:00
75acbcc115 perf: sha256 for user's password (close #3552) 2023-08-06 22:09:17 +08:00
30415cefbe perf: delete user cache after cancel 2FA 2023-08-06 20:47:58 +08:00
1d06a0019f feat(search): paging and scope (close #4381 in #4930)
Co-authored-by: Andy Hsu <i@nn.ci>
2023-08-06 15:13:23 +08:00
3686075a7f ci: change auto commit user [skip ci] 2023-08-05 16:32:06 +08:00
6c1c7e5cc0 fix(wopan): missing familyID on mkdir (close #4927) 2023-08-04 22:26:56 +08:00
c4f901b201 fix: undeclared identifier kIOMainPortDefault on darwin/arm64 2023-08-04 21:23:58 +08:00
4b7acb1389 feat(ci): add multiple ARM targets prebuilt (close #4243) 2023-08-04 20:57:56 +08:00
15b7169df4 perf: multi-thread downloader, Content-Disposition (#4921)
general: enhance multi-thread downloader with a cancelable context, immediately stopping all stream processes when canceled (see the sketch after this entry);
feat(crypt): improve stream closing;
general: fix the bug where downloading a file becomes previewing a stream on modern browsers;

Co-authored-by: Sean He <866155+seanhe26@users.noreply.github.com>
Co-authored-by: Andy Hsu <i@nn.ci>
2023-08-04 15:29:54 +08:00
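
A hedged sketch of the cancelable ranged download described above (assumes the server honors Range requests and uses golang.org/x/sync/errgroup; not alist's actual downloader):

```go
package example

import (
	"context"
	"fmt"
	"io"
	"net/http"

	"golang.org/x/sync/errgroup"
)

// downloadRanges fetches url in parallel chunks via Range requests and
// writes each chunk at its offset. Cancelling ctx aborts every in-flight
// part, matching the "immediately stop all stream processes" behavior
// above. chunk must be > 0.
func downloadRanges(ctx context.Context, url string, size, chunk int64, w io.WriterAt) error {
	g, ctx := errgroup.WithContext(ctx)
	for off := int64(0); off < size; off += chunk {
		off, end := off, off+chunk-1
		if end >= size {
			end = size - 1
		}
		g.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
			if err != nil {
				return err
			}
			req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", off, end))
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err // a canceled ctx surfaces here and stops all parts
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusPartialContent {
				return fmt.Errorf("unexpected status %s", resp.Status)
			}
			_, err = io.Copy(io.NewOffsetWriter(w, off), resp.Body)
			return err
		})
	}
	return g.Wait()
}
```
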
861948bcf3 revert: "ci: auto gofmt for pull request" [skip ci]
This reverts commit 8b353da0d2.
2023-08-04 13:25:23 +08:00
e5ffd39cf2 feat: add 123Pan Share driver (close #4853 in #4898)
Co-authored-by: Andy Hsu <i@nn.ci>
2023-08-03 15:01:43 +08:00
8b353da0d2 ci: auto gofmt for pull request [skip ci] 2023-08-03 14:49:22 +08:00
49bde82426 perf(189pc): empty file upload and cache optimization (#4913)
- login captcha error
- cache optimization
- upload empty file
2023-08-03 14:08:40 +08:00
3e285aaec4 feat: add weiyun support (close #4802 in #4883)
Co-authored-by: Andy Hsu <i@nn.ci>
2023-08-02 21:39:59 +08:00
355fc576b1 issue: add config to bug report template [skip ci] 2023-08-02 21:05:50 +08:00
a69d72aa20 feat(aliyundrive_open): support resource drive (close #4889) 2023-08-02 15:50:01 +08:00
e5d123c5d3 fix(deps): update module golang.org/x/image to v0.10.0 [skip ci] (#4902)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-02 15:38:10 +08:00
220eb33f88 fix(deps): update module golang.org/x/net to v0.13.0 [skip ci] (#4903)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-02 15:16:39 +08:00
5238850036 docs: sync README [skip ci] 2023-08-02 15:15:48 +08:00
81ac963567 fix(deps): update module github.com/ipfs/go-ipfs-api to v0.6.1 [skip ci] (#4882)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-02 15:01:25 +08:00
3c21a9a520 feat: Crypt driver, improve http/webdav handling (#4884)
this PR has several enhancements, fixes, and features:
- [x] Crypt: a transparent encryption driver. Anyone can easily and safely store encrypted data on a remote storage provider. Think of your data as stored in a safe: the storage provider can see the safe, but not your data.
  - [x] Optional: compatible with [Rclone Crypt](https://rclone.org/crypt/), giving more ways to manipulate the encrypted data.
  - [x] directory and filename encryption
  - [x] server-side encryption mode (the server encrypts & decrypts all data, and all data flows through the server)
- [x] obfuscate sensitive information internally
- [x] introduced a server memory-cached multi-thread downloader.
  - [x] Driver: **Quark** enables this feature, for faster loading in any single-thread scenario, e.g. a media player playing directly from the link is now faster.
- [x] general improvements to HTTP/WebDAV stream processing, header handling, and response handling
  - [x] Driver: **Mega** supports ranged HTTP headers
  - [x] Driver: **Quark** fixes a bug where the HTTP request to the Quark server was not closed after the user's end closed its connection to alist

## Crypt, a transparent Encrypt/Decrypt Driver. (Rclone Crypt compatible)

e.g.
Crypt mount path -> /vault
Crypt remote path -> /ali/encrypted
Aliyun mount path -> /ali

When the user uploads a.jpg to /vault, the data is encrypted and saved to /ali/encrypted/xxxxx. When the user accesses a.jpg, it is automatically decrypted, and the user can do anything with it.
Since it is Rclone Crypt compatible, users can download /ali/encrypted/xxxxx and decrypt it with the rclone crypt tool, or mount this folder using rclone and then mount the decrypted folder in Linux...

NB. Some breaking changes are made to follow global standards, e.g. processing HTTP headers properly.

close #4679 
close #4827 

Co-authored-by: Sean He <866155+seanhe26@users.noreply.github.com>
Co-authored-by: Andy Hsu <i@nn.ci>
2023-08-02 14:40:36 +08:00
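
To make the path example concrete, a toy sketch of the mount-to-remote mapping; the base32 name mangling here is only a placeholder for the real (Rclone Crypt compatible) filename encryption:

```go
package example

import (
	"encoding/base32"
	"path"
	"strings"
)

// remotePath maps a path under the Crypt mount (/vault) to its encrypted
// location under the remote base (/ali/encrypted), per the example above.
// The base32 encoding stands in for the actual filename encryption and
// is NOT the Rclone Crypt scheme.
func remotePath(userPath string) string {
	rel := strings.TrimPrefix(userPath, "/vault/")
	enc := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString([]byte(rel))
	return path.Join("/ali/encrypted", enc)
}
```
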
1dc1dd1f07 feat(aliyundrive_open): support livp format file download (close #4890) 2023-08-01 21:50:25 +08:00
c9ea9bce81 feat(lanzou): support login with account (close #4880 in #4885) 2023-08-01 19:44:57 +08:00
9f08353d31 feat(baidu_photo): optional delete album origin file (close #4872 in #4875) 2023-07-31 18:29:45 +08:00
ce0c3626c2 ci: remove working label on issue closed 2023-07-31 16:54:00 +08:00
06f46206db fix(baidu_photo): album download (close #4603 in #4871)
Co-authored-by: Andy Hsu <i@nn.ci>
2023-07-31 16:27:16 +08:00
579f0c06af ci: delete file after decompression
fix: no space left on device
2023-07-30 18:25:52 +08:00
b12d92acc9 perf(baidu_netdisk): optimize memory allocate 2023-07-29 17:12:43 +08:00
e700ce15e5 fix: missed progress in upload task 2023-07-29 17:09:26 +08:00
7dbef7d559 chore(deps): update actions-cool/issues-helper action to v3.5.1 (#4855)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-28 16:16:42 +08:00
7e9cdd8b07 fix(aliyundrive_open): fail limit on concurrently call (#4851) 2023-07-28 15:55:39 +08:00
cee6bc6b5d fix(terabox): slice out of range (close #4858 in #4860) 2023-07-28 15:52:20 +08:00
cfd23c05b4 fix(139): upload empty file (close #4711) 2023-07-27 19:26:22 +08:00
0c1acd72ca fix: link cache not deleted after overwriting file (close #4852) 2023-07-27 19:07:53 +08:00
e2ca06dcca docs: update go version 2023-07-27 18:32:33 +08:00
0828fd787d chore: update placeholder of version in bug_report issue template 2023-07-27 18:31:16 +08:00
2e23ea68d4 fix(aliyundrive_open): increase limit interval (close #4851) 2023-07-27 18:26:11 +08:00
4afa822bec fix(123): Use APP-side API (close #4834 in #4856) 2023-07-27 15:51:59 +08:00
f2ca9b40db fix(qbittorrent): incorrect field type (close #4843) 2023-07-25 13:31:41 +08:00
4c2535cb22 fix(115): user-agent lost on upload (close #4831) 2023-07-23 15:18:33 +08:00
248 changed files with 12159 additions and 3390 deletions

.github/FUNDING.yml

@@ -3,7 +3,7 @@
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
ko_fi: xhofe # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username

@@ -7,28 +7,44 @@ body:
value: |
Thanks for taking the time to fill out this bug report, please **confirm that your issue is not a duplicate issue and not because of your operation or version issues**
感谢您花时间填写此错误报告,请**务必确认您的issue不是重复的且不是因为您的操作或版本问题**
- type: checkboxes
attributes:
label: Please make sure of the following things
description: You may select more than one, even select all.
description: |
You must check all the following, otherwise your issue may be closed directly. Or you can go to the [discussions](https://github.com/alist-org/alist/discussions)
您必须勾选以下所有内容否则您的issue可能会被直接关闭。或者您可以去[讨论区](https://github.com/alist-org/alist/discussions)
options:
- label: I have read the [documentation](https://alist.nn.ci).
- label: I'm sure there are no duplicate issues or discussions.
- label: I'm sure it's due to `alist` and not something else(such as `Dependencies` or `Operational`).
- label: I'm sure I'm using the latest version
- label: |
I have read the [documentation](https://alist.nn.ci).
我已经阅读了[文档](https://alist.nn.ci)。
- label: |
I'm sure there are no duplicate issues or discussions.
我确定没有重复的issue或讨论。
- label: |
I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host)`依赖`或`操作`)。
- label: |
I'm sure this issue is not fixed in the latest version.
我确定这个问题在最新版本中没有被修复。
- type: input
id: version
attributes:
label: Alist Version / Alist 版本
description: What version of our software are you running?
placeholder: v2.0.0
label: AList Version / AList 版本
description: |
What version of our software are you running? Do not use `latest` or `master` as an answer.
您使用的是哪个版本的软件?请不要使用`latest`或`master`作为答案。
placeholder: v3.xx.xx
validations:
required: true
- type: input
id: driver
attributes:
label: Driver used / 使用的存储驱动
description: What storage driver are you using?
description: |
What storage driver are you using?
您使用的是哪个存储驱动?
placeholder: "for example: Onedrive"
validations:
required: true
@@ -47,6 +63,15 @@ body:
请提供能复现此问题的链接请知悉如果不提供它你的issue可能会被直接关闭。
validations:
required: true
- type: textarea
id: config
attributes:
label: Config / 配置
description: |
Please provide the configuration file of your `AList` application and take a screenshot of the relevant storage configuration. (hide privacy field)
请提供您的`AList`应用的配置文件,并截图相关存储配置。(隐藏隐私字段)
validations:
required: true
- type: textarea
id: logs
attributes:

.github/stale.yml

@@ -6,6 +6,8 @@ daysUntilClose: 20
exemptLabels:
- accepted
- security
- working
- pr-welcome
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable

@@ -11,6 +11,10 @@
- 'cmd/lang.go'
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
auto_lang:
strategy:
@@ -53,8 +57,8 @@ jobs:
run: |
cd alist-web
git add .
git config --local user.email "i@nn.ci"
git config --local user.name "Andy Hsu"
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
git commit -m "chore: auto update i18n file" -a 2>/dev/null || :
cd ..

@@ -6,6 +6,10 @@
pull_request:
branches: [ 'main' ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
build:
strategy:
@@ -23,6 +27,9 @@ jobs:
- name: Checkout
uses: actions/checkout@v3
- uses: benjlevesque/short-sha@v2.2
id: short-sha
- name: Install dependencies
run: |
sudo snap install zig --classic --beta
@@ -37,5 +44,5 @@ jobs:
- name: Upload artifact
uses: actions/upload-artifact@v3
with:
name: alist
name: alist_${{ env.SHA }}
path: dist

@@ -4,6 +4,10 @@
push:
branches: [ main ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
build_docker:
name: Build docker
@@ -53,8 +57,8 @@ jobs:
- name: Commit
run: |
git config --local user.email "i@nn.ci"
git config --local user.name "Noah Hsu"
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
git commit --allow-empty -m "Trigger build for ${{ github.sha }}"
- name: Push commit

.github/workflows/issue_on_close.yml (new file)

@@ -0,0 +1,17 @@
name: Remove working label when issue closed
on:
issues:
types: [closed]
jobs:
rm-working:
runs-on: ubuntu-latest
steps:
- name: Remove working label
uses: actions-cool/issues-helper@v3
with:
actions: 'remove-labels'
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
labels: 'working,pr-welcome'

@@ -10,7 +10,7 @@ jobs:
if: github.event.label.name == 'question'
steps:
- name: Create comment
uses: actions-cool/issues-helper@v3.5.0
uses: actions-cool/issues-helper@v3.5.2
with:
actions: 'create-comment'
token: ${{ secrets.GITHUB_TOKEN }}

@@ -41,17 +41,11 @@ jobs:
run: |
bash build.sh release
- name: Release latest
uses: irongut/EditRelease@v1.2.0
with:
token: ${{ secrets.MY_TOKEN }}
id: ${{ github.event.release.id }}
prerelease: false
- name: Upload assets
uses: softprops/action-gh-release@v1
with:
files: build/compress/*
prerelease: false
release_desktop:
needs: release
@@ -68,8 +62,8 @@
- name: Add tag
run: |
git config --local user.email "i@nn.ci"
git config --local user.name "Andy Hsu"
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
version=$(wget -qO- -t1 -T2 "https://api.github.com/repos/alist-org/alist/releases/latest" | grep "tag_name" | head -n 1 | awk -F ":" '{print $2}' | sed 's/\"//g;s/,//g;s/ //g')
git tag -a $version -m "release $version"

@@ -56,8 +56,8 @@ jobs:
- name: Add tag
run: |
git config --local user.email "i@nn.ci"
git config --local user.name "Andy Hsu"
git config --local user.email "bot@nn.ci"
git config --local user.name "IlaBot"
git tag -a ${{ github.ref_name }} -m "release ${{ github.ref_name }}"
- name: Push tags

@@ -0,0 +1,34 @@
name: release_linux_musl
on:
release:
types: [ published ]
jobs:
release_linux_musl:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.20' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl
- name: Upload assets
uses: softprops/action-gh-release@v1
with:
files: build/compress/*

@@ -0,0 +1,34 @@
name: release_linux_musl_arm
on:
release:
types: [ published ]
jobs:
release_linux_musl_arm:
strategy:
matrix:
platform: [ ubuntu-latest ]
go-version: [ '1.20' ]
name: Release
runs-on: ${{ matrix.platform }}
steps:
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: ${{ matrix.go-version }}
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build
run: |
bash build.sh release linux_musl_arm
- name: Upload assets
uses: softprops/action-gh-release@v1
with:
files: build/compress/*

.gitignore

@@ -29,3 +29,5 @@ output/
/daemon/
/public/dist/*
/!public/dist/README.md
.VSCodeCounter

@@ -7,7 +7,7 @@
Prerequisites:
- [git](https://git-scm.com)
- [Go 1.19+](https://golang.org/doc/install)
- [Go 1.20+](https://golang.org/doc/install)
- [gcc](https://gcc.gnu.org/)
- [nodejs](https://nodejs.org/)

README.md (executable → normal file)

@@ -43,7 +43,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
## Features
- [x] Multiple storage
- [x] Multiple storages
- [x] Local storage
- [x] [Aliyundrive](https://www.aliyundrive.com/)
- [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)
@@ -86,11 +86,12 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
- [x] Protected routes (password protection and authentication)
- [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
- [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
- [x] Cloudflare workers proxy
- [x] Cloudflare Workers proxy
- [x] File/Folder package download
- [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
- [x] Offline download
- [x] Copy files between two storage
- [x] Multi-thread downloading acceleration for single-thread download/stream
## Document
@@ -102,7 +103,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
## Discussion
Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature request only.**
Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**
## Sponsor
@@ -111,22 +112,23 @@ https://alist.nn.ci/guide/sponsor.html
### Special sponsors
- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
- [VidHub](https://okaapps.com/product/1659622164?ref=alist) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](https://zhaoziyuan.pw/) - 阿里云盘资源搜索引擎
- [JetBrains](https://www.jetbrains.com/) - Essential tools for software developers and teams
## Contributors
Thanks goes to these wonderful people:
[![Contributors](http://contributors.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
[![Contributors](http://contrib.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
## License
The `AList` is open-source software licensed under the AGPL-3.0 license.
## Disclaimer
- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning Golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
- This program is implemented by calling the official sdk/interface, without destroying the official interface behavior;
- This program only does 302 redirect/traffic forwarding, and does not intercept, store, or tamper with any user data;
- Before using this program, you should understand and bear the corresponding risks, including but not limited to account ban, download speed limit, etc., which is none of this program's business;

@@ -90,6 +90,7 @@
- [x] 网页上传(可以允许访客上传),删除,新建文件夹,重命名,移动,复制
- [x] 离线下载
- [x] 跨存储复制文件
- [x] 单线程下载/串流的多线程下载加速
## 文档
@@ -109,15 +110,16 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我
### 特别赞助
- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (国内API服务器赞助)
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
- [VidHub](https://zh.okaapps.com/product/1659622164?ref=alist) - 苹果生态下优雅的网盘视频播放器iPhoneiPadMacApple TV全平台支持。
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (国内API服务器赞助)
- [找资源](https://zhaoziyuan.pw/) - 阿里云盘资源搜索引擎
- [JetBrains](https://www.jetbrains.com/) - Essential tools for software developers and teams
## 贡献者
Thanks goes to these wonderful people:
[![Contributors](http://contributors.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
[![Contributors](http://contrib.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
## 许可

@@ -91,6 +91,7 @@
- [x] ウェブアップロード(訪問者にアップロードを許可できる), 削除, mkdir, 名前変更, 移動, コピー
- [x] オフラインダウンロード
- [x] 二つのストレージ間でファイルをコピー
- [x] シングルスレッドのダウンロード/ストリーム向けのマルチスレッド ダウンロード アクセラレーション
## ドキュメント
@@ -111,15 +112,16 @@ https://alist.nn.ci/guide/sponsor.html
### スペシャルスポンサー
- [亚洲云 - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商](https://www.asiayun.com/aff/QQCOOQKZ) (sponsored Chinese API server)
- [找资源 - 阿里云盘资源搜索引擎](https://zhaoziyuan.pw/)
- [JetBrains: Essential tools for software developers and teams](https://www.jetbrains.com/)
- [VidHub](https://okaapps.com/product/1659622164?ref=alist) - An elegant cloud video player within the Apple ecosystem. Support for iPhone, iPad, Mac, and Apple TV.
- [亚洲云](https://www.asiayun.com/aff/QQCOOQKZ) - 高防服务器|服务器租用|福州高防|广东电信|香港服务器|美国服务器|海外服务器 - 国内靠谱的企业级云计算服务提供商 (sponsored Chinese API server)
- [找资源](https://zhaoziyuan.pw/) - 阿里云盘资源搜索引擎
- [JetBrains](https://www.jetbrains.com/) - Essential tools for software developers and teams
## コントリビューター
これらの素晴らしい人々に感謝します:
[![Contributors](http://contributors.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
[![Contributors](http://contrib.nn.ci/api?repo=alist-org/alist&repo=alist-org/alist-web&repo=alist-org/docs)](https://github.com/alist-org/alist/graphs/contributors)
## ライセンス

@@ -89,18 +89,31 @@ BuildDocker() {
}
BuildRelease() {
rm -rf .git/
mkdir -p "build"
BuildWinArm64 ./build/alist-windows-arm64.exe
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# why? Because some target platforms seem to have issues with upx compression
upx -9 ./alist-linux-amd64
cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
upx -9 ./alist-windows-amd64-upx.exe
mv alist-* build
}
BuildReleaseLinuxMusl() {
rm -rf .git/
mkdir -p "build"
muslflags="--extldflags '-static -fpic' $ldflags"
BASE="https://musl.nn.ci/"
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross arm-linux-musleabihf-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
FILES=(x86_64-linux-musl-cross aarch64-linux-musl-cross mips-linux-musl-cross mips64-linux-musl-cross mips64el-linux-musl-cross mipsel-linux-musl-cross powerpc64le-linux-musl-cross s390x-linux-musl-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
curl -L -o "${i}.tgz" "${url}"
sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
rm -f "${i}.tgz"
done
OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-arm linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc arm-linux-musleabihf-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
OS_ARCHES=(linux-musl-amd64 linux-musl-arm64 linux-musl-mips linux-musl-mips64 linux-musl-mips64le linux-musl-mipsle linux-musl-ppc64le linux-musl-s390x)
CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc mips-linux-musl-gcc mips64-linux-musl-gcc mips64el-linux-musl-gcc mipsel-linux-musl-gcc powerpc64le-linux-musl-gcc s390x-linux-musl-gcc)
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@@ -111,13 +124,39 @@ BuildRelease() {
export CGO_ENABLED=1
go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
done
BuildWinArm64 ./build/alist-windows-arm64.exe
xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
# why? Because some target platforms seem to have issues with upx compression
upx -9 ./alist-linux-amd64
cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
upx -9 ./alist-windows-amd64-upx.exe
mv alist-* build
}
BuildReleaseLinuxMuslArm() {
rm -rf .git/
mkdir -p "build"
muslflags="--extldflags '-static -fpic' $ldflags"
BASE="https://musl.nn.ci/"
# FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armeb-linux-musleabi-cross armeb-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
FILES=(arm-linux-musleabi-cross arm-linux-musleabihf-cross armel-linux-musleabi-cross armel-linux-musleabihf-cross armv5l-linux-musleabi-cross armv5l-linux-musleabihf-cross armv6-linux-musleabi-cross armv6-linux-musleabihf-cross armv7l-linux-musleabihf-cross armv7m-linux-musleabi-cross armv7r-linux-musleabihf-cross)
for i in "${FILES[@]}"; do
url="${BASE}${i}.tgz"
curl -L -o "${i}.tgz" "${url}"
sudo tar xf "${i}.tgz" --strip-components 1 -C /usr/local
rm -f "${i}.tgz"
done
# OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armeb linux-musleabihf-armeb linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
# CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armeb-linux-musleabi-gcc armeb-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
# GOARMS=('' '' '' '' '' '' '5' '5' '6' '6' '7' '7' '7')
OS_ARCHES=(linux-musleabi-arm linux-musleabihf-arm linux-musleabi-armel linux-musleabihf-armel linux-musleabi-armv5l linux-musleabihf-armv5l linux-musleabi-armv6 linux-musleabihf-armv6 linux-musleabihf-armv7l linux-musleabi-armv7m linux-musleabihf-armv7r)
CGO_ARGS=(arm-linux-musleabi-gcc arm-linux-musleabihf-gcc armel-linux-musleabi-gcc armel-linux-musleabihf-gcc armv5l-linux-musleabi-gcc armv5l-linux-musleabihf-gcc armv6-linux-musleabi-gcc armv6-linux-musleabihf-gcc armv7l-linux-musleabihf-gcc armv7m-linux-musleabi-gcc armv7r-linux-musleabihf-gcc)
GOARMS=('' '' '' '' '5' '5' '6' '6' '7' '7' '7')
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
arm=${GOARMS[$i]}
echo building for ${os_arch}
export GOOS=linux
export GOARCH=arm
export CC=${cgo_cc}
export CGO_ENABLED=1
export GOARM=${arm}
go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
done
}
MakeRelease() {
@@ -139,8 +178,8 @@ MakeRelease() {
rm -f alist.exe
done
cd compress
find . -type f -print0 | xargs -0 md5sum >md5.txt
cat md5.txt
find . -type f -print0 | xargs -0 md5sum >"$1"
cat "$1"
cd ../..
}
@@ -155,9 +194,15 @@ elif [ "$1" = "release" ]; then
FetchWebRelease
if [ "$2" = "docker" ]; then
BuildDocker
elif [ "$2" = "linux_musl_arm" ]; then
BuildReleaseLinuxMuslArm
MakeRelease "md5-linux-musl-arm.txt"
elif [ "$2" = "linux_musl" ]; then
BuildReleaseLinuxMusl
MakeRelease "md5-linux-musl.txt"
else
BuildRelease
MakeRelease
MakeRelease "md5.txt"
fi
else
echo -e "Parameter error"

@@ -4,30 +4,90 @@ Copyright © 2022 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/pkg/utils/random"
"github.com/spf13/cobra"
)
// PasswordCmd represents the password command
var PasswordCmd = &cobra.Command{
// AdminCmd represents the password command
var AdminCmd = &cobra.Command{
Use: "admin",
Aliases: []string{"password"},
Short: "Show admin user's info",
Short: "Show admin user's info and some operations about admin user's password",
Run: func(cmd *cobra.Command, args []string) {
Init()
defer Release()
admin, err := op.GetAdmin()
if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err)
} else {
utils.Log.Infof("admin user's info: \nusername: %s\npassword: %s", admin.Username, admin.Password)
utils.Log.Infof("Admin user's username: %s", admin.Username)
utils.Log.Infof("The password can only be output at the first startup, and then stored as a hash value, which cannot be reversed")
utils.Log.Infof("You can reset the password with a random string by running [alist admin random]")
utils.Log.Infof("You can also set a new password by running [alist admin set NEW_PASSWORD]")
}
},
}
func init() {
RootCmd.AddCommand(PasswordCmd)
var RandomPasswordCmd = &cobra.Command{
Use: "random",
Short: "Reset admin user's password to a random string",
Run: func(cmd *cobra.Command, args []string) {
newPwd := random.String(8)
setAdminPassword(newPwd)
},
}
var SetPasswordCmd = &cobra.Command{
Use: "set",
Short: "Set admin user's password",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
utils.Log.Errorf("Please enter the new password")
return
}
setAdminPassword(args[0])
},
}
var ShowTokenCmd = &cobra.Command{
Use: "token",
Short: "Show admin token",
Run: func(cmd *cobra.Command, args []string) {
Init()
defer Release()
token := setting.GetStr(conf.Token)
utils.Log.Infof("Admin token: %s", token)
},
}
func setAdminPassword(pwd string) {
Init()
defer Release()
admin, err := op.GetAdmin()
if err != nil {
utils.Log.Errorf("failed get admin user: %+v", err)
return
}
admin.SetPassword(pwd)
if err := op.UpdateUser(admin); err != nil {
utils.Log.Errorf("failed update admin user: %+v", err)
return
}
utils.Log.Infof("admin user has been updated:")
utils.Log.Infof("username: %s", admin.Username)
utils.Log.Infof("password: %s", pwd)
DelAdminCacheOnline()
}
func init() {
RootCmd.AddCommand(AdminCmd)
AdminCmd.AddCommand(RandomPasswordCmd)
AdminCmd.AddCommand(SetPasswordCmd)
AdminCmd.AddCommand(ShowTokenCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
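
In short, the diff above replaces the old `password` command with an `admin` command family: `alist admin` prints the admin username and password hints, `alist admin random` resets the password to a random string, `alist admin set NEW_PASSWORD` sets a chosen one, and `alist admin token` prints the admin token. The password subcommands also clear the cached admin user online via `DelAdminCacheOnline`.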

@@ -15,6 +15,7 @@ var Cancel2FACmd = &cobra.Command{
Short: "Delete 2FA of admin user",
Run: func(cmd *cobra.Command, args []string) {
Init()
defer Release()
admin, err := op.GetAdmin()
if err != nil {
utils.Log.Errorf("failed to get admin user: %+v", err)
@@ -24,6 +25,7 @@ var Cancel2FACmd = &cobra.Command{
utils.Log.Errorf("failed to cancel 2FA: %+v", err)
} else {
utils.Log.Info("2FA canceled")
DelAdminCacheOnline()
}
}
},

@@ -7,6 +7,7 @@ import (
"github.com/alist-org/alist/v3/internal/bootstrap"
"github.com/alist-org/alist/v3/internal/bootstrap/data"
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)
@@ -19,6 +20,10 @@ func Init() {
bootstrap.InitIndex()
}
func Release() {
db.Close()
}
var pid = -1
var pidFile string

@@ -5,6 +5,8 @@ import (
"os"
"github.com/alist-org/alist/v3/cmd/flags"
_ "github.com/alist-org/alist/v3/drivers"
_ "github.com/alist-org/alist/v3/internal/offline_download"
"github.com/spf13/cobra"
)

@@ -2,6 +2,7 @@ package cmd
import (
"context"
"errors"
"fmt"
"net"
"net/http"
@@ -13,7 +14,6 @@ import (
"time"
"github.com/alist-org/alist/v3/cmd/flags"
_ "github.com/alist-org/alist/v3/drivers"
"github.com/alist-org/alist/v3/internal/bootstrap"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/pkg/utils"
@@ -35,9 +35,9 @@ the address is defined in config file`,
utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
}
bootstrap.InitAria2()
bootstrap.InitQbittorrent()
bootstrap.InitOfflineDownloadTools()
bootstrap.LoadStorages()
bootstrap.InitTaskManager()
if !flags.Debug && !flags.Dev {
gin.SetMode(gin.ReleaseMode)
}
@@ -51,7 +51,7 @@ the address is defined in config file`,
httpSrv = &http.Server{Addr: httpBase, Handler: r}
go func() {
err := httpSrv.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start http: %s", err.Error())
}
}()
@@ -62,7 +62,7 @@ the address is defined in config file`,
httpsSrv = &http.Server{Addr: httpsBase, Handler: r}
go func() {
err := httpsSrv.ListenAndServeTLS(conf.Conf.Scheme.CertFile, conf.Conf.Scheme.KeyFile)
if err != nil && err != http.ErrServerClosed {
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start https: %s", err.Error())
}
}()
@@ -86,7 +86,7 @@ the address is defined in config file`,
}
}
err = unixSrv.Serve(listener)
if err != nil && err != http.ErrServerClosed {
if err != nil && !errors.Is(err, http.ErrServerClosed) {
utils.Log.Fatalf("failed to start unix: %s", err.Error())
}
}()
@@ -100,7 +100,7 @@ the address is defined in config file`,
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
<-quit
utils.Log.Println("Shutdown server...")
Release()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
var wg sync.WaitGroup

@@ -4,8 +4,14 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd
import (
"os"
"strconv"
"github.com/alist-org/alist/v3/internal/db"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/charmbracelet/bubbles/table"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/spf13/cobra"
)
@@ -15,13 +21,17 @@ var storageCmd = &cobra.Command{
Short: "Manage storage",
}
func init() {
var mountPath string
var disable = &cobra.Command{
var disableStorageCmd = &cobra.Command{
Use: "disable",
Short: "Disable a storage",
Run: func(cmd *cobra.Command, args []string) {
if len(args) < 1 {
utils.Log.Errorf("mount path is required")
return
}
mountPath := args[0]
Init()
defer Release()
storage, err := db.GetStorageByMountPath(mountPath)
if err != nil {
utils.Log.Errorf("failed to query storage: %+v", err)
@ -36,10 +46,111 @@ func init() {
}
},
}
disable.Flags().StringVarP(&mountPath, "mount-path", "m", "", "The mountPath of storage")
RootCmd.AddCommand(storageCmd)
storageCmd.AddCommand(disable)
var baseStyle = lipgloss.NewStyle().
BorderStyle(lipgloss.NormalBorder()).
BorderForeground(lipgloss.Color("240"))
type model struct {
table table.Model
}
func (m model) Init() tea.Cmd { return nil }
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmd tea.Cmd
switch msg := msg.(type) {
case tea.KeyMsg:
switch msg.String() {
case "esc":
if m.table.Focused() {
m.table.Blur()
} else {
m.table.Focus()
}
case "q", "ctrl+c":
return m, tea.Quit
//case "enter":
// return m, tea.Batch(
// tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
// )
}
}
m.table, cmd = m.table.Update(msg)
return m, cmd
}
func (m model) View() string {
return baseStyle.Render(m.table.View()) + "\n"
}
var storageTableHeight int
var listStorageCmd = &cobra.Command{
Use: "list",
Short: "List all storages",
Run: func(cmd *cobra.Command, args []string) {
Init()
defer Release()
storages, _, err := db.GetStorages(1, -1)
if err != nil {
utils.Log.Errorf("failed to query storages: %+v", err)
} else {
utils.Log.Infof("Found %d storages", len(storages))
columns := []table.Column{
{Title: "ID", Width: 4},
{Title: "Driver", Width: 16},
{Title: "Mount Path", Width: 30},
{Title: "Enabled", Width: 7},
}
var rows []table.Row
for i := range storages {
storage := storages[i]
enabled := "true"
if storage.Disabled {
enabled = "false"
}
rows = append(rows, table.Row{
strconv.Itoa(int(storage.ID)),
storage.Driver,
storage.MountPath,
enabled,
})
}
t := table.New(
table.WithColumns(columns),
table.WithRows(rows),
table.WithFocused(true),
table.WithHeight(storageTableHeight),
)
s := table.DefaultStyles()
s.Header = s.Header.
BorderStyle(lipgloss.NormalBorder()).
BorderForeground(lipgloss.Color("240")).
BorderBottom(true).
Bold(false)
s.Selected = s.Selected.
Foreground(lipgloss.Color("229")).
Background(lipgloss.Color("57")).
Bold(false)
t.SetStyles(s)
m := model{t}
if _, err := tea.NewProgram(m).Run(); err != nil {
utils.Log.Errorf("failed to run program: %+v", err)
os.Exit(1)
}
}
},
}
func init() {
RootCmd.AddCommand(storageCmd)
storageCmd.AddCommand(disableStorageCmd)
storageCmd.AddCommand(listStorageCmd)
storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
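For reference, assuming the binary is invoked as alist (the project's usual entry point), the new subcommands above can be exercised as `alist storage list -H 15` to render the table fifteen rows tall, and `alist storage disable /some/mount/path` to disable a storage by its mount path; the mount path and height here are illustrative.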

52
cmd/user.go Normal file
View File

@ -0,0 +1,52 @@
package cmd
import (
"crypto/tls"
"fmt"
"time"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
)
func DelAdminCacheOnline() {
admin, err := op.GetAdmin()
if err != nil {
utils.Log.Errorf("[del_admin_cache] get admin error: %+v", err)
return
}
DelUserCacheOnline(admin.Username)
}
func DelUserCacheOnline(username string) {
client := resty.New().SetTimeout(1 * time.Second).SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
token := setting.GetStr(conf.Token)
port := conf.Conf.Scheme.HttpPort
u := fmt.Sprintf("http://localhost:%d/api/admin/user/del_cache", port)
if port == -1 {
if conf.Conf.Scheme.HttpsPort == -1 {
utils.Log.Warnf("[del_user_cache] no open port")
return
}
u = fmt.Sprintf("https://localhost:%d/api/admin/user/del_cache", conf.Conf.Scheme.HttpsPort)
}
res, err := client.R().SetHeader("Authorization", token).SetQueryParam("username", username).Post(u)
if err != nil {
utils.Log.Warnf("[del_user_cache_online] failed: %+v", err)
return
}
if res.StatusCode() != 200 {
utils.Log.Warnf("[del_user_cache_online] failed: %+v", res.String())
return
}
code := utils.Json.Get(res.Body(), "code").ToInt()
msg := utils.Json.Get(res.Body(), "message").ToString()
if code != 200 {
utils.Log.Errorf("[del_user_cache_online] error: %s", msg)
return
}
utils.Log.Debugf("[del_user_cache_online] del user [%s] cache success", username)
}
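Since the CLI runs in a separate process from the server, it cannot touch the server's in-memory user cache directly; DelUserCacheOnline instead asks the running instance over localhost to drop the entry. A minimal sketch of the intended call pattern, mirroring Cancel2FACmd above (illustrative, not an exhaustive command):

// inside a cobra Run func that mutates the admin account:
Init()
defer Release()
// ... change the password / cancel 2FA here ...
DelAdminCacheOnline() // tell the running server to evict its cached admin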

View File

@ -2,19 +2,22 @@ package _115
import (
"context"
"os"
"strings"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
"golang.org/x/time/rate"
)
type Pan115 struct {
model.Storage
Addition
client *driver115.Pan115Client
limiter *rate.Limiter
}
func (d *Pan115) Config() driver.Config {
@ -26,29 +29,42 @@ func (d *Pan115) GetAddition() driver.Additional {
}
func (d *Pan115) Init(ctx context.Context) error {
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
return d.login()
}
func (d *Pan115) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
func (d *Pan115) Drop(ctx context.Context) error {
return nil
}
func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
files, err := d.getFiles(dir.GetID())
if err != nil && !errors.Is(err, driver115.ErrNotExist) {
return nil, err
}
return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
return src, nil
return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
return &src, nil
})
}
func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
downloadInfo, err := d.client.
SetUserAgent(driver115.UA115Browser).
Download(file.(driver115.File).PickCode)
// recover for upload
d.client.SetUserAgent(driver115.UA115Desktop)
DownloadWithUA(file.(*FileObj).PickCode, driver115.UA115Browser)
if err != nil {
return nil, err
}
@ -60,6 +76,9 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
return err
}
@ -67,31 +86,99 @@ func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
}
func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
return d.client.Move(dstDir.GetID(), srcObj.GetID())
}
func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
return d.client.Rename(srcObj.GetID(), newName)
}
func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
return d.client.Copy(dstDir.GetID(), srcObj.GetID())
}
func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
if err := d.WaitLimit(ctx); err != nil {
return err
}
return d.client.Delete(obj.GetID())
}
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
if err := d.WaitLimit(ctx); err != nil {
return err
}
var (
fastInfo *driver115.UploadInitResp
dirID = dstDir.GetID()
)
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
return err
}
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
return driver115.ErrUploadTooLarge
}
//if digest, err = d.client.GetDigestResult(stream); err != nil {
// return err
//}
const PreHashSize int64 = 128 * utils.KB
hashSize := PreHashSize
if stream.GetSize() < PreHashSize {
hashSize = stream.GetSize()
}
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
preHash, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return err
}
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) <= 0 {
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return err
}
}
fullHash = strings.ToUpper(fullHash)
// rapid-upload
// note that 115 adds a timeout for rapid-upload,
// and a "sig invalid" error is thrown after the timeout even when the hash is correct.
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
return err
}
if matched, err := fastInfo.Ok(); err != nil {
return err
} else if matched {
return nil
}
// rapid upload failed, fall back to a real upload
if stream.GetSize() <= utils.KB { // files of 1KB or less use the normal upload mode
return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
}
// multipart upload
return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
}
var _ driver.Driver = (*Pan115)(nil)
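The Put path above hashes in two stages: a SHA-1 over only the first 128KB (the preHash 115 uses to reject obvious misses cheaply) and, when the full hash is not already attached to the stream, a SHA-1 over the entire file. A self-contained sketch of the same idea, with illustrative names rather than the driver's API:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

const preHashSize = 128 * 1024 // 128KB, as in the driver

// preAndFullSHA1 returns the uppercase SHA-1 of the first 128KB and of the whole file.
func preAndFullSHA1(f *os.File) (pre, full string, err error) {
	h := sha1.New()
	if _, err = io.CopyN(h, f, preHashSize); err != nil && err != io.EOF {
		return "", "", err
	}
	pre = strings.ToUpper(hex.EncodeToString(h.Sum(nil)))
	if _, err = f.Seek(0, io.SeekStart); err != nil {
		return "", "", err
	}
	h.Reset()
	if _, err = io.Copy(h, f); err != nil {
		return "", "", err
	}
	full = strings.ToUpper(hex.EncodeToString(h.Sum(nil)))
	return pre, full, nil
}

func main() {
	f, err := os.Open("testdata/sample.bin") // hypothetical input file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	fmt.Println(preAndFullSHA1(f))
}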

View File

@ -9,6 +9,7 @@ type Addition struct {
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
PageSize int64 `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
driver.RootID
}
@ -16,7 +17,7 @@ var config = driver.Config{
Name: "115 Cloud",
DefaultRoot: "0",
OnlyProxy: true,
OnlyLocal: true,
//OnlyLocal: true,
NoOverwriteUpload: true,
}

View File

@ -3,6 +3,20 @@ package _115
import (
"github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
var _ model.Obj = (*driver.File)(nil)
var _ model.Obj = (*FileObj)(nil)
type FileObj struct {
driver.File
}
func (f *FileObj) CreateTime() time.Time {
return f.File.CreateTime
}
func (f *FileObj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.Sha1)
}

View File

@ -1,25 +1,42 @@
package _115
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/orzogc/fake115uploader/cipher"
"io"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/drivers/base"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/pkg/errors"
)
var UserAgent = driver.UA115Desktop
var UserAgent = driver115.UA115Desktop
func (d *Pan115) login() error {
var err error
opts := []driver.Option{
driver.UA(UserAgent),
opts := []driver115.Option{
driver115.UA(UserAgent),
func(c *driver115.Pan115Client) {
c.Client.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify})
},
}
d.client = driver.New(opts...)
d.client.SetHttpClient(base.HttpClient)
cr := &driver.Credential{}
d.client = driver115.New(opts...)
cr := &driver115.Credential{}
if d.Addition.QRCodeToken != "" {
s := &driver.QRCodeSession{
s := &driver115.QRCodeSession{
UID: d.Addition.QRCodeToken,
}
if cr, err = d.client.QRCodeLogin(s); err != nil {
@ -38,17 +55,367 @@ func (d *Pan115) login() error {
return d.client.LoginCheck()
}
func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
res := make([]driver.File, 0)
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
res := make([]FileObj, 0)
if d.PageSize <= 0 {
d.PageSize = driver.FileListLimit
d.PageSize = driver115.FileListLimit
}
files, err := d.client.ListWithLimit(fileId, d.PageSize)
if err != nil {
return nil, err
}
for _, file := range *files {
res = append(res, file)
res = append(res, FileObj{file})
}
return res, nil
}
const (
appVer = "2.0.3.6"
)
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
var (
ecdhCipher *cipher.EcdhCipher
encrypted []byte
decrypted []byte
encodedToken string
err error
target = "U_1_" + dirID
bodyBytes []byte
result = driver115.UploadInitResp{}
fileSizeStr = strconv.FormatInt(fileSize, 10)
)
if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
return nil, err
}
userID := strconv.FormatInt(d.client.UserID, 10)
form := url.Values{}
form.Set("appid", "0")
form.Set("appversion", appVer)
form.Set("userid", userID)
form.Set("filename", fileName)
form.Set("filesize", fileSizeStr)
form.Set("fileid", fileID)
form.Set("target", target)
form.Set("sig", d.client.GenerateSignature(fileID, target))
signKey, signVal := "", ""
for retry := true; retry; {
t := driver115.Now()
if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
return nil, err
}
params := map[string]string{
"k_ec": encodedToken,
}
form.Set("t", t.String())
form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
if signKey != "" && signVal != "" {
form.Set("sign_key", signKey)
form.Set("sign_val", signVal)
}
if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
return nil, err
}
req := d.client.NewRequest().
SetQueryParams(params).
SetBody(encrypted).
SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
SetDoNotParseResponse(true)
resp, err := req.Post(driver115.ApiUploadInit)
if err != nil {
return nil, err
}
data := resp.RawBody()
defer data.Close()
if bodyBytes, err = io.ReadAll(data); err != nil {
return nil, err
}
if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
return nil, err
}
if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
return nil, err
}
if result.Status == 7 {
// Update signKey & signVal
signKey = result.SignKey
signVal, err = UploadDigestRange(stream, result.SignCheck)
if err != nil {
return nil, err
}
} else {
retry = false
}
result.SHA1 = fileID
}
return &result, nil
}
func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
var start, end int64
if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
return
}
length := end - start + 1
reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
if err != nil {
return "", err
}
hashStr, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return "", err
}
result = strings.ToUpper(hashStr)
return
}
// UploadByMultipart uploads by multipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
imur oss.InitiateMultipartUploadResult
ossClient *oss.Client
bucket *oss.Bucket
ossToken *driver115.UploadOSSTokenResp
err error
)
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
options := driver115.DefalutUploadMultipartOptions()
if len(opts) > 0 {
for _, f := range opts {
f(options)
}
}
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
}
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
return err
}
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
return err
}
// the ossToken expires after one hour, so refresh it every 50 minutes
ticker := time.NewTicker(options.TokenRefreshTime)
defer ticker.Stop()
// set the overall timeout
timeout := time.NewTimer(options.Timeout)
if chunks, err = SplitFile(fileSize); err != nil {
return err
}
if imur, err = bucket.InitiateMultipartUpload(params.Object,
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
oss.UserAgentHeader(driver115.OSSUserAgent),
); err != nil {
return err
}
wg := sync.WaitGroup{}
wg.Add(len(chunks))
chunksCh := make(chan oss.FileChunk)
errCh := make(chan error)
UploadedPartsCh := make(chan oss.UploadPart)
quit := make(chan struct{})
// producer
go chunksProducer(chunksCh, chunks)
go func() {
wg.Wait()
quit <- struct{}{}
}()
// consumers
for i := 0; i < options.ThreadsNum; i++ {
go func(threadId int) {
defer func() {
if r := recover(); r != nil {
errCh <- fmt.Errorf("recovered in %v", r)
}
}()
for chunk := range chunksCh {
var part oss.UploadPart // on error, keep retrying: 3 attempts in total
for retry := 0; retry < 3; retry++ {
select {
case <-ticker.C:
if ossToken, err = d.client.GetOSSToken(); err != nil { // refresh the ossToken when the ticker fires
errCh <- errors.Wrap(err, "error refreshing the ossToken")
}
default:
}
buf := make([]byte, chunk.Size)
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
continue
}
b := bytes.NewBuffer(buf)
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}
}
if err != nil {
errCh <- errors.Wrap(err, fmt.Sprintf("error uploading %s part %d: %v", stream.GetName(), chunk.Number, err))
}
UploadedPartsCh <- part
}
}(i)
}
go func() {
for part := range UploadedPartsCh {
parts = append(parts, part)
wg.Done()
}
}()
LOOP:
for {
select {
case <-ticker.C:
// refresh the ossToken when the ticker fires
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
}
case <-quit:
break LOOP
case <-errCh:
return err
case <-timeout.C:
return fmt.Errorf("time out")
}
}
// the EOF error comes from xml Unmarshal: the response is actually JSON, so the upload in fact succeeded
if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
// when the file name contains '&' or '<', parsing the response XML fails, but the upload actually succeeded
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
return err
}
}
return d.checkUploadStatus(dirID, params.SHA1)
}
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
for _, chunk := range chunks {
ch <- chunk
}
}
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
// verify that the upload succeeded
req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
opts := []driver115.GetFileOptions{
driver115.WithOrder(driver115.FileOrderByTime),
driver115.WithShowDirEnable(false),
driver115.WithAsc(false),
driver115.WithLimit(500),
}
fResp, err := driver115.GetFiles(req, dirID, opts...)
if err != nil {
return err
}
for _, fileInfo := range fResp.Files {
if fileInfo.Sha1 == sha1 {
return nil
}
}
return driver115.ErrUploadFailed
}
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
for i := int64(1); i < 10; i++ {
if fileSize < i*utils.GB { // files smaller than i GB are split into i*1000 parts
if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
return
}
break
}
}
if fileSize > 9*utils.GB { // files larger than 9GB are split into 10000 parts
if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
return
}
}
// a single part must not be smaller than 100KB
if chunks[0].Size < 100*utils.KB {
if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
return
}
}
return
}
// SplitFileByPartNum splits a big file into parts by the number of parts.
// Splits the file into the specified number of parts; returns the split result when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
if chunkNum <= 0 || chunkNum > 10000 {
return nil, errors.New("chunkNum invalid")
}
if int64(chunkNum) > fileSize {
return nil, errors.New("oss: chunkNum invalid")
}
var chunks []oss.FileChunk
var chunk = oss.FileChunk{}
var chunkN = (int64)(chunkNum)
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * (fileSize / chunkN)
if i == chunkN-1 {
chunk.Size = fileSize/chunkN + fileSize%chunkN
} else {
chunk.Size = fileSize / chunkN
}
chunks = append(chunks, chunk)
}
return chunks, nil
}
// SplitFileByPartSize splits a big file into parts by the size of the parts.
// Splits the file by the given part size; returns the FileChunk list when error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
if chunkSize <= 0 {
return nil, errors.New("chunkSize invalid")
}
var chunkN = fileSize / chunkSize
if chunkN >= 10000 {
return nil, errors.New("Too many parts, please increase part size")
}
var chunks []oss.FileChunk
var chunk = oss.FileChunk{}
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * chunkSize
chunk.Size = chunkSize
chunks = append(chunks, chunk)
}
if fileSize%chunkSize > 0 {
chunk.Number = len(chunks) + 1
chunk.Offset = int64(len(chunks)) * chunkSize
chunk.Size = fileSize % chunkSize
chunks = append(chunks, chunk)
}
return chunks, nil
}
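To make the thresholds concrete, a standalone sketch of the part-count policy implemented by SplitFile (simplified: it ignores the trailing remainder chunk and the 100KB floor):

package main

import "fmt"

const (
	KB int64 = 1 << 10
	GB int64 = 1 << 30
)

// partCount mirrors SplitFile's policy: files under i GB get i*1000 parts
// (i = 1..9), anything over 9GB gets 10000 parts.
func partCount(fileSize int64) int {
	for i := int64(1); i < 10; i++ {
		if fileSize < i*GB {
			return int(i * 1000)
		}
	}
	return 10000
}

func main() {
	for _, size := range []int64{GB / 2, 3 * GB, 20 * GB} {
		n := partCount(size)
		fmt.Printf("%d bytes -> %d parts of ~%d bytes\n", size, n, size/int64(n))
	}
}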

112
drivers/115_share/driver.go Normal file
View File

@ -0,0 +1,112 @@
package _115_share
import (
"context"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"golang.org/x/time/rate"
)
type Pan115Share struct {
model.Storage
Addition
client *driver115.Pan115Client
limiter *rate.Limiter
}
func (d *Pan115Share) Config() driver.Config {
return config
}
func (d *Pan115Share) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Pan115Share) Init(ctx context.Context) error {
if d.LimitRate > 0 {
d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
}
return d.login()
}
func (d *Pan115Share) WaitLimit(ctx context.Context) error {
if d.limiter != nil {
return d.limiter.Wait(ctx)
}
return nil
}
func (d *Pan115Share) Drop(ctx context.Context) error {
return nil
}
func (d *Pan115Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
files := make([]driver115.ShareFile, 0)
fileResp, err := d.client.GetShareSnap(d.ShareCode, d.ReceiveCode, dir.GetID(), driver115.QueryLimit(int(d.PageSize)))
if err != nil {
return nil, err
}
files = append(files, fileResp.Data.List...)
total := fileResp.Data.Count
count := len(fileResp.Data.List)
for total > count {
fileResp, err := d.client.GetShareSnap(
d.ShareCode, d.ReceiveCode, dir.GetID(),
driver115.QueryLimit(int(d.PageSize)), driver115.QueryOffset(count),
)
if err != nil {
return nil, err
}
files = append(files, fileResp.Data.List...)
count += len(fileResp.Data.List)
}
return utils.SliceConvert(files, transFunc)
}
func (d *Pan115Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
downloadInfo, err := d.client.DownloadByShareCode(d.ShareCode, d.ReceiveCode, file.GetID())
if err != nil {
return nil, err
}
return &model.Link{URL: downloadInfo.URL.URL}, nil
}
func (d *Pan115Share) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return errs.NotSupport
}
func (d *Pan115Share) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
return errs.NotSupport
}
func (d *Pan115Share) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
return errs.NotSupport
}
func (d *Pan115Share) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
return errs.NotSupport
}
func (d *Pan115Share) Remove(ctx context.Context, obj model.Obj) error {
return errs.NotSupport
}
func (d *Pan115Share) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
return errs.NotSupport
}
var _ driver.Driver = (*Pan115Share)(nil)

33
drivers/115_share/meta.go Normal file
View File

@ -0,0 +1,33 @@
package _115_share
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
Cookie string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
PageSize int64 `json:"page_size" type:"number" default:"20" help:"list api per page size of 115 driver"`
LimitRate float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
ShareCode string `json:"share_code" type:"text" required:"true" help:"share code of 115 share link"`
ReceiveCode string `json:"receive_code" type:"text" required:"true" help:"receive code of 115 share link"`
driver.RootID
}
var config = driver.Config{
Name: "115 Share",
DefaultRoot: "",
// OnlyProxy: true,
// OnlyLocal: true,
CheckStatus: false,
Alert: "",
NoOverwriteUpload: true,
NoUpload: true,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Pan115Share{}
})
}

111
drivers/115_share/utils.go Normal file
View File

@ -0,0 +1,111 @@
package _115_share
import (
"fmt"
"strconv"
"time"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
)
var _ model.Obj = (*FileObj)(nil)
type FileObj struct {
Size int64
Sha1 string
Utm time.Time
FileName string
isDir bool
FileID string
}
func (f *FileObj) CreateTime() time.Time {
return f.Utm
}
func (f *FileObj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.Sha1)
}
func (f *FileObj) GetSize() int64 {
return f.Size
}
func (f *FileObj) GetName() string {
return f.FileName
}
func (f *FileObj) ModTime() time.Time {
return f.Utm
}
func (f *FileObj) IsDir() bool {
return f.isDir
}
func (f *FileObj) GetID() string {
return f.FileID
}
func (f *FileObj) GetPath() string {
return ""
}
func transFunc(sf driver115.ShareFile) (model.Obj, error) {
timeInt, err := strconv.ParseInt(sf.UpdateTime, 10, 64)
if err != nil {
return nil, err
}
var (
utm = time.Unix(timeInt, 0)
isDir = (sf.IsFile == 0)
fileID = string(sf.FileID)
)
if isDir {
fileID = string(sf.CategoryID)
}
return &FileObj{
Size: int64(sf.Size),
Sha1: sf.Sha1,
Utm: utm,
FileName: string(sf.FileName),
isDir: isDir,
FileID: fileID,
}, nil
}
var UserAgent = driver115.UA115Browser
func (d *Pan115Share) login() error {
var err error
opts := []driver115.Option{
driver115.UA(UserAgent),
}
d.client = driver115.New(opts...)
if _, err := d.client.GetShareSnap(d.ShareCode, d.ReceiveCode, ""); err != nil {
return errors.Wrap(err, "failed to get share snap")
}
cr := &driver115.Credential{}
if d.QRCodeToken != "" {
s := &driver115.QRCodeSession{
UID: d.QRCodeToken,
}
if cr, err = d.client.QRCodeLogin(s); err != nil {
return errors.Wrap(err, "failed to login by qrcode")
}
d.Cookie = fmt.Sprintf("UID=%s;CID=%s;SEID=%s", cr.UID, cr.CID, cr.SEID)
d.QRCodeToken = ""
} else if d.Cookie != "" {
if err = cr.FromCookie(d.Cookie); err != nil {
return errors.Wrap(err, "failed to login by cookies")
}
d.client.ImportCredential(cr)
} else {
return errors.New("missing cookie or qrcode account")
}
return d.client.LoginCheck()
}

View File

@ -6,11 +6,6 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"os"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -22,6 +17,9 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"io"
"net/http"
"net/url"
)
type Pan123 struct {
@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
if _, err = io.Copy(h, tempFile); err != nil {
return err

View File

@ -1,6 +1,7 @@
package _123
import (
"github.com/alist-org/alist/v3/pkg/utils"
"net/url"
"path"
"strconv"
@ -21,6 +22,14 @@ type File struct {
DownloadUrl string `json:"DownloadUrl"`
}
func (f File) CreateTime() time.Time {
return f.UpdateAt
}
func (f File) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f File) GetPath() string {
return ""
}

View File

@ -107,7 +107,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
if err != nil {
return err
}
up(j * 100 / chunkCount)
up(float64(j) * 100 / float64(chunkCount))
}
}
// complete s3 upload

View File

@ -1,14 +1,10 @@
package _123
import (
"crypto/md5"
"errors"
"fmt"
"math/rand"
"net/http"
"net/url"
"strconv"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
@ -19,9 +15,10 @@ import (
// do others that are not defined in the Driver interface
const (
Api = "https://www.123pan.com/api"
AApi = "https://www.123pan.com/a/api"
BApi = "https://www.123pan.com/b/api"
MainApi = BApi
MainApi = Api
SignIn = MainApi + "/user/sign_in"
Logout = MainApi + "/user/logout"
UserInfo = MainApi + "/user/info"
@ -37,7 +34,7 @@ const (
S3Auth = MainApi + "/file/s3_upload_object/auth"
UploadCompleteV2 = MainApi + "/file/upload_complete/v2"
S3Complete = MainApi + "/file/s3_complete_multipart_upload"
AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)
func (d *Pan123) login() error {
@ -59,9 +56,10 @@ func (d *Pan123) login() error {
SetHeaders(map[string]string{
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"platform": "web",
"app-version": "3",
"user-agent": base.UserAgent,
"user-agent": "Dart/2.19(dart:io)",
"platform": "android",
"app-version": "36",
//"user-agent": base.UserAgent,
}).
SetBody(body).Post(SignIn)
if err != nil {
@ -75,19 +73,19 @@ func (d *Pan123) login() error {
return err
}
func authKey(reqUrl string) (*string, error) {
reqURL, err := url.Parse(reqUrl)
if err != nil {
return nil, err
}
nowUnix := time.Now().Unix()
random := rand.Intn(0x989680)
p4 := fmt.Sprintf("%d|%d|%s|%s|%s|%s", nowUnix, random, reqURL.Path, "web", "3", AuthKeySalt)
authKey := fmt.Sprintf("%d-%d-%x", nowUnix, random, md5.Sum([]byte(p4)))
return &authKey, nil
}
//func authKey(reqUrl string) (*string, error) {
// reqURL, err := url.Parse(reqUrl)
// if err != nil {
// return nil, err
// }
//
// nowUnix := time.Now().Unix()
// random := rand.Intn(0x989680)
//
// p4 := fmt.Sprintf("%d|%d|%s|%s|%s|%s", nowUnix, random, reqURL.Path, "web", "3", AuthKeySalt)
// authKey := fmt.Sprintf("%d-%d-%x", nowUnix, random, md5.Sum([]byte(p4)))
// return &authKey, nil
//}
func (d *Pan123) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
@ -95,9 +93,10 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"authorization": "Bearer " + d.AccessToken,
"platform": "web",
"app-version": "3",
"user-agent": base.UserAgent,
"user-agent": "Dart/2.19(dart:io)",
"platform": "android",
"app-version": "36",
//"user-agent": base.UserAgent,
})
if callback != nil {
callback(req)
@ -105,11 +104,11 @@ func (d *Pan123) request(url string, method string, callback base.ReqCallback, r
if resp != nil {
req.SetResult(resp)
}
authKey, err := authKey(url)
if err != nil {
return nil, err
}
req.SetQueryParam("auth-key", *authKey)
//authKey, err := authKey(url)
//if err != nil {
// return nil, err
//}
//req.SetQueryParam("auth-key", *authKey)
res, err := req.Execute(method, url)
if err != nil {
return nil, err

View File

@ -0,0 +1,77 @@
package _123Link
import (
"context"
stdpath "path"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
)
type Pan123Link struct {
model.Storage
Addition
root *Node
}
func (d *Pan123Link) Config() driver.Config {
return config
}
func (d *Pan123Link) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Pan123Link) Init(ctx context.Context) error {
node, err := BuildTree(d.OriginURLs)
if err != nil {
return err
}
node.calSize()
d.root = node
return nil
}
func (d *Pan123Link) Drop(ctx context.Context) error {
return nil
}
func (d *Pan123Link) Get(ctx context.Context, path string) (model.Obj, error) {
node := GetNodeFromRootByPath(d.root, path)
return nodeToObj(node, path)
}
func (d *Pan123Link) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
node := GetNodeFromRootByPath(d.root, dir.GetPath())
if node == nil {
return nil, errs.ObjectNotFound
}
if node.isFile() {
return nil, errs.NotFolder
}
return utils.SliceConvert(node.Children, func(node *Node) (model.Obj, error) {
return nodeToObj(node, stdpath.Join(dir.GetPath(), node.Name))
})
}
func (d *Pan123Link) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
node := GetNodeFromRootByPath(d.root, file.GetPath())
if node == nil {
return nil, errs.ObjectNotFound
}
if node.isFile() {
signUrl, err := SignURL(node.Url, d.PrivateKey, d.UID, time.Duration(d.ValidDuration)*time.Minute)
if err != nil {
return nil, err
}
return &model.Link{
URL: signUrl,
}, nil
}
return nil, errs.NotFile
}
var _ driver.Driver = (*Pan123Link)(nil)

23
drivers/123_link/meta.go Normal file
View File

@ -0,0 +1,23 @@
package _123Link
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
OriginURLs string `json:"origin_urls" type:"text" required:"true" default:"https://vip.123pan.com/29/folder/file.mp3" help:"structure:FolderName:\n [FileSize:][Modified:]Url"`
PrivateKey string `json:"private_key"`
UID uint64 `json:"uid" type:"number"`
ValidDuration int64 `json:"valid_duration" type:"number" default:"30" help:"minutes"`
}
var config = driver.Config{
Name: "123PanLink",
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Pan123Link{}
})
}

152
drivers/123_link/parse.go Normal file
View File

@ -0,0 +1,152 @@
package _123Link
import (
"fmt"
url2 "net/url"
stdpath "path"
"strconv"
"strings"
"time"
)
// build tree from text, text structure definition:
/**
* FolderName:
* [FileSize:][Modified:]Url
*/
/**
* For example:
* folder1:
* name1:url1
* url2
* folder2:
* url3
* url4
* url5
* folder3:
* url6
* url7
* url8
*/
// if there is no name, use the last segment of the url as the name
func BuildTree(text string) (*Node, error) {
lines := strings.Split(text, "\n")
var root = &Node{Level: -1, Name: "root"}
stack := []*Node{root}
for _, line := range lines {
// calculate indent
indent := 0
for i := 0; i < len(line); i++ {
if line[i] != ' ' {
break
}
indent++
}
// if indent is not a multiple of 2, it is an error
if indent%2 != 0 {
return nil, fmt.Errorf("the line '%s' is not a multiple of 2", line)
}
// calculate level
level := indent / 2
line = strings.TrimSpace(line[indent:])
// if the line is empty, skip
if line == "" {
continue
}
// if level isn't greater than the level of the top of the stack
// it is not the child of the top of the stack
for level <= stack[len(stack)-1].Level {
// pop the top of the stack
stack = stack[:len(stack)-1]
}
// if the line is a folder
if isFolder(line) {
// create a new node
node := &Node{
Level: level,
Name: strings.TrimSuffix(line, ":"),
}
// add the node to the top of the stack
stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
// push the node to the stack
stack = append(stack, node)
} else {
// if the line is a file
// create a new node
node, err := parseFileLine(line)
if err != nil {
return nil, err
}
node.Level = level
// add the node to the top of the stack
stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
}
}
return root, nil
}
func isFolder(line string) bool {
return strings.HasSuffix(line, ":")
}
// line definition:
// [FileSize:][Modified:]Url
func parseFileLine(line string) (*Node, error) {
// if there is no url, it is an error
if !strings.Contains(line, "http://") && !strings.Contains(line, "https://") {
return nil, fmt.Errorf("invalid line: %s, because url is required for file", line)
}
index := strings.Index(line, "http://")
if index == -1 {
index = strings.Index(line, "https://")
}
url := line[index:]
info := line[:index]
node := &Node{
Url: url,
}
name := stdpath.Base(url)
unescape, err := url2.PathUnescape(name)
if err == nil {
name = unescape
}
node.Name = name
if index > 0 {
if !strings.HasSuffix(info, ":") {
return nil, fmt.Errorf("invalid line: %s, because file info must end with ':'", line)
}
info = info[:len(info)-1]
if info == "" {
return nil, fmt.Errorf("invalid line: %s, because file name can't be empty", line)
}
infoParts := strings.Split(info, ":")
size, err := strconv.ParseInt(infoParts[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid line: %s, because file size must be an integer", line)
}
node.Size = size
if len(infoParts) > 1 {
modified, err := strconv.ParseInt(infoParts[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid line: %s, because file modified must be an unix timestamp", line)
}
node.Modified = modified
} else {
node.Modified = time.Now().Unix()
}
}
return node, nil
}
func splitPath(path string) []string {
if path == "/" {
return []string{"root"}
}
parts := strings.Split(path, "/")
parts[0] = "root"
return parts
}
func GetNodeFromRootByPath(root *Node, path string) *Node {
return root.getByPath(splitPath(path))
}
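For reference, a concrete origin_urls value that BuildTree accepts; two-space indentation defines nesting, a trailing colon marks a folder, and file lines follow [FileSize:][Modified:]Url (the sizes, timestamps, and URLs below are illustrative):

music:
  8388608:1700000000:https://vip.123pan.com/29/folder/song.mp3
  https://vip.123pan.com/29/folder/untitled.flac
docs:
  manuals:
    https://example.com/guide.pdf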

66
drivers/123_link/types.go Normal file
View File

@ -0,0 +1,66 @@
package _123Link
import (
"time"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
)
// Node is a node in the folder tree
type Node struct {
Url string
Name string
Level int
Modified int64
Size int64
Children []*Node
}
func (node *Node) getByPath(paths []string) *Node {
if len(paths) == 0 || node == nil {
return nil
}
if node.Name != paths[0] {
return nil
}
if len(paths) == 1 {
return node
}
for _, child := range node.Children {
tmp := child.getByPath(paths[1:])
if tmp != nil {
return tmp
}
}
return nil
}
func (node *Node) isFile() bool {
return node.Url != ""
}
func (node *Node) calSize() int64 {
if node.isFile() {
return node.Size
}
var size int64 = 0
for _, child := range node.Children {
size += child.calSize()
}
node.Size = size
return size
}
func nodeToObj(node *Node, path string) (model.Obj, error) {
if node == nil {
return nil, errs.ObjectNotFound
}
return &model.Object{
Name: node.Name,
Size: node.Size,
Modified: time.Unix(node.Modified, 0),
IsFolder: !node.isFile(),
Path: path,
}, nil
}

30
drivers/123_link/util.go Normal file
View File

@ -0,0 +1,30 @@
package _123Link
import (
"crypto/md5"
"fmt"
"math/rand"
"net/url"
"time"
)
func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
if privateKey == "" {
return originURL, nil
}
var (
ts = time.Now().Add(validDuration).Unix() // expiration timestamp
rInt = rand.Int() // random positive integer
objURL *url.URL
)
objURL, err = url.Parse(originURL)
if err != nil {
return "", err
}
authKey := fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
objURL.Path, ts, rInt, uid, privateKey))))
v := objURL.Query()
v.Add("auth_key", authKey)
objURL.RawQuery = v.Encode()
return objURL.String(), nil
}
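A self-contained sketch of the same signing scheme, useful for sanity-checking generated links (the key and UID are made up):

package main

import (
	"crypto/md5"
	"fmt"
	"math/rand"
	"net/url"
	"time"
)

func main() {
	const privateKey = "example-key" // hypothetical
	const uid uint64 = 29
	origin := "https://vip.123pan.com/29/folder/file.mp3"

	ts := time.Now().Add(30 * time.Minute).Unix()
	rInt := rand.Int()
	u, err := url.Parse(origin)
	if err != nil {
		panic(err)
	}
	sum := md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s", u.Path, ts, rInt, uid, privateKey)))
	authKey := fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, sum)
	q := u.Query()
	q.Add("auth_key", authKey)
	u.RawQuery = q.Encode()
	fmt.Println(u.String()) // ...file.mp3?auth_key=<ts>-<rand>-<uid>-<md5hex>
}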

149
drivers/123_share/driver.go Normal file
View File

@ -0,0 +1,149 @@
package _123Share
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
type Pan123Share struct {
model.Storage
Addition
}
func (d *Pan123Share) Config() driver.Config {
return config
}
func (d *Pan123Share) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Pan123Share) Init(ctx context.Context) error {
// TODO login / refresh token
//op.MustSaveDriverStorage(d)
return nil
}
func (d *Pan123Share) Drop(ctx context.Context) error {
return nil
}
func (d *Pan123Share) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
// TODO return the files list, required
files, err := d.getFiles(dir.GetID())
if err != nil {
return nil, err
}
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
return src, nil
})
}
func (d *Pan123Share) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
// TODO return link of file, required
if f, ok := file.(File); ok {
//var resp DownResp
var headers map[string]string
if !utils.IsLocalIPAddr(args.IP) {
headers = map[string]string{
//"X-Real-IP": "1.1.1.1",
"X-Forwarded-For": args.IP,
}
}
data := base.Json{
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
"etag": f.Etag,
"fileId": f.FileId,
"s3keyFlag": f.S3KeyFlag,
"size": f.Size,
}
resp, err := d.request(DownloadInfo, http.MethodPost, func(req *resty.Request) {
req.SetBody(data).SetHeaders(headers)
}, nil)
if err != nil {
return nil, err
}
downloadUrl := utils.Json.Get(resp, "data", "DownloadURL").ToString()
u, err := url.Parse(downloadUrl)
if err != nil {
return nil, err
}
nu := u.Query().Get("params")
if nu != "" {
du, _ := base64.StdEncoding.DecodeString(nu)
u, err = url.Parse(string(du))
if err != nil {
return nil, err
}
}
u_ := u.String()
log.Debug("download url: ", u_)
res, err := base.NoRedirectClient.R().SetHeader("Referer", "https://www.123pan.com/").Get(u_)
if err != nil {
return nil, err
}
log.Debug(res.String())
link := model.Link{
URL: u_,
}
log.Debugln("res code: ", res.StatusCode())
if res.StatusCode() == 302 {
link.URL = res.Header().Get("location")
} else if res.StatusCode() < 300 {
link.URL = utils.Json.Get(res.Body(), "data", "redirect_url").ToString()
}
link.Header = http.Header{
"Referer": []string{"https://www.123pan.com/"},
}
return &link, nil
}
return nil, fmt.Errorf("can't convert obj")
}
func (d *Pan123Share) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
// TODO create folder, optional
return errs.NotSupport
}
func (d *Pan123Share) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
// TODO move obj, optional
return errs.NotSupport
}
func (d *Pan123Share) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
// TODO rename obj, optional
return errs.NotSupport
}
func (d *Pan123Share) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// TODO copy obj, optional
return errs.NotSupport
}
func (d *Pan123Share) Remove(ctx context.Context, obj model.Obj) error {
// TODO remove obj, optional
return errs.NotSupport
}
func (d *Pan123Share) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
// TODO upload file, optional
return errs.NotSupport
}
//func (d *Pan123Share) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Pan123Share)(nil)

34
drivers/123_share/meta.go Normal file
View File

@ -0,0 +1,34 @@
package _123Share
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
ShareKey string `json:"sharekey" required:"true"`
SharePwd string `json:"sharepassword" required:"true"`
driver.RootID
OrderBy string `json:"order_by" type:"select" options:"file_name,size,update_at" default:"file_name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
}
var config = driver.Config{
Name: "123PanShare",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Pan123Share{}
})
}

View File

@ -0,0 +1,99 @@
package _123Share
import (
"github.com/alist-org/alist/v3/pkg/utils"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type File struct {
FileName string `json:"FileName"`
Size int64 `json:"Size"`
UpdateAt time.Time `json:"UpdateAt"`
FileId int64 `json:"FileId"`
Type int `json:"Type"`
Etag string `json:"Etag"`
S3KeyFlag string `json:"S3KeyFlag"`
DownloadUrl string `json:"DownloadUrl"`
}
func (f File) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f File) GetPath() string {
return ""
}
func (f File) GetSize() int64 {
return f.Size
}
func (f File) GetName() string {
return f.FileName
}
func (f File) ModTime() time.Time {
return f.UpdateAt
}
func (f File) CreateTime() time.Time {
return f.UpdateAt
}
func (f File) IsDir() bool {
return f.Type == 1
}
func (f File) GetID() string {
return strconv.FormatInt(f.FileId, 10)
}
func (f File) Thumb() string {
if f.DownloadUrl == "" {
return ""
}
du, err := url.Parse(f.DownloadUrl)
if err != nil {
return ""
}
du.Path = strings.TrimSuffix(du.Path, "_24_24") + "_70_70"
query := du.Query()
query.Set("w", "70")
query.Set("h", "70")
if !query.Has("type") {
query.Set("type", strings.TrimPrefix(path.Base(f.FileName), "."))
}
if !query.Has("trade_key") {
query.Set("trade_key", "123pan-thumbnail")
}
du.RawQuery = query.Encode()
return du.String()
}
var _ model.Obj = (*File)(nil)
var _ model.Thumb = (*File)(nil)
//func (f File) Thumb() string {
//
//}
//var _ model.Thumb = (*File)(nil)
type Files struct {
//BaseResp
Data struct {
InfoList []File `json:"InfoList"`
Next string `json:"Next"`
} `json:"data"`
}
//type DownResp struct {
// //BaseResp
// Data struct {
// DownloadUrl string `json:"DownloadUrl"`
// } `json:"data"`
//}

81
drivers/123_share/util.go Normal file
View File

@ -0,0 +1,81 @@
package _123Share
import (
"errors"
"net/http"
"strconv"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
jsoniter "github.com/json-iterator/go"
)
const (
Api = "https://www.123pan.com/api"
AApi = "https://www.123pan.com/a/api"
BApi = "https://www.123pan.com/b/api"
MainApi = Api
FileList = MainApi + "/share/get"
DownloadInfo = MainApi + "/share/download/info"
//AuthKeySalt = "8-8D$sL8gPjom7bk#cY"
)
func (d *Pan123Share) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"origin": "https://www.123pan.com",
"referer": "https://www.123pan.com/",
"user-agent": "Dart/2.19(dart:io)",
"platform": "android",
"app-version": "36",
})
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
res, err := req.Execute(method, url)
if err != nil {
return nil, err
}
body := res.Body()
code := utils.Json.Get(body, "code").ToInt()
if code != 0 {
return nil, errors.New(jsoniter.Get(body, "message").ToString())
}
return body, nil
}
func (d *Pan123Share) getFiles(parentId string) ([]File, error) {
page := 1
res := make([]File, 0)
for {
var resp Files
query := map[string]string{
"limit": "100",
"next": "0",
"orderBy": d.OrderBy,
"orderDirection": d.OrderDirection,
"parentFileId": parentId,
"Page": strconv.Itoa(page),
"shareKey": d.ShareKey,
"SharePwd": d.SharePwd,
}
_, err := d.request(FileList, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return nil, err
}
page++
res = append(res, resp.Data.InfoList...)
if len(resp.Data.InfoList) == 0 || resp.Data.Next == "-1" {
break
}
}
return res, nil
}
// do others that are not defined in the Driver interface

View File

@ -103,9 +103,9 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
return err
}
func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.isFamily() {
return errs.NotImplement
return nil, errs.NotImplement
}
var contentInfoList []string
var catalogInfoList []string
@ -131,7 +131,10 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
pathname := "/orchestration/personalCloud/batchOprTask/v1.0/createBatchOprTask"
_, err := d.post(pathname, data, nil)
return err
if err != nil {
return nil, err
}
return srcObj, nil
}
func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
@ -300,6 +303,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
var partSize = getPartSize(stream.GetSize())
part := (stream.GetSize() + partSize - 1) / partSize
if part == 0 {
part = 1
}
for i := int64(0); i < part; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
@ -331,13 +337,11 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
if err != nil {
return err
}
_ = res.Body.Close()
log.Debugf("%+v", res)
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", res.StatusCode)
}
res.Body.Close()
}
return nil
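The part arithmetic above is a ceiling division, and the new guard covers empty files; a minimal illustration:

// illustrative helper, not part of the driver
func partsFor(size, partSize int64) int64 {
	part := (size + partSize - 1) / partSize // 10MiB with 4MiB parts -> 3
	if part == 0 {
		part = 1 // an empty file still uploads one (empty) part
	}
	return part
}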

View File

@ -10,7 +10,7 @@ type Catalog struct {
CatalogID string `json:"catalogID"`
CatalogName string `json:"catalogName"`
//CatalogType int `json:"catalogType"`
//CreateTime string `json:"createTime"`
CreateTime string `json:"createTime"`
UpdateTime string `json:"updateTime"`
//IsShared bool `json:"isShared"`
//CatalogLevel int `json:"catalogLevel"`
@ -63,7 +63,7 @@ type Content struct {
//ParentCatalogID string `json:"parentCatalogId"`
//Channel string `json:"channel"`
//GeoLocFlag string `json:"geoLocFlag"`
//Digest string `json:"digest"`
Digest string `json:"digest"`
//Version string `json:"version"`
//FileEtag string `json:"fileEtag"`
//FileVersion string `json:"fileVersion"`
@ -141,7 +141,7 @@ type CloudContent struct {
//ContentSuffix string `json:"contentSuffix"`
ContentSize int64 `json:"contentSize"`
//ContentDesc string `json:"contentDesc"`
//CreateTime string `json:"createTime"`
CreateTime string `json:"createTime"`
//Shottime interface{} `json:"shottime"`
LastUpdateTime string `json:"lastUpdateTime"`
ThumbnailURL string `json:"thumbnailURL"`
@ -165,7 +165,7 @@ type CloudCatalog struct {
CatalogID string `json:"catalogID"`
CatalogName string `json:"catalogName"`
//CloudID string `json:"cloudID"`
//CreateTime string `json:"createTime"`
CreateTime string `json:"createTime"`
LastUpdateTime string `json:"lastUpdateTime"`
//Creator string `json:"creator"`
//CreatorNickname string `json:"creatorNickname"`

View File

@ -48,7 +48,7 @@ func calSign(body, ts, randStr string) string {
}
func getTime(t string) time.Time {
stamp, _ := time.ParseInLocation("20060102150405", t, time.Local)
stamp, _ := time.ParseInLocation("20060102150405", t, utils.CNLoc)
return stamp
}
@ -139,6 +139,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
Name: catalog.CatalogName,
Size: 0,
Modified: getTime(catalog.UpdateTime),
Ctime: getTime(catalog.CreateTime),
IsFolder: true,
}
files = append(files, &f)
@ -150,6 +151,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
Name: content.ContentName,
Size: content.ContentSize,
Modified: getTime(content.UpdateTime),
HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
},
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL,
@ -202,6 +204,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
Size: 0,
IsFolder: true,
Modified: getTime(catalog.LastUpdateTime),
Ctime: getTime(catalog.CreateTime),
}
files = append(files, &f)
}
@ -212,6 +215,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
Name: content.ContentName,
Size: content.ContentSize,
Modified: getTime(content.LastUpdateTime),
Ctime: getTime(content.CreateTime),
},
Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
//Thumbnail: content.BigthumbnailURL,

View File

@ -380,7 +380,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
if err != nil {
return err
}
up(int(i * 100 / count))
up(float64(i) * 100 / float64(count))
}
fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
sliceMd5 := fileMd5

View File

@ -3,10 +3,13 @@ package _189pc
import (
"context"
"net/http"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
@ -22,10 +25,17 @@ type Cloud189PC struct {
loginParam *LoginParam
tokenInfo *AppSessionResp
uploadThread int
storageConfig driver.Config
}
func (y *Cloud189PC) Config() driver.Config {
return config
if y.storageConfig.Name == "" {
y.storageConfig = config
}
return y.storageConfig
}
func (y *Cloud189PC) GetAddition() driver.Additional {
@ -33,6 +43,9 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
}
func (y *Cloud189PC) Init(ctx context.Context) (err error) {
// compatibility with the old upload endpoint
y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")
// handle personal-cloud and family-cloud parameters
if y.isFamily() && y.RootFolderID == "-11" {
y.RootFolderID = ""
@ -42,6 +55,12 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
y.FamilyID = ""
}
// limit the number of upload threads
y.uploadThread, _ = strconv.Atoi(y.UploadThread)
if y.uploadThread < 1 || y.uploadThread > 32 {
y.uploadThread, y.UploadThread = 3, "3"
}
// initialize the request client
if y.client == nil {
y.client = base.NewRestyClient().SetHeaders(map[string]string{
@ -107,10 +126,11 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
// follow the redirect to get the real link
downloadUrl.URL = strings.Replace(strings.ReplaceAll(downloadUrl.URL, "&amp;", "&"), "http://", "https://", 1)
res, err := base.NoRedirectClient.R().SetContext(ctx).Get(downloadUrl.URL)
res, err := base.NoRedirectClient.R().SetContext(ctx).SetDoNotParseResponse(true).Get(downloadUrl.URL)
if err != nil {
return nil, err
}
defer res.RawBody().Close()
if res.StatusCode() == 302 {
downloadUrl.URL = res.Header().Get("location")
}
@ -135,13 +155,14 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr
return like, nil
}
func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
fullUrl := API_URL
if y.isFamily() {
fullUrl += "/family/file"
}
fullUrl += "/createFolder.action"
var newFolder Cloud189Folder
_, err := y.post(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
req.SetQueryParams(map[string]string{
@ -158,11 +179,15 @@ func (y *Cloud189PC) MakeDir(ctx context.Context, parentDir model.Obj, dirName s
"parentFolderId": parentDir.GetID(),
})
}
}, nil)
return err
}, &newFolder)
if err != nil {
return nil, err
}
return &newFolder, nil
}
func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
@ -182,11 +207,17 @@ func (y *Cloud189PC) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
"familyId": y.FamilyID,
})
}
}, nil)
return err
}, &resp)
if err != nil {
return nil, err
}
if err = y.WaitBatchTask("MOVE", resp.TaskID, time.Millisecond*400); err != nil {
return nil, err
}
return srcObj, nil
}
func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
queryParam := make(map[string]string)
fullUrl := API_URL
method := http.MethodPost
@ -195,23 +226,34 @@ func (y *Cloud189PC) Rename(ctx context.Context, srcObj model.Obj, newName strin
method = http.MethodGet
queryParam["familyId"] = y.FamilyID
}
if srcObj.IsDir() {
fullUrl += "/renameFolder.action"
queryParam["folderId"] = srcObj.GetID()
queryParam["destFolderName"] = newName
} else {
var newObj model.Obj
switch f := srcObj.(type) {
case *Cloud189File:
fullUrl += "/renameFile.action"
queryParam["fileId"] = srcObj.GetID()
queryParam["destFileName"] = newName
newObj = &Cloud189File{Icon: f.Icon} // 复用预览
case *Cloud189Folder:
fullUrl += "/renameFolder.action"
queryParam["folderId"] = srcObj.GetID()
queryParam["destFolderName"] = newName
newObj = &Cloud189Folder{}
default:
return nil, errs.NotSupport
}
_, err := y.request(fullUrl, method, func(req *resty.Request) {
req.SetContext(ctx)
req.SetQueryParams(queryParam)
}, nil, nil)
return err
req.SetContext(ctx).SetQueryParams(queryParam)
}, nil, newObj)
if err != nil {
return nil, err
}
return newObj, nil
}
func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
@ -232,11 +274,15 @@ func (y *Cloud189PC) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
"familyId": y.FamilyID,
})
}
}, nil)
}, &resp)
if err != nil {
return err
}
return y.WaitBatchTask("COPY", resp.TaskID, time.Second)
}
func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
var resp CreateBatchTaskResp
_, err := y.post(API_URL+"/batch/createBatchTask.action", func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
@ -256,19 +302,33 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
"familyId": y.FamilyID,
})
}
}, nil)
}, &resp)
if err != nil {
return err
}
// Batch tasks are rate-limited; polling too fast can make deletion fail
return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200)
}
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// The rapid-upload probe is slow to respond; enabled only on demand
if y.Addition.RapidUpload {
if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
return newObj, nil
}
}
func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
switch y.UploadMethod {
case "stream":
return y.CommonUpload(ctx, dstDir, stream, up)
case "old":
return y.OldUpload(ctx, dstDir, stream, up)
case "rapid":
return y.FastUpload(ctx, dstDir, stream, up)
case "stream":
if stream.GetSize() == 0 {
return y.FastUpload(ctx, dstDir, stream, up)
}
fallthrough
default:
return y.CommonUpload(ctx, dstDir, stream, up)
return y.StreamUpload(ctx, dstDir, stream, up)
}
}

View File

@ -10,6 +10,7 @@ import (
"crypto/x509"
"encoding/hex"
"encoding/pem"
"encoding/xml"
"fmt"
"math"
"net/http"
@ -83,6 +84,55 @@ func MustParseTime(str string) *time.Time {
return &lastOpTime
}
type Time time.Time
func (t *Time) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
func (t *Time) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
b, err := e.Token()
if err != nil {
return err
}
if b, ok := b.(xml.CharData); ok {
if err = t.Unmarshal(b); err != nil {
return err
}
}
return e.Skip()
}
func (t *Time) Unmarshal(b []byte) error {
bs := strings.Trim(string(b), "\"")
var v time.Time
var err error
for _, f := range []string{"2006-01-02 15:04:05 -07", "Jan 2, 2006 15:04:05 PM -07"} {
v, err = time.ParseInLocation(f, bs+" +08", time.Local)
if err == nil {
break
}
}
*t = Time(v)
return err
}
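The two layouts above cover the JSON and XML timestamp shapes returned by the 189 API, with the server's fixed +08 offset appended before parsing. A standalone sketch of the same fallback loop:

package main

import (
	"fmt"
	"time"
)

// parse189Time mirrors Time.Unmarshal above: try each layout with the
// server's implicit +08 offset appended until one parses.
func parse189Time(s string) (time.Time, error) {
	var v time.Time
	var err error
	for _, f := range []string{"2006-01-02 15:04:05 -07", "Jan 2, 2006 15:04:05 PM -07"} {
		v, err = time.ParseInLocation(f, s+" +08", time.Local)
		if err == nil {
			break
		}
	}
	return v, err
}

func main() {
	t, err := parse189Time("2023-11-24 19:22:19")
	fmt.Println(t, err) // 2023-11-24 19:22:19 +0800 +0800 <nil>
}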
type String string
func (t *String) UnmarshalJSON(b []byte) error { return t.Unmarshal(b) }
func (t *String) UnmarshalXML(e *xml.Decoder, ee xml.StartElement) error {
b, err := e.Token()
if err != nil {
return err
}
if b, ok := b.(xml.CharData); ok {
if err = t.Unmarshal(b); err != nil {
return err
}
}
return e.Skip()
}
func (s *String) Unmarshal(b []byte) error {
*s = String(bytes.Trim(b, "\""))
return nil
}
func toFamilyOrderBy(o string) string {
switch o {
case "filename":
@ -110,9 +160,8 @@ func toDesc(o string) string {
func ParseHttpHeader(str string) map[string]string {
header := make(map[string]string)
for _, value := range strings.Split(str, "&") {
i := strings.Index(value, "=")
if i > 0 {
header[strings.TrimSpace(value[0:i])] = strings.TrimSpace(value[i+1:])
if k, v, found := strings.Cut(value, "="); found {
header[k] = v
}
}
return header
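For reference, the strings.Cut rewrite behaves like this (hypothetical header string; note it no longer trims whitespace around keys, unlike the loop it replaces):

package main

import (
	"fmt"
	"strings"
)

// Standalone copy of ParseHttpHeader above: the upload API hands back
// the required request headers as a single "k=v&k=v" string.
func parseHttpHeader(str string) map[string]string {
	header := make(map[string]string)
	for _, value := range strings.Split(str, "&") {
		if k, v, found := strings.Cut(value, "="); found {
			header[k] = v
		}
	}
	return header
}

func main() {
	h := parseHttpHeader("Content-Type=application/octet-stream&x-request-id=abc123")
	fmt.Println(h["Content-Type"], h["x-request-id"])
}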
@ -122,10 +171,6 @@ func MustString(str string, err error) string {
return str
}
func MustToBytes(b []byte, err error) []byte {
return b
}
func BoolToNumber(b bool) int {
if b {
return 1

View File

@ -15,6 +15,8 @@ type Addition struct {
Type string `json:"type" type:"select" options:"personal,family" default:"personal"`
FamilyID string `json:"family_id"`
UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
RapidUpload bool `json:"rapid_upload"`
NoUseOcr bool `json:"no_use_ocr"`
}

View File

@ -3,6 +3,7 @@ package _189pc
import (
"encoding/xml"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"sort"
"strings"
"time"
@ -151,8 +152,13 @@ type FamilyInfoResp struct {
/* File section */
// File
type Cloud189File struct {
CreateDate string `json:"createDate"`
FileCata int64 `json:"fileCata"`
ID String `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
Md5 string `json:"md5"`
LastOpTime Time `json:"lastOpTime"`
CreateDate Time `json:"createDate"`
Icon struct {
//iconOption 5
SmallUrl string `json:"smallUrl"`
@ -162,61 +168,59 @@ type Cloud189File struct {
Max600 string `json:"max600"`
MediumURL string `json:"mediumUrl"`
} `json:"icon"`
ID int64 `json:"id"`
LastOpTime string `json:"lastOpTime"`
Md5 string `json:"md5"`
MediaType int `json:"mediaType"`
Name string `json:"name"`
Orientation int64 `json:"orientation"`
Rev string `json:"rev"`
Size int64 `json:"size"`
StarLabel int64 `json:"starLabel"`
parseTime *time.Time
// Orientation int64 `json:"orientation"`
// FileCata int64 `json:"fileCata"`
// MediaType int `json:"mediaType"`
// Rev string `json:"rev"`
// StarLabel int64 `json:"starLabel"`
}
func (c *Cloud189File) CreateTime() time.Time {
return time.Time(c.CreateDate)
}
func (c *Cloud189File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, c.Md5)
}
func (c *Cloud189File) GetSize() int64 { return c.Size }
func (c *Cloud189File) GetName() string { return c.Name }
func (c *Cloud189File) ModTime() time.Time {
if c.parseTime == nil {
c.parseTime = MustParseTime(c.LastOpTime)
}
return *c.parseTime
}
func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
func (c *Cloud189File) IsDir() bool { return false }
func (c *Cloud189File) GetID() string { return fmt.Sprint(c.ID) }
func (c *Cloud189File) GetID() string { return string(c.ID) }
func (c *Cloud189File) GetPath() string { return "" }
func (c *Cloud189File) Thumb() string { return c.Icon.SmallUrl }
// Folder
type Cloud189Folder struct {
ID int64 `json:"id"`
ID String `json:"id"`
ParentID int64 `json:"parentId"`
Name string `json:"name"`
FileCata int64 `json:"fileCata"`
FileCount int64 `json:"fileCount"`
LastOpTime Time `json:"lastOpTime"`
CreateDate Time `json:"createDate"`
LastOpTime string `json:"lastOpTime"`
CreateDate string `json:"createDate"`
// FileListSize int64 `json:"fileListSize"`
// FileCount int64 `json:"fileCount"`
// FileCata int64 `json:"fileCata"`
// Rev string `json:"rev"`
// StarLabel int64 `json:"starLabel"`
}
FileListSize int64 `json:"fileListSize"`
Rev string `json:"rev"`
StarLabel int64 `json:"starLabel"`
func (c *Cloud189Folder) CreateTime() time.Time {
return time.Time(c.CreateDate)
}
parseTime *time.Time
func (c *Cloud189Folder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (c *Cloud189Folder) GetSize() int64 { return 0 }
func (c *Cloud189Folder) GetName() string { return c.Name }
func (c *Cloud189Folder) ModTime() time.Time {
if c.parseTime == nil {
c.parseTime = MustParseTime(c.LastOpTime)
}
return *c.parseTime
}
func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }
func (c *Cloud189Folder) IsDir() bool { return true }
func (c *Cloud189Folder) GetID() string { return fmt.Sprint(c.ID) }
func (c *Cloud189Folder) GetID() string { return string(c.ID) }
func (c *Cloud189Folder) GetPath() string { return "" }
type Cloud189FilesResp struct {
@ -253,13 +257,24 @@ type InitMultiUploadResp struct {
}
type UploadUrlsResp struct {
Code string `json:"code"`
UploadUrls map[string]Part `json:"uploadUrls"`
Data map[string]UploadUrlsData `json:"uploadUrls"`
}
type Part struct {
type UploadUrlsData struct {
RequestURL string `json:"requestURL"`
RequestHeader string `json:"requestHeader"`
}
type UploadUrlInfo struct {
PartNumber int
Headers map[string]string
UploadUrlsData
}
type UploadProgress struct {
UploadInfo InitMultiUploadResp
UploadParts []string
}
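UploadProgress is what gets persisted for resumable uploads: FastUpload below blanks each part descriptor once it is uploaded and, on cancellation, compacts the remainder with utils.SliceFilter before saving. A standalone sketch of that compaction step:

package main

import "fmt"

// remaining keeps only the parts that are still pending; finished
// parts have been blanked out, as in the FastUpload cancel path.
func remaining(parts []string) []string {
	out := make([]string, 0, len(parts))
	for _, p := range parts {
		if p != "" {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	parts := []string{"", "2-abc=", "", "4-def="} // parts 1 and 3 already uploaded
	fmt.Println(remaining(parts))                 // [2-abc= 4-def=]
}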
/* Second (legacy) upload method */
type CreateUploadFileResp struct {
// Upload request ID
@ -284,15 +299,60 @@ func (r *GetUploadFileStatusResp) GetSize() int64 {
return r.DataSize + r.Size
}
type CommitUploadFileResp struct {
type CommitMultiUploadFileResp struct {
File struct {
UserFileID String `json:"userFileId"`
FileName string `json:"fileName"`
FileSize int64 `json:"fileSize"`
FileMd5 string `json:"fileMd5"`
CreateDate Time `json:"createDate"`
} `json:"file"`
}
func (f *CommitMultiUploadFileResp) toFile() *Cloud189File {
return &Cloud189File{
ID: f.File.UserFileID,
Name: f.File.FileName,
Size: f.File.FileSize,
Md5: f.File.FileMd5,
LastOpTime: f.File.CreateDate,
CreateDate: f.File.CreateDate,
}
}
type OldCommitUploadFileResp struct {
XMLName xml.Name `xml:"file"`
Id string `xml:"id"`
ID String `xml:"id"`
Name string `xml:"name"`
Size string `xml:"size"`
Size int64 `xml:"size"`
Md5 string `xml:"md5"`
CreateDate string `xml:"createDate"`
Rev string `xml:"rev"`
UserId string `xml:"userId"`
CreateDate Time `xml:"createDate"`
}
func (f *OldCommitUploadFileResp) toFile() *Cloud189File {
return &Cloud189File{
ID: f.ID,
Name: f.Name,
Size: f.Size,
Md5: f.Md5,
CreateDate: f.CreateDate,
LastOpTime: f.CreateDate,
}
}
type CreateBatchTaskResp struct {
TaskID string `json:"taskId"`
}
type BatchTaskStateResp struct {
FailedCount int `json:"failedCount"`
Process int `json:"process"`
SkipCount int `json:"skipCount"`
SubTaskCount int `json:"subTaskCount"`
SuccessedCount int `json:"successedCount"`
SuccessedFileIDList []int64 `json:"successedFileIdList"`
TaskID string `json:"taskId"`
TaskStatus int `json:"taskStatus"` // 1 initializing, 2 conflict, 3 running, 4 done
}
/* Encrypted query parameters */

View File

@ -13,8 +13,9 @@ import (
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"regexp"
"sort"
"strconv"
"strings"
"time"
@ -24,6 +25,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
@ -268,7 +270,7 @@ func (y *Cloud189PC) login() (err error) {
"validateCode": y.VCode,
"captchaToken": param.CaptchaToken,
"returnUrl": RETURN_URL,
"mailSuffix": "@189.cn",
// "mailSuffix": "@189.cn",
"dynamicCheck": "FALSE",
"clientType": CLIENT_TYPE,
"cb_SaveName": "1",
@ -434,15 +436,20 @@ func (y *Cloud189PC) refreshSession() (err error) {
}
// Normal upload
func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
var DEFAULT = partSize(file.GetSize())
var count = int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
// Files of size 0 cannot be uploaded this way
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
var sliceSize = partSize(file.GetSize())
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
lastPartSize := file.GetSize() % sliceSize
if file.GetSize() > 0 && lastPartSize == 0 {
lastPartSize = sliceSize
}
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"sliceSize": fmt.Sprint(DEFAULT),
"sliceSize": fmt.Sprint(sliceSize),
"lazyCheck": "1",
}
@ -457,72 +464,71 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
// Initialize the upload
var initMultiUpload InitMultiUploadResp
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
_, err := y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, params, &initMultiUpload)
if err != nil {
return err
return nil, err
}
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
fileMd5 := md5.New()
silceMd5 := md5.New()
silceMd5Hexs := make([]string, 0, count)
byteData := bytes.NewBuffer(make([]byte, DEFAULT))
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
if utils.IsCanceled(upCtx) {
break
}
byteData := make([]byte, sliceSize)
if i == count {
byteData = byteData[:lastPartSize]
}
// Read a chunk
byteData.Reset()
silceMd5.Reset()
_, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5, byteData), file, DEFAULT)
if err != io.EOF && err != io.ErrUnexpectedEOF && err != nil {
return err
if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
return nil, err
}
// Compute the chunk md5 and encode it as hex and base64
md5Bytes := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
silceMd5Base64 := base64.StdEncoding.EncodeToString(md5Bytes)
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
// Get the upload URL
var uploadUrl UploadUrlsResp
_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
func(req *resty.Request) {
req.SetContext(ctx)
}, Params{
"partInfo": fmt.Sprintf("%d-%s", i, silceMd5Base64),
"uploadFileId": initMultiUpload.Data.UploadFileID,
}, &uploadUrl)
threadG.Go(func(ctx context.Context) error {
uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {
return err
}
// Start uploading
uploadData := uploadUrl.UploadUrls[fmt.Sprint("partNumber_", i)]
err = retry.Do(func() error {
_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(byteData.Bytes()))
return err
},
retry.Context(ctx),
retry.Attempts(3),
retry.Delay(time.Second),
retry.MaxDelay(5*time.Second))
// step.4 upload the slice
uploadUrl := uploadUrls[0]
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
if err != nil {
return err
}
up(int(i * 100 / count))
up(float64(threadG.Success()) * 100 / float64(count))
return nil
})
}
if err = threadG.Wait(); err != nil {
return nil, err
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
sliceMd5Hex := fileMd5Hex
if file.GetSize() > DEFAULT {
if file.GetSize() > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
}
// Commit the upload
var resp CommitMultiUploadFileResp
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
func(req *resty.Request) {
req.SetContext(ctx)
@ -533,199 +539,240 @@ func (y *Cloud189PC) CommonUpload(ctx context.Context, dstDir model.Obj, file mo
"lazyCheck": "1",
"isLog": "0",
"opertype": "3",
}, nil)
return err
}, &resp)
if err != nil {
return nil, err
}
return resp.toFile(), nil
}
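The count/lastPartSize arithmetic at the top of StreamUpload (shared with FastUpload below) deserves a worked example; this standalone sketch prints the resulting per-slice sizes:

package main

import "fmt"

// sliceSizes mirrors the slice math above: count full slices of
// sliceSize, with the final slice truncated, or kept full when the
// total divides evenly.
func sliceSizes(total, sliceSize int64) []int64 {
	count := (total + sliceSize - 1) / sliceSize
	last := total % sliceSize
	if total > 0 && last == 0 {
		last = sliceSize
	}
	sizes := make([]int64, 0, count)
	for i := int64(1); i <= count; i++ {
		if i == count {
			sizes = append(sizes, last)
		} else {
			sizes = append(sizes, sliceSize)
		}
	}
	return sizes
}

func main() {
	fmt.Println(sliceSizes(25, 10)) // [10 10 5]
	fmt.Println(sliceSizes(20, 10)) // [10 10]
}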
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
fileMd5 := stream.GetHash().GetHash(utils.MD5)
if len(fileMd5) < utils.MD5.Width {
return nil, errors.New("invalid hash")
}
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
if err != nil {
return nil, err
}
if uploadInfo.FileDataExists != 1 {
return nil, errors.New("rapid upload fail")
}
return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
}
// Rapid upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
// The full-file md5 is required, so the source must support io.Seek
tempFile, err := utils.CreateTempFile(file.GetReadCloser())
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return err
return nil, err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
var DEFAULT = partSize(file.GetSize())
count := int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
var sliceSize = partSize(file.GetSize())
count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
lastSliceSize := file.GetSize() % sliceSize
if file.GetSize() > 0 && lastSliceSize == 0 {
lastSliceSize = sliceSize
}
// Compute the required hashes first
// step.1 compute the required hashes first
byteSize := sliceSize
fileMd5 := md5.New()
silceMd5 := md5.New()
silceMd5Hexs := make([]string, 0, count)
silceMd5Base64s := make([]string, 0, count)
partInfos := make([]string, 0, count)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
return nil, ctx.Err()
}
if i == count {
byteSize = lastSliceSize
}
silceMd5.Reset()
if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, DEFAULT); err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
return nil, err
}
md5Byte := silceMd5.Sum(nil)
silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
silceMd5Base64s = append(silceMd5Base64s, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
}
if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
return err
partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
sliceMd5Hex := fileMd5Hex
if file.GetSize() > DEFAULT {
if file.GetSize() > sliceSize {
sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
}
// Check whether rapid upload is possible
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"fileMd5": fileMd5Hex,
"sliceSize": fmt.Sprint(DEFAULT),
"sliceMd5": sliceMd5Hex,
}
fullUrl := UPLOAD_URL
if y.isFamily() {
params.Set("familyId", y.FamilyID)
fullUrl += "/family"
} else {
//params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
fullUrl += "/person"
}
// Try to resume saved progress
uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
if !ok {
// step.2 pre-upload
params := Params{
"parentFolderId": dstDir.GetID(),
"fileName": url.QueryEscape(file.GetName()),
"fileSize": fmt.Sprint(file.GetSize()),
"fileMd5": fileMd5Hex,
"sliceSize": fmt.Sprint(sliceSize),
"sliceMd5": sliceMd5Hex,
}
if y.isFamily() {
params.Set("familyId", y.FamilyID)
}
var uploadInfo InitMultiUploadResp
_, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
req.SetContext(ctx)
}, params, &uploadInfo)
if err != nil {
return err
return nil, err
}
uploadProgress = &UploadProgress{
UploadInfo: uploadInfo,
UploadParts: partInfos,
}
}
uploadInfo := uploadProgress.UploadInfo.Data
// The file is not in the drive yet; start uploading
if uploadInfo.Data.FileDataExists != 1 {
var uploadUrls UploadUrlsResp
_, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
func(req *resty.Request) {
req.SetContext(ctx)
}, Params{
"uploadFileId": uploadInfo.Data.UploadFileID,
"partInfo": strings.Join(silceMd5Base64s, ","),
}, &uploadUrls)
if err != nil {
return err
}
buf := make([]byte, DEFAULT)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
n, err := io.ReadFull(tempFile, buf)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
return err
}
uploadData := uploadUrls.UploadUrls[fmt.Sprint("partNumber_", i)]
err = retry.Do(func() error {
_, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(buf[:n]))
return err
},
retry.Context(ctx),
if uploadInfo.FileDataExists != 1 {
threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.MaxDelay(5*time.Second))
retry.DelayType(retry.BackOffDelay))
for i, uploadPart := range uploadProgress.UploadParts {
if utils.IsCanceled(upCtx) {
break
}
i, uploadPart := i, uploadPart
threadG.Go(func(ctx context.Context) error {
// step.3 get the upload URL
uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
if err != nil {
return err
}
uploadUrl := uploadUrls[0]
byteSize, offset := sliceSize, int64(uploadUrl.PartNumber-1)*sliceSize
if uploadUrl.PartNumber == count {
byteSize = lastSliceSize
}
// step.4 upload the slice
_, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
if err != nil {
return err
}
up(int(i * 100 / count))
up(float64(threadG.Success()) * 100 / float64(len(uploadProgress.UploadParts)))
uploadProgress.UploadParts[i] = ""
return nil
})
}
if err = threadG.Wait(); err != nil {
if errors.Is(err, context.Canceled) {
uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
}
return nil, err
}
}
// Commit
// step.5 commit
var resp CommitMultiUploadFileResp
_, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
func(req *resty.Request) {
req.SetContext(ctx)
}, Params{
"uploadFileId": uploadInfo.Data.UploadFileID,
"uploadFileId": uploadInfo.UploadFileID,
"isLog": "0",
"opertype": "3",
}, nil)
return err
}
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (err error) {
// 需要获取完整文件md5,必须支持 io.Seek
tempFile, err := utils.CreateTempFile(file.GetReadCloser())
}, &resp)
if err != nil {
return err
return nil, err
}
return resp.toFile(), nil
}
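Both upload paths fan out through alist's pkg/errgroup wrapper, which caps concurrency at uploadThread and layers on the retry options. A rough stand-in using golang.org/x/sync/errgroup (not the package the driver uses, and without the retry wiring) looks like:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// Run one goroutine per part, capped at uploadThread concurrent
// workers, failing fast when any part errors out.
func main() {
	parts := []string{"1-a", "2-b", "3-c", "4-d"}
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(3) // y.uploadThread
	for _, p := range parts {
		p := p
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
			}
			fmt.Println("upload", p) // stand-in for y.put(...)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}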
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
// Compute the md5
fileMd5 := md5.New()
if _, err := io.Copy(fileMd5, tempFile); err != nil {
return err
// Get upload-slice information
// The HTTP body is size-limited; requesting too many parts at once fails
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
fullUrl := UPLOAD_URL
if y.isFamily() {
fullUrl += "/family"
} else {
fullUrl += "/person"
}
if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
return err
var uploadUrlsResp UploadUrlsResp
_, err := y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
func(req *resty.Request) {
req.SetContext(ctx)
}, Params{
"uploadFileId": uploadFileId,
"partInfo": strings.Join(partInfo, ","),
}, &uploadUrlsResp)
if err != nil {
return nil, err
}
uploadUrls := uploadUrlsResp.Data
if len(uploadUrls) != len(partInfo) {
return nil, fmt.Errorf("uploadUrls get error, due to get length %d, real length %d", len(partInfo), len(uploadUrls))
}
uploadUrlInfos := make([]UploadUrlInfo, 0, len(uploadUrls))
for k, uploadUrl := range uploadUrls {
partNumber, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
if err != nil {
return nil, err
}
uploadUrlInfos = append(uploadUrlInfos, UploadUrlInfo{
PartNumber: partNumber,
Headers: ParseHttpHeader(uploadUrl.RequestHeader),
UploadUrlsData: uploadUrl,
})
}
sort.Slice(uploadUrlInfos, func(i, j int) bool {
return uploadUrlInfos[i].PartNumber < uploadUrlInfos[j].PartNumber
})
return uploadUrlInfos, nil
}
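Because the response keys each URL as "partNumber_<n>", the order of the requested parts has to be reconstructed by parsing that suffix, as the sort above does. A standalone sketch with hypothetical URL values:

package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// orderParts parses the "partNumber_<n>" keys back into sorted slice
// order, mirroring the handling in GetMultiUploadUrls above.
func orderParts(urls map[string]string) ([]int, error) {
	nums := make([]int, 0, len(urls))
	for k := range urls {
		n, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
		if err != nil {
			return nil, err
		}
		nums = append(nums, n)
	}
	sort.Ints(nums)
	return nums, nil
}

func main() {
	nums, _ := orderParts(map[string]string{"partNumber_2": "u2", "partNumber_1": "u1"})
	fmt.Println(nums) // [1 2]
}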
// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
fileMd5, err := utils.HashFile(utils.MD5, tempFile)
if err != nil {
return nil, err
}
fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
// Create the upload session
var uploadInfo CreateUploadFileResp
fullUrl := API_URL + "/createUploadFile.action"
if y.isFamily() {
fullUrl = API_URL + "/family/file/createFamilyFile.action"
}
_, err = y.post(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
if y.isFamily() {
req.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"fileMd5": fileMd5Hex,
"fileName": file.GetName(),
"fileSize": fmt.Sprint(file.GetSize()),
"parentId": dstDir.GetID(),
"resumePolicy": "1",
})
} else {
req.SetFormData(map[string]string{
"parentFolderId": dstDir.GetID(),
"fileName": file.GetName(),
"size": fmt.Sprint(file.GetSize()),
"md5": fileMd5Hex,
"opertype": "3",
"flag": "1",
"resumePolicy": "1",
"isLog": "0",
// "baseFileId": "",
// "lastWrite":"",
// "localPath": strings.ReplaceAll(param.LocalPath, "\\", "/"),
// "fileExt": "",
})
}
}, &uploadInfo)
uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
if err != nil {
return err
return nil, err
}
// The file is not in the drive yet; start uploading
status := GetUploadFileStatusResp{CreateUploadFileResp: uploadInfo}
for status.Size < file.GetSize() && status.FileDataExists != 1 {
status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
if utils.IsCanceled(ctx) {
return ctx.Err()
return nil, ctx.Err()
}
header := map[string]string{
@ -742,7 +789,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
_, err := y.put(ctx, status.FileUploadUrl, header, true, io.NopCloser(tempFile))
if err, ok := err.(*RespErr); ok && err.Code != "InputStreamReadError" {
return err
return nil, err
}
// Fetch the resume checkpoint status
@ -760,35 +807,80 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
}
}, &status)
if err != nil {
return err
return nil, err
}
if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
return err
return nil, err
}
up(int(status.Size / file.GetSize()))
up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
}
// Commit
var resp CommitUploadFileResp
_, err = y.post(status.FileCommitUrl, func(req *resty.Request) {
return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
}
// Create the upload session
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
var uploadInfo CreateUploadFileResp
fullUrl := API_URL + "/createUploadFile.action"
if y.isFamily() {
fullUrl = API_URL + "/family/file/createFamilyFile.action"
}
_, err := y.post(fullUrl, func(req *resty.Request) {
req.SetContext(ctx)
if y.isFamily() {
req.SetQueryParams(map[string]string{
"familyId": y.FamilyID,
"parentId": parentID,
"fileMd5": fileMd5,
"fileName": fileName,
"fileSize": fileSize,
"resumePolicy": "1",
})
} else {
req.SetFormData(map[string]string{
"parentFolderId": parentID,
"fileName": fileName,
"size": fileSize,
"md5": fileMd5,
"opertype": "3",
"flag": "1",
"resumePolicy": "1",
"isLog": "0",
})
}
}, &uploadInfo)
if err != nil {
return nil, err
}
return &uploadInfo, nil
}
// Commit the uploaded file
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
var resp OldCommitUploadFileResp
_, err := y.post(fileCommitUrl, func(req *resty.Request) {
req.SetContext(ctx)
if y.isFamily() {
req.SetHeaders(map[string]string{
"ResumePolicy": "1",
"UploadFileId": fmt.Sprint(status.UploadFileId),
"UploadFileId": fmt.Sprint(uploadFileID),
"FamilyId": fmt.Sprint(y.FamilyID),
})
} else {
req.SetFormData(map[string]string{
"opertype": "3",
"resumePolicy": "1",
"uploadFileId": fmt.Sprint(status.UploadFileId),
"uploadFileId": fmt.Sprint(uploadFileID),
"isLog": "0",
})
}
}, &resp)
return err
if err != nil {
return nil, err
}
return resp.toFile(), nil
}
func (y *Cloud189PC) isFamily() bool {
@ -829,3 +921,33 @@ func (y *Cloud189PC) getFamilyID() (string, error) {
}
return fmt.Sprint(infos[0].FamilyID), nil
}
func (y *Cloud189PC) CheckBatchTask(aType string, taskID string) (*BatchTaskStateResp, error) {
var resp BatchTaskStateResp
_, err := y.post(API_URL+"/batch/checkBatchTask.action", func(req *resty.Request) {
req.SetFormData(map[string]string{
"type": aType,
"taskId": taskID,
})
}, &resp)
if err != nil {
return nil, err
}
return &resp, nil
}
func (y *Cloud189PC) WaitBatchTask(aType string, taskID string, t time.Duration) error {
for {
state, err := y.CheckBatchTask(aType, taskID)
if err != nil {
return err
}
switch state.TaskStatus {
case 2:
return errors.New("there is a conflict with the target object")
case 4:
return nil
}
time.Sleep(t)
}
}
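The WaitBatchTask loop above is the synchronization point for Move, Copy and Remove. A self-contained sketch of the same poll-until-terminal pattern (fake check function; the status codes 2 and 4 mirror BatchTaskStateResp.TaskStatus):

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollTask polls check() until the task reports done (4) or conflict
// (2), sleeping interval between attempts, like WaitBatchTask above.
func pollTask(check func() (int, error), interval time.Duration) error {
	for {
		status, err := check()
		if err != nil {
			return err
		}
		switch status {
		case 2:
			return errors.New("conflict with the target object")
		case 4:
			return nil
		}
		time.Sleep(interval)
	}
}

func main() {
	n := 0
	// Fake task: reports "running" (3) twice, then "done" (4).
	err := pollTask(func() (int, error) {
		n++
		if n < 3 {
			return 3, nil
		}
		return 4, nil
	}, 10*time.Millisecond)
	fmt.Println(err) // <nil>
}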

View File

@ -3,6 +3,7 @@ package alist_v3
import (
"context"
"fmt"
"io"
"net/http"
"path"
"strconv"
@ -93,8 +94,10 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
@ -176,7 +179,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
SetHeader("Password", d.MetaPassword).
SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
SetContentLength(true).
SetBody(stream.GetReadCloser())
SetBody(io.ReadCloser(stream))
})
return err
}

View File

@ -18,9 +18,11 @@ type ObjResp struct {
Size int64 `json:"size"`
IsDir bool `json:"is_dir"`
Modified time.Time `json:"modified"`
Created time.Time `json:"created"`
Sign string `json:"sign"`
Thumb string `json:"thumb"`
Type int `json:"type"`
HashInfo string `json:"hashinfo"`
}
type FsListResp struct {

View File

@ -14,6 +14,8 @@ import (
"os"
"time"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
@ -67,7 +69,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
return nil
}
// init deviceID
deviceID := utils.GetSHA256Encode([]byte(d.UserID))
deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
// init privateKey
privateKey, _ := NewPrivateKeyFromHex(deviceID)
state := State{
@ -163,14 +165,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
file := model.FileStream{
Obj: stream,
ReadCloser: stream,
Mimetype: stream.GetMimetype(),
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
file := stream.FileStream{
Obj: streamer,
Reader: streamer,
Mimetype: streamer.GetMimetype(),
}
const DEFAULT int64 = 10485760
var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
partInfoList := make([]base.Json, 0, count)
for i := 1; i <= count; i++ {
@ -187,25 +189,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
}
var localFile *os.File
if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
localFile, _ = fileStream.ReadCloser.(*os.File)
if fileStream, ok := file.Reader.(*stream.FileStream); ok {
localFile, _ = fileStream.Reader.(*os.File)
}
if d.RapidUpload {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
io.CopyN(buf, file, 1024)
reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
if localFile != nil {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
return err
}
} else {
// Splice the head bytes back onto the stream
file.ReadCloser = struct {
file.Reader = struct {
io.Reader
io.Closer
}{
Reader: io.MultiReader(buf, file),
Closer: file,
Closer: &file,
}
}
} else {
@ -281,7 +283,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
if _, err = localFile.Seek(0, io.SeekStart); err != nil {
return err
}
file.ReadCloser = localFile
file.Reader = localFile
}
for i, partInfo := range resp.PartInfoList {
@ -303,7 +305,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
}
res.Body.Close()
if count > 0 {
up(i * 100 / count)
up(float64(i) * 100 / float64(count))
}
}
var resp2 base.Json

View File

@ -2,10 +2,12 @@ package aliyundrive_open
import (
"context"
"errors"
"fmt"
"net/http"
"time"
"github.com/Xhofe/rateg"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -34,13 +36,25 @@ func (d *AliyundriveOpen) GetAddition() driver.Additional {
}
func (d *AliyundriveOpen) Init(ctx context.Context) error {
if d.LIVPDownloadFormat == "" {
d.LIVPDownloadFormat = "jpeg"
}
if d.DriveType == "" {
d.DriveType = "default"
}
res, err := d.request("/adrive/v1.0/user/getDriveInfo", http.MethodPost, nil)
if err != nil {
return err
}
d.DriveId = utils.Json.Get(res, "default_drive_id").ToString()
d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
d.limitLink = utils.LimitRateCtx(d.link, time.Second)
d.DriveId = utils.Json.Get(res, d.DriveType+"_drive_id").ToString()
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}
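rateg.LimitFnCtx wraps list and link in a token bucket (4 calls/s and 1 call/s here). As a rough stand-in, not the API the driver actually uses, the same shape can be built on golang.org/x/time/rate:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// limitFnCtx wraps fn so at most `limit` calls per second proceed,
// with a token bucket of size `bucket`, roughly like rateg.LimitFnCtx.
func limitFnCtx[T any](fn func(context.Context) (T, error), limit float64, bucket int) func(context.Context) (T, error) {
	l := rate.NewLimiter(rate.Limit(limit), bucket)
	return func(ctx context.Context) (T, error) {
		if err := l.Wait(ctx); err != nil {
			var zero T
			return zero, err
		}
		return fn(ctx)
	}
}

func main() {
	call := limitFnCtx(func(ctx context.Context) (string, error) { return "ok", nil }, 1, 1)
	start := time.Now()
	for i := 0; i < 3; i++ {
		_, _ = call(context.Background())
	}
	fmt.Println("3 calls took ~", time.Since(start).Round(time.Second)) // ~2s at 1 call/s
}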
@ -66,14 +80,20 @@ func (d *AliyundriveOpen) link(ctx context.Context, file model.Obj) (*model.Link
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": file.GetID(),
"expire_sec": 14400,
"expire_sec": 900,
})
})
if err != nil {
return nil, err
}
url := utils.Json.Get(res, "url").ToString()
exp := time.Hour
if url == "" {
if utils.Ext(file.GetName()) != "livp" {
return nil, errors.New("get download url failed: " + string(res))
}
url = utils.Json.Get(res, "streamsUrl", d.LIVPDownloadFormat).ToString()
}
exp := time.Minute
return &model.Link{
URL: url,
Expiration: &exp,
@ -87,7 +107,9 @@ func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.L
return d.limitLink(ctx, file)
}
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
nowTime, _ := getNowTime()
newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
_, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
@ -95,12 +117,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
"name": dirName,
"type": "folder",
"check_name_mode": "refuse",
}).SetResult(&newDir)
})
})
return err
if err != nil {
return nil, err
}
return fileToObj(newDir), nil
}
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
var resp MoveOrCopyResp
_, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
@ -108,20 +134,36 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) er
"to_parent_file_id": dstDir.GetID(),
"check_name_mode": "refuse", // optional:ignore,auto_rename,refuse
//"new_name": "newName", // The new name to use when a file of the same name exists
}).SetResult(&resp)
})
})
return err
if err != nil {
return nil, err
}
if resp.Exist {
return nil, errors.New("existence of files with the same name")
}
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if srcObj, ok := srcObj.(*model.ObjThumb); ok {
srcObj.ID = resp.FileID
srcObj.Modified = time.Now()
return srcObj, nil
}
return nil, nil
}
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
var newFile File
_, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": srcObj.GetID(),
"name": newName,
}).SetResult(&newFile)
})
})
return err
if err != nil {
return nil, err
}
return fileToObj(newFile), nil
}
func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -150,7 +192,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
return d.upload(ctx, dstDir, stream, up)
}
@ -165,7 +207,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
case "video_preview":
uri = "/adrive/v1.0/openFile/getVideoPreviewPlayInfo"
data["category"] = "live_transcoding"
data["url_expire_sec"] = 14400
data["url_expire_sec"] = 900
default:
return nil, errs.NotSupport
}
@ -179,3 +221,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
}
var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil)

View File

@ -6,16 +6,18 @@ import (
)
type Addition struct {
DriveType string `json:"drive_type" type:"select" options:"default,resource,backup" default:"default"`
driver.RootID
RefreshToken string `json:"refresh_token" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
OauthTokenURL string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/ali_open/token"`
OauthTokenURL string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
ClientID string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
ClientSecret string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
RemoveWay string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
RapidUpload bool `json:"rapid_upload" help:"If you enable this option, the file will be uploaded to the server first, so the progress will be incorrect"`
InternalUpload bool `json:"internal_upload" help:"If you are using Aliyun ECS is located in Beijing, you can turn it on to boost the upload speed"`
LIVPDownloadFormat string `json:"livp_download_format" type:"select" options:"jpeg,mov" default:"jpeg"`
AccessToken string
}

View File

@ -1,6 +1,7 @@
package aliyundrive_open
import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"
"github.com/alist-org/alist/v3/internal/model"
@ -28,11 +29,17 @@ type File struct {
Type string `json:"type"`
Thumbnail string `json:"thumbnail"`
Url string `json:"url"`
CreatedAt *time.Time `json:"created_at"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
// create only
FileName string `json:"file_name"`
}
func fileToObj(f File) *model.ObjThumb {
if f.Name == "" {
f.Name = f.FileName
}
return &model.ObjThumb{
Object: model.Object{
ID: f.FileId,
@ -40,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
Size: f.Size,
Modified: f.UpdatedAt,
IsFolder: f.Type == "folder",
Ctime: f.CreatedAt,
HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
}
@ -67,3 +76,9 @@ type CreateResp struct {
RapidUpload bool `json:"rapid_upload"`
PartInfoList []PartInfo `json:"part_info_list"`
}
type MoveOrCopyResp struct {
Exist bool `json:"exist"`
DriveID string `json:"drive_id"`
FileID string `json:"file_id"`
}

View File

@ -3,14 +3,11 @@ package aliyundrive_open
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"math"
"net/http"
"os"
"strconv"
"strings"
"time"
@ -18,7 +15,9 @@ import (
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@ -32,19 +31,19 @@ func makePartInfos(size int) []base.Json {
}
func calPartSize(fileSize int64) int64 {
var partSize int64 = 20 * 1024 * 1024
var partSize int64 = 20 * utils.MB
if fileSize > partSize {
if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB
partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
} else if fileSize > 768*1024*1024*1024 { // over 768GB
if fileSize > 1*utils.TB { // file Size over 1TB
partSize = 5 * utils.GB // file part size 5GB
} else if fileSize > 768*utils.GB { // over 768GB
partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 parts
} else if fileSize > 512*1024*1024*1024 { // over 512GB
} else if fileSize > 512*utils.GB { // over 512GB
partSize = 82463373 // ≈ 78.6432MB
} else if fileSize > 384*1024*1024*1024 { // over 384GB
} else if fileSize > 384*utils.GB { // over 384GB
partSize = 54975582 // ≈ 52.4288MB
} else if fileSize > 256*1024*1024*1024 { // over 256GB
} else if fileSize > 256*utils.GB { // over 256GB
partSize = 41231687 // ≈ 39.3216MB
} else if fileSize > 128*1024*1024*1024 { // over 128GB
} else if fileSize > 128*utils.GB { // over 128GB
partSize = 27487791 // ≈ 26.2144MB
}
}
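A quick sanity check of the tiering above: each threshold keeps the slice count at or below the 10,000-part cap noted elsewhere in this change (the file sizes here are illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	const GB = int64(1 << 30)
	for _, c := range []struct{ size, part int64 }{
		{100 * GB, 20 << 20},   // <=128GB: default 20MB parts -> 5120
		{200 * GB, 27487791},   // >128GB tier -> 7813
		{1024 * GB, 109951163}, // ~1TB tier -> 10000
	} {
		fmt.Println(c.size, int64(math.Ceil(float64(c.size)/float64(c.part))))
	}
}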
@ -65,73 +64,40 @@ func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]Pa
return resp.PartInfoList, err
}
func (d *AliyundriveOpen) uploadPart(ctx context.Context, i, count int, reader *utils.MultiReadable, resp *CreateResp, retry bool) error {
partInfo := resp.PartInfoList[i-1]
func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
uploadUrl := partInfo.UploadUrl
if d.InternalUpload {
uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
}
req, err := http.NewRequest("PUT", uploadUrl, reader)
req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
if err != nil {
return err
}
req = req.WithContext(ctx)
res, err := base.HttpClient.Do(req)
if err != nil {
if retry {
reader.Reset()
return d.uploadPart(ctx, i, count, reader, resp, false)
}
return err
}
res.Body.Close()
if retry && res.StatusCode == http.StatusForbidden {
resp.PartInfoList, err = d.getUploadUrl(count, resp.FileId, resp.UploadId)
if err != nil {
return err
}
reader.Reset()
return d.uploadPart(ctx, i, count, reader, resp, false)
}
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
return fmt.Errorf("upload status: %d", res.StatusCode)
}
return nil
}
func (d *AliyundriveOpen) normalUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, createResp CreateResp, count int, partSize int64) error {
log.Debugf("[aliyundive_open] normal upload")
// 2. upload
preTime := time.Now()
for i := 1; i <= len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
err := d.uploadPart(ctx, i, count, utils.NewMultiReadable(io.LimitReader(stream, partSize)), &createResp, true)
if err != nil {
return err
}
if count > 0 {
up(i * 100 / count)
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
if err != nil {
return err
}
preTime = time.Now()
}
}
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
// 3. complete
var newFile File
_, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
req.SetBody(base.Json{
"drive_id": d.DriveId,
"file_id": createResp.FileId,
"upload_id": createResp.UploadId,
"file_id": fileId,
"upload_id": uploadId,
}).SetResult(&newFile)
})
})
return err
if err != nil {
return nil, err
}
return fileToObj(newFile), nil
}
type ProofRange struct {
@ -159,110 +125,146 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
return pr, nil
}
func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
proofRange, err := getProofRange(d.AccessToken, fileSize)
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
if err != nil {
return "", err
}
buf := make([]byte, proofRange.End-proofRange.Start)
_, err = file.ReadAt(buf, proofRange.Start)
length := proofRange.End - proofRange.Start
buf := bytes.NewBuffer(make([]byte, 0, length))
reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(buf), nil
_, err = io.CopyN(buf, reader, length)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}
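calProofCode base64-encodes a token-derived byte range of the file. A standalone sketch of just the range-read-and-encode step (getProofRange, which derives the range from the access token, is elided here):

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
)

// proofCode reads [start, start+length) from the source and
// base64-encodes it, as calProofCode does via stream.RangeRead.
func proofCode(r io.ReaderAt, start, length int64) (string, error) {
	buf := make([]byte, length)
	if _, err := r.ReadAt(buf, start); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
	data := bytes.NewReader([]byte("hello aliyundrive proof"))
	code, err := proofCode(data, 6, 8)
	fmt.Println(code, err)
}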
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// 1. create
// Part Size Unit: Bytes, Default: 20MB,
// Maximum number of slices 10,000, ≈195.3125GB
var partSize = calPartSize(stream.GetSize())
const dateFormat = "2006-01-02T15:04:05.000Z"
mtimeStr := stream.ModTime().UTC().Format(dateFormat)
ctimeStr := stream.CreateTime().UTC().Format(dateFormat)
createData := base.Json{
"drive_id": d.DriveId,
"parent_file_id": dstDir.GetID(),
"name": stream.GetName(),
"type": "file",
"check_name_mode": "ignore",
"local_modified_at": mtimeStr,
"local_created_at": ctimeStr,
}
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
createData["part_info_list"] = makePartInfos(count)
// rapid upload
rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
if rapidUpload {
log.Debugf("[aliyundrive_open] start cal pre_hash")
// read 1024 bytes to calculate pre hash
buf := bytes.NewBuffer(make([]byte, 0, 1024))
_, err := io.CopyN(buf, stream, 1024)
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
if err != nil {
return err
return nil, err
}
hash, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return nil, err
}
createData["size"] = stream.GetSize()
createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
// if support seek, seek to start
if localFile, ok := stream.(io.Seeker); ok {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
return err
}
} else {
// Put spliced head back to stream
stream.SetReadCloser(struct {
io.Reader
io.Closer
}{
Reader: io.MultiReader(buf, stream.GetReadCloser()),
Closer: stream.GetReadCloser(),
})
}
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
var tmpF model.File
if err != nil {
if e.Code != "PreHashMatched" || !rapidUpload {
return err
return nil, err
}
log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
// convert to local file
file, err := utils.CreateTempFile(stream)
hi := stream.GetHash()
hash := hi.GetHash(utils.SHA1)
if len(hash) <= 0 {
tmpF, err = stream.CacheFullInTempFile()
if err != nil {
return err
return nil, err
}
_ = stream.GetReadCloser().Close()
stream.SetReadCloser(file)
// calculate full hash
h := sha1.New()
_, err = io.Copy(h, file)
hash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return err
return nil, err
}
}
delete(createData, "pre_hash")
createData["proof_version"] = "v1"
createData["content_hash_name"] = "sha1"
createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
// seek to start
if _, err = file.Seek(0, io.SeekStart); err != nil {
return err
}
createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
createData["content_hash"] = hash
createData["proof_code"], err = d.calProofCode(stream)
if err != nil {
return fmt.Errorf("cal proof code error: %s", err.Error())
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
_, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
if err != nil {
return err
return nil, err
}
if createResp.RapidUpload {
}
if !createResp.RapidUpload {
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
// refresh upload url if 50 minutes passed
if time.Since(preTime) > 50*time.Minute {
createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
if err != nil {
return nil, err
}
preTime = time.Now()
}
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
err = retry.Do(func() error {
//rd.Reset()
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
if err != nil {
return nil, err
}
offset += partSize
up(float64(i*100) / float64(count))
}
} else {
log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
return nil
}
// failed to rapid upload, try normal upload
if _, err = file.Seek(0, io.SeekStart); err != nil {
return err
}
}
log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
return d.normalUpload(ctx, stream, up, createResp, count, partSize)
// 3. complete
return d.completeUpload(createResp.FileId, createResp.UploadId)
}
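The upload flow above hashes in two phases: pre_hash is the SHA-1 of only the first 1KB, and the full-file SHA-1 plus proof code are computed only after the server replies PreHashMatched. A standalone sketch of the two hashes:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// sha1Hex hashes a reader to a hex string, as used for both the cheap
// pre_hash (first 1KB) and the full content_hash.
func sha1Hex(r io.Reader) string {
	h := sha1.New()
	_, _ = io.Copy(h, r)
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	content := strings.Repeat("data", 1024)
	preHash := sha1Hex(strings.NewReader(content[:1024]))
	fullHash := sha1Hex(strings.NewReader(content))
	fmt.Println(preHash, fullHash)
}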

View File

@ -2,46 +2,92 @@ package aliyundrive_open
import (
"context"
"encoding/base64"
"errors"
"fmt"
"net/http"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
// do others that not defined in Driver interface
func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
url := d.base + "/oauth/access_token"
if d.OauthTokenURL != "" && d.ClientID == "" {
url = d.OauthTokenURL
}
var resp base.TokenResp
//var resp base.TokenResp
var e ErrResp
_, err := base.RestyClient.R().
ForceContentType("application/json").
res, err := base.RestyClient.R().
//ForceContentType("application/json").
SetBody(base.Json{
"client_id": d.ClientID,
"client_secret": d.ClientSecret,
"grant_type": "refresh_token",
"refresh_token": d.RefreshToken,
}).
SetResult(&resp).
//SetResult(&resp).
SetError(&e).
Post(url)
if err != nil {
return "", "", err
}
log.Debugf("[ali_open] refresh token response: %s", res.String())
if e.Code != "" {
return "", "", fmt.Errorf("failed to refresh token: %s", e.Message)
}
refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
if refresh == "" {
return "", "", fmt.Errorf("failed to refresh token: refresh token is empty, resp: %s", res.String())
}
curSub, err := getSub(d.RefreshToken)
if err != nil {
return "", "", err
}
newSub, err := getSub(refresh)
if err != nil {
return "", "", err
}
if curSub != newSub {
return "", "", errors.New("failed to refresh token: sub not match")
}
return refresh, access, nil
}
func getSub(token string) (string, error) {
segments := strings.Split(token, ".")
if len(segments) != 3 {
return "", errors.New("not a jwt token because of invalid segments")
}
bs, err := base64.RawStdEncoding.DecodeString(segments[1])
if err != nil {
return "", errors.New("failed to decode jwt token")
}
return utils.Json.Get(bs, "sub").ToString(), nil
}
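getSub above decodes the JWT payload to verify that the old and new refresh tokens belong to the same account (sub claim). A standalone sketch with encoding/json standing in for the jsoniter wrapper (note: JWT payloads are normally base64url, so RawURLEncoding is used here, where the driver uses RawStdEncoding):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// getSub splits the JWT, decodes the payload segment, and reads its
// "sub" claim, mirroring the helper above.
func getSub(token string) (string, error) {
	seg := strings.Split(token, ".")
	if len(seg) != 3 {
		return "", fmt.Errorf("not a jwt token: invalid segments")
	}
	bs, err := base64.RawURLEncoding.DecodeString(seg[1])
	if err != nil {
		return "", err
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(bs, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}

func main() {
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"user-123"}`))
	sub, err := getSub("x." + payload + ".y")
	fmt.Println(sub, err) // user-123 <nil>
}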
func (d *AliyundriveOpen) refreshToken() error {
refresh, access, err := d._refreshToken()
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[ali_open] failed to refresh token: %s", err)
}
refresh, access, err = d._refreshToken()
}
if err != nil {
return err
}
if e.Code != "" {
return fmt.Errorf("failed to refresh token: %s", e.Message)
}
if resp.RefreshToken == "" {
return errors.New("failed to refresh token: refresh token is empty")
}
d.RefreshToken, d.AccessToken = resp.RefreshToken, resp.AccessToken
log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
d.RefreshToken, d.AccessToken = refresh, access
op.MustSaveDriverStorage(d)
return nil
}
@ -65,6 +111,9 @@ func (d *AliyundriveOpen) requestReturnErrResp(uri, method string, callback base
req.SetError(&e)
res, err := req.Execute(method, d.base+uri)
if err != nil {
if res != nil {
log.Errorf("[aliyundrive_open] request error: %s", res.String())
}
return nil, err, nil
}
isRetry := len(retry) > 0 && retry[0]
@ -121,3 +170,9 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
}
return res, nil
}
func getNowTime() (time.Time, string) {
nowTime := time.Now()
nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
return nowTime, nowTimeStr
}

View File

@ -6,6 +6,7 @@ import (
"net/http"
"time"
"github.com/Xhofe/rateg"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -52,8 +53,14 @@ func (d *AliyundriveShare) Init(ctx context.Context) error {
log.Errorf("%+v", err)
}
})
d.limitList = utils.LimitRateCtx(d.list, time.Second/4)
d.limitLink = utils.LimitRateCtx(d.link, time.Second)
d.limitList = rateg.LimitFnCtx(d.list, rateg.LimitFnOption{
Limit: 4,
Bucket: 1,
})
d.limitLink = rateg.LimitFnCtx(d.link, rateg.LimitFnOption{
Limit: 1,
Bucket: 1,
})
return nil
}

View File

@ -44,6 +44,7 @@ func fileToObj(f File) *model.ObjThumb {
Name: f.Name,
Size: f.Size,
Modified: f.UpdatedAt,
Ctime: f.CreatedAt,
IsFolder: f.Type == "folder",
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},

View File

@ -2,7 +2,10 @@ package drivers
import (
_ "github.com/alist-org/alist/v3/drivers/115"
_ "github.com/alist-org/alist/v3/drivers/115_share"
_ "github.com/alist-org/alist/v3/drivers/123"
_ "github.com/alist-org/alist/v3/drivers/123_link"
_ "github.com/alist-org/alist/v3/drivers/123_share"
_ "github.com/alist-org/alist/v3/drivers/139"
_ "github.com/alist-org/alist/v3/drivers/189"
_ "github.com/alist-org/alist/v3/drivers/189pc"
@ -15,7 +18,9 @@ import (
_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
_ "github.com/alist-org/alist/v3/drivers/baidu_share"
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/ftp"
_ "github.com/alist-org/alist/v3/drivers/google_drive"
@ -42,7 +47,9 @@ import (
_ "github.com/alist-org/alist/v3/drivers/url_tree"
_ "github.com/alist-org/alist/v3/drivers/uss"
_ "github.com/alist-org/alist/v3/drivers/virtual"
_ "github.com/alist-org/alist/v3/drivers/vtencent"
_ "github.com/alist-org/alist/v3/drivers/webdav"
_ "github.com/alist-org/alist/v3/drivers/weiyun"
_ "github.com/alist-org/alist/v3/drivers/wopan"
_ "github.com/alist-org/alist/v3/drivers/yandex_disk"
)

View File

@ -1,28 +1,33 @@
package baidu_netdisk
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"errors"
"io"
"math"
"os"
"net/url"
stdpath "path"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
log "github.com/sirupsen/logrus"
)
type BaiduNetdisk struct {
model.Storage
Addition
uploadThread int
vipType int // membership type: 0 regular user (4G/4M), 1 member (10G/16M), 2 super member (20G/32M)
}
func (d *BaiduNetdisk) Config() driver.Config {
@ -34,12 +39,25 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}
func (d *BaiduNetdisk) Init(ctx context.Context) error {
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, "3"
}
if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
d.UploadAPI = "https://d.pcs.baidu.com"
}
res, err := d.get("/xpan/nas", map[string]string{
"method": "uinfo",
}, nil)
log.Debugf("[baidu] get uinfo: %s", string(res))
if err != nil {
return err
}
d.vipType = utils.Json.Get(res, "vip_type").ToInt()
return nil
}
func (d *BaiduNetdisk) Drop(ctx context.Context) error {
return nil
@ -62,12 +80,16 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
return d.linkOfficial(file, args)
}
func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
return err
func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
var newDir File
_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
if err != nil {
return nil, err
}
return fileToObj(newDir), nil
}
func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
data := []base.Json{
{
"path": srcObj.GetPath(),
@ -76,10 +98,18 @@ func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error
},
}
_, err := d.manage("move", data)
return err
if err != nil {
return nil, err
}
if srcObj, ok := srcObj.(*model.ObjThumb); ok {
srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
srcObj.Modified = time.Now()
return srcObj, nil
}
return nil, nil
}
func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
data := []base.Json{
{
"path": srcObj.GetPath(),
@ -87,7 +117,17 @@ func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName str
},
}
_, err := d.manage("rename", data)
return err
if err != nil {
return nil, err
}
if srcObj, ok := srcObj.(*model.ObjThumb); ok {
srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
srcObj.Name = newName
srcObj.Modified = time.Now()
return srcObj, nil
}
return nil, nil
}
func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -108,126 +148,175 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
if err != nil {
return err
func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
contentMd5 := stream.GetHash().GetHash(utils.MD5)
if len(contentMd5) < utils.MD5.Width {
return nil, errors.New("invalid hash")
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
var Default int64 = 4 * 1024 * 1024
defaultByteData := make([]byte, Default)
count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
var SliceSize int64 = 256 * 1024
streamSize := stream.GetSize()
path := stdpath.Join(dstDir.GetPath(), stream.GetName())
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
blockList, _ := utils.Json.MarshalToString([]string{contentMd5})
var newFile File
_, err := d.create(path, streamSize, 0, "", blockList, &newFile, mtime, ctime)
if err != nil {
return nil, err
}
return fileToObj(newFile), nil
}
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// rapid upload
if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
return newObj, nil
}
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
streamSize := stream.GetSize()
sliceSize := d.getSliceSize()
count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
lastBlockSize := streamSize % sliceSize
if streamSize > 0 && lastBlockSize == 0 {
lastBlockSize = sliceSize
}
// slice-md5 covers only the first 256 KiB of data
const SliceSize int64 = 256 * 1024
// compute the md5 values in one pass
h1 := md5.New()
h2 := md5.New()
block_list := make([]string, 0)
content_md5 := ""
slice_md5 := ""
left := stream.GetSize()
for i := 0; i < count; i++ {
byteSize := Default
var byteData []byte
if left < Default {
byteSize = left
byteData = make([]byte, byteSize)
} else {
byteData = defaultByteData
blockList := make([]string, 0, count)
byteSize := sliceSize
fileMd5H := md5.New()
sliceMd5H := md5.New()
sliceMd5H2 := md5.New()
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
left -= byteSize
_, err = io.ReadFull(tempFile, byteData)
if err != nil {
return err
if i == count {
byteSize = lastBlockSize
}
h1.Write(byteData)
h2.Write(byteData)
block_list = append(block_list, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
h2.Reset()
_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
if err != nil && err != io.EOF {
return nil, err
}
content_md5 = hex.EncodeToString(h1.Sum(nil))
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
sliceMd5H.Reset()
}
if stream.GetSize() <= SliceSize {
slice_md5 = content_md5
} else {
sliceData := make([]byte, SliceSize)
_, err = io.ReadFull(tempFile, sliceData)
if err != nil {
return err
}
h2.Write(sliceData)
slice_md5 = hex.EncodeToString(h2.Sum(nil))
_, err = tempFile.Seek(0, io.SeekStart)
if err != nil {
return err
}
}
rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
path := encodeURIComponent(rawPath)
block_list_str := fmt.Sprintf("[%s]", strings.Join(block_list, ","))
data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s&content-md5=%s&slice-md5=%s",
path, stream.GetSize(),
block_list_str,
content_md5, slice_md5)
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
blockListStr, _ := utils.Json.MarshalToString(blockList)
path := stdpath.Join(dstDir.GetPath(), stream.GetName())
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
// step 1: precreate
// try to resume previously saved upload progress
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
params := map[string]string{
"method": "precreate",
}
var precreateResp PrecreateResp
_, err = d.post("/xpan/file", params, data, &precreateResp)
form := map[string]string{
"path": path,
"size": strconv.FormatInt(streamSize, 10),
"isdir": "0",
"autoinit": "1",
"rtype": "3",
"block_list": blockListStr,
"content-md5": contentMd5,
"slice-md5": sliceMd5,
}
joinTime(form, ctime, mtime)
log.Debugf("[baidu_netdisk] precreate data: %s", form)
_, err = d.postForm("/xpan/file", params, form, &precreateResp)
if err != nil {
return err
return nil, err
}
log.Debugf("%+v", precreateResp)
if precreateResp.ReturnType == 2 {
return nil
// rapid upload: the md5 matched on Baidu's server
if err != nil {
return nil, err
}
params = map[string]string{
return fileToObj(precreateResp.File), nil
}
}
// step 2: upload slices
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize // capture loop vars for the goroutine
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
params := map[string]string{
"method": "upload",
"access_token": d.AccessToken,
"type": "tmpfile",
"path": path,
"uploadid": precreateResp.Uploadid,
"partseq": strconv.Itoa(partseq),
}
left = stream.GetSize()
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
byteSize := Default
var byteData []byte
if left < Default {
byteSize = left
byteData = make([]byte, byteSize)
} else {
byteData = defaultByteData
}
left -= byteSize
_, err = io.ReadFull(tempFile, byteData)
err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
if err != nil {
return err
}
u := "https://d.pcs.baidu.com/rest/2.0/pcs/superfile2"
params["partseq"] = strconv.Itoa(partseq)
up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
precreateResp.BlockList[i] = -1
return nil
})
}
if err = threadG.Wait(); err != nil {
// if the user canceled, save the upload progress for resumption
if errors.Is(err, context.Canceled) {
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
}
return nil, err
}
// step 3: create the file
var newFile File
_, err = d.create(path, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
if err != nil {
return nil, err
}
return fileToObj(newFile), nil
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
res, err := base.RestyClient.R().
SetContext(ctx).
SetQueryParams(params).
SetFileReader("file", stream.GetName(), bytes.NewReader(byteData)).
Post(u)
SetFileReader("file", fileName, file).
Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
if err != nil {
return err
}
log.Debugln(res.String())
if len(precreateResp.BlockList) > 0 {
up(i * 100 / len(precreateResp.BlockList))
log.Debugln(res.RawResponse.Status + res.String())
errCode := utils.Json.Get(res.Body(), "error_code").ToInt()
errNo := utils.Json.Get(res.Body(), "errno").ToInt()
if errCode != 0 || errNo != 0 {
return errs.NewErr(errs.StreamIncomplete, "error in uploading to baidu, will retry. response=%s", res.String())
}
}
_, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)
return err
return nil
}
var _ driver.Driver = (*BaiduNetdisk)(nil)
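
For reference, a minimal standalone sketch of the hash layout the precreate step above expects: content-md5 over the whole file, slice-md5 over the first 256 KiB, and one MD5 per slice for block_list. The function name hashBaiduBlocks, the *os.File input, and the error handling are illustrative only, not driver API; the chunk math mirrors Put above.

package main

import (
"crypto/md5"
"encoding/hex"
"io"
"math"
"os"
)

// hashBaiduBlocks walks the file once for content-md5 and the per-slice
// block list, then hashes the first 256 KiB separately for slice-md5.
func hashBaiduBlocks(f *os.File, size, sliceSize int64) (contentMd5, sliceMd5 string, blockList []string, err error) {
count := int(math.Max(math.Ceil(float64(size)/float64(sliceSize)), 1))
lastBlockSize := size % sliceSize
if size > 0 && lastBlockSize == 0 {
lastBlockSize = sliceSize
}
fileH := md5.New()
blockList = make([]string, 0, count)
byteSize := sliceSize
for i := 1; i <= count; i++ {
if i == count {
byteSize = lastBlockSize
}
blockH := md5.New()
if _, err = io.CopyN(io.MultiWriter(fileH, blockH), f, byteSize); err != nil && err != io.EOF {
return "", "", nil, err
}
blockList = append(blockList, hex.EncodeToString(blockH.Sum(nil)))
}
// slice-md5 covers the first 256 KiB (or the whole file if it is smaller)
headH := md5.New()
if _, err = io.Copy(headH, io.NewSectionReader(f, 0, 256*1024)); err != nil {
return "", "", nil, err
}
return hex.EncodeToString(fileH.Sum(nil)), hex.EncodeToString(headH.Sum(nil)), blockList, nil
}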

View File

@ -15,6 +15,8 @@ type Addition struct {
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
AccessToken string
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
UploadAPI string `json:"upload_api" default:"https://d.pcs.baidu.com"`
}
var config = driver.Config{

View File

@ -1,6 +1,7 @@
package baidu_netdisk
import (
"path"
"strconv"
"time"
@ -18,9 +19,7 @@ type File struct {
//Category int `json:"category"`
//RealCategory string `json:"real_category"`
FsId int64 `json:"fs_id"`
ServerMtime int64 `json:"server_mtime"`
//OperId int `json:"oper_id"`
//ServerCtime int `json:"server_ctime"`
Thumbs struct {
//Icon string `json:"icon"`
Url3 string `json:"url3"`
@ -28,29 +27,52 @@ type File struct {
//Url1 string `json:"url1"`
} `json:"thumbs"`
//Wpfile int `json:"wpfile"`
//LocalMtime int `json:"local_mtime"`
Size int64 `json:"size"`
//ExtentTinyint7 int `json:"extent_tinyint7"`
Path string `json:"path"`
//Share int `json:"share"`
//ServerAtime int `json:"server_atime"`
//Pl int `json:"pl"`
//LocalCtime int `json:"local_ctime"`
ServerFilename string `json:"server_filename"`
//Md5 string `json:"md5"`
Md5 string `json:"md5"`
//OwnerId int `json:"owner_id"`
//Unlist int `json:"unlist"`
Isdir int `json:"isdir"`
// list resp
ServerCtime int64 `json:"server_ctime"`
ServerMtime int64 `json:"server_mtime"`
LocalMtime int64 `json:"local_mtime"`
LocalCtime int64 `json:"local_ctime"`
//ServerAtime int64 `json:"server_atime"`
// only create and precreate resp
Ctime int64 `json:"ctime"`
Mtime int64 `json:"mtime"`
}
func fileToObj(f File) *model.ObjThumb {
if f.ServerFilename == "" {
f.ServerFilename = path.Base(f.Path)
}
if f.LocalCtime == 0 {
f.LocalCtime = f.Ctime
}
if f.LocalMtime == 0 {
f.LocalMtime = f.Mtime
}
return &model.ObjThumb{
Object: model.Object{
ID: strconv.FormatInt(f.FsId, 10),
Path: f.Path,
Name: f.ServerFilename,
Size: f.Size,
Modified: time.Unix(f.ServerMtime, 0),
Modified: time.Unix(f.LocalMtime, 0),
Ctime: time.Unix(f.LocalCtime, 0),
IsFolder: f.Isdir == 1,
// the md5 fetched directly here is wrong
// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
}
@ -154,10 +176,15 @@ type DownloadResp2 struct {
}
type PrecreateResp struct {
Path string `json:"path"`
Uploadid string `json:"uploadid"`
ReturnType int `json:"return_type"`
BlockList []int `json:"block_list"`
Errno int `json:"errno"`
RequestId int64 `json:"request_id"`
ReturnType int `json:"return_type"`
// return_type=1
Path string `json:"path"`
Uploadid string `json:"uploadid"`
BlockList []int `json:"block_list"`
// return_type=2
File File `json:"info"`
}

View File

@ -1,25 +1,27 @@
package baidu_netdisk
import (
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
// do others that not defined in Driver interface
func (d *BaiduNetdisk) refreshToken() error {
err := d._refreshToken()
if err != nil && err == errs.EmptyToken {
if err != nil && errors.Is(err, errs.EmptyToken) {
err = d._refreshToken()
}
return err
@ -50,6 +52,8 @@ func (d *BaiduNetdisk) _refreshToken() error {
}
func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
var result []byte
err := retry.Do(func() error {
req := base.RestyClient.R()
req.SetQueryParam("access_token", d.AccessToken)
if callback != nil {
@ -60,20 +64,28 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
}
res, err := req.Execute(method, furl)
if err != nil {
return nil, err
return err
}
log.Debugf("[baidu_netdisk] req: %s, resp: %s", furl, res.String())
errno := utils.Json.Get(res.Body(), "errno").ToInt()
if errno != 0 {
if errno == -6 {
err = d.refreshToken()
if err != nil {
return nil, err
if utils.SliceContains([]int{111, -6}, errno) {
log.Info("refreshing baidu_netdisk token.")
err2 := d.refreshToken()
if err2 != nil {
return retry.Unrecoverable(err2)
}
return d.request(furl, method, callback, resp)
}
return nil, fmt.Errorf("errno: %d, refer to https://pan.baidu.com/union/doc/", errno)
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
}
return res.Body(), nil
result = res.Body()
return nil
},
retry.LastErrorOnly(true),
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
return result, err
}
func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp interface{}) ([]byte, error) {
@ -82,10 +94,10 @@ func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp inter
}, resp)
}
func (d *BaiduNetdisk) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) {
func (d *BaiduNetdisk) postForm(pathname string, params map[string]string, form map[string]string, resp interface{}) ([]byte, error) {
return d.request("https://pan.baidu.com/rest/2.0"+pathname, http.MethodPost, func(req *resty.Request) {
req.SetQueryParams(params)
req.SetBody(data)
req.SetFormData(form)
}, resp)
}
@ -140,6 +152,9 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
//if res.StatusCode() == 302 {
u = res.Header().Get("location")
//}
updateObjMd5(file, "pan.baidu.com", u)
return &model.Link{
URL: u,
Header: http.Header{
@ -162,6 +177,9 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
if err != nil {
return nil, err
}
updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)
return &model.Link{
URL: resp.Info[0].Dlink,
Header: http.Header{
@ -170,32 +188,79 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
}, nil
}
func (d *BaiduNetdisk) manage(opera string, filelist interface{}) ([]byte, error) {
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
params := map[string]string{
"method": "filemanager",
"opera": opera,
}
marshal, err := utils.Json.Marshal(filelist)
if err != nil {
return nil, err
}
data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
return d.post("/xpan/file", params, data, nil)
marshal, _ := utils.Json.MarshalToString(filelist)
return d.postForm("/xpan/file", params, map[string]string{
"async": "0",
"filelist": marshal,
"ondup": "fail",
}, nil)
}
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
params := map[string]string{
"method": "create",
}
data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
if uploadid != "" {
data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
form := map[string]string{
"path": path,
"size": strconv.FormatInt(size, 10),
"isdir": strconv.Itoa(isdir),
"rtype": "3",
}
return d.post("/xpan/file", params, data, nil)
if mtime != 0 && ctime != 0 {
joinTime(form, ctime, mtime)
}
func encodeURIComponent(str string) string {
r := url.QueryEscape(str)
r = strings.ReplaceAll(r, "+", "%20")
return r
if uploadid != "" {
form["uploadid"] = uploadid
}
if block_list != "" {
form["block_list"] = block_list
}
return d.postForm("/xpan/file", params, form, resp)
}
func joinTime(form map[string]string, ctime, mtime int64) {
form["local_mtime"] = strconv.FormatInt(mtime, 10)
form["local_ctime"] = strconv.FormatInt(ctime, 10)
}
func updateObjMd5(obj model.Obj, userAgent, u string) {
object := model.GetRawObject(obj)
if object != nil {
req, _ := http.NewRequest(http.MethodHead, u, nil)
req.Header.Add("User-Agent", userAgent)
resp, _ := base.HttpClient.Do(req)
if resp != nil {
contentMd5 := resp.Header.Get("Content-Md5")
object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
}
}
}
const (
DefaultSliceSize int64 = 4 * utils.MB
VipSliceSize = 16 * utils.MB
SVipSliceSize = 32 * utils.MB
)
func (d *BaiduNetdisk) getSliceSize() int64 {
switch d.vipType {
case 1:
return VipSliceSize
case 2:
return SVipSliceSize
default:
return DefaultSliceSize
}
}
// func encodeURIComponent(str string) string {
// r := url.QueryEscape(str)
// r = strings.ReplaceAll(r, "+", "%20")
// return r
// }
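
A note on the retry wrapper in request above: with github.com/avast/retry-go, an error returned from the closure is retried with backoff, while wrapping it in retry.Unrecoverable (as done after a failed token refresh) aborts the loop immediately. A compact sketch of that pattern, assuming stand-in doCall and refresh functions that are not part of the driver:

package main

import (
"errors"
"fmt"
"time"

"github.com/avast/retry-go"
)

var errAuth = errors.New("auth expired")

// doCall stands in for the HTTP request: it fails once, then succeeds.
func doCall(attempt int) error {
if attempt == 1 {
return errAuth
}
return nil
}

// refresh stands in for the token refresh.
func refresh() error { return nil }

func main() {
attempt := 0
err := retry.Do(func() error {
attempt++
callErr := doCall(attempt)
if errors.Is(callErr, errAuth) {
if rErr := refresh(); rErr != nil {
return retry.Unrecoverable(rErr) // stop retrying immediately
}
}
return callErr // nil ends the loop; other errors are retried with backoff
},
retry.LastErrorOnly(true),
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
fmt.Println("final error:", err)
}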

View File

@ -4,18 +4,22 @@ import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"math"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
)
@ -26,6 +30,8 @@ type BaiduPhoto struct {
AccessToken string
Uk int64
root model.Obj
uploadThread int
}
func (d *BaiduPhoto) Config() driver.Config {
@ -37,6 +43,11 @@ func (d *BaiduPhoto) GetAddition() driver.Additional {
}
func (d *BaiduPhoto) Init(ctx context.Context) error {
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, "3"
}
if err := d.refreshToken(); err != nil {
return err
}
@ -126,7 +137,13 @@ func (d *BaiduPhoto) Link(ctx context.Context, file model.Obj, args model.LinkAr
case *File:
return d.linkFile(ctx, file, args)
case *AlbumFile:
return d.linkAlbum(ctx, file, args)
f, err := d.CopyAlbumFile(ctx, file)
if err != nil {
return nil, err
}
return d.linkFile(ctx, f, args)
// linkAlbum sometimes fails to return a link
//return d.linkAlbum(ctx, file, args)
}
return nil, errs.NotFile
}
@ -169,9 +186,9 @@ func (d *BaiduPhoto) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.
}
func (d *BaiduPhoto) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// only moving between albums is supported
if file, ok := srcObj.(*AlbumFile); ok {
if _, ok := dstDir.(*Album); ok {
switch dstDir.(type) {
case *Album, *Root: // albumfile -> root -> album or albumfile -> root
newObj, err := d.Copy(ctx, srcObj, dstDir)
if err != nil {
return nil, err
@ -205,45 +222,57 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// zero-size files are not supported
if stream.GetSize() == 0 {
return nil, fmt.Errorf("file size cannot be zero")
}
// TODO:
// no rapid-upload method has been found yet;
// the full-file md5 is needed, so the source must support io.Seek
tempFile, err := utils.CreateTempFile(stream.GetReadCloser())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
const DEFAULT int64 = 1 << 22
const SliceSize int64 = 1 << 18
// values needed for the calculations below
const DEFAULT = 1 << 22
const SliceSize = 1 << 18
count := int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
streamSize := stream.GetSize()
count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
lastBlockSize := streamSize % DEFAULT
if lastBlockSize == 0 {
lastBlockSize = DEFAULT
}
// step 1: compute MD5s
sliceMD5List := make([]string, 0, count)
fileMd5 := md5.New()
sliceMd5 := md5.New()
sliceMd52 := md5.New()
slicemd52Write := utils.LimitWriter(sliceMd52, SliceSize)
byteSize := int64(DEFAULT)
fileMd5H := md5.New()
sliceMd5H := md5.New()
sliceMd5H2 := md5.New()
slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
for i := 1; i <= count; i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
_, err := io.CopyN(io.MultiWriter(fileMd5, sliceMd5, slicemd52Write), tempFile, DEFAULT)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
if i == count {
byteSize = lastBlockSize
}
_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
if err != nil && err != io.EOF {
return nil, err
}
sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5.Sum(nil)))
sliceMd5.Reset()
sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
sliceMd5H.Reset()
}
if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
return nil, err
}
content_md5 := hex.EncodeToString(fileMd5.Sum(nil))
slice_md5 := hex.EncodeToString(sliceMd52.Sum(nil))
contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)
// start the upload
// step 2: precreate
params := map[string]string{
"autoinit": "1",
"isdir": "0",
@ -251,13 +280,14 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
"ctype": "11",
"path": fmt.Sprintf("/%s", stream.GetName()),
"size": fmt.Sprint(stream.GetSize()),
"slice-md5": slice_md5,
"content-md5": content_md5,
"block_list": MustString(utils.Json.MarshalToString(sliceMD5List)),
"slice-md5": sliceMd5,
"content-md5": contentMd5,
"block_list": blockListStr,
}
// precreate
var precreateResp PrecreateResp
// try to resume previously saved upload progress
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
r.SetContext(ctx)
r.SetFormData(params)
@ -265,32 +295,54 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return nil, err
}
}
switch precreateResp.ReturnType {
case 1: // upload the file
case 1: // step 3: upload file slices
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT // capture loop vars for the goroutine
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
uploadParams := map[string]string{
"method": "upload",
"path": params["path"],
"partseq": fmt.Sprint(partseq),
"uploadid": precreateResp.UploadID,
}
for i := 0; i < count; i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
}
uploadParams["partseq"] = fmt.Sprint(i)
_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(uploadParams)
r.SetFileReader("file", stream.GetName(), io.LimitReader(tempFile, DEFAULT))
r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
}, nil)
if err != nil {
return err
}
up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
precreateResp.BlockList[i] = -1
return nil
})
}
if err = threadG.Wait(); err != nil {
if errors.Is(err, context.Canceled) {
precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
}
return nil, err
}
up(i * 100 / count)
}
fallthrough
case 2: // create the file
case 2: // step 4: create the file
params["uploadid"] = precreateResp.UploadID
_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
r.SetContext(ctx)
@ -300,7 +352,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
return nil, err
}
fallthrough
case 3: // add to the album
case 3: // step 5: add to the album
rootfile := precreateResp.Data.toFile()
if album, ok := dstDir.(*Album); ok {
return d.AddAlbumFile(ctx, album, rootfile)

View File

@ -64,8 +64,15 @@ func renameAlbum(album *Album, newName string) *Album {
AlbumID: album.AlbumID,
Tid: album.Tid,
JoinTime: album.JoinTime,
CreateTime: album.CreateTime,
CreationTime: album.CreationTime,
Title: newName,
Mtime: time.Now().Unix(),
}
}
func BoolToIntStr(b bool) string {
if b {
return "1"
}
return "0"
}

View File

@ -10,8 +10,10 @@ type Addition struct {
ShowType string `json:"show_type" type:"select" options:"root,root_only_album,root_only_file" default:"root"`
AlbumID string `json:"album_id"`
//AlbumPassword string `json:"album_password"`
DeleteOrigin bool `json:"delete_origin"`
ClientID string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
}
var config = driver.Config{

View File

@ -4,6 +4,8 @@ import (
"fmt"
"time"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/internal/model"
)
@ -51,19 +53,14 @@ type (
Ctime int64 `json:"ctime"` // creation time (s)
Mtime int64 `json:"mtime"` // modification time (s)
Thumburl []string `json:"thumburl"`
parseTime *time.Time
Md5 string `json:"md5"`
}
)
func (c *File) GetSize() int64 { return c.Size }
func (c *File) GetName() string { return getFileName(c.Path) }
func (c *File) ModTime() time.Time {
if c.parseTime == nil {
c.parseTime = toTime(c.Mtime)
}
return *c.parseTime
}
func (c *File) CreateTime() time.Time { return time.Unix(c.Ctime, 0) }
func (c *File) ModTime() time.Time { return time.Unix(c.Mtime, 0) }
func (c *File) IsDir() bool { return false }
func (c *File) GetID() string { return "" }
func (c *File) GetPath() string { return "" }
@ -74,6 +71,10 @@ func (c *File) Thumb() string {
return ""
}
func (c *File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, c.Md5)
}
/* album section */
type (
AlbumListResp struct {
@ -88,7 +89,7 @@ type (
Tid int64 `json:"tid"`
Title string `json:"title"`
JoinTime int64 `json:"join_time"`
CreateTime int64 `json:"create_time"`
CreationTime int64 `json:"create_time"`
Mtime int64 `json:"mtime"`
parseTime *time.Time
@ -109,14 +110,14 @@ type (
}
)
func (a *Album) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (a *Album) GetSize() int64 { return 0 }
func (a *Album) GetName() string { return a.Title }
func (a *Album) ModTime() time.Time {
if a.parseTime == nil {
a.parseTime = toTime(a.Mtime)
}
return *a.parseTime
}
func (a *Album) CreateTime() time.Time { return time.Unix(a.CreationTime, 0) }
func (a *Album) ModTime() time.Time { return time.Unix(a.Mtime, 0) }
func (a *Album) IsDir() bool { return true }
func (a *Album) GetID() string { return "" }
func (a *Album) GetPath() string { return "" }
@ -162,7 +163,7 @@ type (
// returned when the file does not exist yet
Path string `json:"path"`
UploadID string `json:"uploadid"`
Blocklist []int64 `json:"block_list"`
BlockList []int `json:"block_list"`
}
)

View File

@ -21,7 +21,7 @@ const (
FILE_API_URL_V2 = API_URL + "/file/v2"
)
func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
req := base.RestyClient.R().
SetQueryParam("access_token", d.AccessToken)
if callback != nil {
@ -52,9 +52,17 @@ func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallba
default:
return nil, fmt.Errorf("errno: %d, refer to https://photo.baidu.com/union/doc", erron)
}
return res.Body(), nil
return res, nil
}
//func (d *BaiduPhoto) Request(furl string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
// res, err := d.request(furl, method, callback, resp)
// if err != nil {
// return nil, err
// }
// return res.Body(), nil
//}
func (d *BaiduPhoto) refreshToken() error {
u := "https://openapi.baidu.com/oauth/2.0/token"
var resp base.TokenResp
@ -79,11 +87,11 @@ func (d *BaiduPhoto) refreshToken() error {
return nil
}
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *BaiduPhoto) Get(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
return d.Request(furl, http.MethodGet, callback, resp)
}
func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
func (d *BaiduPhoto) Post(furl string, callback base.ReqCallback, resp interface{}) (*resty.Response, error) {
return d.Request(furl, http.MethodPost, callback, resp)
}
@ -223,7 +231,7 @@ func (d *BaiduPhoto) DeleteAlbum(ctx context.Context, album *Album) error {
r.SetFormData(map[string]string{
"album_id": album.AlbumID,
"tid": fmt.Sprint(album.Tid),
"delete_origin_image": "0", // 是否删除原图 0 不删除 1 删除
"delete_origin_image": BoolToIntStr(d.DeleteOrigin), // 是否删除原图 0 不删除 1 删除
})
}, nil)
return err
@ -237,7 +245,7 @@ func (d *BaiduPhoto) DeleteAlbumFile(ctx context.Context, file *AlbumFile) error
"album_id": fmt.Sprint(file.AlbumID),
"tid": fmt.Sprint(file.Tid),
"list": fmt.Sprintf(`[{"fsid":%d,"uk":%d}]`, file.Fsid, file.Uk),
"del_origin": "0", // 是否删除原图 0 不删除 1 删除
"del_origin": BoolToIntStr(d.DeleteOrigin), // 是否删除原图 0 不删除 1 删除
})
}, nil)
return err
@ -391,6 +399,49 @@ func (d *BaiduPhoto) linkFile(ctx context.Context, file *File, args model.LinkAr
return link, nil
}
/*func (d *BaiduPhoto) linkStreamAlbum(ctx context.Context, file *AlbumFile) (*model.Link, error) {
return &model.Link{
Header: http.Header{},
Writer: func(w io.Writer) error {
res, err := d.Get(ALBUM_API_URL+"/streaming", func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"fsid": fmt.Sprint(file.Fsid),
"album_id": file.AlbumID,
"tid": fmt.Sprint(file.Tid),
"uk": fmt.Sprint(file.Uk),
}).SetDoNotParseResponse(true)
}, nil)
if err != nil {
return err
}
defer res.RawBody().Close()
_, err = io.Copy(w, res.RawBody())
return err
},
}, nil
}*/
/*func (d *BaiduPhoto) linkStream(ctx context.Context, file *File) (*model.Link, error) {
return &model.Link{
Header: http.Header{},
Writer: func(w io.Writer) error {
res, err := d.Get(FILE_API_URL_V1+"/streaming", func(r *resty.Request) {
r.SetContext(ctx)
r.SetQueryParams(map[string]string{
"fsid": fmt.Sprint(file.Fsid),
}).SetDoNotParseResponse(true)
}, nil)
if err != nil {
return err
}
defer res.RawBody().Close()
_, err = io.Copy(w, res.RawBody())
return err
},
}, nil
}*/
// fetch uk
func (d *BaiduPhoto) uInfo() (*UInfo, error) {
var info UInfo

drivers/base/upload.go Normal file (31 lines)
View File

@ -0,0 +1,31 @@
package base
import (
"fmt"
"strings"
"time"
"github.com/Xhofe/go-cache"
"github.com/alist-org/alist/v3/internal/driver"
)
// storage upload progress, for upload recovery
var UploadStateCache = cache.NewMemCache(cache.WithShards[any](32))
// Save upload progress for 20 minutes
func SaveUploadProgress(driver driver.Driver, state any, keys ...string) bool {
return UploadStateCache.Set(
fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")),
state,
cache.WithEx[any](time.Minute*20))
}
// An upload progress can only be made by one process alone,
// so here you need to get it and then delete it.
func GetUploadProgress[T any](driver driver.Driver, keys ...string) (state T, ok bool) {
v, ok := UploadStateCache.GetDel(fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")))
if ok {
state, ok = v.(T)
}
return
}
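
The get-then-delete semantics above are what make resumption single-consumer. A self-contained sketch of the same semantics with a plain map; the real cache is Xhofe/go-cache with a 20-minute TTL, and progressCache here is purely illustrative:

package main

import (
"fmt"
"sync"
)

// progressCache mimics the Save/GetDel semantics: whoever reads the
// saved state also removes it, so only one resuming process wins.
type progressCache struct {
mu sync.Mutex
m  map[string]any
}

func (c *progressCache) Save(key string, state any) {
c.mu.Lock()
defer c.mu.Unlock()
c.m[key] = state // the real cache also attaches a 20-minute TTL
}

func (c *progressCache) GetDel(key string) (any, bool) {
c.mu.Lock()
defer c.mu.Unlock()
v, ok := c.m[key]
if ok {
delete(c.m, key)
}
return v, ok
}

func main() {
c := &progressCache{m: map[string]any{}}
c.Save("BaiduNetdisk-upload-token-md5", []int{3, 4}) // remaining block indexes
if v, ok := c.GetDel("BaiduNetdisk-upload-token-md5"); ok {
fmt.Println("resuming blocks:", v)
}
_, ok := c.GetDel("BaiduNetdisk-upload-token-md5")
fmt.Println("second reader sees it:", ok) // false
}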

View File

@ -1,30 +1 @@
package base
import (
"io"
"net/http"
"strconv"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
)
func HandleRange(link *model.Link, file io.ReadSeekCloser, header http.Header, size int64) {
if header.Get("Range") != "" {
r, err := http_range.ParseRange(header.Get("Range"), size)
if err == nil && len(r) > 0 {
_, err := file.Seek(r[0].Start, io.SeekStart)
if err == nil {
link.Data = utils.NewLimitReadCloser(file, func() error {
return file.Close()
}, r[0].Length)
link.Status = http.StatusPartialContent
link.Header = http.Header{
"Content-Range": []string{r[0].ContentRange(size)},
"Content-Length": []string{strconv.FormatInt(r[0].Length, 10)},
}
}
}
}
}

drivers/chaoxing/driver.go Normal file (297 lines)
View File

@ -0,0 +1,297 @@
package chaoxing
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"mime/multipart"
"net/http"
"net/url"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
"google.golang.org/appengine/log"
)
type ChaoXing struct {
model.Storage
Addition
cron *cron.Cron
config driver.Config
conf Conf
}
func (d *ChaoXing) Config() driver.Config {
return d.config
}
func (d *ChaoXing) GetAddition() driver.Additional {
return &d.Addition
}
func (d *ChaoXing) refreshCookie() error {
cookie, err := d.Login()
if err != nil {
d.Status = err.Error()
op.MustSaveDriverStorage(d)
return nil
}
d.Addition.Cookie = cookie
op.MustSaveDriverStorage(d)
return nil
}
func (d *ChaoXing) Init(ctx context.Context) error {
err := d.refreshCookie()
if err != nil {
log.Error(err)
}
d.cron = cron.NewCron(time.Hour * 12)
d.cron.Do(func() {
err = d.refreshCookie()
if err != nil {
log.Error(err)
}
})
return nil
}
func (d *ChaoXing) Drop(ctx context.Context) error {
d.cron.Stop()
return nil
}
func (d *ChaoXing) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
files, err := d.GetFiles(dir.GetID())
if err != nil {
return nil, err
}
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
return fileToObj(src), nil
})
}
func (d *ChaoXing) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
var resp DownResp
ua := d.conf.ua
fileId := strings.Split(file.GetID(), "$")[1]
_, err := d.requestDownload("/screen/note_note/files/status/"+fileId, http.MethodPost, func(req *resty.Request) {
req.SetHeader("User-Agent", ua)
}, &resp)
if err != nil {
return nil, err
}
u := resp.Download
return &model.Link{
URL: u,
Header: http.Header{
"Cookie": []string{d.Cookie},
"Referer": []string{d.conf.referer},
"User-Agent": []string{ua},
},
Concurrency: 2,
PartSize: 10 * utils.MB,
}, nil
}
func (d *ChaoXing) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
query := map[string]string{
"bbsid": d.Addition.Bbsid,
"name": dirName,
"pid": parentDir.GetID(),
}
var resp ListFileResp
_, err := d.request("/pc/resource/addResourceFolder", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return err
}
if resp.Result != 1 {
msg := fmt.Sprintf("error:%s", resp.Msg)
return errors.New(msg)
}
return nil
}
func (d *ChaoXing) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
query := map[string]string{
"bbsid": d.Addition.Bbsid,
"folderIds": srcObj.GetID(),
"targetId": dstDir.GetID(),
}
if !srcObj.IsDir() {
query = map[string]string{
"bbsid": d.Addition.Bbsid,
"recIds": strings.Split(srcObj.GetID(), "$")[0],
"targetId": dstDir.GetID(),
}
}
var resp ListFileResp
_, err := d.request("/pc/resource/moveResource", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return err
}
if !resp.Status {
msg := fmt.Sprintf("error:%s", resp.Msg)
return errors.New(msg)
}
return nil
}
func (d *ChaoXing) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
query := map[string]string{
"bbsid": d.Addition.Bbsid,
"folderId": srcObj.GetID(),
"name": newName,
}
path := "/pc/resource/updateResourceFolderName"
if !srcObj.IsDir() {
// path = "/pc/resource/updateResourceFileName"
// query = map[string]string{
// "bbsid": d.Addition.Bbsid,
// "recIds": strings.Split(srcObj.GetID(), "$")[0],
// "name": newName,
// }
return errors.New("此网盘不支持修改文件名")
}
var resp ListFileResp
_, err := d.request(path, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return err
}
if resp.Result != 1 {
msg := fmt.Sprintf("error:%s", resp.Msg)
return errors.New(msg)
}
return nil
}
func (d *ChaoXing) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// TODO copy obj, optional
return errs.NotImplement
}
func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
query := map[string]string{
"bbsid": d.Addition.Bbsid,
"folderIds": obj.GetID(),
}
path := "/pc/resource/deleteResourceFolder"
var resp ListFileResp
if !obj.IsDir() {
path = "/pc/resource/deleteResourceFile"
query = map[string]string{
"bbsid": d.Addition.Bbsid,
"recIds": strings.Split(obj.GetID(), "$")[0],
}
}
_, err := d.request(path, http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return err
}
if resp.Result != 1 {
msg := fmt.Sprintf("error:%s", resp.Msg)
return errors.New(msg)
}
return nil
}
func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
var resp UploadDataRsp
_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
}, &resp)
if err != nil {
return err
}
if resp.Result != 1 {
return errors.New("get upload data error")
}
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
filePart, err := writer.CreateFormFile("file", stream.GetName())
if err != nil {
return err
}
_, err = io.Copy(filePart, stream)
if err != nil {
return err
}
err = writer.WriteField("_token", resp.Msg.Token)
if err != nil {
return err
}
err = writer.WriteField("puid", fmt.Sprintf("%d", resp.Msg.Puid))
if err != nil {
fmt.Println("Error writing param2 to request body:", err)
return err
}
err = writer.Close()
if err != nil {
return err
}
req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body)
if err != nil {
return err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
resps, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resps.Body.Close()
bodys, err := io.ReadAll(resps.Body)
if err != nil {
return err
}
var fileRsp UploadFileDataRsp
err = json.Unmarshal(bodys, &fileRsp)
if err != nil {
return err
}
if fileRsp.Msg != "success" {
return errors.New(fileRsp.Msg)
}
uploadDoneParam := UploadDoneParam{Key: fileRsp.ObjectID, Cataid: "100000019", Param: fileRsp.Data}
params, err := json.Marshal(uploadDoneParam)
if err != nil {
return err
}
query := map[string]string{
"bbsid": d.Addition.Bbsid,
"pid": dstDir.GetID(),
"type": "yunpan",
"params": url.QueryEscape("[" + string(params) + "]"),
}
var respd ListFileResp
_, err = d.request("/pc/resource/addResource", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &respd)
if err != nil {
return err
}
if respd.Result != 1 {
msg := fmt.Sprintf("error:%v", resp.Msg)
return errors.New(msg)
}
return nil
}
var _ driver.Driver = (*ChaoXing)(nil)

drivers/chaoxing/meta.go Normal file (47 lines)
View File

@ -0,0 +1,47 @@
package chaoxing
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
// This driver mounts the ChaoXing group drive; it must be used behind a proxy.
// After logging in to ChaoXing, open your personal space, go to groups, create a new group, and click into it.
// The bbsid parameter is in that URL. The system limits single files to 2G; there is no total capacity limit.
type Addition struct {
// ChaoXing username and password
UserName string `json:"user_name" required:"true"`
Password string `json:"password" required:"true"`
// taken from the URL of the group you created
Bbsid string `json:"bbsid" required:"true"`
driver.RootID
// optional; the driver logs in and fetches it automatically
Cookie string `json:"cookie"`
}
type Conf struct {
ua string
referer string
api string
DowloadApi string
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &ChaoXing{
config: driver.Config{
Name: "ChaoXingGroupDrive",
OnlyProxy: true,
OnlyLocal: false,
DefaultRoot: "-1",
NoOverwriteUpload: true,
},
conf: Conf{
ua: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) quark-cloud-drive/2.5.20 Chrome/100.0.4896.160 Electron/18.3.5.4-b478491100 Safari/537.36 Channel/pckk_other_ch",
referer: "https://chaoxing.com/",
api: "https://groupweb.chaoxing.com",
DowloadApi: "https://noteyd.chaoxing.com",
},
}
})
}
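
Since bbsid has to be copied out of the group page URL by hand, here is a small illustrative helper; the exact URL shape shown is an assumption, and the driver itself only needs the resulting value:

package main

import (
"fmt"
"net/url"
)

// extractBbsid pulls the bbsid query parameter out of a group URL,
// e.g. https://groupweb.chaoxing.com/pc/group?bbsid=123456 (hypothetical shape).
func extractBbsid(raw string) (string, error) {
u, err := url.Parse(raw)
if err != nil {
return "", err
}
id := u.Query().Get("bbsid")
if id == "" {
return "", fmt.Errorf("no bbsid parameter in %q", raw)
}
return id, nil
}

func main() {
id, err := extractBbsid("https://groupweb.chaoxing.com/pc/group?bbsid=123456")
fmt.Println(id, err) // 123456 <nil>
}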

drivers/chaoxing/types.go Normal file (263 lines)
View File

@ -0,0 +1,263 @@
package chaoxing
import (
"fmt"
"time"
"github.com/alist-org/alist/v3/internal/model"
)
type Resp struct {
Result int `json:"result"`
}
type UserAuth struct {
GroupAuth struct {
AddData int `json:"addData"`
AddDataFolder int `json:"addDataFolder"`
AddLebel int `json:"addLebel"`
AddManager int `json:"addManager"`
AddMem int `json:"addMem"`
AddTopicFolder int `json:"addTopicFolder"`
AnonymousAddReply int `json:"anonymousAddReply"`
AnonymousAddTopic int `json:"anonymousAddTopic"`
BatchOperation int `json:"batchOperation"`
DelData int `json:"delData"`
DelDataFolder int `json:"delDataFolder"`
DelMem int `json:"delMem"`
DelTopicFolder int `json:"delTopicFolder"`
Dismiss int `json:"dismiss"`
ExamEnc string `json:"examEnc"`
GroupChat int `json:"groupChat"`
IsShowCircleChatButton int `json:"isShowCircleChatButton"`
IsShowCircleCloudButton int `json:"isShowCircleCloudButton"`
IsShowCompanyButton int `json:"isShowCompanyButton"`
Join int `json:"join"`
MemberShowRankSet int `json:"memberShowRankSet"`
ModifyDataFolder int `json:"modifyDataFolder"`
ModifyExpose int `json:"modifyExpose"`
ModifyName int `json:"modifyName"`
ModifyShowPic int `json:"modifyShowPic"`
ModifyTopicFolder int `json:"modifyTopicFolder"`
ModifyVisibleState int `json:"modifyVisibleState"`
OnlyMgrScoreSet int `json:"onlyMgrScoreSet"`
Quit int `json:"quit"`
SendNotice int `json:"sendNotice"`
ShowActivityManage int `json:"showActivityManage"`
ShowActivitySet int `json:"showActivitySet"`
ShowAttentionSet int `json:"showAttentionSet"`
ShowAutoClearStatus int `json:"showAutoClearStatus"`
ShowBarcode int `json:"showBarcode"`
ShowChatRoomSet int `json:"showChatRoomSet"`
ShowCircleActivitySet int `json:"showCircleActivitySet"`
ShowCircleSet int `json:"showCircleSet"`
ShowCmem int `json:"showCmem"`
ShowDataFolder int `json:"showDataFolder"`
ShowDelReason int `json:"showDelReason"`
ShowForward int `json:"showForward"`
ShowGroupChat int `json:"showGroupChat"`
ShowGroupChatSet int `json:"showGroupChatSet"`
ShowGroupSquareSet int `json:"showGroupSquareSet"`
ShowLockAddSet int `json:"showLockAddSet"`
ShowManager int `json:"showManager"`
ShowManagerIdentitySet int `json:"showManagerIdentitySet"`
ShowNeedDelReasonSet int `json:"showNeedDelReasonSet"`
ShowNotice int `json:"showNotice"`
ShowOnlyManagerReplySet int `json:"showOnlyManagerReplySet"`
ShowRank int `json:"showRank"`
ShowRank2 int `json:"showRank2"`
ShowRecycleBin int `json:"showRecycleBin"`
ShowReplyByClass int `json:"showReplyByClass"`
ShowReplyNeedCheck int `json:"showReplyNeedCheck"`
ShowSignbanSet int `json:"showSignbanSet"`
ShowSpeechSet int `json:"showSpeechSet"`
ShowTopicCheck int `json:"showTopicCheck"`
ShowTopicNeedCheck int `json:"showTopicNeedCheck"`
ShowTransferSet int `json:"showTransferSet"`
} `json:"groupAuth"`
OperationAuth struct {
Add int `json:"add"`
AddTopicToFolder int `json:"addTopicToFolder"`
ChoiceSet int `json:"choiceSet"`
DelTopicFromFolder int `json:"delTopicFromFolder"`
Delete int `json:"delete"`
Reply int `json:"reply"`
ScoreSet int `json:"scoreSet"`
TopSet int `json:"topSet"`
Update int `json:"update"`
} `json:"operationAuth"`
}
type File struct {
Cataid int `json:"cataid"`
Cfid int `json:"cfid"`
Content struct {
Cfid int `json:"cfid"`
Pid int `json:"pid"`
FolderName string `json:"folderName"`
ShareType int `json:"shareType"`
Preview string `json:"preview"`
Filetype string `json:"filetype"`
PreviewURL string `json:"previewUrl"`
IsImg bool `json:"isImg"`
ParentPath string `json:"parentPath"`
Icon string `json:"icon"`
Suffix string `json:"suffix"`
Duration int `json:"duration"`
Pantype string `json:"pantype"`
Puid int `json:"puid"`
Filepath string `json:"filepath"`
Crc string `json:"crc"`
Isfile bool `json:"isfile"`
Residstr string `json:"residstr"`
ObjectID string `json:"objectId"`
Extinfo string `json:"extinfo"`
Thumbnail string `json:"thumbnail"`
Creator int `json:"creator"`
ResTypeValue int `json:"resTypeValue"`
UploadDateFormat string `json:"uploadDateFormat"`
DisableOpt bool `json:"disableOpt"`
DownPath string `json:"downPath"`
Sort int `json:"sort"`
Topsort int `json:"topsort"`
Restype string `json:"restype"`
Size int `json:"size"`
UploadDate string `json:"uploadDate"`
FileSize string `json:"fileSize"`
Name string `json:"name"`
FileID string `json:"fileId"`
} `json:"content"`
CreatorID int `json:"creatorId"`
DesID string `json:"des_id"`
ID int `json:"id"`
Inserttime int64 `json:"inserttime"`
Key string `json:"key"`
Norder int `json:"norder"`
OwnerID int `json:"ownerId"`
OwnerType int `json:"ownerType"`
Path string `json:"path"`
Rid int `json:"rid"`
Status int `json:"status"`
Topsign int `json:"topsign"`
}
type ListFileResp struct {
Msg string `json:"msg"`
Result int `json:"result"`
Status bool `json:"status"`
UserAuth UserAuth `json:"userAuth"`
List []File `json:"list"`
}
type DownResp struct {
Msg string `json:"msg"`
Duration int `json:"duration"`
Download string `json:"download"`
FileStatus string `json:"fileStatus"`
URL string `json:"url"`
Status bool `json:"status"`
}
type UploadDataRsp struct {
Result int `json:"result"`
Msg struct {
Puid int `json:"puid"`
Token string `json:"token"`
} `json:"msg"`
}
type UploadFileDataRsp struct {
Result bool `json:"result"`
Msg string `json:"msg"`
Crc string `json:"crc"`
ObjectID string `json:"objectId"`
Resid int64 `json:"resid"`
Puid int `json:"puid"`
Data struct {
DisableOpt bool `json:"disableOpt"`
Resid int64 `json:"resid"`
Crc string `json:"crc"`
Puid int `json:"puid"`
Isfile bool `json:"isfile"`
Pantype string `json:"pantype"`
Size int `json:"size"`
Name string `json:"name"`
ObjectID string `json:"objectId"`
Restype string `json:"restype"`
UploadDate time.Time `json:"uploadDate"`
ModifyDate time.Time `json:"modifyDate"`
UploadDateFormat string `json:"uploadDateFormat"`
Residstr string `json:"residstr"`
Suffix string `json:"suffix"`
Preview string `json:"preview"`
Thumbnail string `json:"thumbnail"`
Creator int `json:"creator"`
Duration int `json:"duration"`
IsImg bool `json:"isImg"`
PreviewURL string `json:"previewUrl"`
Filetype string `json:"filetype"`
Filepath string `json:"filepath"`
Sort int `json:"sort"`
Topsort int `json:"topsort"`
ResTypeValue int `json:"resTypeValue"`
Extinfo string `json:"extinfo"`
} `json:"data"`
}
type UploadDoneParam struct {
Cataid string `json:"cataid"`
Key string `json:"key"`
Param struct {
DisableOpt bool `json:"disableOpt"`
Resid int64 `json:"resid"`
Crc string `json:"crc"`
Puid int `json:"puid"`
Isfile bool `json:"isfile"`
Pantype string `json:"pantype"`
Size int `json:"size"`
Name string `json:"name"`
ObjectID string `json:"objectId"`
Restype string `json:"restype"`
UploadDate time.Time `json:"uploadDate"`
ModifyDate time.Time `json:"modifyDate"`
UploadDateFormat string `json:"uploadDateFormat"`
Residstr string `json:"residstr"`
Suffix string `json:"suffix"`
Preview string `json:"preview"`
Thumbnail string `json:"thumbnail"`
Creator int `json:"creator"`
Duration int `json:"duration"`
IsImg bool `json:"isImg"`
PreviewURL string `json:"previewUrl"`
Filetype string `json:"filetype"`
Filepath string `json:"filepath"`
Sort int `json:"sort"`
Topsort int `json:"topsort"`
ResTypeValue int `json:"resTypeValue"`
Extinfo string `json:"extinfo"`
} `json:"param"`
}
func fileToObj(f File) *model.Object {
if len(f.Content.FolderName) > 0 {
return &model.Object{
ID: fmt.Sprintf("%d", f.ID),
Name: f.Content.FolderName,
Size: 0,
Modified: time.UnixMilli(f.Inserttime),
IsFolder: true,
}
}
parsedTime, err := time.Parse("2006-01-02 15:04", f.Content.UploadDate)
if err != nil {
parsedTime = time.Now()
}
return &model.Object{
ID: fmt.Sprintf("%d$%s", f.ID, f.Content.FileID),
Name: f.Content.Name,
Size: int64(f.Content.Size),
Modified: parsedTime,
IsFolder: false,
}
}
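
Note the composite object ID built in fileToObj: the forum record ID and the pan file ID are joined with "$", and Link, Move, and Remove above split it back apart. A tiny sketch of the convention, with made-up values:

package main

import (
"fmt"
"strings"
)

func main() {
id := fmt.Sprintf("%d$%s", 42, "objid-abc") // as built in fileToObj
parts := strings.Split(id, "$")
recID, fileID := parts[0], parts[1] // recIds for move/delete, file id for download
fmt.Println(recID, fileID) // 42 objid-abc
}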

drivers/chaoxing/util.go Normal file (179 lines)
View File

@ -0,0 +1,179 @@
package chaoxing
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"errors"
"fmt"
"mime/multipart"
"net/http"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/go-resty/resty/v2"
)
func (d *ChaoXing) requestDownload(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
u := d.conf.DowloadApi + pathname
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Cookie": d.Cookie,
"Accept": "application/json, text/plain, */*",
"Referer": d.conf.referer,
})
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
var e Resp
req.SetError(&e)
res, err := req.Execute(method, u)
if err != nil {
return nil, err
}
return res.Body(), nil
}
func (d *ChaoXing) request(pathname string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
u := d.conf.api + pathname
if strings.Contains(pathname, "getUploadConfig") {
u = pathname
}
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Cookie": d.Cookie,
"Accept": "application/json, text/plain, */*",
"Referer": d.conf.referer,
})
if callback != nil {
callback(req)
}
if resp != nil {
req.SetResult(resp)
}
var e Resp
req.SetError(&e)
res, err := req.Execute(method, u)
if err != nil {
return nil, err
}
return res.Body(), nil
}
func (d *ChaoXing) GetFiles(parent string) ([]File, error) {
files := make([]File, 0)
query := map[string]string{
"bbsid": d.Addition.Bbsid,
"folderId": parent,
"recType": "1",
}
var resp ListFileResp
_, err := d.request("/pc/resource/getResourceList", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(query)
}, &resp)
if err != nil {
return nil, err
}
if resp.Result != 1 {
msg := fmt.Sprintf("error code is: %d", resp.Result)
return nil, errors.New(msg)
}
if len(resp.List) > 0 {
files = append(files, resp.List...)
}
querys := map[string]string{
"bbsid": d.Addition.Bbsid,
"folderId": parent,
"recType": "2",
}
var resps ListFileResp
_, err = d.request("/pc/resource/getResourceList", http.MethodGet, func(req *resty.Request) {
req.SetQueryParams(querys)
}, &resps)
if err != nil {
return nil, err
}
if len(resps.List) > 0 {
files = append(files, resps.List...)
}
return files, nil
}
func EncryptByAES(message, key string) (string, error) {
aesKey := []byte(key)
plainText := []byte(message)
block, err := aes.NewCipher(aesKey)
if err != nil {
return "", err
}
iv := aesKey[:aes.BlockSize] // the key doubles as the IV
mode := cipher.NewCBCEncrypter(block, iv)
// PKCS#7 padding
padding := aes.BlockSize - len(plainText)%aes.BlockSize
paddedText := append(plainText, bytes.Repeat([]byte{byte(padding)}, padding)...)
ciphertext := make([]byte, len(paddedText))
mode.CryptBlocks(ciphertext, paddedText)
encrypted := base64.StdEncoding.EncodeToString(ciphertext)
return encrypted, nil
}
func CookiesToString(cookies []*http.Cookie) string {
var cookieStr string
for _, cookie := range cookies {
cookieStr += cookie.Name + "=" + cookie.Value + "; "
}
if len(cookieStr) > 2 {
cookieStr = cookieStr[:len(cookieStr)-2]
}
return cookieStr
}
func (d *ChaoXing) Login() (string, error) {
transferKey := "u2oh6Vu^HWe4_AES"
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
uname, err := EncryptByAES(d.Addition.UserName, transferKey)
if err != nil {
return "", err
}
password, err := EncryptByAES(d.Addition.Password, transferKey)
if err != nil {
return "", err
}
err = writer.WriteField("uname", uname)
if err != nil {
return "", err
}
err = writer.WriteField("password", password)
if err != nil {
return "", err
}
err = writer.WriteField("t", "true")
if err != nil {
return "", err
}
err = writer.Close()
if err != nil {
return "", err
}
// Create the request
req, err := http.NewRequest("POST", "https://passport2.chaoxing.com/fanyalogin", body)
if err != nil {
return "", err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
req.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
return CookiesToString(resp.Cookies()), nil
}
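
EncryptByAES above is AES-128-CBC with PKCS#7 padding and the 16-byte key reused as the IV, which is what the fanyalogin endpoint appears to expect. A usage sketch, assuming it sits in the same package; the credential here is made up:

enc, err := EncryptByAES("example-password", "u2oh6Vu^HWe4_AES")
if err != nil {
panic(err)
}
fmt.Println(enc) // base64 ciphertext, sent as the uname/password form fields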

View File

@ -49,7 +49,19 @@ func (d *Cloudreve) List(ctx context.Context, dir model.Obj, args model.ListArgs
}
return utils.SliceConvert(r.Objects, func(src Object) (model.Obj, error) {
return objectToObj(src), nil
thumb, err := d.GetThumb(src)
if err != nil {
return nil, err
}
if src.Type == "dir" && d.EnableThumbAndFolderSize {
var dprop DirectoryProp
err = d.request(http.MethodGet, "/object/property/"+src.Id+"?is_folder=true", nil, &dprop)
if err != nil {
return nil, err
}
src.Size = dprop.Size
}
return objectToObj(src, thumb), nil
})
}
@ -115,7 +127,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
if stream.GetReadCloser() == http.NoBody {
if io.ReadCloser(stream) == http.NoBody {
return d.create(ctx, dstDir, stream)
}
var r DirectoryResp

View File

@ -13,6 +13,8 @@ type Addition struct {
Username string `json:"username"`
Password string `json:"password"`
Cookie string `json:"cookie"`
CustomUA string `json:"custom_ua"`
EnableThumbAndFolderSize bool `json:"enable_thumb_and_folder_size"`
}
var config = driver.Config{

View File

@ -44,13 +44,20 @@ type Object struct {
SourceEnabled bool `json:"source_enabled"`
}
func objectToObj(f Object) *model.Object {
return &model.Object{
type DirectoryProp struct {
Size int `json:"size"`
}
func objectToObj(f Object, t model.Thumbnail) *model.ObjThumb {
return &model.ObjThumb{
Object: model.Object{
ID: f.Id,
Name: f.Name,
Size: int64(f.Size),
Modified: f.Date,
IsFolder: f.Type == "dir",
},
Thumbnail: t,
}
}

View File

@ -22,15 +22,18 @@ const loginPath = "/user/session"
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
u := d.Address + "/api/v3" + path
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "application/json, text/plain, */*",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
"User-Agent": ua,
})
var r Resp
req.SetResult(&r)
if callback != nil {
@ -146,3 +149,26 @@ func convertSrc(obj model.Obj) map[string]interface{} {
m["items"] = items
return m
}
func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
if !d.Addition.EnableThumbAndFolderSize {
return model.Thumbnail{}, nil
}
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.NoRedirectClient.R()
req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"User-Agent": ua,
})
resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
if err != nil {
return model.Thumbnail{}, err
}
return model.Thumbnail{
Thumbnail: resp.Header().Get("Location"),
}, nil
}
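
GetThumb relies on a client that does not follow redirects, so the thumbnail URL can be read from the Location header of the 30x response. The same trick with only the standard library, as a hedged sketch; the URL and session value are placeholders:

package main

import (
"fmt"
"net/http"
)

func thumbLocation(u, cookie string) (string, error) {
client := &http.Client{
// keep the 30x response instead of following it
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
return "", err
}
req.Header.Set("Cookie", "cloudreve-session="+cookie)
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
return resp.Header.Get("Location"), nil
}

func main() {
loc, err := thumbLocation("https://example.com/api/v3/file/thumb/abc", "placeholder-session")
fmt.Println(loc, err)
}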

drivers/crypt/driver.go Normal file (408 lines)
View File

@ -0,0 +1,408 @@
package crypt
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
stdpath "path"
"regexp"
"strings"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
rcCrypt "github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
log "github.com/sirupsen/logrus"
)
type Crypt struct {
model.Storage
Addition
cipher *rcCrypt.Cipher
remoteStorage driver.Driver
}
const obfuscatedPrefix = "___Obfuscated___"
func (d *Crypt) Config() driver.Config {
return config
}
func (d *Crypt) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Crypt) Init(ctx context.Context) error {
// obfuscate credentials if they were updated or just created
err := d.updateObfusParm(&d.Password)
if err != nil {
return fmt.Errorf("failed to obfuscate password: %w", err)
}
err = d.updateObfusParm(&d.Salt)
if err != nil {
return fmt.Errorf("failed to obfuscate salt: %w", err)
}
isCryptExt := regexp.MustCompile(`^[.][A-Za-z0-9-_]{2,}$`).MatchString
if !isCryptExt(d.EncryptedSuffix) {
return fmt.Errorf("EncryptedSuffix is Illegal")
}
d.FileNameEncoding = utils.GetNoneEmpty(d.FileNameEncoding, "base64")
d.EncryptedSuffix = utils.GetNoneEmpty(d.EncryptedSuffix, ".bin")
op.MustSaveDriverStorage(d)
//need remote storage exist
storage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{})
if err != nil {
return fmt.Errorf("can't find remote storage: %w", err)
}
d.remoteStorage = storage
p, _ := strings.CutPrefix(d.Password, obfuscatedPrefix)
p2, _ := strings.CutPrefix(d.Salt, obfuscatedPrefix)
config := configmap.Simple{
"password": p,
"password2": p2,
"filename_encryption": d.FileNameEnc,
"directory_name_encryption": d.DirNameEnc,
"filename_encoding": d.FileNameEncoding,
"suffix": d.EncryptedSuffix,
"pass_bad_blocks": "",
}
c, err := rcCrypt.NewCipher(config)
if err != nil {
return fmt.Errorf("failed to create Cipher: %w", err)
}
d.cipher = c
return nil
}
func (d *Crypt) updateObfusParm(str *string) error {
temp := *str
if !strings.HasPrefix(temp, obfuscatedPrefix) {
temp, err := obscure.Obscure(temp)
if err != nil {
return err
}
temp = obfuscatedPrefix + temp
*str = temp
}
return nil
}
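
updateObfusParm encodes a credential at most once: the `___Obfuscated___` prefix marks values that have already been through rclone's obscure encoding, so repeated Init calls don't double-encode them. A self-contained sketch of that idempotency check (same prefix constant, using the obscure package imported above):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/rclone/rclone/fs/config/obscure"
)

const obfuscatedPrefix = "___Obfuscated___"

// obfuscateOnce mirrors updateObfusParm: values already carrying the
// marker prefix are returned untouched; everything else is obscured
// and tagged so a second Init round-trips to the same stored value.
func obfuscateOnce(v string) (string, error) {
	if strings.HasPrefix(v, obfuscatedPrefix) {
		return v, nil
	}
	enc, err := obscure.Obscure(v)
	if err != nil {
		return "", err
	}
	return obfuscatedPrefix + enc, nil
}

func main() {
	once, _ := obfuscateOnce("hunter2")
	twice, _ := obfuscateOnce(once) // no double encoding
	fmt.Println(once == twice)      // true
}
```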
func (d *Crypt) Drop(ctx context.Context) error {
return nil
}
func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true})
// the obj must implement the model.SetPath interface
if err != nil {
return nil, err
}
var result []model.Obj
for _, obj := range objs {
if obj.IsDir() {
name, err := d.cipher.DecryptDirName(obj.GetName())
if err != nil {
// filter out invalid files
continue
}
if !d.ShowHidden && strings.HasPrefix(name, ".") {
continue
}
objRes := model.Object{
Name: name,
Size: 0,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
Ctime: obj.CreateTime(),
// discarding hash as it's encrypted
}
result = append(result, &objRes)
} else {
thumb, ok := model.GetThumb(obj)
size, err := d.cipher.DecryptedSize(obj.GetSize())
if err != nil {
// filter out invalid files
continue
}
name, err := d.cipher.DecryptFileName(obj.GetName())
if err != nil {
// filter out invalid files
continue
}
if !d.ShowHidden && strings.HasPrefix(name, ".") {
continue
}
objRes := model.Object{
Name: name,
Size: size,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
Ctime: obj.CreateTime(),
// discarding hash as it's encrypted
}
if d.Thumbnail && thumb == "" {
thumb = utils.EncodePath(common.GetApiUrl(nil) + stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
}
if !ok && !d.Thumbnail {
result = append(result, &objRes)
} else {
objWithThumb := model.ObjThumb{
Object: objRes,
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
},
}
result = append(result, &objWithThumb)
}
}
}
return result, nil
}
func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) {
if utils.PathEqual(path, "/") {
return &model.Object{
Name: "Root",
IsFolder: true,
Path: "/",
}, nil
}
remoteFullPath := ""
var remoteObj model.Obj
var err, err2 error
firstTryIsFolder, secondTry := guessPath(path)
remoteFullPath = d.getPathForRemote(path, firstTryIsFolder)
remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
if err != nil {
if errs.IsObjectNotFound(err) && secondTry {
//try the opposite
remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder)
remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true})
if err2 != nil {
return nil, err2
}
} else {
return nil, err
}
}
var size int64 = 0
name := ""
if !remoteObj.IsDir() {
size, err = d.cipher.DecryptedSize(remoteObj.GetSize())
if err != nil {
log.Warnf("DecryptedSize failed for %s ,will use original size, err:%s", path, err)
size = remoteObj.GetSize()
}
name, err = d.cipher.DecryptFileName(remoteObj.GetName())
if err != nil {
log.Warnf("DecryptFileName failed for %s ,will use original name, err:%s", path, err)
name = remoteObj.GetName()
}
} else {
name, err = d.cipher.DecryptDirName(remoteObj.GetName())
if err != nil {
log.Warnf("DecryptDirName failed for %s ,will use original name, err:%s", path, err)
name = remoteObj.GetName()
}
}
obj := &model.Object{
Path: path,
Name: name,
Size: size,
Modified: remoteObj.ModTime(),
IsFolder: remoteObj.IsDir(),
}
return obj, nil
}
func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
dstDirActualPath, err := d.getActualPathForRemote(file.GetPath(), false)
if err != nil {
return nil, fmt.Errorf("failed to convert path to remote path: %w", err)
}
remoteLink, remoteFile, err := op.Link(ctx, d.remoteStorage, dstDirActualPath, args)
if err != nil {
return nil, err
}
if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
}
remoteFileSize := remoteFile.GetSize()
remoteClosers := utils.EmptyClosers()
rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
length := underlyingLength
if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
length = -1
}
rrc := remoteLink.RangeReadCloser
if len(remoteLink.URL) > 0 {
rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
if err != nil {
return nil, err
}
rrc = converted
}
if rrc != nil {
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
remoteClosers.AddClosers(rrc.GetClosers())
if err != nil {
return nil, err
}
return remoteReader, nil
}
if remoteLink.MFile != nil {
_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
if err != nil {
return nil, err
}
// keep reusing the same MFile and close it at the end
remoteClosers.Add(remoteLink.MFile)
return io.NopCloser(remoteLink.MFile), nil
}
return nil, errs.NotSupport
}
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
if err != nil {
return nil, err
}
return readSeeker, nil
}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
Header: remoteLink.Header,
RangeReadCloser: resultRangeReadCloser,
Expiration: remoteLink.Expiration,
}
return resultLink, nil
}
func (d *Crypt) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
dstDirActualPath, err := d.getActualPathForRemote(parentDir.GetPath(), true)
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
dir := d.cipher.EncryptDirName(dirName)
return op.MakeDir(ctx, d.remoteStorage, stdpath.Join(dstDirActualPath, dir))
}
func (d *Crypt) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
return op.Move(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
}
func (d *Crypt) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
remoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
var newEncryptedName string
if srcObj.IsDir() {
newEncryptedName = d.cipher.EncryptDirName(newName)
} else {
newEncryptedName = d.cipher.EncryptFileName(newName)
}
return op.Rename(ctx, d.remoteStorage, remoteActualPath, newEncryptedName)
}
func (d *Crypt) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
srcRemoteActualPath, err := d.getActualPathForRemote(srcObj.GetPath(), srcObj.IsDir())
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
dstRemoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), dstDir.IsDir())
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
return op.Copy(ctx, d.remoteStorage, srcRemoteActualPath, dstRemoteActualPath)
}
func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
remoteActualPath, err := d.getActualPathForRemote(obj.GetPath(), obj.IsDir())
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
return op.Remove(ctx, d.remoteStorage, remoteActualPath)
}
func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
// Encrypt the data into wrappedIn
wrappedIn, err := d.cipher.EncryptData(streamer)
if err != nil {
return fmt.Errorf("failed to EncryptData: %w", err)
}
// seekable streams are not supported, since rapid upload does not work for encrypted data
streamOut := &stream.FileStream{
Obj: &model.Object{
ID: streamer.GetID(),
Path: streamer.GetPath(),
Name: d.cipher.EncryptFileName(streamer.GetName()),
Size: d.cipher.EncryptedSize(streamer.GetSize()),
Modified: streamer.ModTime(),
IsFolder: streamer.IsDir(),
},
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: streamer.NeedStore(),
Exist: streamer.GetExist(),
}
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
if err != nil {
return err
}
return nil
}
//func (d *Crypt) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Crypt)(nil)
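
The closing `var _ driver.Driver = (*Crypt)(nil)` is Go's standard compile-time interface assertion: it forces a build error the moment *Crypt stops satisfying driver.Driver, at zero runtime cost. The idiom in isolation (with a toy interface standing in for driver.Driver):

```go
package main

import "fmt"

type Driver interface {
	Name() string
}

type Crypt struct{}

func (c *Crypt) Name() string { return "Crypt" }

// Fails to compile if *Crypt ever stops implementing Driver,
// without allocating anything at runtime.
var _ Driver = (*Crypt)(nil)

func main() {
	fmt.Println((&Crypt{}).Name())
}
```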

46
drivers/crypt/meta.go Normal file
View File

@ -0,0 +1,46 @@
package crypt
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
//driver.RootPath
//driver.RootID
// define other
FileNameEnc string `json:"filename_encryption" type:"select" required:"true" options:"off,standard,obfuscate" default:"off"`
DirNameEnc string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
RemotePath string `json:"remote_path" required:"true" help:"This is where the encrypted data is stored"`
Password string `json:"password" required:"true" confidential:"true" help:"the main password"`
Salt string `json:"salt" confidential:"true" help:"If you don't know what a salt is, treat it as a second password. Optional but recommended"`
EncryptedSuffix string `json:"encrypted_suffix" required:"true" default:".bin" help:"for advanced users only! Encrypted files will have this suffix"`
FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced users only!"`
Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnails, which are pre-generated under the .thumbnails folder"`
ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"`
}
var config = driver.Config{
Name: "Crypt",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: true,
NoCache: true,
NoUpload: false,
NeedMs: false,
DefaultRoot: "/",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Crypt{}
})
}

1
drivers/crypt/types.go Normal file
View File

@ -0,0 +1 @@
package crypt

44
drivers/crypt/util.go Normal file
View File

@ -0,0 +1,44 @@
package crypt
import (
stdpath "path"
"path/filepath"
"strings"
"github.com/alist-org/alist/v3/internal/op"
)
// guessPath makes a best-effort guess based on the path alone
func guessPath(path string) (isFolder, secondTry bool) {
if strings.HasSuffix(path, "/") {
//confirmed a folder
return true, false
}
lastSlash := strings.LastIndex(path, "/")
if !strings.Contains(path[lastSlash:], ".") {
// no dot: try folder first, then file
return true, true
}
return false, true
}
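
Since encrypted names can't be listed cheaply, guessPath infers folder-vs-file from the plaintext path alone and reports whether the opposite guess is worth a second fs.Get. A quick demonstration of the three cases (the function body mirrors the one above):

```go
package main

import (
	"fmt"
	"strings"
)

func guessPath(path string) (isFolder, secondTry bool) {
	if strings.HasSuffix(path, "/") {
		return true, false // a trailing slash is conclusive
	}
	lastSlash := strings.LastIndex(path, "/")
	if !strings.Contains(path[lastSlash:], ".") {
		return true, true // no extension: try folder first, then file
	}
	return false, true // looks like a file, but verify
}

func main() {
	for _, p := range []string{"/docs/", "/docs/readme", "/docs/readme.md"} {
		folder, retry := guessPath(p)
		fmt.Printf("%-17s folder=%v secondTry=%v\n", p, folder, retry)
	}
}
```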
func (d *Crypt) getPathForRemote(path string, isFolder bool) (remoteFullPath string) {
if isFolder && !strings.HasSuffix(path, "/") {
path = path + "/"
}
dir, fileName := filepath.Split(path)
remoteDir := d.cipher.EncryptDirName(dir)
remoteFileName := ""
if len(strings.TrimSpace(fileName)) > 0 {
remoteFileName = d.cipher.EncryptFileName(fileName)
}
return stdpath.Join(d.RemotePath, remoteDir, remoteFileName)
}
// the actual path is for internal use only; any user-facing link should come from remoteFullPath
func (d *Crypt) getActualPathForRemote(path string, isFolder bool) (string, error) {
_, remoteActualPath, err := op.GetStorageAndActualPath(d.getPathForRemote(path, isFolder))
return remoteActualPath, err
}

View File

@ -203,7 +203,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
_ = res.Body.Close()
if count > 0 {
up((i + 1) * 100 / count)
up(float64(i+1) * 100 / float64(count))
}
offset += byteSize
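
This Dropbox hunk converts the operands before dividing, since the progress callback now takes a float64; it also preserves the fractional part that integer division used to truncate. A side-by-side of the two expressions:

```go
package main

import "fmt"

func main() {
	count := 3 // number of upload chunks
	for i := 0; i < count; i++ {
		intPct := (i + 1) * 100 / count                 // old: truncating integer division
		floatPct := float64(i+1) * 100 / float64(count) // new: keeps the fraction
		fmt.Printf("chunk %d: int=%d%%  float=%.2f%%\n", i+1, intPct, floatPct)
	}
}
```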

View File

@ -4,7 +4,6 @@ import (
"context"
stdpath "path"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
@ -65,11 +64,10 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
return nil, err
}
r := NewFTPFileReader(d.conn, file.GetPath())
r := NewFileReader(d.conn, file.GetPath(), file.GetSize())
link := &model.Link{
Data: r,
MFile: r,
}
base.HandleRange(link, r, args.Header, file.GetSize())
return link, nil
}

View File

@ -4,6 +4,7 @@ import (
"io"
"os"
"sync"
"sync/atomic"
"time"
"github.com/jlaffaye/ftp"
@ -30,43 +31,59 @@ func (d *FTP) login() error {
return nil
}
// An FTP file reader that implements io.ReadSeekCloser for seeking.
type FTPFileReader struct {
// FileReader is an FTP file reader that supports Read, ReadAt, Seek and Close.
type FileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset int64
offset atomic.Int64
readAtOffset int64
mu sync.Mutex
path string
size int64
}
func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
return &FTPFileReader{
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
return &FileReader{
conn: conn,
path: path,
size: size,
}
}
func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
func (r *FileReader) Read(buf []byte) (n int, err error) {
n, err = r.ReadAt(buf, r.offset.Load())
r.offset.Add(int64(n))
return
}
func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
if off < 0 {
return 0, os.ErrInvalid
}
r.mu.Lock()
defer r.mu.Unlock()
if off != r.readAtOffset && r.resp != nil {
// have to restart the connection to correct the offset
_ = r.resp.Close()
r.resp = nil
}
if r.resp == nil {
r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
r.readAtOffset = off
if err != nil {
return 0, err
}
}
n, err = r.resp.Read(buf)
r.offset += int64(n)
r.readAtOffset += int64(n)
return
}
func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()
oldOffset := r.offset
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
oldOffset := r.offset.Load()
var newOffset int64
switch whence {
case io.SeekStart:
@ -74,11 +91,7 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
case io.SeekCurrent:
newOffset = oldOffset + offset
case io.SeekEnd:
size, err := r.conn.FileSize(r.path)
if err != nil {
return oldOffset, err
}
newOffset = offset + int64(size)
newOffset = r.size + offset
default:
return -1, os.ErrInvalid
}
@ -91,17 +104,11 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
// offset not changed, so return directly
return oldOffset, nil
}
r.offset = newOffset
if r.resp != nil {
// close the existing ftp data connection, otherwise the next read will be blocked
_ = r.resp.Close() // we do not care about whether it returns an error
r.resp = nil
}
r.offset.Store(newOffset)
return newOffset, nil
}
func (r *FTPFileReader) Close() error {
func (r *FileReader) Close() error {
if r.resp != nil {
return r.resp.Close()
}
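
The rewritten FileReader keeps two positions: a lock-free cursor for sequential Read calls (the atomic.Int64) and the FTP connection's real position (readAtOffset, guarded by the mutex), reopening the transfer only when ReadAt jumps somewhere non-sequential. The Read-delegates-to-ReadAt shape is a general pattern; a minimal sketch over any io.ReaderAt:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync/atomic"
)

// cursorReader turns an io.ReaderAt into an io.Reader by tracking the
// sequential position in an atomic counter, the same shape as FileReader.Read.
type cursorReader struct {
	ra     io.ReaderAt
	offset atomic.Int64
}

func (c *cursorReader) Read(p []byte) (int, error) {
	n, err := c.ra.ReadAt(p, c.offset.Load())
	c.offset.Add(int64(n))
	return n, err
}

func main() {
	r := &cursorReader{ra: strings.NewReader("hello, ftp")}
	buf := make([]byte, 5)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			fmt.Printf("%q\n", buf[:n]) // "hello", then ", ftp"
		}
		if err != nil {
			break
		}
	}
}
```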

View File

@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
obj := stream.GetOld()
obj := stream.GetExist()
var (
e Error
url string
@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
putUrl := res.Header().Get("location")
if stream.GetSize() < d.ChunkSize*1024*1024 {
_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)
@ -23,12 +24,17 @@ type File struct {
Name string `json:"name"`
MimeType string `json:"mimeType"`
ModifiedTime time.Time `json:"modifiedTime"`
CreatedTime time.Time `json:"createdTime"`
Size string `json:"size"`
ThumbnailLink string `json:"thumbnailLink"`
ShortcutDetails struct {
TargetId string `json:"targetId"`
TargetMimeType string `json:"targetMimeType"`
} `json:"shortcutDetails"`
MD5Checksum string `json:"md5Checksum"`
SHA1Checksum string `json:"sha1Checksum"`
SHA256Checksum string `json:"sha256Checksum"`
}
func fileToObj(f File) *model.ObjThumb {
@ -39,10 +45,18 @@ func fileToObj(f File) *model.ObjThumb {
ID: f.Id,
Name: f.Name,
Size: size,
Ctime: f.CreatedTime,
Modified: f.ModifiedTime,
IsFolder: f.MimeType == "application/vnd.google-apps.folder",
HashInfo: utils.NewHashInfoByMap(map[*utils.HashType]string{
utils.MD5: f.MD5Checksum,
utils.SHA1: f.SHA1Checksum,
utils.SHA256: f.SHA256Checksum,
}),
},
Thumbnail: model.Thumbnail{
Thumbnail: f.ThumbnailLink,
},
Thumbnail: model.Thumbnail{},
}
if f.MimeType == "application/vnd.google-apps.shortcut" {
obj.ID = f.ShortcutDetails.TargetId

View File

@ -5,14 +5,14 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strconv"
"time"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
@ -43,7 +43,7 @@ func (d *GoogleDrive) refreshToken() error {
gdsaFileThis := d.RefreshToken
if gdsaFile.IsDir() {
if len(d.ServiceAccountFileList) <= 0 {
gdsaReadDir, gdsaDirErr := ioutil.ReadDir(d.RefreshToken)
gdsaReadDir, gdsaDirErr := os.ReadDir(d.RefreshToken)
if gdsaDirErr != nil {
log.Error("read dir fail")
return gdsaDirErr
@ -75,7 +75,7 @@ func (d *GoogleDrive) refreshToken() error {
}
}
gdsaFileThisContent, err := ioutil.ReadFile(gdsaFileThis)
gdsaFileThisContent, err := os.ReadFile(gdsaFileThis)
if err != nil {
return err
}
@ -195,7 +195,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
}
query := map[string]string{
"orderBy": orderBy,
"fields": "files(id,name,mimeType,size,modifiedTime,thumbnailLink,shortcutDetails),nextPageToken",
"fields": "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
"pageSize": "1000",
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
//"includeItemsFromAllDrives": "true",
@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
var finish int64 = 0
for finish < stream.GetSize() {
var offset int64 = 0
for offset < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
chunkSize := stream.GetSize() - finish
chunkSize := stream.GetSize() - offset
if chunkSize > defaultChunkSize {
chunkSize = defaultChunkSize
}
_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
if err != nil {
return err
}
_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Length": strconv.FormatInt(chunkSize, 10),
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
}).SetBody(reader).SetContext(ctx)
}, nil)
if err != nil {
return err
}
finish += chunkSize
offset += chunkSize
}
return nil
}
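
The loop now range-reads each chunk straight from the stream instead of wrapping a shared reader in io.LimitReader, so every request gets its own reader and the Content-Range values follow directly from the offset: each chunk covers bytes [offset, offset+chunkSize-1] of the total. A dry run of the header arithmetic (sizes are arbitrary):

```go
package main

import "fmt"

func main() {
	total := int64(25 << 20) // a 25 MiB stream
	chunk := int64(10 << 20) // 10 MiB chunks
	for offset := int64(0); offset < total; {
		size := chunk
		if total-offset < size {
			size = total - offset // the final, shorter chunk
		}
		fmt.Printf("Content-Range: bytes %d-%d/%d\n", offset, offset+size-1, total)
		offset += size
	}
}
```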

View File

@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
}
resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(stream.GetReadCloser()).SetContext(ctx)
req.SetBody(stream).SetContext(ctx)
}, nil, postHeaders)
if err != nil {

View File

@ -2,9 +2,7 @@ package lanzou
import (
"context"
"fmt"
"net/http"
"regexp"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@ -19,6 +17,8 @@ type LanZou struct {
model.Storage
uid string
vei string
flag int32
}
func (d *LanZou) Config() driver.Config {
@ -30,16 +30,18 @@ func (d *LanZou) GetAddition() driver.Additional {
}
func (d *LanZou) Init(ctx context.Context) (err error) {
if d.IsCookie() {
switch d.Type {
case "account":
_, err := d.Login()
if err != nil {
return err
}
fallthrough
case "cookie":
if d.RootFolderID == "" {
d.RootFolderID = "-1"
}
ylogin := regexp.MustCompile("ylogin=(.*?);").FindStringSubmatch(d.Cookie)
if len(ylogin) < 2 {
return fmt.Errorf("cookie does not contain ylogin")
}
d.uid = ylogin[1]
d.vei, err = d.getVei()
d.vei, d.uid, err = d.getVeiAndUid()
}
return
}
@ -51,7 +53,7 @@ func (d *LanZou) Drop(ctx context.Context) error {
// the size and time obtained are not accurate
func (d *LanZou) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if d.IsCookie() {
if d.IsCookie() || d.IsAccount() {
return d.GetAllFiles(dir.GetID())
} else {
return d.GetFileOrFolderByShareUrl(dir.GetID(), d.SharePassword)
@ -119,7 +121,7 @@ func (d *LanZou) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
if d.IsCookie() {
if d.IsCookie() || d.IsAccount() {
data, err := d.doupload(func(req *resty.Request) {
req.SetContext(ctx)
req.SetFormData(map[string]string{
@ -137,11 +139,11 @@ func (d *LanZou) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
FolID: utils.Json.Get(data, "text").ToString(),
}, nil
}
return nil, errs.NotImplement
return nil, errs.NotSupport
}
func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
if d.IsCookie() {
if d.IsCookie() || d.IsAccount() {
if !srcObj.IsDir() {
_, err := d.doupload(func(req *resty.Request) {
req.SetContext(ctx)
@ -157,11 +159,11 @@ func (d *LanZou) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
return srcObj, nil
}
}
return nil, errs.NotImplement
return nil, errs.NotSupport
}
func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
if d.IsCookie() {
if d.IsCookie() || d.IsAccount() {
if !srcObj.IsDir() {
_, err := d.doupload(func(req *resty.Request) {
req.SetContext(ctx)
@ -179,11 +181,11 @@ func (d *LanZou) Rename(ctx context.Context, srcObj model.Obj, newName string) (
return srcObj, nil
}
}
return nil, errs.NotImplement
return nil, errs.NotSupport
}
func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
if d.IsCookie() {
if d.IsCookie() || d.IsAccount() {
_, err := d.doupload(func(req *resty.Request) {
req.SetContext(ctx)
if obj.IsDir() {
@ -200,13 +202,13 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
}, nil)
return err
}
return errs.NotImplement
return errs.NotSupport
}
func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if d.IsCookie() {
if d.IsCookie() || d.IsAccount() {
var resp RespText[[]FileOrFolder]
_, err := d._post(d.BaseUrl+"/fileup.php", func(req *resty.Request) {
_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {
req.SetFormData(map[string]string{
"task": "1",
"vie": "2",
@ -221,5 +223,5 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
}
return &resp.Text[0], nil
}
return nil, errs.NotImplement
return nil, errs.NotSupport
}

View File

@ -3,6 +3,7 @@ package lanzou
import (
"bytes"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
@ -117,13 +118,102 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // split key-value pairs
// look up a JS variable by key
func findJSVarFunc(key, data string) string {
values := regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
var values []string
if key != "sasign" {
values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
} else {
matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1)
if len(matches) == 3 {
values = matches[1]
} else {
if len(matches) > 0 {
values = matches[0]
}
}
}
if len(values) == 0 {
return ""
}
return values[1]
}
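
findJSVarFunc scrapes `var key = '...';` assignments out of the share page, with a special case for sasign because it can be assigned more than once and the first match is not always the right one. A reduced sketch of single- vs multi-match extraction (the sample page text is made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	page := `var vei = 'abc';
var sasign = 'decoy';
var sasign = 'real';`

	// Single-occurrence variables: the first match is enough.
	if m := regexp.MustCompile(`var vei = '(.+?)';`).FindStringSubmatch(page); len(m) > 1 {
		fmt.Println("vei =", m[1])
	}
	// Repeated variables: collect every match and choose deliberately.
	all := regexp.MustCompile(`var sasign = '(.+?)';`).FindAllStringSubmatch(page, -1)
	if len(all) > 0 {
		fmt.Println("sasign =", all[len(all)-1][1]) // last assignment wins here
	}
}
```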
var findFunction = regexp.MustCompile(`(?ims)^function[^{]+`)
var findFunctionAll = regexp.MustCompile(`(?is)function[^{]+`)
// find the positions of all functions
func findJSFunctionIndex(data string, all bool) [][2]int {
findFunction := findFunction
if all {
findFunction = findFunctionAll
}
indexs := findFunction.FindAllStringIndex(data, -1)
fIndexs := make([][2]int, 0, len(indexs))
for _, index := range indexs {
if len(index) != 2 {
continue
}
count, data := 0, data[index[1]:]
for ii, v := range data {
if v == ' ' && count == 0 {
continue
}
if v == '{' {
count++
}
if v == '}' {
count--
}
if count == 0 {
fIndexs = append(fIndexs, [2]int{index[0], index[1] + ii + 1})
break
}
}
}
return fIndexs
}
// remove global JS functions
func removeJSGlobalFunction(html string) string {
indexs := findJSFunctionIndex(html, false)
block := make([]string, len(indexs))
for i, next := len(indexs)-1, len(html); i >= 0; i-- {
index := indexs[i]
block[i] = html[index[1]:next]
next = index[0]
}
return strings.Join(block, "")
}
// get a function by name
func getJSFunctionByName(html string, name string) (string, error) {
indexs := findJSFunctionIndex(html, true)
for _, index := range indexs {
data := html[index[0]:index[1]]
if regexp.MustCompile(`function\s+` + name + `[()\s]+{`).MatchString(data) {
return data, nil
}
}
return "", fmt.Errorf("not find %s function", name)
}
// parse the JSON in the HTML, choosing the longest data block
func htmlJsonToMap2(html string) (map[string]string, error) {
datas := findDataReg.FindAllStringSubmatch(html, -1)
var sData string
for _, data := range datas {
if len(data[1]) > len(sData) {
sData = data[1]
}
}
if sData == "" {
return nil, fmt.Errorf("not find data")
}
return jsonToMap(sData, html), nil
}
// parse the JSON in the HTML
func htmlJsonToMap(html string) (map[string]string, error) {
datas := findDataReg.FindStringSubmatch(html)
@ -190,3 +280,14 @@ func GetExpirationTime(url string) (etime time.Duration) {
etime = time.Duration(timestamp-time.Now().Unix()) * time.Second
return
}
func CookieToString(cookies []*http.Cookie) string {
if cookies == nil {
return ""
}
cookieStrings := make([]string, len(cookies))
for i, cookie := range cookies {
cookieStrings[i] = cookie.Name + "=" + cookie.Value
}
return strings.Join(cookieStrings, ";")
}
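
CookieToString flattens the cookies returned by Login into a Cookie header value that can be stored in the driver's Cookie field. Example usage (the cookie values are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func CookieToString(cookies []*http.Cookie) string {
	if cookies == nil {
		return ""
	}
	cookieStrings := make([]string, len(cookies))
	for i, cookie := range cookies {
		cookieStrings[i] = cookie.Name + "=" + cookie.Value
	}
	return strings.Join(cookieStrings, ";")
}

func main() {
	cookies := []*http.Cookie{
		{Name: "ylogin", Value: "12345"},
		{Name: "phpdisk_info", Value: "abcdef"},
	}
	fmt.Println(CookieToString(cookies)) // ylogin=12345;phpdisk_info=abcdef
}
```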

View File

@ -6,8 +6,13 @@ import (
)
type Addition struct {
Type string `json:"type" type:"select" options:"cookie,url" default:"cookie"`
Cookie string `json:"cookie" required:"true" help:"about 15 days valid, ignore if shareUrl is used"`
Type string `json:"type" type:"select" options:"account,cookie,url" default:"cookie"`
Account string `json:"account"`
Password string `json:"password"`
Cookie string `json:"cookie" help:"about 15 days valid, ignore if shareUrl is used"`
driver.RootID
SharePassword string `json:"share_password"`
BaseUrl string `json:"baseUrl" required:"true" default:"https://pc.woozooo.com" help:"basic URL for file operation"`
@ -19,6 +24,10 @@ func (a *Addition) IsCookie() bool {
return a.Type == "cookie"
}
func (a *Addition) IsAccount() bool {
return a.Type == "account"
}
var config = driver.Config{
Name: "Lanzou",
LocalSort: true,

View File

@ -3,11 +3,14 @@ package lanzou
import (
"errors"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
var ErrFileShareCancel = errors.New("file sharing cancellation")
var ErrFileNotExist = errors.New("file does not exist")
var ErrCookieExpiration = errors.New("cookie expiration")
type RespText[T any] struct {
Text T `json:"text"`
@ -17,6 +20,9 @@ type RespInfo[T any] struct {
Info T `json:"info"`
}
var _ model.Obj = (*FileOrFolder)(nil)
var _ model.Obj = (*FileOrFolderByShareUrl)(nil)
type FileOrFolder struct {
Name string `json:"name"`
//Onof string `json:"onof"` // 是否存在提取码
@ -48,6 +54,14 @@ type FileOrFolder struct {
shareInfo *FileShare `json:"-"`
}
func (f *FileOrFolder) CreateTime() time.Time {
return f.ModTime()
}
func (f *FileOrFolder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f *FileOrFolder) GetID() string {
if f.IsDir() {
return f.FolID
@ -129,6 +143,14 @@ type FileOrFolderByShareUrl struct {
repairFlag bool `json:"-"`
}
func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
return f.ModTime()
}
func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f *FileOrFolderByShareUrl) GetID() string { return f.ID }
func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
func (f *FileOrFolderByShareUrl) GetPath() string { return "" }

View File

@ -5,13 +5,16 @@ import (
"fmt"
"net/http"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
@ -37,8 +40,25 @@ func (d *LanZou) get(url string, callback base.ReqCallback) ([]byte, error) {
}
func (d *LanZou) post(url string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
data, err := d._post(url, callback, resp, false)
if err == ErrCookieExpiration && d.IsAccount() {
if atomic.CompareAndSwapInt32(&d.flag, 0, 1) {
_, err2 := d.Login()
atomic.SwapInt32(&d.flag, 0)
if err2 != nil {
err = errors.Join(err, err2)
d.Status = err.Error()
op.MustSaveDriverStorage(d)
return data, err
}
}
for atomic.LoadInt32(&d.flag) != 0 {
runtime.Gosched()
}
return d._post(url, callback, resp, false)
}
return data, err
}
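
The wrapper above performs a single-flight re-login: one goroutine wins the CompareAndSwap on d.flag and refreshes the session, while every other caller that hit the expired-cookie error spins on Gosched until the flag clears, then replays its request once. The concurrency skeleton, detached from the LanZou specifics:

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)

type client struct {
	flag   int32        // 1 while a re-login is in flight
	logins atomic.Int32 // how many re-logins actually ran
}

func (c *client) relogin() {
	c.logins.Add(1)
	time.Sleep(10 * time.Millisecond) // simulate the network round trip
}

// retryAfterExpiry is what each goroutine does after a request comes
// back with the "login expired" status: at most one re-login runs at
// a time, the rest wait and then retry their own request.
func (c *client) retryAfterExpiry() {
	if atomic.CompareAndSwapInt32(&c.flag, 0, 1) {
		c.relogin()
		atomic.StoreInt32(&c.flag, 0)
	}
	for atomic.LoadInt32(&c.flag) != 0 {
		runtime.Gosched()
	}
	// replay the original request here
}

func main() {
	c := &client{}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.retryAfterExpiry() }()
	}
	wg.Wait()
	fmt.Println("re-logins:", c.logins.Load()) // typically 1: failures coalesce
}
```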
func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{}, up bool) ([]byte, error) {
data, err := d.request(url, http.MethodPost, func(req *resty.Request) {
@ -49,10 +69,12 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
}
return false
})
if callback != nil {
callback(req)
}
}, up)
if err != nil {
return nil, err
return data, err
}
switch utils.Json.Get(data, "zt").ToInt() {
case 1, 2, 4:
@ -61,12 +83,14 @@ func (d *LanZou) _post(url string, callback base.ReqCallback, resp interface{},
utils.Json.Unmarshal(data, resp)
}
return data, nil
case 9: // login expired
return data, ErrCookieExpiration
default:
info := utils.Json.Get(data, "inf").ToString()
if info == "" {
info = utils.Json.Get(data, "info").ToString()
}
return nil, fmt.Errorf(info)
return data, fmt.Errorf("%s", info)
}
}
@ -101,6 +125,28 @@ func (d *LanZou) request(url string, method string, callback base.ReqCallback, u
return res.Body(), err
}
func (d *LanZou) Login() ([]*http.Cookie, error) {
resp, err := base.NewRestyClient().SetRedirectPolicy(resty.NoRedirectPolicy()).
R().SetFormData(map[string]string{
"task": "3",
"uid": d.Account,
"pwd": d.Password,
"setSessionId": "",
"setSig": "",
"setScene": "",
"setTocen": "",
"formhash": "",
}).Post("https://up.woozooo.com/mlogin.php")
if err != nil {
return nil, err
}
if utils.Json.Get(resp.Body(), "zt").ToInt() != 1 {
return nil, fmt.Errorf("login err: %s", resp.Body())
}
d.Cookie = CookieToString(resp.Cookies())
return resp.Cookies(), nil
}
/*
Fetch data using the cookie
*/
@ -212,7 +258,7 @@ var sizeFindReg = regexp.MustCompile(`(?i)大小\W*([0-9.]+\s*[bkm]+)`)
var timeFindReg = regexp.MustCompile(`\d+\s*[秒天分小][钟时]?前|[昨前]天|\d{4}-\d{2}-\d{2}`)
// find the IDs and names of subfolders inside a shared folder
var findSubFolaerReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
var findSubFolderReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
// get the download page link
var findDownPageParamReg = regexp.MustCompile(`<iframe.*?src="(.+?)"`)
@ -300,7 +346,11 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
// password required
if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
param, err := htmlFormToMap(sharePageData)
sharePageData, err := getJSFunctionByName(sharePageData, "down_p")
if err != nil {
return nil, err
}
param, err := htmlJsonToMap(sharePageData)
if err != nil {
return nil, err
}
@ -325,7 +375,6 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
return nil, err
}
nextPageData := RemoveNotes(string(data))
param, err = htmlJsonToMap(nextPageData)
if err != nil {
return nil, err
@ -406,7 +455,7 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
files := make([]FileOrFolderByShareUrl, 0)
// fetch folders (VIP accounts)
floders := findSubFolaerReg.FindAllStringSubmatch(sharePageData, -1)
floders := findSubFolderReg.FindAllStringSubmatch(sharePageData, -1)
for _, floder := range floders {
if len(floder) == 3 {
files = append(files, FileOrFolderByShareUrl{
@ -427,10 +476,10 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
if err != nil {
return nil, err
}
/*// files in the folder also require no password
// files in the folder require the password
for i := 0; i < len(resp.Text); i++ {
resp.Text[i].Pwd = pwd
}*/
}
if len(resp.Text) == 0 {
break
}
@ -451,21 +500,32 @@ func (d *LanZou) getFileRealInfo(downURL string) (*int64, *time.Time) {
return &size, &time
}
func (d *LanZou) getVei() (string, error) {
resp, err := d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
func (d *LanZou) getVeiAndUid() (vei string, uid string, err error) {
var resp []byte
resp, err = d.get("https://pc.woozooo.com/mydisk.php", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"item": "files",
"action": "index",
"u": d.uid,
})
})
if err != nil {
return "", err
return
}
// uid
uids := regexp.MustCompile(`uid=([^'"&;]+)`).FindStringSubmatch(string(resp))
if len(uids) < 2 {
err = fmt.Errorf("uid variable not find")
return
}
uid = uids[1]
// vei
html := RemoveNotes(string(resp))
data, err := htmlJsonToMap(html)
if err != nil {
return "", err
return
}
return data["vei"], nil
vei = data["vei"]
return
}

Some files were not shown because too many files have changed in this diff.