Compare commits
113 Commits
v3.25.1 ... refactor/o
SHA1
---
3b90f591b5
5657b12b20
aba8bc0ec2
ce6e486666
4fc0a77565
aaffaee2b5
8ef8023c20
cdfbe6dcf2
94d028743a
7f7335435c
b9e192b29c
69a98eaef6
1ebc96a4e5
66e2324cac
7600dc28df
8ef89ad0a4
35d672217d
9fb9efb704
1a283bb272
1490da8b53
12dfb60a66
0380d7fff9
a008f54f4d
0acb2d6073
ea9a3432ab
7db3975b18
3d7f79cba8
9ff83a7950
e719a1a456
40a6fcbdff
0fd51646f6
e8958019d9
e1ef690784
4024050dd0
eb918658f0
fb13dae136
6b67a36d63
a64dd4885e
0f03a747d8
30977cdc6d
106cf720c1
882112ed1c
2a6ab77295
f0981a0c8d
57eea4db17
234852ca61
809105b67e
02e8c31506
19b39a5c04
28e2731594
b1a279cbcc
352a6a741a
109015567a
9e0fa77ca2
335b11c698
8e433355e6
3504f017b9
cd2f8077fa
d5b68a91d2
623c7dcea5
ecbd6d86cd
7200344ace
b313ac4daa
f2f312b43a
6f6d20e1ba
3231c3d930
b604e21c69
3c66db9845
f6ab1f7f61
8e40465e86
37dffd0fce
e7c0d94b44
8102142007
7c6dec5d47
dd10c0c5d0
34fadecc2c
cb8867fcc1
092ed06833
6308f1c35d
ce10c9f120
6c4736fc8f
b301b791c7
19d34e2eb8
a3748af772
9b765ef696
8f493cccc4
31a033dff1
8c3337b88b
7238243664
ba2b15ab24
28dc8822b7
358c5055e9
b6cd40e6d3
7d96d8070d
d482fb5f26
60402ce1fc
1e3950c847
ed550594da
3bbae29f93
3b74f8cd9a
e9bdb91e01
1aa024ed6b
13e8d36e1a
5606c23768
0b675d6c02
c1db3a36ad
c59dbb4f9e
df6b306fce
9d45718e5f
b91ed7a78a
95386d777b
635809c376
af6bb2a6aa
2  .github/FUNDING.yml  vendored

@@ -3,7 +3,7 @@
 github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
 patreon: # Replace with a single Patreon username
 open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
+ko_fi: xhofe # Replace with a single Ko-fi username
 tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
 community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
 liberapay: # Replace with a single Liberapay username
4  .github/ISSUE_TEMPLATE/bug_report.yml  vendored

@@ -22,8 +22,8 @@ body:
           I'm sure there are no duplicate issues or discussions.
           我确定没有重复的issue或讨论。
       - label: |
-          I'm sure it's due to `AList` and not something else(such as `Dependencies` or `Operational`).
-          我确定是`AList`的问题,而不是其他原因(例如`依赖`或`操作`)。
+          I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
+          我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
       - label: |
           I'm sure this issue is not fixed in the latest version.
           我确定这个问题在最新版本中没有被修复。
2  .github/stale.yml  vendored

@@ -6,6 +6,8 @@ daysUntilClose: 20
 exemptLabels:
   - accepted
   - security
+  - working
+  - pr-welcome
 # Label to use when marking an issue as stale
 staleLabel: stale
 # Comment to post when marking an issue as stale. Set to `false` to disable
4  .github/workflows/auto_lang.yml  vendored

@@ -11,6 +11,10 @@ on:
       - 'cmd/lang.go'
   workflow_dispatch:

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   auto_lang:
     strategy:
4  .github/workflows/build.yml  vendored

@@ -6,6 +6,10 @@ on:
   pull_request:
     branches: [ 'main' ]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build:
     strategy:
4  .github/workflows/build_docker.yml  vendored

@@ -4,6 +4,10 @@ on:
   push:
     branches: [ main ]

+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
 jobs:
   build_docker:
     name: Build docker

@@ -14,4 +14,4 @@ jobs:
       actions: 'remove-labels'
       token: ${{ secrets.GITHUB_TOKEN }}
       issue-number: ${{ github.event.issue.number }}
-      labels: 'working'
+      labels: 'working,pr-welcome'
2  .github/workflows/issue_question.yml  vendored

@@ -10,7 +10,7 @@ jobs:
     if: github.event.label.name == 'question'
     steps:
       - name: Create comment
-        uses: actions-cool/issues-helper@v3.5.1
+        uses: actions-cool/issues-helper@v3.5.2
        with:
          actions: 'create-comment'
          token: ${{ secrets.GITHUB_TOKEN }}
34  .github/workflows/release_linux_musl.yml  vendored  Normal file

@@ -0,0 +1,34 @@
+name: release_linux_musl
+
+on:
+  release:
+    types: [ published ]
+
+jobs:
+  release_linux_musl:
+    strategy:
+      matrix:
+        platform: [ ubuntu-latest ]
+        go-version: [ '1.20' ]
+    name: Release
+    runs-on: ${{ matrix.platform }}
+    steps:
+
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ matrix.go-version }}
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Build
+        run: |
+          bash build.sh release linux_musl
+
+      - name: Upload assets
+        uses: softprops/action-gh-release@v1
+        with:
+          files: build/compress/*
2  .github/workflows/release_linux_musl_arm.yml  vendored

@@ -5,7 +5,7 @@ on:
     types: [ published ]

 jobs:
-  release_arm:
+  release_linux_musl_arm:
     strategy:
       matrix:
         platform: [ ubuntu-latest ]
10  README.md  Executable file → Normal file

@@ -43,7 +43,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]

 ## Features

-- [x] Multiple storage
+- [x] Multiple storages
   - [x] Local storage
   - [x] [Aliyundrive](https://www.aliyundrive.com/)
   - [x] OneDrive / Sharepoint ([global](https://www.office.com/), [cn](https://portal.partner.microsoftonline.cn),de,us)

@@ -86,7 +86,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]
 - [x] Protected routes (password protection and authentication)
 - [x] WebDav (see https://alist.nn.ci/guide/webdav.html for details)
 - [x] [Docker Deploy](https://hub.docker.com/r/xhofe/alist)
-- [x] Cloudflare workers proxy
+- [x] Cloudflare Workers proxy
 - [x] File/Folder package download
 - [x] Web upload(Can allow visitors to upload), delete, mkdir, rename, move and copy
 - [x] Offline download

@@ -103,7 +103,7 @@ English | [中文](./README_cn.md)| [日本語](./README_ja.md) | [Contributing]

 ## Discussion

-Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature request only.**
+Please go to our [discussion forum](https://github.com/Xhofe/alist/discussions) for general questions, **issues are for bug reports and feature requests only.**

 ## Sponsor

@@ -120,14 +120,14 @@ https://alist.nn.ci/guide/sponsor.html

 Thanks goes to these wonderful people:

-[](https://github.com/alist-org/alist/graphs/contributors)
+[](https://github.com/alist-org/alist/graphs/contributors)

 ## License

 The `AList` is open-source software licensed under the AGPL-3.0 license.

 ## Disclaimer
-- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
+- This program is a free and open source project. It is designed to share files on the network disk, which is convenient for downloading and learning Golang. Please abide by relevant laws and regulations when using it, and do not abuse it;
 - This program is implemented by calling the official sdk/interface, without destroying the official interface behavior;
 - This program only does 302 redirect/traffic forwarding, and does not intercept, store, or tamper with any user data;
 - Before using this program, you should understand and bear the corresponding risks, including but not limited to account ban, download speed limit, etc., which is none of this program's business;
@@ -118,7 +118,7 @@ AList 是一个开源软件,如果你碰巧喜欢这个项目,并希望我

 Thanks goes to these wonderful people:

-[](https://github.com/alist-org/alist/graphs/contributors)
+[](https://github.com/alist-org/alist/graphs/contributors)

 ## 许可
@@ -120,7 +120,7 @@ https://alist.nn.ci/guide/sponsor.html

 これらの素晴らしい人々に感謝します:

-[](https://github.com/alist-org/alist/graphs/contributors)
+[](https://github.com/alist-org/alist/graphs/contributors)

 ## ライセンス
22  build.sh

@@ -89,6 +89,18 @@ BuildDocker() {
 }

 BuildRelease() {
   rm -rf .git/
   mkdir -p "build"
+  BuildWinArm64 ./build/alist-windows-arm64.exe
+  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
+  # why? Because some target platforms seem to have issues with upx compression
+  upx -9 ./alist-linux-amd64
+  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
+  upx -9 ./alist-windows-amd64-upx.exe
+  mv alist-* build
+}
+
+BuildReleaseLinuxMusl() {
+  rm -rf .git/
+  mkdir -p "build"
   muslflags="--extldflags '-static -fpic' $ldflags"

@@ -112,13 +124,6 @@ BuildRelease() {
     export CGO_ENABLED=1
     go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
   done
-  BuildWinArm64 ./build/alist-windows-arm64.exe
-  xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
-  # why? Because some target platforms seem to have issues with upx compression
-  upx -9 ./alist-linux-amd64
-  cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
-  upx -9 ./alist-windows-amd64-upx.exe
-  mv alist-* build
 }

 BuildReleaseLinuxMuslArm() {

@@ -192,6 +197,9 @@ elif [ "$1" = "release" ]; then
   elif [ "$2" = "linux_musl_arm" ]; then
     BuildReleaseLinuxMuslArm
     MakeRelease "md5-linux-musl-arm.txt"
+  elif [ "$2" = "linux_musl" ]; then
+    BuildReleaseLinuxMusl
+    MakeRelease "md5-linux-musl.txt"
   else
     BuildRelease
     MakeRelease "md5.txt"
@@ -19,6 +19,7 @@ var AdminCmd = &cobra.Command{
     Short: "Show admin user's info and some operations about admin user's password",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         admin, err := op.GetAdmin()
         if err != nil {
             utils.Log.Errorf("failed get admin user: %+v", err)

@@ -57,6 +58,7 @@ var ShowTokenCmd = &cobra.Command{
     Short: "Show admin token",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         token := setting.GetStr(conf.Token)
         utils.Log.Infof("Admin token: %s", token)
     },

@@ -64,6 +66,7 @@ var ShowTokenCmd = &cobra.Command{

 func setAdminPassword(pwd string) {
     Init()
+    defer Release()
     admin, err := op.GetAdmin()
     if err != nil {
         utils.Log.Errorf("failed get admin user: %+v", err)
@@ -15,6 +15,7 @@ var Cancel2FACmd = &cobra.Command{
     Short: "Delete 2FA of admin user",
     Run: func(cmd *cobra.Command, args []string) {
         Init()
+        defer Release()
         admin, err := op.GetAdmin()
         if err != nil {
             utils.Log.Errorf("failed to get admin user: %+v", err)
@@ -7,6 +7,7 @@ import (

     "github.com/alist-org/alist/v3/internal/bootstrap"
     "github.com/alist-org/alist/v3/internal/bootstrap/data"
+    "github.com/alist-org/alist/v3/internal/db"
     "github.com/alist-org/alist/v3/pkg/utils"
     log "github.com/sirupsen/logrus"
 )

@@ -19,6 +20,10 @@ func Init() {
     bootstrap.InitIndex()
 }

+func Release() {
+    db.Close()
+}
+
 var pid = -1
 var pidFile string
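These hunks pair every `Init()` in the CLI entry points with a new `Release()` that closes the database. A minimal sketch of the lifecycle pattern, with hypothetical stub bodies standing in for the real bootstrap and `db.Close()`:

```go
package main

import "fmt"

// Stand-ins for the cmd package's lifecycle helpers: the real Init
// bootstraps config and opens the database; Release (added in the hunk
// above) closes it via db.Close().
func Init()    { fmt.Println("init: open config + db") }
func Release() { fmt.Println("release: close db") }

func run() {
	Init()
	// Deferring Release right after Init guarantees cleanup on every
	// return path, which is how the admin/storage commands use it.
	defer Release()
	fmt.Println("command body runs here")
}

func main() { run() }
```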
@@ -5,6 +5,8 @@ import (
     "os"

     "github.com/alist-org/alist/v3/cmd/flags"
+    _ "github.com/alist-org/alist/v3/drivers"
+    _ "github.com/alist-org/alist/v3/internal/offline_download"
     "github.com/spf13/cobra"
 )
@@ -13,7 +13,6 @@ import (
     "time"

     "github.com/alist-org/alist/v3/cmd/flags"
-    _ "github.com/alist-org/alist/v3/drivers"
     "github.com/alist-org/alist/v3/internal/bootstrap"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/pkg/utils"

@@ -35,8 +34,7 @@ the address is defined in config file`,
             utils.Log.Infof("delayed start for %d seconds", conf.Conf.DelayedStart)
             time.Sleep(time.Duration(conf.Conf.DelayedStart) * time.Second)
         }
-        bootstrap.InitAria2()
-        bootstrap.InitQbittorrent()
+        bootstrap.InitOfflineDownloadTools()
         bootstrap.LoadStorages()
         if !flags.Debug && !flags.Dev {
             gin.SetMode(gin.ReleaseMode)

@@ -100,7 +98,7 @@ the address is defined in config file`,
         signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
         <-quit
         utils.Log.Println("Shutdown server...")
+        Release()
         ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
         defer cancel()
         var wg sync.WaitGroup
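The server shutdown path now calls `Release()` before draining in-flight work under a one-second deadline. A self-contained sketch of the same shape using net/http; the HTTP server and port here are stand-ins, not AList's actual server wiring:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8080"}
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// Same shape as the server.go hunk: block until SIGINT/SIGTERM,
	// release resources, then give in-flight requests a short deadline.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
	<-quit
	log.Println("Shutdown server...")

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("forced shutdown:", err)
	}
}
```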
153  cmd/storage.go

@@ -4,8 +4,14 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
 package cmd

 import (
+    "os"
+    "strconv"
+
     "github.com/alist-org/alist/v3/internal/db"
     "github.com/alist-org/alist/v3/pkg/utils"
+    "github.com/charmbracelet/bubbles/table"
+    tea "github.com/charmbracelet/bubbletea"
+    "github.com/charmbracelet/lipgloss"
     "github.com/spf13/cobra"
 )

@@ -15,31 +21,136 @@ var storageCmd = &cobra.Command{
     Short: "Manage storage",
 }

-func init() {
-    var mountPath string
-    var disable = &cobra.Command{
-        Use:   "disable",
-        Short: "Disable a storage",
-        Run: func(cmd *cobra.Command, args []string) {
-            Init()
-            storage, err := db.GetStorageByMountPath(mountPath)
-            if err != nil {
-                utils.Log.Errorf("failed to query storage: %+v", err)
-            } else {
-                storage.Disabled = true
-                err = db.UpdateStorage(storage)
-                if err != nil {
-                    utils.Log.Errorf("failed to update storage: %+v", err)
-                } else {
-                    utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
-                }
-            }
-        },
-    }
-    disable.Flags().StringVarP(&mountPath, "mount-path", "m", "", "The mountPath of storage")
-    RootCmd.AddCommand(storageCmd)
-    storageCmd.AddCommand(disable)
-}
+var disableStorageCmd = &cobra.Command{
+    Use:   "disable",
+    Short: "Disable a storage",
+    Run: func(cmd *cobra.Command, args []string) {
+        if len(args) < 1 {
+            utils.Log.Errorf("mount path is required")
+            return
+        }
+        mountPath := args[0]
+        Init()
+        defer Release()
+        storage, err := db.GetStorageByMountPath(mountPath)
+        if err != nil {
+            utils.Log.Errorf("failed to query storage: %+v", err)
+        } else {
+            storage.Disabled = true
+            err = db.UpdateStorage(storage)
+            if err != nil {
+                utils.Log.Errorf("failed to update storage: %+v", err)
+            } else {
+                utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
+            }
+        }
+    },
+}
+
+var baseStyle = lipgloss.NewStyle().
+    BorderStyle(lipgloss.NormalBorder()).
+    BorderForeground(lipgloss.Color("240"))
+
+type model struct {
+    table table.Model
+}
+
+func (m model) Init() tea.Cmd { return nil }
+
+func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
+    var cmd tea.Cmd
+    switch msg := msg.(type) {
+    case tea.KeyMsg:
+        switch msg.String() {
+        case "esc":
+            if m.table.Focused() {
+                m.table.Blur()
+            } else {
+                m.table.Focus()
+            }
+        case "q", "ctrl+c":
+            return m, tea.Quit
+            //case "enter":
+            //	return m, tea.Batch(
+            //		tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
+            //	)
+        }
+    }
+    m.table, cmd = m.table.Update(msg)
+    return m, cmd
+}
+
+func (m model) View() string {
+    return baseStyle.Render(m.table.View()) + "\n"
+}
+
+var storageTableHeight int
+var listStorageCmd = &cobra.Command{
+    Use:   "list",
+    Short: "List all storages",
+    Run: func(cmd *cobra.Command, args []string) {
+        Init()
+        defer Release()
+        storages, _, err := db.GetStorages(1, -1)
+        if err != nil {
+            utils.Log.Errorf("failed to query storages: %+v", err)
+        } else {
+            utils.Log.Infof("Found %d storages", len(storages))
+            columns := []table.Column{
+                {Title: "ID", Width: 4},
+                {Title: "Driver", Width: 16},
+                {Title: "Mount Path", Width: 30},
+                {Title: "Enabled", Width: 7},
+            }
+
+            var rows []table.Row
+            for i := range storages {
+                storage := storages[i]
+                enabled := "true"
+                if storage.Disabled {
+                    enabled = "false"
+                }
+                rows = append(rows, table.Row{
+                    strconv.Itoa(int(storage.ID)),
+                    storage.Driver,
+                    storage.MountPath,
+                    enabled,
+                })
+            }
+            t := table.New(
+                table.WithColumns(columns),
+                table.WithRows(rows),
+                table.WithFocused(true),
+                table.WithHeight(storageTableHeight),
+            )
+
+            s := table.DefaultStyles()
+            s.Header = s.Header.
+                BorderStyle(lipgloss.NormalBorder()).
+                BorderForeground(lipgloss.Color("240")).
+                BorderBottom(true).
+                Bold(false)
+            s.Selected = s.Selected.
+                Foreground(lipgloss.Color("229")).
+                Background(lipgloss.Color("57")).
+                Bold(false)
+            t.SetStyles(s)
+
+            m := model{t}
+            if _, err := tea.NewProgram(m).Run(); err != nil {
+                utils.Log.Errorf("failed to run program: %+v", err)
+                os.Exit(1)
+            }
+        }
+    },
+}
+
+func init() {
+    RootCmd.AddCommand(storageCmd)
+    storageCmd.AddCommand(disableStorageCmd)
+    storageCmd.AddCommand(listStorageCmd)
+    storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
     // Here you will define your flags and configuration settings.

     // Cobra supports Persistent Flags which will work for this command
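The new `list` subcommand renders storages with Bubble Tea's table component. A trimmed, runnable sketch of the same model/update/view wiring with dummy rows (the column set is reduced from the diff's four columns, and the rows are invented for illustration):

```go
package main

import (
	"fmt"
	"os"

	"github.com/charmbracelet/bubbles/table"
	tea "github.com/charmbracelet/bubbletea"
)

// model wraps a bubbles table, mirroring the model type in cmd/storage.go.
type model struct{ table table.Model }

func (m model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	// "q" or ctrl+c quits; everything else is forwarded to the table so
	// arrow keys scroll rows, as in the storage list command.
	if key, ok := msg.(tea.KeyMsg); ok && (key.String() == "q" || key.String() == "ctrl+c") {
		return m, tea.Quit
	}
	var cmd tea.Cmd
	m.table, cmd = m.table.Update(msg)
	return m, cmd
}

func (m model) View() string { return m.table.View() + "\n" }

func main() {
	t := table.New(
		table.WithColumns([]table.Column{{Title: "ID", Width: 4}, {Title: "Mount Path", Width: 30}}),
		table.WithRows([]table.Row{{"1", "/local"}, {"2", "/115"}}),
		table.WithFocused(true),
		table.WithHeight(5),
	)
	if _, err := tea.NewProgram(model{t}).Run(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
```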
@@ -2,19 +2,22 @@ package _115

 import (
     "context"
     "os"
     "strings"

     driver115 "github.com/SheltonZhu/115driver/pkg/driver"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/http_range"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/pkg/errors"
+    "golang.org/x/time/rate"
 )

 type Pan115 struct {
     model.Storage
     Addition
-    client *driver115.Pan115Client
+    client  *driver115.Pan115Client
+    limiter *rate.Limiter
 }

 func (d *Pan115) Config() driver.Config {

@@ -26,29 +29,42 @@ func (d *Pan115) GetAddition() driver.Additional {
 }

 func (d *Pan115) Init(ctx context.Context) error {
+    if d.LimitRate > 0 {
+        d.limiter = rate.NewLimiter(rate.Limit(d.LimitRate), 1)
+    }
     return d.login()
 }

+func (d *Pan115) WaitLimit(ctx context.Context) error {
+    if d.limiter != nil {
+        return d.limiter.Wait(ctx)
+    }
+    return nil
+}
+
 func (d *Pan115) Drop(ctx context.Context) error {
     return nil
 }

 func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
     files, err := d.getFiles(dir.GetID())
     if err != nil && !errors.Is(err, driver115.ErrNotExist) {
         return nil, err
     }
-    return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
-        return src, nil
+    return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
+        return &src, nil
     })
 }

 func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    if err := d.WaitLimit(ctx); err != nil {
+        return nil, err
+    }
     downloadInfo, err := d.client.
-        SetUserAgent(driver115.UA115Browser).
-        Download(file.(driver115.File).PickCode)
-    // recover for upload
-    d.client.SetUserAgent(driver115.UA115Desktop)
+        DownloadWithUA(file.(*FileObj).PickCode, driver115.UA115Browser)
     if err != nil {
         return nil, err
     }

@@ -60,6 +76,9 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }

 func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     if _, err := d.client.Mkdir(parentDir.GetID(), dirName); err != nil {
         return err
     }

@@ -67,31 +86,99 @@ func (d *Pan115) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
 }

 func (d *Pan115) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Move(dstDir.GetID(), srcObj.GetID())
 }

 func (d *Pan115) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Rename(srcObj.GetID(), newName)
 }

 func (d *Pan115) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Copy(dstDir.GetID(), srcObj.GetID())
 }

 func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
     return d.client.Delete(obj.GetID())
 }

 func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
-    if err != nil {
-        return err
-    }
-    defer func() {
-        _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
-    }()
-    return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
+    if err := d.WaitLimit(ctx); err != nil {
+        return err
+    }
+
+    var (
+        fastInfo *driver115.UploadInitResp
+        dirID    = dstDir.GetID()
+    )
+
+    if ok, err := d.client.UploadAvailable(); err != nil || !ok {
+        return err
+    }
+    if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
+        return driver115.ErrUploadTooLarge
+    }
+    //if digest, err = d.client.GetDigestResult(stream); err != nil {
+    //	return err
+    //}
+
+    const PreHashSize int64 = 128 * utils.KB
+    hashSize := PreHashSize
+    if stream.GetSize() < PreHashSize {
+        hashSize = stream.GetSize()
+    }
+    reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+    if err != nil {
+        return err
+    }
+    preHash, err := utils.HashReader(utils.SHA1, reader)
+    if err != nil {
+        return err
+    }
+    preHash = strings.ToUpper(preHash)
+    fullHash := stream.GetHash().GetHash(utils.SHA1)
+    if len(fullHash) <= 0 {
+        tmpF, err := stream.CacheFullInTempFile()
+        if err != nil {
+            return err
+        }
+        fullHash, err = utils.HashFile(utils.SHA1, tmpF)
+        if err != nil {
+            return err
+        }
+    }
+    fullHash = strings.ToUpper(fullHash)
+
+    // rapid-upload
+    // note that 115 add timeout for rapid-upload,
+    // and "sig invalid" err is thrown even when the hash is correct after timeout.
+    if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
+        return err
+    }
+    if matched, err := fastInfo.Ok(); err != nil {
+        return err
+    } else if matched {
+        return nil
+    }
+
+    // 闪传失败,上传
+    if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传
+        return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
+    }
+    // 分片上传
+    return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
+
 }

 var _ driver.Driver = (*Pan115)(nil)
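The driver now gates every API call behind `WaitLimit`, backed by `golang.org/x/time/rate`. A small sketch of what `rate.NewLimiter(rate.Limit(2), 1)` does in isolation; the rate of 2 mirrors the addition's default `limit_rate`, and note that `rate.Limit(2)` means two events per second with a burst of one:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Same construction as Pan115.Init: allow ~2 events/second, burst 1,
	// so bursts of API calls are smoothed out instead of failing.
	limiter := rate.NewLimiter(rate.Limit(2), 1)
	ctx := context.Background()
	start := time.Now()
	for i := 0; i < 5; i++ {
		// Wait blocks until the next token is available (or ctx is done),
		// which is exactly what WaitLimit does before each request.
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("wait:", err)
			return
		}
		fmt.Printf("call %d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
	}
}
```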
@@ -6,17 +6,18 @@ import (
 )

 type Addition struct {
-    Cookie      string `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
-    QRCodeToken string `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
-    PageSize    int64  `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
+    Cookie      string  `json:"cookie" type:"text" help:"one of QR code token and cookie required"`
+    QRCodeToken string  `json:"qrcode_token" type:"text" help:"one of QR code token and cookie required"`
+    PageSize    int64   `json:"page_size" type:"number" default:"56" help:"list api per page size of 115 driver"`
+    LimitRate   float64 `json:"limit_rate" type:"number" default:"2" help:"limit all api request rate (1r/[limit_rate]s)"`
     driver.RootID
 }

 var config = driver.Config{
-    Name:        "115 Cloud",
-    DefaultRoot: "0",
-    OnlyProxy:   true,
-    OnlyLocal:   true,
+    Name:        "115 Cloud",
+    DefaultRoot: "0",
+    OnlyProxy:   true,
+    //OnlyLocal:   true,
+    NoOverwriteUpload: true,
 }
@@ -3,6 +3,20 @@ package _115
 import (
     "github.com/SheltonZhu/115driver/pkg/driver"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "time"
 )

-var _ model.Obj = (*driver.File)(nil)
+var _ model.Obj = (*FileObj)(nil)
+
+type FileObj struct {
+    driver.File
+}
+
+func (f *FileObj) CreateTime() time.Time {
+    return f.File.CreateTime
+}
+
+func (f *FileObj) GetHash() utils.HashInfo {
+    return utils.NewHashInfo(utils.SHA1, f.Sha1)
+}
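`FileObj` wraps `driver.File` by embedding and re-asserts `model.Obj` at compile time. A stand-alone sketch of that embedding-plus-blank-assignment idiom; the `Obj` interface here is a hypothetical one-method stand-in for `model.Obj`:

```go
package main

import "fmt"

// Obj stands in for model.Obj (hypothetical, trimmed to one method).
type Obj interface{ GetName() string }

type File struct{ Name string }

func (f *File) GetName() string { return f.Name }

// FileObj embeds File and can add or override methods, like the FileObj
// wrapper around driver.File in the 115 driver above.
type FileObj struct{ File }

// The blank assignment is a compile-time proof that *FileObj satisfies
// Obj; if a required method goes missing, the build fails on this line.
var _ Obj = (*FileObj)(nil)

func main() {
	var o Obj = &FileObj{File{Name: "demo"}}
	fmt.Println(o.GetName())
}
```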
@@ -1,10 +1,25 @@
 package _115

 import (
+    "bytes"
     "crypto/tls"
+    "encoding/json"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/http_range"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    "github.com/aliyun/aliyun-oss-go-sdk/oss"
+    "github.com/orzogc/fake115uploader/cipher"
+    "io"
+    "net/url"
+    "path/filepath"
+    "strconv"
+    "strings"
+    "sync"
+    "time"

     "github.com/SheltonZhu/115driver/pkg/driver"
+    driver115 "github.com/SheltonZhu/115driver/pkg/driver"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/pkg/errors"
 )

@@ -41,8 +56,8 @@ func (d *Pan115) login() error {
     return d.client.LoginCheck()
 }

-func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
-    res := make([]driver.File, 0)
+func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
+    res := make([]FileObj, 0)
     if d.PageSize <= 0 {
         d.PageSize = driver.FileListLimit
     }

@@ -51,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
         return nil, err
     }
     for _, file := range *files {
-        res = append(res, file)
+        res = append(res, FileObj{file})
     }
     return res, nil
 }
+
+const (
+    appVer = "2.0.3.6"
+)
+
+func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
+    var (
+        ecdhCipher   *cipher.EcdhCipher
+        encrypted    []byte
+        decrypted    []byte
+        encodedToken string
+        err          error
+        target       = "U_1_" + dirID
+        bodyBytes    []byte
+        result       = driver115.UploadInitResp{}
+        fileSizeStr  = strconv.FormatInt(fileSize, 10)
+    )
+    if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
+        return nil, err
+    }
+
+    userID := strconv.FormatInt(d.client.UserID, 10)
+    form := url.Values{}
+    form.Set("appid", "0")
+    form.Set("appversion", appVer)
+    form.Set("userid", userID)
+    form.Set("filename", fileName)
+    form.Set("filesize", fileSizeStr)
+    form.Set("fileid", fileID)
+    form.Set("target", target)
+    form.Set("sig", d.client.GenerateSignature(fileID, target))
+
+    signKey, signVal := "", ""
+    for retry := true; retry; {
+        t := driver115.Now()
+
+        if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
+            return nil, err
+        }
+
+        params := map[string]string{
+            "k_ec": encodedToken,
+        }
+
+        form.Set("t", t.String())
+        form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
+        if signKey != "" && signVal != "" {
+            form.Set("sign_key", signKey)
+            form.Set("sign_val", signVal)
+        }
+        if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
+            return nil, err
+        }
+
+        req := d.client.NewRequest().
+            SetQueryParams(params).
+            SetBody(encrypted).
+            SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
+            SetDoNotParseResponse(true)
+        resp, err := req.Post(driver115.ApiUploadInit)
+        if err != nil {
+            return nil, err
+        }
+        data := resp.RawBody()
+        defer data.Close()
+        if bodyBytes, err = io.ReadAll(data); err != nil {
+            return nil, err
+        }
+        if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
+            return nil, err
+        }
+        if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
+            return nil, err
+        }
+        if result.Status == 7 {
+            // Update signKey & signVal
+            signKey = result.SignKey
+            signVal, err = UploadDigestRange(stream, result.SignCheck)
+            if err != nil {
+                return nil, err
+            }
+        } else {
+            retry = false
+        }
+        result.SHA1 = fileID
+    }
+
+    return &result, nil
+}
+
+func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
+    var start, end int64
+    if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
+        return
+    }
+
+    length := end - start + 1
+    reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
+    hashStr, err := utils.HashReader(utils.SHA1, reader)
+    if err != nil {
+        return "", err
+    }
+    result = strings.ToUpper(hashStr)
+    return
+}
+
+// UploadByMultipart upload by mutipart blocks
+func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
+    var (
+        chunks    []oss.FileChunk
+        parts     []oss.UploadPart
+        imur      oss.InitiateMultipartUploadResult
+        ossClient *oss.Client
+        bucket    *oss.Bucket
+        ossToken  *driver115.UploadOSSTokenResp
+        err       error
+    )
+
+    tmpF, err := stream.CacheFullInTempFile()
+    if err != nil {
+        return err
+    }
+
+    options := driver115.DefalutUploadMultipartOptions()
+    if len(opts) > 0 {
+        for _, f := range opts {
+            f(options)
+        }
+    }
+
+    if ossToken, err = d.client.GetOSSToken(); err != nil {
+        return err
+    }
+
+    if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
+        return err
+    }
+
+    if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
+        return err
+    }
+
+    // ossToken一小时后就会失效,所以每50分钟重新获取一次
+    ticker := time.NewTicker(options.TokenRefreshTime)
+    defer ticker.Stop()
+    // 设置超时
+    timeout := time.NewTimer(options.Timeout)
+
+    if chunks, err = SplitFile(fileSize); err != nil {
+        return err
+    }
+
+    if imur, err = bucket.InitiateMultipartUpload(params.Object,
+        oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
+        oss.UserAgentHeader(driver115.OSSUserAgent),
+    ); err != nil {
+        return err
+    }
+
+    wg := sync.WaitGroup{}
+    wg.Add(len(chunks))
+
+    chunksCh := make(chan oss.FileChunk)
+    errCh := make(chan error)
+    UploadedPartsCh := make(chan oss.UploadPart)
+    quit := make(chan struct{})
+
+    // producer
+    go chunksProducer(chunksCh, chunks)
+    go func() {
+        wg.Wait()
+        quit <- struct{}{}
+    }()
+
+    // consumers
+    for i := 0; i < options.ThreadsNum; i++ {
+        go func(threadId int) {
+            defer func() {
+                if r := recover(); r != nil {
+                    errCh <- fmt.Errorf("Recovered in %v", r)
+                }
+            }()
+            for chunk := range chunksCh {
+                var part oss.UploadPart // 出现错误就继续尝试,共尝试3次
+                for retry := 0; retry < 3; retry++ {
+                    select {
+                    case <-ticker.C:
+                        if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken
+                            errCh <- errors.Wrap(err, "刷新token时出现错误")
+                        }
+                    default:
+                    }
+
+                    buf := make([]byte, chunk.Size)
+                    if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
+                        continue
+                    }
+
+                    b := bytes.NewBuffer(buf)
+                    if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+                        break
+                    }
+                }
+                if err != nil {
+                    errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+                }
+                UploadedPartsCh <- part
+            }
+        }(i)
+    }
+
+    go func() {
+        for part := range UploadedPartsCh {
+            parts = append(parts, part)
+            wg.Done()
+        }
+    }()
+LOOP:
+    for {
+        select {
+        case <-ticker.C:
+            // 到时重新获取ossToken
+            if ossToken, err = d.client.GetOSSToken(); err != nil {
+                return err
+            }
+        case <-quit:
+            break LOOP
+        case <-errCh:
+            return err
+        case <-timeout.C:
+            return fmt.Errorf("time out")
+        }
+    }
+
+    // EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的
+    if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
+        // 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的
+        if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+            return err
+        }
+    }
+    return d.checkUploadStatus(dirID, params.SHA1)
+}
+func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
+    for _, chunk := range chunks {
+        ch <- chunk
+    }
+}
+func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
+    // 验证上传是否成功
+    req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
+    opts := []driver115.GetFileOptions{
+        driver115.WithOrder(driver115.FileOrderByTime),
+        driver115.WithShowDirEnable(false),
+        driver115.WithAsc(false),
+        driver115.WithLimit(500),
+    }
+    fResp, err := driver115.GetFiles(req, dirID, opts...)
+    if err != nil {
+        return err
+    }
+    for _, fileInfo := range fResp.Files {
+        if fileInfo.Sha1 == sha1 {
+            return nil
+        }
+    }
+    return driver115.ErrUploadFailed
+}
+
+func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
+    for i := int64(1); i < 10; i++ {
+        if fileSize < i*utils.GB { // 文件大小小于iGB时分为i*1000片
+            if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
+                return
+            }
+            break
+        }
+    }
+    if fileSize > 9*utils.GB { // 文件大小大于9GB时分为10000片
+        if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
+            return
+        }
+    }
+    // 单个分片大小不能小于100KB
+    if chunks[0].Size < 100*utils.KB {
+        if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
+            return
+        }
+    }
+    return
+}
+
+// SplitFileByPartNum splits big file into parts by the num of parts.
+// Split the file with specified parts count, returns the split result when error is nil.
+func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
+    if chunkNum <= 0 || chunkNum > 10000 {
+        return nil, errors.New("chunkNum invalid")
+    }
+
+    if int64(chunkNum) > fileSize {
+        return nil, errors.New("oss: chunkNum invalid")
+    }
+
+    var chunks []oss.FileChunk
+    var chunk = oss.FileChunk{}
+    var chunkN = (int64)(chunkNum)
+    for i := int64(0); i < chunkN; i++ {
+        chunk.Number = int(i + 1)
+        chunk.Offset = i * (fileSize / chunkN)
+        if i == chunkN-1 {
+            chunk.Size = fileSize/chunkN + fileSize%chunkN
+        } else {
+            chunk.Size = fileSize / chunkN
+        }
+        chunks = append(chunks, chunk)
+    }
+
+    return chunks, nil
+}
+
+// SplitFileByPartSize splits big file into parts by the size of parts.
+// Splits the file by the part size. Returns the FileChunk when error is nil.
+func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
+    if chunkSize <= 0 {
+        return nil, errors.New("chunkSize invalid")
+    }
+
+    var chunkN = fileSize / chunkSize
+    if chunkN >= 10000 {
+        return nil, errors.New("Too many parts, please increase part size")
+    }
+
+    var chunks []oss.FileChunk
+    var chunk = oss.FileChunk{}
+    for i := int64(0); i < chunkN; i++ {
+        chunk.Number = int(i + 1)
+        chunk.Offset = i * chunkSize
+        chunk.Size = chunkSize
+        chunks = append(chunks, chunk)
+    }
+
+    if fileSize%chunkSize > 0 {
+        chunk.Number = len(chunks) + 1
+        chunk.Offset = int64(len(chunks)) * chunkSize
+        chunk.Size = fileSize % chunkSize
+        chunks = append(chunks, chunk)
+    }
+
+    return chunks, nil
+}
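The chunk arithmetic in `SplitFileByPartNum` is easy to sanity-check by hand. A sketch reproducing just that arithmetic, with a worked 10-byte, 3-part example:

```go
package main

import "fmt"

// splitByPartNum reproduces the arithmetic of SplitFileByPartNum above:
// every chunk gets fileSize/chunkNum bytes, and the last chunk also
// absorbs the remainder fileSize%chunkNum.
func splitByPartNum(fileSize, chunkNum int64) (sizes []int64) {
	for i := int64(0); i < chunkNum; i++ {
		size := fileSize / chunkNum
		if i == chunkNum-1 {
			size += fileSize % chunkNum
		}
		sizes = append(sizes, size)
	}
	return sizes
}

func main() {
	// A 10-byte file in 3 parts: 3 + 3 + 4 bytes.
	fmt.Println(splitByPartNum(10, 3)) // [3 3 4]
}
```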
@@ -6,11 +6,6 @@ import (
     "encoding/base64"
     "encoding/hex"
     "fmt"
-    "io"
-    "net/http"
-    "net/url"
-    "os"

     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"

@@ -22,6 +17,9 @@ import (
     "github.com/aws/aws-sdk-go/service/s3/s3manager"
     "github.com/go-resty/resty/v2"
     log "github.com/sirupsen/logrus"
+    "io"
+    "net/http"
+    "net/url"
 )

 type Pan123 struct {

@@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     // const DEFAULT int64 = 10485760
     h := md5.New()
     // need to calculate md5 of the full content
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+    tempFile, err := stream.CacheFullInTempFile()
     if err != nil {
         return err
     }
     defer func() {
         _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
     }()
     if _, err = io.Copy(h, tempFile); err != nil {
         return err
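The 123 driver's `Put` now caches the stream to a seekable temp file via `CacheFullInTempFile` and hashes the whole content. A minimal sketch of just the hashing step; an in-memory reader stands in for the cached file:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Same idea as the hunk above: stream the full content through
	// io.Copy into an md5.Hash, then hex-encode the digest.
	h := md5.New()
	content := strings.NewReader("example file content")
	if _, err := io.Copy(h, content); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(hex.EncodeToString(h.Sum(nil)))
}
```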
@@ -1,6 +1,7 @@
 package _123

 import (
+    "github.com/alist-org/alist/v3/pkg/utils"
     "net/url"
     "path"
     "strconv"

@@ -21,6 +22,14 @@ type File struct {
     DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) CreateTime() time.Time {
+    return f.UpdateAt
+}
+
+func (f File) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
     return ""
 }
@@ -107,7 +107,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
             if err != nil {
                 return err
             }
-            up(j * 100 / chunkCount)
+            up(float64(j) * 100 / float64(chunkCount))
         }
     }
     // complete s3 upload
77  drivers/123_link/driver.go  Normal file

@@ -0,0 +1,77 @@
+package _123Link
+
+import (
+    "context"
+    stdpath "path"
+    "time"
+
+    "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
+)
+
+type Pan123Link struct {
+    model.Storage
+    Addition
+    root *Node
+}
+
+func (d *Pan123Link) Config() driver.Config {
+    return config
+}
+
+func (d *Pan123Link) GetAddition() driver.Additional {
+    return &d.Addition
+}
+
+func (d *Pan123Link) Init(ctx context.Context) error {
+    node, err := BuildTree(d.OriginURLs)
+    if err != nil {
+        return err
+    }
+    node.calSize()
+    d.root = node
+    return nil
+}
+
+func (d *Pan123Link) Drop(ctx context.Context) error {
+    return nil
+}
+
+func (d *Pan123Link) Get(ctx context.Context, path string) (model.Obj, error) {
+    node := GetNodeFromRootByPath(d.root, path)
+    return nodeToObj(node, path)
+}
+
+func (d *Pan123Link) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+    node := GetNodeFromRootByPath(d.root, dir.GetPath())
+    if node == nil {
+        return nil, errs.ObjectNotFound
+    }
+    if node.isFile() {
+        return nil, errs.NotFolder
+    }
+    return utils.SliceConvert(node.Children, func(node *Node) (model.Obj, error) {
+        return nodeToObj(node, stdpath.Join(dir.GetPath(), node.Name))
+    })
+}
+
+func (d *Pan123Link) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+    node := GetNodeFromRootByPath(d.root, file.GetPath())
+    if node == nil {
+        return nil, errs.ObjectNotFound
+    }
+    if node.isFile() {
+        signUrl, err := SignURL(node.Url, d.PrivateKey, d.UID, time.Duration(d.ValidDuration)*time.Minute)
+        if err != nil {
+            return nil, err
+        }
+        return &model.Link{
+            URL: signUrl,
+        }, nil
+    }
+    return nil, errs.NotFile
+}
+
+var _ driver.Driver = (*Pan123Link)(nil)
23  drivers/123_link/meta.go  Normal file

@@ -0,0 +1,23 @@
+package _123Link
+
+import (
+    "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+    OriginURLs    string `json:"origin_urls" type:"text" required:"true" default:"https://vip.123pan.com/29/folder/file.mp3" help:"structure:FolderName:\n [FileSize:][Modified:]Url"`
+    PrivateKey    string `json:"private_key"`
+    UID           uint64 `json:"uid" type:"number"`
+    ValidDuration int64  `json:"valid_duration" type:"number" default:"30" help:"minutes"`
+}
+
+var config = driver.Config{
+    Name: "123PanLink",
+}
+
+func init() {
+    op.RegisterDriver(func() driver.Driver {
+        return &Pan123Link{}
+    })
+}
152  drivers/123_link/parse.go  Normal file

@@ -0,0 +1,152 @@
+package _123Link
+
+import (
+    "fmt"
+    url2 "net/url"
+    stdpath "path"
+    "strconv"
+    "strings"
+    "time"
+)
+
+// build tree from text, text structure definition:
+/**
+ * FolderName:
+ *   [FileSize:][Modified:]Url
+ */
+/**
+ * For example:
+ * folder1:
+ *   name1:url1
+ *   url2
+ *   folder2:
+ *     url3
+ *     url4
+ *   url5
+ * folder3:
+ *   url6
+ *   url7
+ * url8
+ */
+// if there are no name, use the last segment of url as name
+func BuildTree(text string) (*Node, error) {
+    lines := strings.Split(text, "\n")
+    var root = &Node{Level: -1, Name: "root"}
+    stack := []*Node{root}
+    for _, line := range lines {
+        // calculate indent
+        indent := 0
+        for i := 0; i < len(line); i++ {
+            if line[i] != ' ' {
+                break
+            }
+            indent++
+        }
+        // if indent is not a multiple of 2, it is an error
+        if indent%2 != 0 {
+            return nil, fmt.Errorf("the line '%s' is not a multiple of 2", line)
+        }
+        // calculate level
+        level := indent / 2
+        line = strings.TrimSpace(line[indent:])
+        // if the line is empty, skip
+        if line == "" {
+            continue
+        }
+        // if level isn't greater than the level of the top of the stack
+        // it is not the child of the top of the stack
+        for level <= stack[len(stack)-1].Level {
+            // pop the top of the stack
+            stack = stack[:len(stack)-1]
+        }
+        // if the line is a folder
+        if isFolder(line) {
+            // create a new node
+            node := &Node{
+                Level: level,
+                Name:  strings.TrimSuffix(line, ":"),
+            }
+            // add the node to the top of the stack
+            stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
+            // push the node to the stack
+            stack = append(stack, node)
+        } else {
+            // if the line is a file
+            // create a new node
+            node, err := parseFileLine(line)
+            if err != nil {
+                return nil, err
+            }
+            node.Level = level
+            // add the node to the top of the stack
+            stack[len(stack)-1].Children = append(stack[len(stack)-1].Children, node)
+        }
+    }
+    return root, nil
+}
+
+func isFolder(line string) bool {
+    return strings.HasSuffix(line, ":")
+}
+
+// line definition:
+// [FileSize:][Modified:]Url
+func parseFileLine(line string) (*Node, error) {
+    // if there is no url, it is an error
+    if !strings.Contains(line, "http://") && !strings.Contains(line, "https://") {
+        return nil, fmt.Errorf("invalid line: %s, because url is required for file", line)
+    }
+    index := strings.Index(line, "http://")
+    if index == -1 {
+        index = strings.Index(line, "https://")
+    }
+    url := line[index:]
+    info := line[:index]
+    node := &Node{
+        Url: url,
+    }
+    name := stdpath.Base(url)
+    unescape, err := url2.PathUnescape(name)
+    if err == nil {
+        name = unescape
+    }
+    node.Name = name
+    if index > 0 {
+        if !strings.HasSuffix(info, ":") {
+            return nil, fmt.Errorf("invalid line: %s, because file info must end with ':'", line)
+        }
+        info = info[:len(info)-1]
+        if info == "" {
+            return nil, fmt.Errorf("invalid line: %s, because file name can't be empty", line)
+        }
+        infoParts := strings.Split(info, ":")
+        size, err := strconv.ParseInt(infoParts[0], 10, 64)
+        if err != nil {
+            return nil, fmt.Errorf("invalid line: %s, because file size must be an integer", line)
+        }
+        node.Size = size
+        if len(infoParts) > 1 {
+            modified, err := strconv.ParseInt(infoParts[1], 10, 64)
+            if err != nil {
+                return nil, fmt.Errorf("invalid line: %s, because file modified must be an unix timestamp", line)
+            }
+            node.Modified = modified
+        } else {
+            node.Modified = time.Now().Unix()
+        }
+    }
+    return node, nil
+}
+
+func splitPath(path string) []string {
+    if path == "/" {
+        return []string{"root"}
+    }
+    parts := strings.Split(path, "/")
+    parts[0] = "root"
+    return parts
+}
+
+func GetNodeFromRootByPath(root *Node, path string) *Node {
+    return root.getByPath(splitPath(path))
+}
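`BuildTree` treats two spaces as one tree level and rejects odd indentation. A sketch isolating just that indent rule against a small sample of the `OriginURLs` text format (the sample tree below is illustrative, not taken from a real configuration):

```go
package main

import (
	"fmt"
	"strings"
)

// indentLevel reproduces the indent rule from parse.go: levels are
// two-space indents, and anything that is not a multiple of two is
// rejected with the same error message.
func indentLevel(line string) (int, error) {
	indent := len(line) - len(strings.TrimLeft(line, " "))
	if indent%2 != 0 {
		return 0, fmt.Errorf("the line '%s' is not a multiple of 2", line)
	}
	return indent / 2, nil
}

func main() {
	text := "folder1:\n  name1:url1\n  url2\n  folder2:\n    url3"
	for _, line := range strings.Split(text, "\n") {
		level, err := indentLevel(line)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("level %d: %s\n", level, strings.TrimSpace(line))
	}
}
```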
66  drivers/123_link/types.go  Normal file

@@ -0,0 +1,66 @@
+package _123Link
+
+import (
+    "time"
+
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+)
+
+// Node is a node in the folder tree
+type Node struct {
+    Url      string
+    Name     string
+    Level    int
+    Modified int64
+    Size     int64
+    Children []*Node
+}
+
+func (node *Node) getByPath(paths []string) *Node {
+    if len(paths) == 0 || node == nil {
+        return nil
+    }
+    if node.Name != paths[0] {
+        return nil
+    }
+    if len(paths) == 1 {
+        return node
+    }
+    for _, child := range node.Children {
+        tmp := child.getByPath(paths[1:])
+        if tmp != nil {
+            return tmp
+        }
+    }
+    return nil
+}
+
+func (node *Node) isFile() bool {
+    return node.Url != ""
+}
+
+func (node *Node) calSize() int64 {
+    if node.isFile() {
+        return node.Size
+    }
+    var size int64 = 0
+    for _, child := range node.Children {
+        size += child.calSize()
+    }
+    node.Size = size
+    return size
+}
+
+func nodeToObj(node *Node, path string) (model.Obj, error) {
+    if node == nil {
+        return nil, errs.ObjectNotFound
+    }
+    return &model.Object{
+        Name:     node.Name,
+        Size:     node.Size,
+        Modified: time.Unix(node.Modified, 0),
+        IsFolder: !node.isFile(),
+        Path:     path,
+    }, nil
+}
30  drivers/123_link/util.go  Normal file

@@ -0,0 +1,30 @@
+package _123Link
+
+import (
+    "crypto/md5"
+    "fmt"
+    "math/rand"
+    "net/url"
+    "time"
+)
+
+func SignURL(originURL, privateKey string, uid uint64, validDuration time.Duration) (newURL string, err error) {
+    if privateKey == "" {
+        return originURL, nil
+    }
+    var (
+        ts     = time.Now().Add(validDuration).Unix() // 有效时间戳
+        rInt   = rand.Int()                           // 随机正整数
+        objURL *url.URL
+    )
+    objURL, err = url.Parse(originURL)
+    if err != nil {
+        return "", err
+    }
+    authKey := fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s",
+        objURL.Path, ts, rInt, uid, privateKey))))
+    v := objURL.Query()
+    v.Add("auth_key", authKey)
+    objURL.RawQuery = v.Encode()
+    return objURL.String(), nil
+}
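`SignURL` builds an `auth_key` query parameter from an expiry timestamp, a random integer, the UID, and an MD5 over `"path-ts-rand-uid-privateKey"`. A worked sketch of the same recipe with the random value pinned so the output is reproducible (values below are made up for illustration):

```go
package main

import (
	"crypto/md5"
	"fmt"
	"net/url"
	"time"
)

// signPath reproduces the auth_key recipe from drivers/123_link/util.go:
// md5 over "path-ts-rand-uid-privateKey", appended to the URL as
// "auth_key=ts-rand-uid-md5hex".
func signPath(rawURL, privateKey string, uid uint64, valid time.Duration, rInt int) (string, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return "", err
	}
	ts := time.Now().Add(valid).Unix()
	sum := md5.Sum([]byte(fmt.Sprintf("%s-%d-%d-%d-%s", u.Path, ts, rInt, uid, privateKey)))
	v := u.Query()
	v.Add("auth_key", fmt.Sprintf("%d-%d-%d-%x", ts, rInt, uid, sum))
	u.RawQuery = v.Encode()
	return u.String(), nil
}

func main() {
	signed, _ := signPath("https://vip.123pan.com/29/folder/file.mp3", "secret", 29, 30*time.Minute, 12345)
	fmt.Println(signed)
}
```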
@@ -1,6 +1,7 @@
 package _123Share

 import (
+    "github.com/alist-org/alist/v3/pkg/utils"
     "net/url"
     "path"
     "strconv"

@@ -21,6 +22,10 @@ type File struct {
     DownloadUrl string `json:"DownloadUrl"`
 }

+func (f File) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
+
 func (f File) GetPath() string {
     return ""
 }

@@ -36,6 +41,9 @@ func (f File) GetName() string {
 func (f File) ModTime() time.Time {
     return f.UpdateAt
 }
+func (f File) CreateTime() time.Time {
+    return f.UpdateAt
+}

 func (f File) IsDir() bool {
     return f.Type == 1
@@ -103,9 +103,9 @@ func (d *Yun139) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
     return err
 }

-func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
     if d.isFamily() {
-        return errs.NotImplement
+        return nil, errs.NotImplement
     }
     var contentInfoList []string
     var catalogInfoList []string

@@ -131,7 +131,10 @@ func (d *Yun139) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
     }
     pathname := "/orchestration/personalCloud/batchOprTask/v1.0/createBatchOprTask"
     _, err := d.post(pathname, data, nil)
-    return err
+    if err != nil {
+        return nil, err
+    }
+    return srcObj, nil
 }

 func (d *Yun139) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
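`Move` changes from returning only `error` to returning `(model.Obj, error)`, with the 139 driver handing back `srcObj` on success. A hypothetical, trimmed illustration of that interface change; the interfaces and types here are stand-ins, not the real `driver` package:

```go
package main

import "fmt"

// Obj stands in for model.Obj (hypothetical, one method).
type Obj interface{ GetName() string }

type file struct{ name string }

func (f file) GetName() string { return f.name }

// Before the refactor: Move reported only success or failure.
type moverBefore interface {
	Move(srcObj, dstDir Obj) error
}

// After the refactor: Move also returns the resulting object, so a
// caller can use the moved object directly instead of re-listing.
type moverAfter interface {
	Move(srcObj, dstDir Obj) (Obj, error)
}

type demo struct{}

func (demo) Move(srcObj, dstDir Obj) (Obj, error) {
	// The 139 driver simply returns srcObj once the API call succeeds.
	return srcObj, nil
}

var _ moverAfter = demo{}

func main() {
	moved, err := demo{}.Move(file{"a.txt"}, file{"dir"})
	fmt.Println(moved.GetName(), err)
}
```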
@@ -10,7 +10,7 @@ type Catalog struct {
     CatalogID   string `json:"catalogID"`
     CatalogName string `json:"catalogName"`
     //CatalogType int `json:"catalogType"`
-    //CreateTime string `json:"createTime"`
+    CreateTime string `json:"createTime"`
     UpdateTime string `json:"updateTime"`
     //IsShared bool `json:"isShared"`
     //CatalogLevel int `json:"catalogLevel"`

@@ -63,7 +63,7 @@ type Content struct {
     //ParentCatalogID string `json:"parentCatalogId"`
     //Channel string `json:"channel"`
     //GeoLocFlag string `json:"geoLocFlag"`
-    //Digest string `json:"digest"`
+    Digest string `json:"digest"`
     //Version string `json:"version"`
     //FileEtag string `json:"fileEtag"`
     //FileVersion string `json:"fileVersion"`

@@ -141,7 +141,7 @@ type CloudContent struct {
     //ContentSuffix string `json:"contentSuffix"`
     ContentSize int64 `json:"contentSize"`
     //ContentDesc string `json:"contentDesc"`
-    //CreateTime string `json:"createTime"`
+    CreateTime string `json:"createTime"`
     //Shottime interface{} `json:"shottime"`
     LastUpdateTime string `json:"lastUpdateTime"`
     ThumbnailURL   string `json:"thumbnailURL"`

@@ -165,7 +165,7 @@ type CloudCatalog struct {
     CatalogID   string `json:"catalogID"`
     CatalogName string `json:"catalogName"`
     //CloudID string `json:"cloudID"`
-    //CreateTime string `json:"createTime"`
+    CreateTime string `json:"createTime"`
     LastUpdateTime string `json:"lastUpdateTime"`
     //Creator string `json:"creator"`
     //CreatorNickname string `json:"creatorNickname"`
@@ -48,7 +48,7 @@ func calSign(body, ts, randStr string) string {
}

func getTime(t string) time.Time {
    stamp, _ := time.ParseInLocation("20060102150405", t, time.Local)
    stamp, _ := time.ParseInLocation("20060102150405", t, utils.CNLoc)
    return stamp
}

@@ -139,6 +139,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
            Name:     catalog.CatalogName,
            Size:     0,
            Modified: getTime(catalog.UpdateTime),
            Ctime:    getTime(catalog.CreateTime),
            IsFolder: true,
        }
        files = append(files, &f)
@@ -150,6 +151,7 @@ func (d *Yun139) getFiles(catalogID string) ([]model.Obj, error) {
                Name:     content.ContentName,
                Size:     content.ContentSize,
                Modified: getTime(content.UpdateTime),
                HashInfo: utils.NewHashInfo(utils.MD5, content.Digest),
            },
            Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
            //Thumbnail: content.BigthumbnailURL,
@@ -202,6 +204,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
            Size:     0,
            IsFolder: true,
            Modified: getTime(catalog.LastUpdateTime),
            Ctime:    getTime(catalog.CreateTime),
        }
        files = append(files, &f)
    }
@@ -212,6 +215,7 @@ func (d *Yun139) familyGetFiles(catalogID string) ([]model.Obj, error) {
                Name:     content.ContentName,
                Size:     content.ContentSize,
                Modified: getTime(content.LastUpdateTime),
                Ctime:    getTime(content.CreateTime),
            },
            Thumbnail: model.Thumbnail{Thumbnail: content.ThumbnailURL},
            //Thumbnail: content.BigthumbnailURL,
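The `getTime` change swaps `time.Local` for `utils.CNLoc`, so the 139 API's `20060102150405` timestamps are interpreted as Beijing time no matter where the server runs. A self-contained sketch of the same idea, with a hand-rolled fixed zone standing in for `utils.CNLoc` (assumed to be UTC+8):

```go
package main

import (
	"fmt"
	"time"
)

// CNLoc pins parsing to UTC+8 so the result does not depend on the
// host's local timezone (utils.CNLoc in AList presumably does the same).
var CNLoc = time.FixedZone("CST", 8*60*60)

func getTime(t string) time.Time {
	stamp, _ := time.ParseInLocation("20060102150405", t, CNLoc)
	return stamp
}

func main() {
	fmt.Println(getTime("20230801120000").UTC()) // 2023-08-01 04:00:00 +0000 UTC
}
```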
@@ -380,7 +380,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
        if err != nil {
            return err
        }
        up(int(i * 100 / count))
        up(float64(i) * 100 / float64(count))
    }
    fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
    sliceMd5 := fileMd5
@@ -3,6 +3,7 @@ package _189pc

import (
    "context"
    "net/http"
    "strconv"
    "strings"
    "time"

@@ -24,10 +25,17 @@ type Cloud189PC struct {

    loginParam *LoginParam
    tokenInfo  *AppSessionResp

    uploadThread int

    storageConfig driver.Config
}

func (y *Cloud189PC) Config() driver.Config {
    return config
    if y.storageConfig.Name == "" {
        y.storageConfig = config
    }
    return y.storageConfig
}

func (y *Cloud189PC) GetAddition() driver.Additional {
@@ -35,6 +43,9 @@ func (y *Cloud189PC) GetAddition() driver.Additional {
}

func (y *Cloud189PC) Init(ctx context.Context) (err error) {
    // Stay compatible with the legacy upload API
    y.storageConfig.NoOverwriteUpload = y.isFamily() && (y.Addition.RapidUpload || y.Addition.UploadMethod == "old")

    // Handle personal-cloud and family-cloud parameters
    if y.isFamily() && y.RootFolderID == "-11" {
        y.RootFolderID = ""
@@ -44,6 +55,12 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
        y.FamilyID = ""
    }

    // Clamp the number of upload threads
    y.uploadThread, _ = strconv.Atoi(y.UploadThread)
    if y.uploadThread < 1 || y.uploadThread > 32 {
        y.uploadThread, y.UploadThread = 3, "3"
    }

    // Initialize the request client
    if y.client == nil {
        y.client = base.NewRestyClient().SetHeaders(map[string]string{
@@ -109,10 +126,11 @@ func (y *Cloud189PC) Link(ctx context.Context, file model.Obj, args model.LinkAr

    // Follow the redirect to obtain the real link
    downloadUrl.URL = strings.Replace(strings.ReplaceAll(downloadUrl.URL, "&amp;", "&"), "http://", "https://", 1)
    res, err := base.NoRedirectClient.R().SetContext(ctx).Get(downloadUrl.URL)
    res, err := base.NoRedirectClient.R().SetContext(ctx).SetDoNotParseResponse(true).Get(downloadUrl.URL)
    if err != nil {
        return nil, err
    }
    defer res.RawBody().Close()
    if res.StatusCode() == 302 {
        downloadUrl.URL = res.Header().Get("location")
    }
@@ -293,6 +311,13 @@ func (y *Cloud189PC) Remove(ctx context.Context, obj model.Obj) error {
}

func (y *Cloud189PC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // Slow to respond; enable on demand
    if y.Addition.RapidUpload {
        if newObj, err := y.RapidUpload(ctx, dstDir, stream); err == nil {
            return newObj, nil
        }
    }

    switch y.UploadMethod {
    case "old":
        return y.OldUpload(ctx, dstDir, stream, up)
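The `Link` hunk adds `SetDoNotParseResponse(true)` so resty hands back the raw body, which then has to be drained and closed by hand (`defer res.RawBody().Close()`). The same manual-redirect idea expressed with only the standard library, as a sketch:

```go
package example

import (
	"fmt"
	"io"
	"net/http"
)

// resolveRedirect follows a single 302 by hand, the same idea as the
// NoRedirectClient + SetDoNotParseResponse change above: we want the
// Location header, not the redirected body.
func resolveRedirect(rawURL string) (string, error) {
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse // stop at the first response
		},
	}
	res, err := client.Get(rawURL)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()               // raw bodies must be closed explicitly
	_, _ = io.Copy(io.Discard, res.Body) // drain so the connection can be reused
	if res.StatusCode == http.StatusFound {
		return res.Header.Get("Location"), nil
	}
	return "", fmt.Errorf("expected 302, got %d", res.StatusCode)
}
```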
@@ -160,9 +160,8 @@ func toDesc(o string) string {
func ParseHttpHeader(str string) map[string]string {
    header := make(map[string]string)
    for _, value := range strings.Split(str, "&") {
        i := strings.Index(value, "=")
        if i > 0 {
            header[strings.TrimSpace(value[0:i])] = strings.TrimSpace(value[i+1:])
        if k, v, found := strings.Cut(value, "="); found {
            header[k] = v
        }
    }
    return header
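`strings.Cut` (Go 1.18) replaces the `strings.Index` plus slicing dance. Note it is not a drop-in behavioral match: the old code trimmed whitespace and rejected empty keys via `i > 0`, while `Cut` keeps key and value verbatim. A quick demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Cut splits around the first separator and reports whether
	// the separator was found.
	k, v, found := strings.Cut("Content-Type=application/json", "=")
	fmt.Println(k, v, found) // Content-Type application/json true

	// Behavior difference: Cut does not trim spaces and accepts an empty
	// key, which the old `i > 0` check rejected.
	k, v, found = strings.Cut("=bare", "=")
	fmt.Printf("%q %q %v\n", k, v, found) // "" "bare" true
}
```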
@@ -15,6 +15,8 @@ type Addition struct {
    Type         string `json:"type" type:"select" options:"personal,family" default:"personal"`
    FamilyID     string `json:"family_id"`
    UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
    UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
    RapidUpload  bool   `json:"rapid_upload"`
    NoUseOcr     bool   `json:"no_use_ocr"`
}
@@ -3,6 +3,7 @@ package _189pc

import (
    "encoding/xml"
    "fmt"
    "github.com/alist-org/alist/v3/pkg/utils"
    "sort"
    "strings"
    "time"
@@ -175,6 +176,14 @@ type Cloud189File struct {
    // StarLabel int64 `json:"starLabel"`
}

func (c *Cloud189File) CreateTime() time.Time {
    return time.Time(c.CreateDate)
}

func (c *Cloud189File) GetHash() utils.HashInfo {
    return utils.NewHashInfo(utils.MD5, c.Md5)
}

func (c *Cloud189File) GetSize() int64     { return c.Size }
func (c *Cloud189File) GetName() string    { return c.Name }
func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
@@ -199,6 +208,14 @@ type Cloud189Folder struct {
    // StarLabel int64 `json:"starLabel"`
}

func (c *Cloud189Folder) CreateTime() time.Time {
    return time.Time(c.CreateDate)
}

func (c *Cloud189Folder) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}

func (c *Cloud189Folder) GetSize() int64     { return 0 }
func (c *Cloud189Folder) GetName() string    { return c.Name }
func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }
@@ -239,14 +256,25 @@ type InitMultiUploadResp struct {
    } `json:"data"`
}
type UploadUrlsResp struct {
    Code       string          `json:"code"`
    UploadUrls map[string]Part `json:"uploadUrls"`
    Code string                    `json:"code"`
    Data map[string]UploadUrlsData `json:"uploadUrls"`
}
type Part struct {
type UploadUrlsData struct {
    RequestURL    string `json:"requestURL"`
    RequestHeader string `json:"requestHeader"`
}

type UploadUrlInfo struct {
    PartNumber int
    Headers    map[string]string
    UploadUrlsData
}

type UploadProgress struct {
    UploadInfo  InitMultiUploadResp
    UploadParts []string
}

/* Second upload method */
type CreateUploadFileResp struct {
    // Upload file request ID
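The new `UploadUrlInfo` embeds `UploadUrlsData` rather than copying its fields, so `RequestURL` and `RequestHeader` are promoted onto the outer type. A tiny standalone illustration of that embedding pattern (types reduced to plain strings for brevity):

```go
package main

import "fmt"

type UploadUrlsData struct {
	RequestURL    string
	RequestHeader string
}

// UploadUrlInfo embeds UploadUrlsData, so its fields are promoted and
// reachable directly on the outer value.
type UploadUrlInfo struct {
	PartNumber int
	Headers    map[string]string
	UploadUrlsData
}

func main() {
	info := UploadUrlInfo{
		PartNumber:     1,
		UploadUrlsData: UploadUrlsData{RequestURL: "https://example.com/part1"},
	}
	fmt.Println(info.RequestURL) // promoted from the embedded struct
}
```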
@@ -13,8 +13,9 @@ import (
    "net/http"
    "net/http/cookiejar"
    "net/url"
    "os"
    "regexp"
    "sort"
    "strconv"
    "strings"
    "time"

@@ -24,6 +25,7 @@ import (
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/setting"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"

    "github.com/avast/retry-go"
@@ -436,14 +438,18 @@ func (y *Cloud189PC) refreshSession() (err error) {
// Normal upload
// Zero-size files cannot be uploaded
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    var DEFAULT = partSize(file.GetSize())
    var count = int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
    var sliceSize = partSize(file.GetSize())
    count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
    lastPartSize := file.GetSize() % sliceSize
    if file.GetSize() > 0 && lastPartSize == 0 {
        lastPartSize = sliceSize
    }

    params := Params{
        "parentFolderId": dstDir.GetID(),
        "fileName":       url.QueryEscape(file.GetName()),
        "fileSize":       fmt.Sprint(file.GetSize()),
        "sliceSize":      fmt.Sprint(DEFAULT),
        "sliceSize":      fmt.Sprint(sliceSize),
        "lazyCheck":      "1",
    }

@@ -465,61 +471,59 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
        return nil, err
    }

    threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))

    fileMd5 := md5.New()
    silceMd5 := md5.New()
    silceMd5Hexs := make([]string, 0, count)
    byteData := bytes.NewBuffer(make([]byte, DEFAULT))

    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        if utils.IsCanceled(upCtx) {
            break
        }

        byteData := make([]byte, sliceSize)
        if i == count {
            byteData = byteData[:lastPartSize]
        }

        // Read the chunk
        byteData.Reset()
        silceMd5.Reset()
        _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5, byteData), file, DEFAULT)
        if err != io.EOF && err != io.ErrUnexpectedEOF && err != nil {
        if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
            return nil, err
        }

        // Compute the chunk MD5 and hex/base64-encode it
        md5Bytes := silceMd5.Sum(nil)
        silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
        silceMd5Base64 := base64.StdEncoding.EncodeToString(md5Bytes)
        partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

        // Get the upload URL
        var uploadUrl UploadUrlsResp
        _, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
            func(req *resty.Request) {
                req.SetContext(ctx)
            }, Params{
                "partInfo":     fmt.Sprintf("%d-%s", i, silceMd5Base64),
                "uploadFileId": initMultiUpload.Data.UploadFileID,
            }, &uploadUrl)
        if err != nil {
            return nil, err
        }
        threadG.Go(func(ctx context.Context) error {
            uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
            if err != nil {
                return err
            }

        // Start the upload
        uploadData := uploadUrl.UploadUrls[fmt.Sprint("partNumber_", i)]

        err = retry.Do(func() error {
            _, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(byteData.Bytes()))
            return err
        },
            retry.Context(ctx),
            retry.Attempts(3),
            retry.Delay(time.Second),
            retry.MaxDelay(5*time.Second))
        if err != nil {
            return nil, err
        }
        up(int(i * 100 / count))
            // step.4 upload the slice
            uploadUrl := uploadUrls[0]
            _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
            if err != nil {
                return err
            }
            up(float64(threadG.Success()) * 100 / float64(count))
            return nil
        })
    }
    if err = threadG.Wait(); err != nil {
        return nil, err
    }

    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
    sliceMd5Hex := fileMd5Hex
    if file.GetSize() > DEFAULT {
    if file.GetSize() > sliceSize {
        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
    }
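The new `StreamUpload` fans slice uploads out over `y.uploadThread` workers via AList's own `pkg/errgroup`, which bundles retry options into the group. A rough approximation of the same pattern using the standard `golang.org/x/sync/errgroup` plus `avast/retry-go` (the `put` callback and part list are placeholders, and `pkg/errgroup`'s exact API may differ):

```go
package example

import (
	"context"
	"time"

	"github.com/avast/retry-go"
	"golang.org/x/sync/errgroup"
)

// uploadParts approximates the threadG pattern above: at most `threads`
// slices in flight, each attempt retried with backoff, and the first
// error cancels the shared context.
func uploadParts(ctx context.Context, threads int, parts []string, put func(context.Context, string) error) error {
	g, gctx := errgroup.WithContext(ctx)
	g.SetLimit(threads)
	for _, part := range parts {
		part := part // capture per iteration (pre-Go 1.22 loop semantics)
		g.Go(func() error {
			return retry.Do(func() error { return put(gctx, part) },
				retry.Context(gctx),
				retry.Attempts(3),
				retry.Delay(time.Second),
				retry.DelayType(retry.BackOffDelay))
		})
	}
	return g.Wait()
}
```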
@@ -542,124 +546,157 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
    return resp.toFile(), nil
}

// Rapid upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // The full-file MD5 is required, so io.Seek must be supported
    tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
func (y *Cloud189PC) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
    fileMd5 := stream.GetHash().GetHash(utils.MD5)
    if len(fileMd5) < utils.MD5.Width {
        return nil, errors.New("invalid hash")
    }

    uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, stream.GetName(), fmt.Sprint(stream.GetSize()))
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    var DEFAULT = partSize(file.GetSize())
    count := int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
    if uploadInfo.FileDataExists != 1 {
        return nil, errors.New("rapid upload fail")
    }

    // Compute the required information first
    return y.OldUploadCommit(ctx, uploadInfo.FileCommitUrl, uploadInfo.UploadFileId)
}

// Rapid upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    tempFile, err := file.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }

    var sliceSize = partSize(file.GetSize())
    count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
    lastSliceSize := file.GetSize() % sliceSize
    if file.GetSize() > 0 && lastSliceSize == 0 {
        lastSliceSize = sliceSize
    }

    // step.1 compute the required information first
    byteSize := sliceSize
    fileMd5 := md5.New()
    silceMd5 := md5.New()
    silceMd5Hexs := make([]string, 0, count)
    silceMd5Base64s := make([]string, 0, count)
    partInfos := make([]string, 0, count)
    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }

        if i == count {
            byteSize = lastSliceSize
        }

        silceMd5.Reset()
        if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, DEFAULT); err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
        if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
            return nil, err
        }
        md5Byte := silceMd5.Sum(nil)
        silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
        silceMd5Base64s = append(silceMd5Base64s, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
    }
    if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
        return nil, err
        partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
    }

    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
    sliceMd5Hex := fileMd5Hex
    if file.GetSize() > DEFAULT {
    if file.GetSize() > sliceSize {
        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
    }

    // Check whether rapid upload is possible
    params := Params{
        "parentFolderId": dstDir.GetID(),
        "fileName":       url.QueryEscape(file.GetName()),
        "fileSize":       fmt.Sprint(file.GetSize()),
        "fileMd5":        fileMd5Hex,
        "sliceSize":      fmt.Sprint(DEFAULT),
        "sliceMd5":       sliceMd5Hex,
    }

    fullUrl := UPLOAD_URL
    if y.isFamily() {
        params.Set("familyId", y.FamilyID)
        fullUrl += "/family"
    } else {
        //params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
        fullUrl += "/person"
    }

    var uploadInfo InitMultiUploadResp
    _, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, params, &uploadInfo)
    if err != nil {
        return nil, err
    }

    // The file does not exist in the drive; start uploading
    if uploadInfo.Data.FileDataExists != 1 {
        var uploadUrls UploadUrlsResp
        _, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
            func(req *resty.Request) {
                req.SetContext(ctx)
            }, Params{
                "uploadFileId": uploadInfo.Data.UploadFileID,
                "partInfo":     strings.Join(silceMd5Base64s, ","),
            }, &uploadUrls)
    // Try to resume saved progress
    uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
    if !ok {
        // step.2 pre-upload
        params := Params{
            "parentFolderId": dstDir.GetID(),
            "fileName":       url.QueryEscape(file.GetName()),
            "fileSize":       fmt.Sprint(file.GetSize()),
            "fileMd5":        fileMd5Hex,
            "sliceSize":      fmt.Sprint(sliceSize),
            "sliceMd5":       sliceMd5Hex,
        }
        if y.isFamily() {
            params.Set("familyId", y.FamilyID)
        }
        var uploadInfo InitMultiUploadResp
        _, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
            req.SetContext(ctx)
        }, params, &uploadInfo)
        if err != nil {
            return nil, err
        }

        buf := make([]byte, DEFAULT)
        for i := 1; i <= count; i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            }

            n, err := io.ReadFull(tempFile, buf)
            if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
                return nil, err
            }
            uploadData := uploadUrls.UploadUrls[fmt.Sprint("partNumber_", i)]
            err = retry.Do(func() error {
                _, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(buf[:n]))
                return err
            },
                retry.Context(ctx),
                retry.Attempts(3),
                retry.Delay(time.Second),
                retry.MaxDelay(5*time.Second))
            if err != nil {
                return nil, err
            }

            up(int(i * 100 / count))
        uploadProgress = &UploadProgress{
            UploadInfo:  uploadInfo,
            UploadParts: partInfos,
        }
    }

    // Commit
    uploadInfo := uploadProgress.UploadInfo.Data
    // The file does not exist in the drive; start uploading
    if uploadInfo.FileDataExists != 1 {
        threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
            retry.Attempts(3),
            retry.Delay(time.Second),
            retry.DelayType(retry.BackOffDelay))
        for i, uploadPart := range uploadProgress.UploadParts {
            if utils.IsCanceled(upCtx) {
                break
            }

            i, uploadPart := i, uploadPart
            threadG.Go(func(ctx context.Context) error {
                // step.3 get the upload URLs
                uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
                if err != nil {
                    return err
                }
                uploadUrl := uploadUrls[0]

                byteSize, offset := sliceSize, int64(uploadUrl.PartNumber-1)*sliceSize
                if uploadUrl.PartNumber == count {
                    byteSize = lastSliceSize
                }

                // step.4 upload the slice
                _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
                if err != nil {
                    return err
                }

                up(float64(threadG.Success()) * 100 / float64(len(uploadUrls)))
                uploadProgress.UploadParts[i] = ""
                return nil
            })
        }
        if err = threadG.Wait(); err != nil {
            if errors.Is(err, context.Canceled) {
                uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
                base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
            }
            return nil, err
        }
    }

    // step.5 commit
    var resp CommitMultiUploadFileResp
    _, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
        func(req *resty.Request) {
            req.SetContext(ctx)
        }, Params{
            "uploadFileId": uploadInfo.Data.UploadFileID,
            "uploadFileId": uploadInfo.UploadFileID,
            "isLog":        "0",
            "opertype":     "3",
        }, &resp)
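The reworked `FastUpload` checkpoints its remaining part list keyed by session and file hash (`base.GetUploadProgress` / `base.SaveUploadProgress`, whose implementations sit outside this diff): finished parts are blanked out, and on a cancelled context the unfinished tail is persisted so a later run can resume. A rough sketch of that idea with an in-memory store standing in for the real persistence layer; everything here is illustrative:

```go
package example

import "context"

type checkpointStore map[string][]string // key -> remaining part infos

// resumeOrStart sketches the pattern: reuse the saved part list when a
// checkpoint exists, otherwise start from the full list; on cancel,
// persist whatever is still pending for the next attempt.
func resumeOrStart(ctx context.Context, store checkpointStore, key string, allParts []string,
	upload func(context.Context, string) error) error {

	parts, ok := store[key]
	if !ok {
		parts = allParts
	}
	for i, p := range parts {
		if err := upload(ctx, p); err != nil {
			if ctx.Err() != nil {
				store[key] = parts[i:] // save the unfinished tail for next time
			}
			return err
		}
	}
	delete(store, key) // done; drop the checkpoint
	return nil
}
```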
@@ -669,71 +706,71 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
    return resp.toFile(), nil
}

// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // The full-file MD5 is required, so io.Seek must be supported
    tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
// Get upload slice info
// The HTTP body has a size limit; too many part infos cause errors
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
    fullUrl := UPLOAD_URL
    if y.isFamily() {
        fullUrl += "/family"
    } else {
        fullUrl += "/person"
    }

    var uploadUrlsResp UploadUrlsResp
    _, err := y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
        func(req *resty.Request) {
            req.SetContext(ctx)
        }, Params{
            "uploadFileId": uploadFileId,
            "partInfo":     strings.Join(partInfo, ","),
        }, &uploadUrlsResp)
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()
    uploadUrls := uploadUrlsResp.Data

    // Compute the MD5
    fileMd5 := md5.New()
    if _, err := io.Copy(fileMd5, tempFile); err != nil {
    if len(uploadUrls) != len(partInfo) {
        return nil, fmt.Errorf("uploadUrls get error, due to get length %d, real length %d", len(partInfo), len(uploadUrls))
    }

    uploadUrlInfos := make([]UploadUrlInfo, 0, len(uploadUrls))
    for k, uploadUrl := range uploadUrls {
        partNumber, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
        if err != nil {
            return nil, err
        }
        uploadUrlInfos = append(uploadUrlInfos, UploadUrlInfo{
            PartNumber:     partNumber,
            Headers:        ParseHttpHeader(uploadUrl.RequestHeader),
            UploadUrlsData: uploadUrl,
        })
    }
    sort.Slice(uploadUrlInfos, func(i, j int) bool {
        return uploadUrlInfos[i].PartNumber < uploadUrlInfos[j].PartNumber
    })
    return uploadUrlInfos, nil
}

// Legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    tempFile, err := file.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }
    if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
    fileMd5, err := utils.HashFile(utils.MD5, tempFile)
    if err != nil {
        return nil, err
    }
    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))

    // Create an upload session
    var uploadInfo CreateUploadFileResp

    fullUrl := API_URL + "/createUploadFile.action"
    if y.isFamily() {
        fullUrl = API_URL + "/family/file/createFamilyFile.action"
    }
    _, err = y.post(fullUrl, func(req *resty.Request) {
        req.SetContext(ctx)
        if y.isFamily() {
            req.SetQueryParams(map[string]string{
                "familyId":     y.FamilyID,
                "fileMd5":      fileMd5Hex,
                "fileName":     file.GetName(),
                "fileSize":     fmt.Sprint(file.GetSize()),
                "parentId":     dstDir.GetID(),
                "resumePolicy": "1",
            })
        } else {
            req.SetFormData(map[string]string{
                "parentFolderId": dstDir.GetID(),
                "fileName":       file.GetName(),
                "size":           fmt.Sprint(file.GetSize()),
                "md5":            fileMd5Hex,
                "opertype":       "3",
                "flag":           "1",
                "resumePolicy":   "1",
                "isLog":          "0",
                // "baseFileId": "",
                // "lastWrite":"",
                // "localPath": strings.ReplaceAll(param.LocalPath, "\\", "/"),
                // "fileExt": "",
            })
        }
    }, &uploadInfo)

    uploadInfo, err := y.OldUploadCreate(ctx, dstDir.GetID(), fileMd5, file.GetName(), fmt.Sprint(file.GetSize()))
    if err != nil {
        return nil, err
    }

    // The file does not exist in the drive; start uploading
    status := GetUploadFileStatusResp{CreateUploadFileResp: uploadInfo}
    for status.Size < file.GetSize() && status.FileDataExists != 1 {
    status := GetUploadFileStatusResp{CreateUploadFileResp: *uploadInfo}
    for status.GetSize() < file.GetSize() && status.FileDataExists != 1 {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }
@@ -772,28 +809,70 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
        if err != nil {
            return nil, err
        }

        if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
            return nil, err
        }
        up(int(status.Size / file.GetSize()))
        up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
    }

    // Commit
    return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
}

// Create an upload session
func (y *Cloud189PC) OldUploadCreate(ctx context.Context, parentID string, fileMd5, fileName, fileSize string) (*CreateUploadFileResp, error) {
    var uploadInfo CreateUploadFileResp

    fullUrl := API_URL + "/createUploadFile.action"
    if y.isFamily() {
        fullUrl = API_URL + "/family/file/createFamilyFile.action"
    }
    _, err := y.post(fullUrl, func(req *resty.Request) {
        req.SetContext(ctx)
        if y.isFamily() {
            req.SetQueryParams(map[string]string{
                "familyId":     y.FamilyID,
                "parentId":     parentID,
                "fileMd5":      fileMd5,
                "fileName":     fileName,
                "fileSize":     fileSize,
                "resumePolicy": "1",
            })
        } else {
            req.SetFormData(map[string]string{
                "parentFolderId": parentID,
                "fileName":       fileName,
                "size":           fileSize,
                "md5":            fileMd5,
                "opertype":       "3",
                "flag":           "1",
                "resumePolicy":   "1",
                "isLog":          "0",
            })
        }
    }, &uploadInfo)

    if err != nil {
        return nil, err
    }
    return &uploadInfo, nil
}

// Commit the uploaded file
func (y *Cloud189PC) OldUploadCommit(ctx context.Context, fileCommitUrl string, uploadFileID int64) (model.Obj, error) {
    var resp OldCommitUploadFileResp
    _, err = y.post(status.FileCommitUrl, func(req *resty.Request) {
    _, err := y.post(fileCommitUrl, func(req *resty.Request) {
        req.SetContext(ctx)
        if y.isFamily() {
            req.SetHeaders(map[string]string{
                "ResumePolicy": "1",
                "UploadFileId": fmt.Sprint(status.UploadFileId),
                "UploadFileId": fmt.Sprint(uploadFileID),
                "FamilyId":     fmt.Sprint(y.FamilyID),
            })
        } else {
            req.SetFormData(map[string]string{
                "opertype":     "3",
                "resumePolicy": "1",
                "uploadFileId": fmt.Sprint(status.UploadFileId),
                "uploadFileId": fmt.Sprint(uploadFileID),
                "isLog":        "0",
            })
        }
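`GetMultiUploadUrls` receives a map keyed `partNumber_N`, and Go map iteration order is deliberately randomized, so the code parses the numeric suffix and sorts before returning. The same map-to-sorted-slice pattern in isolation, with example URLs:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

func main() {
	urls := map[string]string{
		"partNumber_2": "https://example.com/p2",
		"partNumber_1": "https://example.com/p1",
	}
	type part struct {
		n   int
		url string
	}
	parts := make([]part, 0, len(urls))
	for k, u := range urls {
		// Map iteration order is random in Go, so the numeric suffix
		// must be parsed and sorted to restore part order.
		n, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
		if err != nil {
			panic(err)
		}
		parts = append(parts, part{n, u})
	}
	sort.Slice(parts, func(i, j int) bool { return parts[i].n < parts[j].n })
	fmt.Println(parts) // [{1 https://example.com/p1} {2 https://example.com/p2}]
}
```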
@@ -3,6 +3,7 @@ package alist_v3

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "path"
    "strconv"
@@ -93,8 +94,10 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
            Object: model.Object{
                Name:     f.Name,
                Modified: f.Modified,
                Ctime:    f.Created,
                Size:     f.Size,
                IsFolder: f.IsDir,
                HashInfo: utils.FromString(f.HashInfo),
            },
            Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
        }
@@ -176,7 +179,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
            SetHeader("Password", d.MetaPassword).
            SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
            SetContentLength(true).
            SetBody(stream.GetReadCloser())
            SetBody(io.ReadCloser(stream))
        })
    return err
}
@@ -18,9 +18,11 @@ type ObjResp struct {
    Size     int64     `json:"size"`
    IsDir    bool      `json:"is_dir"`
    Modified time.Time `json:"modified"`
    Created  time.Time `json:"created"`
    Sign     string    `json:"sign"`
    Thumb    string    `json:"thumb"`
    Type     int       `json:"type"`
    HashInfo string    `json:"hashinfo"`
}

type FsListResp struct {
@@ -14,6 +14,8 @@ import (
    "os"
    "time"

    "github.com/alist-org/alist/v3/internal/stream"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/alist-org/alist/v3/internal/driver"
@@ -67,7 +69,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
        return nil
    }
    // init deviceID
    deviceID := utils.GetSHA256Encode([]byte(d.UserID))
    deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
    // init privateKey
    privateKey, _ := NewPrivateKeyFromHex(deviceID)
    state := State{
@@ -163,14 +165,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
    return err
}

func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    file := model.FileStream{
        Obj:        stream,
        ReadCloser: stream,
        Mimetype:   stream.GetMimetype(),
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
    file := stream.FileStream{
        Obj:      streamer,
        Reader:   streamer,
        Mimetype: streamer.GetMimetype(),
    }
    const DEFAULT int64 = 10485760
    var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
    var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))

    partInfoList := make([]base.Json, 0, count)
    for i := 1; i <= count; i++ {
@@ -187,25 +189,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
    }

    var localFile *os.File
    if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
        localFile, _ = fileStream.ReadCloser.(*os.File)
    if fileStream, ok := file.Reader.(*stream.FileStream); ok {
        localFile, _ = fileStream.Reader.(*os.File)
    }
    if d.RapidUpload {
        buf := bytes.NewBuffer(make([]byte, 0, 1024))
        io.CopyN(buf, file, 1024)
        reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
        reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
        if localFile != nil {
            if _, err := localFile.Seek(0, io.SeekStart); err != nil {
                return err
            }
        } else {
            // Splice the head back onto the stream
            file.ReadCloser = struct {
            file.Reader = struct {
                io.Reader
                io.Closer
            }{
                Reader: io.MultiReader(buf, file),
                Closer: file,
                Closer: &file,
            }
        }
    } else {
@@ -281,7 +283,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
        if _, err = localFile.Seek(0, io.SeekStart); err != nil {
            return err
        }
        file.ReadCloser = localFile
        file.Reader = localFile
    }

    for i, partInfo := range resp.PartInfoList {
@@ -303,7 +305,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
        }
        res.Body.Close()
        if count > 0 {
            up(i * 100 / count)
            up(float64(i) * 100 / float64(count))
        }
    }
    var resp2 base.Json
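The `pre_hash` path reads the first 1024 bytes for hashing and, when the source is not seekable, splices the consumed head back in front of the remaining stream with `io.MultiReader`. That trick in isolation, runnable as-is:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// peekHead reads up to n bytes for hashing and returns a reader that
// still yields the full original byte sequence, the same trick as the
// io.MultiReader splice above for non-seekable streams.
func peekHead(r io.Reader, n int64) ([]byte, io.Reader, error) {
	buf := bytes.NewBuffer(make([]byte, 0, n))
	if _, err := io.CopyN(buf, r, n); err != nil && err != io.EOF {
		return nil, nil, err
	}
	head := buf.Bytes()
	return head, io.MultiReader(bytes.NewReader(head), r), nil
}

func main() {
	head, rest, _ := peekHead(strings.NewReader("hello world"), 5)
	all, _ := io.ReadAll(rest)
	fmt.Printf("head=%q all=%q\n", head, all) // head="hello" all="hello world"
}
```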
@@ -107,7 +107,9 @@ func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.L
    return d.limitLink(ctx, file)
}

func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    nowTime, _ := getNowTime()
    newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
    _, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
@@ -115,12 +117,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
            "name":            dirName,
            "type":            "folder",
            "check_name_mode": "refuse",
        })
        }).SetResult(&newDir)
    })
    return err
    if err != nil {
        return nil, err
    }
    return fileToObj(newDir), nil
}

func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    var resp MoveOrCopyResp
    _, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
@@ -128,20 +134,36 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) er
            "to_parent_file_id": dstDir.GetID(),
            "check_name_mode":   "refuse", // optional:ignore,auto_rename,refuse
            //"new_name": "newName", // The new name to use when a file of the same name exists
        })
        }).SetResult(&resp)
    })
    return err
    if err != nil {
        return nil, err
    }
    if resp.Exist {
        return nil, errors.New("existence of files with the same name")
    }

    if srcObj, ok := srcObj.(*model.ObjThumb); ok {
        srcObj.ID = resp.FileID
        srcObj.Modified = time.Now()
        return srcObj, nil
    }
    return nil, nil
}

func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    var newFile File
    _, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  srcObj.GetID(),
            "name":     newName,
        })
        }).SetResult(&newFile)
    })
    return err
    if err != nil {
        return nil, err
    }
    return fileToObj(newFile), nil
}

func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -170,7 +192,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
    return err
}

func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    return d.upload(ctx, dstDir, stream, up)
}

@@ -199,3 +221,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
}

var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil)
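The `var _ driver.X = (*AliyundriveOpen)(nil)` lines added at the bottom are compile-time interface assertions: they cost nothing at runtime but break the build the moment the type stops satisfying the interface. A generic, self-contained illustration of the idiom:

```go
package main

import "fmt"

type Mover interface {
	Move(src, dst string) (string, error)
}

type myDriver struct{}

func (d *myDriver) Move(src, dst string) (string, error) { return dst, nil }

// The blank assignment costs nothing at runtime but fails to compile if
// *myDriver ever stops satisfying Mover, the same guard the diff adds
// for driver.MkdirResult, MoveResult, RenameResult and PutResult.
var _ Mover = (*myDriver)(nil)

func main() {
	fmt.Println("compiles, so *myDriver implements Mover")
}
```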
@@ -11,7 +11,7 @@ type Addition struct {
    RefreshToken   string `json:"refresh_token" required:"true"`
    OrderBy        string `json:"order_by" type:"select" options:"name,size,updated_at,created_at"`
    OrderDirection string `json:"order_direction" type:"select" options:"ASC,DESC"`
    OauthTokenURL  string `json:"oauth_token_url" default:"https://api.xhofe.top/alist/ali_open/token"`
    OauthTokenURL  string `json:"oauth_token_url" default:"https://api.nn.ci/alist/ali_open/token"`
    ClientID       string `json:"client_id" required:"false" help:"Keep it empty if you don't have one"`
    ClientSecret   string `json:"client_secret" required:"false" help:"Keep it empty if you don't have one"`
    RemoveWay      string `json:"remove_way" required:"true" type:"select" options:"trash,delete"`
@@ -1,6 +1,7 @@
package aliyundrive_open

import (
    "github.com/alist-org/alist/v3/pkg/utils"
    "time"

    "github.com/alist-org/alist/v3/internal/model"
@@ -17,22 +18,28 @@ type Files struct {
}

type File struct {
    DriveId       string     `json:"drive_id"`
    FileId        string     `json:"file_id"`
    ParentFileId  string     `json:"parent_file_id"`
    Name          string     `json:"name"`
    Size          int64      `json:"size"`
    FileExtension string     `json:"file_extension"`
    ContentHash   string     `json:"content_hash"`
    Category      string     `json:"category"`
    Type          string     `json:"type"`
    Thumbnail     string     `json:"thumbnail"`
    Url           string     `json:"url"`
    CreatedAt     *time.Time `json:"created_at"`
    UpdatedAt     time.Time  `json:"updated_at"`
    DriveId       string    `json:"drive_id"`
    FileId        string    `json:"file_id"`
    ParentFileId  string    `json:"parent_file_id"`
    Name          string    `json:"name"`
    Size          int64     `json:"size"`
    FileExtension string    `json:"file_extension"`
    ContentHash   string    `json:"content_hash"`
    Category      string    `json:"category"`
    Type          string    `json:"type"`
    Thumbnail     string    `json:"thumbnail"`
    Url           string    `json:"url"`
    CreatedAt     time.Time `json:"created_at"`
    UpdatedAt     time.Time `json:"updated_at"`

    // create only
    FileName string `json:"file_name"`
}

func fileToObj(f File) *model.ObjThumb {
    if f.Name == "" {
        f.Name = f.FileName
    }
    return &model.ObjThumb{
        Object: model.Object{
            ID: f.FileId,
@@ -40,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
            Size:     f.Size,
            Modified: f.UpdatedAt,
            IsFolder: f.Type == "folder",
            Ctime:    f.CreatedAt,
            HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
    }
@@ -67,3 +76,9 @@ type CreateResp struct {
    RapidUpload  bool       `json:"rapid_upload"`
    PartInfoList []PartInfo `json:"part_info_list"`
}

type MoveOrCopyResp struct {
    Exist   bool   `json:"exist"`
    DriveID string `json:"drive_id"`
    FileID  string `json:"file_id"`
}
@@ -3,14 +3,11 @@ package aliyundrive_open

import (
    "bytes"
    "context"
    "crypto/sha1"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "math"
    "net/http"
    "os"
    "strconv"
    "strings"
    "time"
@@ -18,7 +15,9 @@ import (
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
)
@@ -32,19 +31,19 @@ func makePartInfos(size int) []base.Json {
}

func calPartSize(fileSize int64) int64 {
    var partSize int64 = 20 * 1024 * 1024
    var partSize int64 = 20 * utils.MB
    if fileSize > partSize {
        if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB
            partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
        } else if fileSize > 768*1024*1024*1024 { // over 768GB
        if fileSize > 1*utils.TB { // file Size over 1TB
            partSize = 5 * utils.GB // file part size 5GB
        } else if fileSize > 768*utils.GB { // over 768GB
            partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
        } else if fileSize > 512*1024*1024*1024 { // over 512GB
        } else if fileSize > 512*utils.GB { // over 512GB
            partSize = 82463373 // ≈ 78.6432MB
        } else if fileSize > 384*1024*1024*1024 { // over 384GB
        } else if fileSize > 384*utils.GB { // over 384GB
            partSize = 54975582 // ≈ 52.4288MB
        } else if fileSize > 256*1024*1024*1024 { // over 256GB
        } else if fileSize > 256*utils.GB { // over 256GB
            partSize = 41231687 // ≈ 39.3216MB
        } else if fileSize > 128*1024*1024*1024 { // over 128GB
        } else if fileSize > 128*utils.GB { // over 128GB
            partSize = 27487791 // ≈ 26.2144MB
        }
    }
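Those magic constants come from the 10,000-part cap noted in the upload code below: for instance, 109951163 is the ceiling of 1 TiB divided by 10,000 parts. A quick arithmetic check:

```go
package main

import "fmt"

func main() {
	const TB = int64(1) << 40
	// ceil(1 TiB / 10_000) reproduces the 109951163 constant above: the
	// smallest slice size that keeps a 1 TiB file within 10,000 parts.
	parts := int64(10000)
	slice := (TB + parts - 1) / parts
	fmt.Println(slice)        // 109951163
	fmt.Println(TB/slice + 1) // 10000, i.e. at the part limit
}
```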
@@ -65,73 +64,40 @@ func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]Pa
    return resp.PartInfoList, err
}

func (d *AliyundriveOpen) uploadPart(ctx context.Context, i, count int, reader *utils.MultiReadable, resp *CreateResp, retry bool) error {
    partInfo := resp.PartInfoList[i-1]
func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
    uploadUrl := partInfo.UploadUrl
    if d.InternalUpload {
        uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
    }
    req, err := http.NewRequest("PUT", uploadUrl, reader)
    req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
    if err != nil {
        return err
    }
    req = req.WithContext(ctx)
    res, err := base.HttpClient.Do(req)
    if err != nil {
        if retry {
            reader.Reset()
            return d.uploadPart(ctx, i, count, reader, resp, false)
        }
        return err
    }
    res.Body.Close()
    if retry && res.StatusCode == http.StatusForbidden {
        resp.PartInfoList, err = d.getUploadUrl(count, resp.FileId, resp.UploadId)
        if err != nil {
            return err
        }
        reader.Reset()
        return d.uploadPart(ctx, i, count, reader, resp, false)
    }
    if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
        return fmt.Errorf("upload status: %d", res.StatusCode)
    }
    return nil
}

func (d *AliyundriveOpen) normalUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, createResp CreateResp, count int, partSize int64) error {
    log.Debugf("[aliyundive_open] normal upload")
    // 2. upload
    preTime := time.Now()
    for i := 1; i <= len(createResp.PartInfoList); i++ {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        err := d.uploadPart(ctx, i, count, utils.NewMultiReadable(io.LimitReader(stream, partSize)), &createResp, true)
        if err != nil {
            return err
        }
        if count > 0 {
            up(i * 100 / count)
        }
        // refresh upload url if 50 minutes passed
        if time.Since(preTime) > 50*time.Minute {
            createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
            if err != nil {
                return err
            }
            preTime = time.Now()
        }
    }
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
    // 3. complete
    var newFile File
    _, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id":  d.DriveId,
            "file_id":   createResp.FileId,
            "upload_id": createResp.UploadId,
        })
            "file_id":   fileId,
            "upload_id": uploadId,
        }).SetResult(&newFile)
    })
    return err
    if err != nil {
        return nil, err
    }
    return fileToObj(newFile), nil
}

type ProofRange struct {
@@ -159,110 +125,146 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
    return pr, nil
}

func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
    proofRange, err := getProofRange(d.AccessToken, fileSize)
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
    proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
    if err != nil {
        return "", err
    }
    buf := make([]byte, proofRange.End-proofRange.Start)
    _, err = file.ReadAt(buf, proofRange.Start)
    length := proofRange.End - proofRange.Start
    buf := bytes.NewBuffer(make([]byte, 0, length))
    reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
    if err != nil {
        return "", err
    }
    return base64.StdEncoding.EncodeToString(buf), nil
    _, err = io.CopyN(buf, reader, length)
    if err != nil {
        return "", err
    }
    return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}

func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // 1. create
    // Part Size Unit: Bytes, Default: 20MB,
    // Maximum number of slices 10,000, ≈195.3125GB
    var partSize = calPartSize(stream.GetSize())
    const dateFormat = "2006-01-02T15:04:05.000Z"
    mtimeStr := stream.ModTime().UTC().Format(dateFormat)
    ctimeStr := stream.CreateTime().UTC().Format(dateFormat)

    createData := base.Json{
        "drive_id":        d.DriveId,
        "parent_file_id":  dstDir.GetID(),
        "name":            stream.GetName(),
        "type":            "file",
        "check_name_mode": "ignore",
        "drive_id":          d.DriveId,
        "parent_file_id":    dstDir.GetID(),
        "name":              stream.GetName(),
        "type":              "file",
        "check_name_mode":   "ignore",
        "local_modified_at": mtimeStr,
        "local_created_at":  ctimeStr,
    }
    count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
    createData["part_info_list"] = makePartInfos(count)
    // rapid upload
    rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
    rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
    if rapidUpload {
        log.Debugf("[aliyundrive_open] start cal pre_hash")
        // read 1024 bytes to calculate pre hash
        buf := bytes.NewBuffer(make([]byte, 0, 1024))
        _, err := io.CopyN(buf, stream, 1024)
        reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
        if err != nil {
            return err
            return nil, err
        }
        hash, err := utils.HashReader(utils.SHA1, reader)
        if err != nil {
            return nil, err
        }
        createData["size"] = stream.GetSize()
        createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
        // if support seek, seek to start
        if localFile, ok := stream.(io.Seeker); ok {
            if _, err := localFile.Seek(0, io.SeekStart); err != nil {
                return err
            }
        } else {
            // Put spliced head back to stream
            stream.SetReadCloser(struct {
                io.Reader
                io.Closer
            }{
                Reader: io.MultiReader(buf, stream.GetReadCloser()),
                Closer: stream.GetReadCloser(),
            })
        }
        createData["pre_hash"] = hash
    }
    var createResp CreateResp
    _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(createData).SetResult(&createResp)
    })
    var tmpF model.File
    if err != nil {
        if e.Code != "PreHashMatched" || !rapidUpload {
            return err
            return nil, err
        }
        log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
        // convert to local file
        file, err := utils.CreateTempFile(stream, stream.GetSize())
        if err != nil {
            return err
        }
        _ = stream.GetReadCloser().Close()
        stream.SetReadCloser(file)
        // calculate full hash
        h := sha1.New()
        _, err = io.Copy(h, file)
        if err != nil {
            return err

        hi := stream.GetHash()
        hash := hi.GetHash(utils.SHA1)
        if len(hash) <= 0 {
            tmpF, err = stream.CacheFullInTempFile()
            if err != nil {
                return nil, err
            }
            hash, err = utils.HashFile(utils.SHA1, tmpF)
            if err != nil {
                return nil, err
            }
        }

        delete(createData, "pre_hash")
        createData["proof_version"] = "v1"
        createData["content_hash_name"] = "sha1"
        createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
        // seek to start
        if _, err = file.Seek(0, io.SeekStart); err != nil {
            return err
        }
        createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
        createData["content_hash"] = hash
        createData["proof_code"], err = d.calProofCode(stream)
        if err != nil {
            return fmt.Errorf("cal proof code error: %s", err.Error())
            return nil, fmt.Errorf("cal proof code error: %s", err.Error())
        }
        _, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
            req.SetBody(createData).SetResult(&createResp)
        })
        if err != nil {
            return err
        }
        if createResp.RapidUpload {
            log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
            return nil
        }
        // failed to rapid upload, try normal upload
        if _, err = file.Seek(0, io.SeekStart); err != nil {
            return err
            return nil, err
        }
    }

    if !createResp.RapidUpload {
        // 2. normal upload
        log.Debugf("[aliyundive_open] normal upload")

        preTime := time.Now()
        var offset, length int64 = 0, partSize
        //var length
        for i := 0; i < len(createResp.PartInfoList); i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            }
            // refresh upload url if 50 minutes passed
            if time.Since(preTime) > 50*time.Minute {
                createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
                if err != nil {
                    return nil, err
                }
                preTime = time.Now()
            }
            if remain := stream.GetSize() - offset; length > remain {
                length = remain
            }
            //rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
            rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
            if err != nil {
                return nil, err
            }
            err = retry.Do(func() error {
                //rd.Reset()
                return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
            },
                retry.Attempts(3),
                retry.DelayType(retry.BackOffDelay),
                retry.Delay(time.Second))
            if err != nil {
                return nil, err
            }
            offset += partSize
            up(float64(i*100) / float64(count))
        }
    } else {
        log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
    }

    log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
    return d.normalUpload(ctx, stream, up, createResp, count, partSize)
    // 3. complete
    return d.completeUpload(createResp.FileId, createResp.UploadId)
}
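`calProofCode` now reads the proof range through `stream.RangeRead` instead of requiring an `*os.File`. The range itself comes from `getProofRange`, whose body is outside this excerpt; the commonly described v1 scheme derives an offset from the MD5 of the access token modulo the file size and base64-encodes 8 bytes from there. Treat the following standalone sketch as an assumption about that scheme, not a copy of the repo's implementation:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// proofCode sketches the widely documented proof_code v1 scheme that
// getProofRange appears to implement (assumption): take the first 16 hex
// chars of md5(accessToken) as a uint64, mod by the file size to pick a
// start offset, then base64 the next 8 bytes of content.
func proofCode(accessToken string, size int64, ra io.ReaderAt) (string, error) {
	sum := md5.Sum([]byte(accessToken))
	n, err := strconv.ParseUint(hex.EncodeToString(sum[:])[:16], 16, 64)
	if err != nil {
		return "", err
	}
	start := int64(n % uint64(size))
	end := start + 8
	if end > size {
		end = size
	}
	buf := make([]byte, end-start)
	if _, err := ra.ReadAt(buf, start); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
	data := strings.NewReader("example file content for proof code")
	code, _ := proofCode("token", int64(data.Len()), data)
	fmt.Println(code)
}
```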
@@ -2,9 +2,12 @@ package aliyundrive_open

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/op"
@@ -15,7 +18,7 @@ import (

// do others that not defined in Driver interface

func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
    url := d.base + "/oauth/access_token"
    if d.OauthTokenURL != "" && d.ClientID == "" {
        url = d.OauthTokenURL
@@ -23,7 +26,7 @@ func (d *AliyundriveOpen) refreshToken() error {
    //var resp base.TokenResp
    var e ErrResp
    res, err := base.RestyClient.R().
        ForceContentType("application/json").
        //ForceContentType("application/json").
        SetBody(base.Json{
            "client_id":     d.ClientID,
            "client_secret": d.ClientSecret,
@@ -34,16 +37,56 @@ func (d *AliyundriveOpen) refreshToken() error {
        SetError(&e).
        Post(url)
    if err != nil {
        return err
        return "", "", err
    }
    log.Debugf("[ali_open] refresh token response: %s", res.String())
    if e.Code != "" {
        return fmt.Errorf("failed to refresh token: %s", e.Message)
        return "", "", fmt.Errorf("failed to refresh token: %s", e.Message)
    }
    refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
    if refresh == "" {
        return errors.New("failed to refresh token: refresh token is empty")
        return "", "", fmt.Errorf("failed to refresh token: refresh token is empty, resp: %s", res.String())
    }
    curSub, err := getSub(d.RefreshToken)
    if err != nil {
        return "", "", err
    }
    newSub, err := getSub(refresh)
    if err != nil {
        return "", "", err
    }
    if curSub != newSub {
        return "", "", errors.New("failed to refresh token: sub not match")
    }
    return refresh, access, nil
}

func getSub(token string) (string, error) {
    segments := strings.Split(token, ".")
    if len(segments) != 3 {
        return "", errors.New("not a jwt token because of invalid segments")
    }
    bs, err := base64.RawStdEncoding.DecodeString(segments[1])
    if err != nil {
        return "", errors.New("failed to decode jwt token")
    }
    return utils.Json.Get(bs, "sub").ToString(), nil
}

func (d *AliyundriveOpen) refreshToken() error {
    refresh, access, err := d._refreshToken()
    for i := 0; i < 3; i++ {
        if err == nil {
            break
        } else {
            log.Errorf("[ali_open] failed to refresh token: %s", err)
        }
        refresh, access, err = d._refreshToken()
    }
    if err != nil {
        return err
    }
    log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
    d.RefreshToken, d.AccessToken = refresh, access
    op.MustSaveDriverStorage(d)
    return nil
@@ -127,3 +170,9 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
    }
    return res, nil
}

func getNowTime() (time.Time, string) {
    nowTime := time.Now()
    nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
    return nowTime, nowTimeStr
}
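The new `getSub` decodes the JWT payload without verifying the signature, purely to compare the `sub` claims of the old and refreshed tokens. A minimal standalone version of the same trick; note that JWT segments are base64url, so this sketch uses `RawURLEncoding` (the diff's `RawStdEncoding` works for payloads that contain no URL-safe characters):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

// sub extracts the `sub` claim from a JWT without verifying it, the same
// unverified-decode trick getSub uses to check that a refreshed token
// still belongs to the same account.
func sub(token string) (string, error) {
	segments := strings.Split(token, ".")
	if len(segments) != 3 {
		return "", errors.New("not a JWT: want 3 segments")
	}
	payload, err := base64.RawURLEncoding.DecodeString(segments[1])
	if err != nil {
		return "", err
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}

func main() {
	// Hand-built unsigned token: header.payload.signature
	payload := base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"user-123"}`))
	tok := "eyJhbGciOiJub25lIn0." + payload + ".sig"
	s, err := sub(tok)
	fmt.Println(s, err) // user-123 <nil>
}
```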
@@ -44,6 +44,7 @@ func fileToObj(f File) *model.ObjThumb {
            Name:     f.Name,
            Size:     f.Size,
            Modified: f.UpdatedAt,
            Ctime:    f.CreatedAt,
            IsFolder: f.Type == "folder",
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
@@ -3,6 +3,7 @@ package drivers
import (
    _ "github.com/alist-org/alist/v3/drivers/115"
    _ "github.com/alist-org/alist/v3/drivers/123"
    _ "github.com/alist-org/alist/v3/drivers/123_link"
    _ "github.com/alist-org/alist/v3/drivers/123_share"
    _ "github.com/alist-org/alist/v3/drivers/139"
    _ "github.com/alist-org/alist/v3/drivers/189"
@ -4,29 +4,31 @@ import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"net/url"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/alist-org/alist/v3/drivers/base"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/errgroup"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/avast/retry-go"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
stdpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type BaiduNetdisk struct {
|
||||
model.Storage
|
||||
Addition
|
||||
}
|
||||
|
||||
const BaiduFileAPI = "https://d.pcs.baidu.com/rest/2.0/pcs/superfile2"
|
||||
const DefaultSliceSize int64 = 4 * 1024 * 1024
|
||||
uploadThread int
|
||||
vipType int // 会员类型,0普通用户(4G/4M)、1普通会员(10G/16M)、2超级会员(20G/32M)
|
||||
}
|
||||
|
||||
func (d *BaiduNetdisk) Config() driver.Config {
|
||||
return config
|
||||
@ -37,11 +39,24 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}

func (d *BaiduNetdisk) Init(ctx context.Context) error {
	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
	if d.uploadThread < 1 || d.uploadThread > 32 {
		d.uploadThread, d.UploadThread = 3, "3"
	}

	if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
		d.UploadAPI = "https://d.pcs.baidu.com"
	}

	res, err := d.get("/xpan/nas", map[string]string{
		"method": "uinfo",
	}, nil)
	log.Debugf("[baidu] get uinfo: %s", string(res))
	return err
	if err != nil {
		return err
	}
	d.vipType = utils.Json.Get(res, "vip_type").ToInt()
	return nil
}

func (d *BaiduNetdisk) Drop(ctx context.Context) error {
@ -65,12 +80,16 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
	return d.linkOfficial(file, args)
}

func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
	return err
func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	var newDir File
	_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
	if err != nil {
		return nil, err
	}
	return fileToObj(newDir), nil
}

func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	data := []base.Json{
		{
			"path": srcObj.GetPath(),

@ -79,10 +98,18 @@ func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error
		},
	}
	_, err := d.manage("move", data)
	return err
	if err != nil {
		return nil, err
	}
	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
		srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
		srcObj.Modified = time.Now()
		return srcObj, nil
	}
	return nil, nil
}

func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	data := []base.Json{
		{
			"path": srcObj.GetPath(),

@ -90,7 +117,17 @@ func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName str
		},
	}
	_, err := d.manage("rename", data)
	return err
	if err != nil {
		return nil, err
	}

	if srcObj, ok := srcObj.(*model.ObjThumb); ok {
		srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
		srcObj.Name = newName
		srcObj.Modified = time.Now()
		return srcObj, nil
	}
	return nil, nil
}

func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -111,118 +148,165 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
	return err
}

func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	streamSize := stream.GetSize()

	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
	if err != nil {
		return err
func (d *BaiduNetdisk) PutRapid(ctx context.Context, dstDir model.Obj, stream model.FileStreamer) (model.Obj, error) {
	contentMd5 := stream.GetHash().GetHash(utils.MD5)
	if len(contentMd5) < utils.MD5.Width {
		return nil, errors.New("invalid hash")
	}

	streamSize := stream.GetSize()
	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
	mtime := stream.ModTime().Unix()
	ctime := stream.CreateTime().Unix()
	blockList, _ := utils.Json.MarshalToString([]string{contentMd5})

	var newFile File
	_, err := d.create(path, streamSize, 0, "", blockList, &newFile, mtime, ctime)
	if err != nil {
		return nil, err
	}
	return fileToObj(newFile), nil
}

func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// rapid upload
	if newObj, err := d.PutRapid(ctx, dstDir, stream); err == nil {
		return newObj, nil
	}

	tempFile, err := stream.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}

	streamSize := stream.GetSize()
	sliceSize := d.getSliceSize()
	count := int(math.Max(math.Ceil(float64(streamSize)/float64(sliceSize)), 1))
	lastBlockSize := streamSize % sliceSize
	if streamSize > 0 && lastBlockSize == 0 {
		lastBlockSize = sliceSize
	}
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()

	count := int(math.Ceil(float64(streamSize) / float64(DefaultSliceSize)))
	//cal md5 for first 256k data
	const SliceSize int64 = 256 * 1024
	// cal md5
	h1 := md5.New()
	h2 := md5.New()
	blockList := make([]string, 0)
	contentMd5 := ""
	sliceMd5 := ""
	left := streamSize
	for i := 0; i < count; i++ {
		byteSize := DefaultSliceSize
		if left < DefaultSliceSize {
			byteSize = left
		}
		left -= byteSize
		_, err = io.Copy(io.MultiWriter(h1, h2), io.LimitReader(tempFile, byteSize))
		if err != nil {
			return err
		}
		blockList = append(blockList, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
		h2.Reset()
	}
	contentMd5 = hex.EncodeToString(h1.Sum(nil))
	_, err = tempFile.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	if streamSize <= SliceSize {
		sliceMd5 = contentMd5
	} else {
		sliceData := make([]byte, SliceSize)
		_, err = io.ReadFull(tempFile, sliceData)
		if err != nil {
			return err
		}
		h2.Write(sliceData)
		sliceMd5 = hex.EncodeToString(h2.Sum(nil))
	}
	rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
	path := encodeURIComponent(rawPath)
	block_list_str := fmt.Sprintf("[%s]", strings.Join(blockList, ","))
	data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s&content-md5=%s&slice-md5=%s",
		path, streamSize,
		block_list_str,
		contentMd5, sliceMd5)
	params := map[string]string{
		"method": "precreate",
	}
	log.Debugf("[baidu_netdisk] precreate data: %s", data)
	var precreateResp PrecreateResp
	_, err = d.post("/xpan/file", params, data, &precreateResp)
	if err != nil {
		return err
	}
	log.Debugf("%+v", precreateResp)
	if precreateResp.ReturnType == 2 {
		//rapid upload, since got md5 match from baidu server
		return nil
	}
	params = map[string]string{
		"method":       "upload",
		"access_token": d.AccessToken,
		"type":         "tmpfile",
		"path":         path,
		"uploadid":     precreateResp.Uploadid,
	}
	blockList := make([]string, 0, count)
	byteSize := sliceSize
	fileMd5H := md5.New()
	sliceMd5H := md5.New()
	sliceMd5H2 := md5.New()
	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)

	var offset int64 = 0
	for i := 1; i <= count; i++ {
		if utils.IsCanceled(ctx) {
			return nil, ctx.Err()
		}
		if i == count {
			byteSize = lastBlockSize
		}
		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
		if err != nil && err != io.EOF {
			return nil, err
		}
		blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
		sliceMd5H.Reset()
	}
	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
	blockListStr, _ := utils.Json.MarshalToString(blockList)
	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
	mtime := stream.ModTime().Unix()
	ctime := stream.CreateTime().Unix()

	// step 1: precreate
	// try to resume a previously saved upload
	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
	if !ok {
		params := map[string]string{
			"method": "precreate",
		}
		form := map[string]string{
			"path":        path,
			"size":        strconv.FormatInt(streamSize, 10),
			"isdir":       "0",
			"autoinit":    "1",
			"rtype":       "3",
			"block_list":  blockListStr,
			"content-md5": contentMd5,
			"slice-md5":   sliceMd5,
		}
		joinTime(form, ctime, mtime)

		log.Debugf("[baidu_netdisk] precreate data: %s", form)
		_, err = d.postForm("/xpan/file", params, form, &precreateResp)
		if err != nil {
			return nil, err
		}
		log.Debugf("%+v", precreateResp)
		if precreateResp.ReturnType == 2 {
			//rapid upload, since got md5 match from baidu server
			if err != nil {
				return nil, err
			}
			return fileToObj(precreateResp.File), nil
		}
	}
	// step 2: upload the slices
	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
		retry.Attempts(3),
		retry.Delay(time.Second),
		retry.DelayType(retry.BackOffDelay))
	for i, partseq := range precreateResp.BlockList {
		params["partseq"] = strconv.Itoa(partseq)
		byteSize := int64(math.Min(float64(streamSize-offset), float64(DefaultSliceSize)))
		err := retry.Do(func() error {
			return d.uploadSlice(ctx, &params, stream.GetName(), tempFile, offset, byteSize)
		},
			retry.Context(ctx),
			retry.Attempts(3))
		if err != nil {
			return err
		if utils.IsCanceled(upCtx) {
			break
		}
		offset += byteSize

		if len(precreateResp.BlockList) > 0 {
			up(i * 100 / len(precreateResp.BlockList))
		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
		if partseq+1 == count {
			byteSize = lastBlockSize
		}
		threadG.Go(func(ctx context.Context) error {
			params := map[string]string{
				"method":       "upload",
				"access_token": d.AccessToken,
				"type":         "tmpfile",
				"path":         path,
				"uploadid":     precreateResp.Uploadid,
				"partseq":      strconv.Itoa(partseq),
			}
			err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
			if err != nil {
				return err
			}
			up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
			precreateResp.BlockList[i] = -1
			return nil
		})
	}
	_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, block_list_str)
	return err
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params *map[string]string, fileName string, file *os.File, offset int64, byteSize int64) error {
	_, err := file.Seek(offset, io.SeekStart)
	if err = threadG.Wait(); err != nil {
		// if the user canceled the upload, save the progress
		if errors.Is(err, context.Canceled) {
			precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
			base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
		}
		return nil, err
	}

	// step 3: create the file
	var newFile File
	_, err = d.create(path, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
	if err != nil {
		return err
		return nil, err
	}
	return fileToObj(newFile), nil
}

func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
	res, err := base.RestyClient.R().
		SetContext(ctx).
		SetQueryParams(*params).
		SetFileReader("file", fileName, io.LimitReader(file, byteSize)).
		Post(BaiduFileAPI)
		SetQueryParams(params).
		SetFileReader("file", fileName, file).
		Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
	if err != nil {
		return err
	}
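For reference, the precreate request above carries three digests computed in a single pass over the cached file: content-md5 (whole file), slice-md5 (first 256 KiB) and block_list (one md5 per slice). A standalone sketch of that pass using only the standard library (a small local limitWriter stands in for alist's utils.LimitWriter; names are illustrative):

package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"io"
	"os"
)

// limitWriter forwards at most n bytes to w, then silently discards the rest.
type limitWriter struct {
	w io.Writer
	n int64
}

func (l *limitWriter) Write(p []byte) (int, error) {
	if l.n > 0 {
		m := int64(len(p))
		if m > l.n {
			m = l.n
		}
		if _, err := l.w.Write(p[:m]); err != nil {
			return 0, err
		}
		l.n -= m
	}
	return len(p), nil // report full writes so MultiWriter keeps streaming
}

// digests computes the whole-file md5, the md5 of the first 256 KiB,
// and one md5 per slice, all in a single read of the file.
func digests(f *os.File, size, sliceSize int64) (content, first256k string, blocks []string, err error) {
	const head = 256 * 1024
	fileH, sliceH, headH := md5.New(), md5.New(), md5.New()
	headW := &limitWriter{w: headH, n: head}
	for left := size; left > 0; {
		n := sliceSize
		if left < n {
			n = left
		}
		if _, err = io.CopyN(io.MultiWriter(fileH, sliceH, headW), f, n); err != nil {
			return
		}
		blocks = append(blocks, hex.EncodeToString(sliceH.Sum(nil)))
		sliceH.Reset()
		left -= n
	}
	content = hex.EncodeToString(fileH.Sum(nil))
	first256k = hex.EncodeToString(headH.Sum(nil))
	return
}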

@ -15,6 +15,8 @@ type Addition struct {
	ClientSecret  string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
	CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
	AccessToken   string
	UploadThread  string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
	UploadAPI     string `json:"upload_api" default:"https://d.pcs.baidu.com"`
}

var config = driver.Config{

@ -1,6 +1,7 @@
package baidu_netdisk

import (
	"path"
	"strconv"
	"time"

@ -17,10 +18,8 @@ type File struct {
	//OwnerType    int    `json:"owner_type"`
	//Category     int    `json:"category"`
	//RealCategory string `json:"real_category"`
	FsId        int64 `json:"fs_id"`
	ServerMtime int64 `json:"server_mtime"`
	FsId int64 `json:"fs_id"`
	//OperId      int `json:"oper_id"`
	//ServerCtime int `json:"server_ctime"`
	Thumbs struct {
		//Icon string `json:"icon"`
		Url3 string `json:"url3"`

@ -28,29 +27,52 @@ type File struct {
		//Url1 string `json:"url1"`
	} `json:"thumbs"`
	//Wpfile int `json:"wpfile"`
	//LocalMtime int `json:"local_mtime"`

	Size int64 `json:"size"`
	//ExtentTinyint7 int `json:"extent_tinyint7"`
	Path string `json:"path"`
	//Share int `json:"share"`
	//ServerAtime int `json:"server_atime"`
	//Pl int `json:"pl"`
	//LocalCtime int `json:"local_ctime"`
	ServerFilename string `json:"server_filename"`
	//Md5 string `json:"md5"`
	Md5 string `json:"md5"`
	//OwnerId int `json:"owner_id"`
	//Unlist int `json:"unlist"`
	Isdir int `json:"isdir"`

	// list resp
	ServerCtime int64 `json:"server_ctime"`
	ServerMtime int64 `json:"server_mtime"`
	LocalMtime  int64 `json:"local_mtime"`
	LocalCtime  int64 `json:"local_ctime"`
	//ServerAtime int64 `json:"server_atime"`

	// only create and precreate resp
	Ctime int64 `json:"ctime"`
	Mtime int64 `json:"mtime"`
}

func fileToObj(f File) *model.ObjThumb {
	if f.ServerFilename == "" {
		f.ServerFilename = path.Base(f.Path)
	}
	if f.LocalCtime == 0 {
		f.LocalCtime = f.Ctime
	}
	if f.LocalMtime == 0 {
		f.LocalMtime = f.Mtime
	}
	return &model.ObjThumb{
		Object: model.Object{
			ID:       strconv.FormatInt(f.FsId, 10),
			Path:     f.Path,
			Name:     f.ServerFilename,
			Size:     f.Size,
			Modified: time.Unix(f.ServerMtime, 0),
			Modified: time.Unix(f.LocalMtime, 0),
			Ctime:    time.Unix(f.LocalCtime, 0),
			IsFolder: f.Isdir == 1,

			// the MD5 fetched directly is wrong
			// HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
		},
		Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
	}

@ -154,10 +176,15 @@ type DownloadResp2 struct {
}

type PrecreateResp struct {
	Path       string `json:"path"`
	Uploadid   string `json:"uploadid"`
	ReturnType int    `json:"return_type"`
	BlockList  []int  `json:"block_list"`
	Errno      int    `json:"errno"`
	RequestId  int64  `json:"request_id"`
	Errno      int   `json:"errno"`
	RequestId  int64 `json:"request_id"`
	ReturnType int   `json:"return_type"`

	// return_type=1
	Path      string `json:"path"`
	Uploadid  string `json:"uploadid"`
	BlockList []int  `json:"block_list"`

	// return_type=2
	File File `json:"info"`
}
@ -1,18 +1,18 @@
package baidu_netdisk

import (
	"errors"
	"fmt"
	"github.com/avast/retry-go"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/avast/retry-go"
	"github.com/go-resty/resty/v2"
	log "github.com/sirupsen/logrus"
)

@ -21,7 +21,7 @@ import (

func (d *BaiduNetdisk) refreshToken() error {
	err := d._refreshToken()
	if err != nil && err == errs.EmptyToken {
	if err != nil && errors.Is(err, errs.EmptyToken) {
		err = d._refreshToken()
	}
	return err

@ -73,7 +73,7 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
	log.Info("refreshing baidu_netdisk token.")
	err2 := d.refreshToken()
	if err2 != nil {
		return err2
		return retry.Unrecoverable(err2)
	}
}
return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)

@ -81,7 +81,10 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
	result = res.Body()
	return nil
},
	retry.Attempts(3))
	retry.LastErrorOnly(true),
	retry.Attempts(3),
	retry.Delay(time.Second),
	retry.DelayType(retry.BackOffDelay))
return result, err
}

@ -91,10 +94,10 @@ func (d *BaiduNetdisk) get(pathname string, params map[string]string, resp inter
	}, resp)
}

func (d *BaiduNetdisk) post(pathname string, params map[string]string, data interface{}, resp interface{}) ([]byte, error) {
func (d *BaiduNetdisk) postForm(pathname string, params map[string]string, form map[string]string, resp interface{}) ([]byte, error) {
	return d.request("https://pan.baidu.com/rest/2.0"+pathname, http.MethodPost, func(req *resty.Request) {
		req.SetQueryParams(params)
		req.SetBody(data)
		req.SetFormData(form)
	}, resp)
}
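The post → postForm change swaps hand-assembled "k=v&k2=v2" bodies (which needed the manual encodeURIComponent escaping elsewhere in this file) for resty's form-data support, which URL-encodes each value and sets the Content-Type itself. A minimal sketch of the two styles (values are illustrative):

package sketch

import (
	"net/url"

	"github.com/go-resty/resty/v2"
)

func styles(req *resty.Request, path string) {
	// old style: caller escapes values and assembles the body by hand
	req.SetBody("path=" + url.QueryEscape(path) + "&size=42")

	// new style: resty encodes the map and sets
	// Content-Type: application/x-www-form-urlencoded itself
	req.SetFormData(map[string]string{"path": path, "size": "42"})
}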
@ -149,6 +152,9 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, args model.LinkArgs) (*model
	//if res.StatusCode() == 302 {
	u = res.Header().Get("location")
	//}

	updateObjMd5(file, "pan.baidu.com", u)

	return &model.Link{
		URL: u,
		Header: http.Header{

@ -171,6 +177,9 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
	if err != nil {
		return nil, err
	}

	updateObjMd5(file, d.CustomCrackUA, resp.Info[0].Dlink)

	return &model.Link{
		URL: resp.Info[0].Dlink,
		Header: http.Header{

@ -179,32 +188,79 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
	}, nil
}

func (d *BaiduNetdisk) manage(opera string, filelist interface{}) ([]byte, error) {
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
	params := map[string]string{
		"method": "filemanager",
		"opera":  opera,
	}
	marshal, err := utils.Json.Marshal(filelist)
	if err != nil {
		return nil, err
	}
	data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
	return d.post("/xpan/file", params, data, nil)
	marshal, _ := utils.Json.MarshalToString(filelist)
	return d.postForm("/xpan/file", params, map[string]string{
		"async":    "0",
		"filelist": marshal,
		"ondup":    "fail",
	}, nil)
}

func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
	params := map[string]string{
		"method": "create",
	}
	data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
	if uploadid != "" {
		data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
	form := map[string]string{
		"path":  path,
		"size":  strconv.FormatInt(size, 10),
		"isdir": strconv.Itoa(isdir),
		"rtype": "3",
	}
	return d.post("/xpan/file", params, data, nil)
	if mtime != 0 && ctime != 0 {
		joinTime(form, ctime, mtime)
	}

	if uploadid != "" {
		form["uploadid"] = uploadid
	}
	if block_list != "" {
		form["block_list"] = block_list
	}
	return d.postForm("/xpan/file", params, form, resp)
}

func encodeURIComponent(str string) string {
	r := url.QueryEscape(str)
	r = strings.ReplaceAll(r, "+", "%20")
	return r
func joinTime(form map[string]string, ctime, mtime int64) {
	form["local_mtime"] = strconv.FormatInt(mtime, 10)
	form["local_ctime"] = strconv.FormatInt(ctime, 10)
}

func updateObjMd5(obj model.Obj, userAgent, u string) {
	object := model.GetRawObject(obj)
	if object != nil {
		req, _ := http.NewRequest(http.MethodHead, u, nil)
		req.Header.Add("User-Agent", userAgent)
		resp, _ := base.HttpClient.Do(req)
		if resp != nil {
			contentMd5 := resp.Header.Get("Content-Md5")
			object.HashInfo = utils.NewHashInfo(utils.MD5, contentMd5)
		}
	}
}

const (
	DefaultSliceSize int64 = 4 * utils.MB
	VipSliceSize           = 16 * utils.MB
	SVipSliceSize          = 32 * utils.MB
)

func (d *BaiduNetdisk) getSliceSize() int64 {
	switch d.vipType {
	case 1:
		return VipSliceSize
	case 2:
		return SVipSliceSize
	default:
		return DefaultSliceSize
	}
}

// func encodeURIComponent(str string) string {
// 	r := url.QueryEscape(str)
// 	r = strings.ReplaceAll(r, "+", "%20")
// 	return r
// }

@ -4,18 +4,22 @@ import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/errgroup"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/avast/retry-go"
	"github.com/go-resty/resty/v2"
)

@ -26,6 +30,8 @@ type BaiduPhoto struct {
	AccessToken string
	Uk          int64
	root        model.Obj

	uploadThread int
}

func (d *BaiduPhoto) Config() driver.Config {

@ -37,6 +43,11 @@ func (d *BaiduPhoto) GetAddition() driver.Additional {
}

func (d *BaiduPhoto) Init(ctx context.Context) error {
	d.uploadThread, _ = strconv.Atoi(d.UploadThread)
	if d.uploadThread < 1 || d.uploadThread > 32 {
		d.uploadThread, d.UploadThread = 3, "3"
	}

	if err := d.refreshToken(); err != nil {
		return err
	}
@ -211,45 +222,57 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// zero-size files are not supported
	if stream.GetSize() == 0 {
		return nil, fmt.Errorf("file size cannot be zero")
	}

	// TODO:
	// no rapid-upload method has been found yet

	// the full-file md5 is needed, so io.Seek must be supported
	tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
	tempFile, err := stream.CacheFullInTempFile()
	if err != nil {
		return nil, err
	}
	defer func() {
		_ = tempFile.Close()
		_ = os.Remove(tempFile.Name())
	}()

	const DEFAULT int64 = 1 << 22
	const SliceSize int64 = 1 << 18

	// compute the required values
	const DEFAULT = 1 << 22
	const SliceSize = 1 << 18
	count := int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
	streamSize := stream.GetSize()
	count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
	lastBlockSize := streamSize % DEFAULT
	if lastBlockSize == 0 {
		lastBlockSize = DEFAULT
	}

	// step 1: compute the MD5s
	sliceMD5List := make([]string, 0, count)
	fileMd5 := md5.New()
	sliceMd5 := md5.New()
	sliceMd52 := md5.New()
	slicemd52Write := utils.LimitWriter(sliceMd52, SliceSize)
	byteSize := int64(DEFAULT)
	fileMd5H := md5.New()
	sliceMd5H := md5.New()
	sliceMd5H2 := md5.New()
	slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
	for i := 1; i <= count; i++ {
		if utils.IsCanceled(ctx) {
			return nil, ctx.Err()
		}

		_, err := io.CopyN(io.MultiWriter(fileMd5, sliceMd5, slicemd52Write), tempFile, DEFAULT)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		if i == count {
			byteSize = lastBlockSize
		}
		_, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
		if err != nil && err != io.EOF {
			return nil, err
		}
		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5.Sum(nil)))
		sliceMd5.Reset()
		sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
		sliceMd5H.Reset()
	}
	if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
		return nil, err
	}
	content_md5 := hex.EncodeToString(fileMd5.Sum(nil))
	slice_md5 := hex.EncodeToString(sliceMd52.Sum(nil))
	contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
	sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
	blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)

	// start the upload
	// step 2: precreate
	params := map[string]string{
		"autoinit": "1",
		"isdir":    "0",

@ -257,46 +280,69 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
		"ctype": "11",
		"path":  fmt.Sprintf("/%s", stream.GetName()),
		"size":  fmt.Sprint(stream.GetSize()),
		"slice-md5":   slice_md5,
		"content-md5": content_md5,
		"block_list":  MustString(utils.Json.MarshalToString(sliceMD5List)),
		"slice-md5":   sliceMd5,
		"content-md5": contentMd5,
		"block_list":  blockListStr,
	}

	// precreate
	var precreateResp PrecreateResp
	_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
		r.SetContext(ctx)
		r.SetFormData(params)
	}, &precreateResp)
	if err != nil {
		return nil, err
	// try to resume a previously saved upload
	precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
	if !ok {
		_, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
			r.SetContext(ctx)
			r.SetFormData(params)
		}, &precreateResp)
		if err != nil {
			return nil, err
		}
	}

	switch precreateResp.ReturnType {
	case 1: // upload the file
		uploadParams := map[string]string{
			"method":   "upload",
			"path":     params["path"],
			"uploadid": precreateResp.UploadID,
		}
	case 1: // step 3: upload the file slices
		threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
			retry.Attempts(3),
			retry.Delay(time.Second),
			retry.DelayType(retry.BackOffDelay))
		for i, partseq := range precreateResp.BlockList {
			if utils.IsCanceled(upCtx) {
				break
			}

		for i := 0; i < count; i++ {
			if utils.IsCanceled(ctx) {
				return nil, ctx.Err()
			i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
			if partseq+1 == count {
				byteSize = lastBlockSize
			}
			uploadParams["partseq"] = fmt.Sprint(i)
			_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
				r.SetContext(ctx)
				r.SetQueryParams(uploadParams)
				r.SetFileReader("file", stream.GetName(), io.LimitReader(tempFile, DEFAULT))
			}, nil)
			if err != nil {
				return nil, err

			threadG.Go(func(ctx context.Context) error {
				uploadParams := map[string]string{
					"method":   "upload",
					"path":     params["path"],
					"partseq":  fmt.Sprint(partseq),
					"uploadid": precreateResp.UploadID,
				}

				_, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
					r.SetContext(ctx)
					r.SetQueryParams(uploadParams)
					r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
				}, nil)
				if err != nil {
					return err
				}
				up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
				precreateResp.BlockList[i] = -1
				return nil
			})
		}
		if err = threadG.Wait(); err != nil {
			if errors.Is(err, context.Canceled) {
				precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
				base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
			}
			up(i * 100 / count)
			return nil, err
		}
		fallthrough
	case 2: // create the file
	case 2: // step 4: create the file
		params["uploadid"] = precreateResp.UploadID
		_, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
			r.SetContext(ctx)

@ -306,7 +352,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
		return nil, err
	}
	fallthrough
	case 3: // add to the album
	case 3: // step 5: add to the album
		rootfile := precreateResp.Data.toFile()
		if album, ok := dstDir.(*Album); ok {
			return d.AddAlbumFile(ctx, album, rootfile)
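Both Baidu drivers now push slices through alist's pkg/errgroup wrapper, which combines a concurrency limit with per-task retry. The shape of the same bounded-concurrency pattern with the stock golang.org/x/sync/errgroup (retry omitted; uploadSlice is a stand-in for the real request, not alist's API):

package sketch

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// uploadAll runs one goroutine per part, never more than uploadThread at once.
func uploadAll(ctx context.Context, parts []int, uploadThread int,
	uploadSlice func(context.Context, int) error) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(uploadThread) // at most uploadThread slices in flight
	for _, part := range parts {
		part := part // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			return uploadSlice(ctx, part)
		})
	}
	return g.Wait() // the first error cancels ctx for the rest
}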

@ -61,12 +61,12 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile {

func renameAlbum(album *Album, newName string) *Album {
	return &Album{
		AlbumID:    album.AlbumID,
		Tid:        album.Tid,
		JoinTime:   album.JoinTime,
		CreateTime: album.CreateTime,
		Title:      newName,
		Mtime:      time.Now().Unix(),
		AlbumID:      album.AlbumID,
		Tid:          album.Tid,
		JoinTime:     album.JoinTime,
		CreationTime: album.CreationTime,
		Title:        newName,
		Mtime:        time.Now().Unix(),
	}
}

@ -13,6 +13,7 @@ type Addition struct {
	DeleteOrigin bool   `json:"delete_origin"`
	ClientID     string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
	ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
	UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
}

var config = driver.Config{
@ -4,6 +4,8 @@ import (
	"fmt"
	"time"

	"github.com/alist-org/alist/v3/pkg/utils"

	"github.com/alist-org/alist/v3/internal/model"
)

@ -51,22 +53,17 @@ type (
	Ctime    int64    `json:"ctime"` // creation time, in seconds
	Mtime    int64    `json:"mtime"` // modification time, in seconds
	Thumburl []string `json:"thumburl"`

	parseTime *time.Time
	Md5 string `json:"md5"`
	}
)

func (c *File) GetSize() int64  { return c.Size }
func (c *File) GetName() string { return getFileName(c.Path) }
func (c *File) ModTime() time.Time {
	if c.parseTime == nil {
		c.parseTime = toTime(c.Mtime)
	}
	return *c.parseTime
}
func (c *File) IsDir() bool     { return false }
func (c *File) GetID() string   { return "" }
func (c *File) GetPath() string { return "" }
func (c *File) GetSize() int64        { return c.Size }
func (c *File) GetName() string       { return getFileName(c.Path) }
func (c *File) CreateTime() time.Time { return time.Unix(c.Ctime, 0) }
func (c *File) ModTime() time.Time    { return time.Unix(c.Mtime, 0) }
func (c *File) IsDir() bool           { return false }
func (c *File) GetID() string         { return "" }
func (c *File) GetPath() string       { return "" }
func (c *File) Thumb() string {
	if len(c.Thumburl) > 0 {
		return c.Thumburl[0]

@ -74,6 +71,10 @@ func (c *File) Thumb() string {
	return ""
}

func (c *File) GetHash() utils.HashInfo {
	return utils.NewHashInfo(utils.MD5, c.Md5)
}

/* album section */
type (
	AlbumListResp struct {

@ -84,12 +85,12 @@ type (
	}

	Album struct {
		AlbumID    string `json:"album_id"`
		Tid        int64  `json:"tid"`
		Title      string `json:"title"`
		JoinTime   int64  `json:"join_time"`
		CreateTime int64  `json:"create_time"`
		Mtime      int64  `json:"mtime"`
		AlbumID      string `json:"album_id"`
		Tid          int64  `json:"tid"`
		Title        string `json:"title"`
		JoinTime     int64  `json:"join_time"`
		CreationTime int64  `json:"create_time"`
		Mtime        int64  `json:"mtime"`

		parseTime *time.Time
	}

@ -109,17 +110,17 @@ type (
	}
)

func (a *Album) GetSize() int64  { return 0 }
func (a *Album) GetName() string { return a.Title }
func (a *Album) ModTime() time.Time {
	if a.parseTime == nil {
		a.parseTime = toTime(a.Mtime)
	}
	return *a.parseTime
func (a *Album) GetHash() utils.HashInfo {
	return utils.HashInfo{}
}
func (a *Album) IsDir() bool     { return true }
func (a *Album) GetID() string   { return "" }
func (a *Album) GetPath() string { return "" }

func (a *Album) GetSize() int64        { return 0 }
func (a *Album) GetName() string       { return a.Title }
func (a *Album) CreateTime() time.Time { return time.Unix(a.CreationTime, 0) }
func (a *Album) ModTime() time.Time    { return time.Unix(a.Mtime, 0) }
func (a *Album) IsDir() bool           { return true }
func (a *Album) GetID() string         { return "" }
func (a *Album) GetPath() string       { return "" }

type (
	CopyFileResp struct {

@ -160,9 +161,9 @@ type (
	CreateFileResp

	// returned when the file does not exist
	Path      string  `json:"path"`
	UploadID  string  `json:"uploadid"`
	Blocklist []int64 `json:"block_list"`
	Path      string `json:"path"`
	UploadID  string `json:"uploadid"`
	BlockList []int  `json:"block_list"`
	}
)

31 drivers/base/upload.go Normal file

@ -0,0 +1,31 @@
package base

import (
	"fmt"
	"strings"
	"time"

	"github.com/Xhofe/go-cache"
	"github.com/alist-org/alist/v3/internal/driver"
)

// storage upload progress, for upload recovery
var UploadStateCache = cache.NewMemCache(cache.WithShards[any](32))

// SaveUploadProgress keeps an upload's progress for 20 minutes.
func SaveUploadProgress(driver driver.Driver, state any, keys ...string) bool {
	return UploadStateCache.Set(
		fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")),
		state,
		cache.WithEx[any](time.Minute*20))
}

// An upload progress can only be resumed by one process at a time,
// so GetUploadProgress fetches and deletes it in a single step.
func GetUploadProgress[T any](driver driver.Driver, keys ...string) (state T, ok bool) {
	v, ok := UploadStateCache.GetDel(fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")))
	if ok {
		state, ok = v.(T)
	}
	return
}
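Usage in the Baidu drivers above follows a fetch-or-start / save-on-cancel pattern; in outline (names taken from the diffs above):

// before precreate: claim any saved state (GetDel semantics mean only
// one resuming upload can take it over)
state, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
	// nothing saved: perform a fresh precreate
}
// ... upload the slices still listed in state.BlockList ...
if err := threadG.Wait(); errors.Is(err, context.Canceled) {
	// user canceled: persist the remaining block list for the next attempt
	base.SaveUploadProgress(d, state, d.AccessToken, contentMd5)
}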

@ -49,7 +49,19 @@ func (d *Cloudreve) List(ctx context.Context, dir model.Obj, args model.ListArgs
}

return utils.SliceConvert(r.Objects, func(src Object) (model.Obj, error) {
	return objectToObj(src), nil
	thumb, err := d.GetThumb(src)
	if err != nil {
		return nil, err
	}
	if src.Type == "dir" && d.EnableThumbAndFolderSize {
		var dprop DirectoryProp
		err = d.request(http.MethodGet, "/object/property/"+src.Id+"?is_folder=true", nil, &dprop)
		if err != nil {
			return nil, err
		}
		src.Size = dprop.Size
	}
	return objectToObj(src, thumb), nil
})
}

@ -115,7 +127,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	if stream.GetReadCloser() == http.NoBody {
	if io.ReadCloser(stream) == http.NoBody {
		return d.create(ctx, dstDir, stream)
	}
	var r DirectoryResp
@ -9,10 +9,12 @@ type Addition struct {
	// Usually one of two
	driver.RootPath
	// define other
	Address  string `json:"address" required:"true"`
	Username string `json:"username"`
	Password string `json:"password"`
	Cookie   string `json:"cookie"`
	Address                  string `json:"address" required:"true"`
	Username                 string `json:"username"`
	Password                 string `json:"password"`
	Cookie                   string `json:"cookie"`
	CustomUA                 string `json:"custom_ua"`
	EnableThumbAndFolderSize bool   `json:"enable_thumb_and_folder_size"`
}

var config = driver.Config{
@ -44,13 +44,20 @@ type Object struct {
	SourceEnabled bool `json:"source_enabled"`
}

func objectToObj(f Object) *model.Object {
	return &model.Object{
		ID:       f.Id,
		Name:     f.Name,
		Size:     int64(f.Size),
		Modified: f.Date,
		IsFolder: f.Type == "dir",
type DirectoryProp struct {
	Size int `json:"size"`
}

func objectToObj(f Object, t model.Thumbnail) *model.ObjThumb {
	return &model.ObjThumb{
		Object: model.Object{
			ID:       f.Id,
			Name:     f.Name,
			Size:     int64(f.Size),
			Modified: f.Date,
			IsFolder: f.Type == "dir",
		},
		Thumbnail: t,
	}
}

@ -22,15 +22,18 @@ const loginPath = "/user/session"

func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
	u := d.Address + "/api/v3" + path
	ua := d.CustomUA
	if ua == "" {
		ua = base.UserAgent
	}
	req := base.RestyClient.R()
	req.SetHeaders(map[string]string{
		"Cookie":     "cloudreve-session=" + d.Cookie,
		"Accept":     "application/json, text/plain, */*",
		"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
		"User-Agent": ua,
	})

	var r Resp

	req.SetResult(&r)

	if callback != nil {

@ -146,3 +149,26 @@ func convertSrc(obj model.Obj) map[string]interface{} {
	m["items"] = items
	return m
}

func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
	if !d.Addition.EnableThumbAndFolderSize {
		return model.Thumbnail{}, nil
	}
	ua := d.CustomUA
	if ua == "" {
		ua = base.UserAgent
	}
	req := base.NoRedirectClient.R()
	req.SetHeaders(map[string]string{
		"Cookie":     "cloudreve-session=" + d.Cookie,
		"Accept":     "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
		"User-Agent": ua,
	})
	resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
	if err != nil {
		return model.Thumbnail{}, err
	}
	return model.Thumbnail{
		Thumbnail: resp.Header().Get("Location"),
	}, nil
}
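GetThumb depends on a client that does not follow the 302, so the thumbnail URL can be read back from the Location header. The standard-library equivalent of base.NoRedirectClient (a sketch; address and id are placeholders):

package sketch

import "net/http"

func thumbLocation(address, id string) (string, error) {
	client := &http.Client{
		// surface the redirect response instead of following it
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	resp, err := client.Get(address + "/api/v3/file/thumb/" + id)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	return resp.Header.Get("Location"), nil
}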

@ -3,8 +3,8 @@ package crypt

import (
	"context"
	"fmt"
	"github.com/alist-org/alist/v3/internal/stream"
	"io"
	"net/http"
	stdpath "path"
	"regexp"
	"strings"

@ -13,10 +13,10 @@ import (
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/fs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/net"
	"github.com/alist-org/alist/v3/internal/op"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/alist-org/alist/v3/pkg/utils"
	"github.com/alist-org/alist/v3/server/common"
	rcCrypt "github.com/rclone/rclone/backend/crypt"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/obscure"

@ -55,6 +55,8 @@ func (d *Crypt) Init(ctx context.Context) error {
	if !isCryptExt(d.EncryptedSuffix) {
		return fmt.Errorf("EncryptedSuffix is Illegal")
	}
	d.FileNameEncoding = utils.GetNoneEmpty(d.FileNameEncoding, "base64")
	d.EncryptedSuffix = utils.GetNoneEmpty(d.EncryptedSuffix, ".bin")

	op.MustSaveDriverStorage(d)

@ -72,7 +74,7 @@ func (d *Crypt) Init(ctx context.Context) error {
	"password2": p2,
	"filename_encryption": d.FileNameEnc,
	"directory_name_encryption": d.DirNameEnc,
	"filename_encoding": "base64",
	"filename_encoding": d.FileNameEncoding,
	"suffix": d.EncryptedSuffix,
	"pass_bad_blocks": "",
}

@ -82,7 +84,6 @@ func (d *Crypt) Init(ctx context.Context) error {
}
d.cipher = c

//c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil)
return nil
}

@ -128,6 +129,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
	Size:     0,
	Modified: obj.ModTime(),
	IsFolder: obj.IsDir(),
	Ctime:    obj.CreateTime(),
	// discarding hash as it's encrypted
}
result = append(result, &objRes)
} else {

@ -147,8 +150,13 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
	Size:     size,
	Modified: obj.ModTime(),
	IsFolder: obj.IsDir(),
	Ctime:    obj.CreateTime(),
	// discarding hash as it's encrypted
}
if !ok {
if d.Thumbnail && thumb == "" {
	thumb = utils.EncodePath(common.GetApiUrl(nil)+stdpath.Join("/d", args.ReqPath, ".thumbnails", name+".webp"), true)
}
if !ok && !d.Thumbnail {
	result = append(result, &objRes)
} else {
	objWithThumb := model.ObjThumb{
@ -232,70 +240,53 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
	return nil, err
}

if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 {
if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
	return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
}
remoteFileSize := remoteFile.GetSize()
remoteClosers := utils.NewClosers()
remoteClosers := utils.EmptyClosers()
rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
	length := underlyingLength
	if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
		length = -1
	}
	if remoteLink.RangeReadCloser.RangeReader != nil {
	rrc := remoteLink.RangeReadCloser
	if len(remoteLink.URL) > 0 {

		rangedRemoteLink := &model.Link{
			URL:    remoteLink.URL,
			Header: remoteLink.Header,
		}
		var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
		if err != nil {
			return nil, err
		}
		rrc = converted
	}
	if rrc != nil {
		//remoteRangeReader, err :=
		remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length})
		remoteClosers.Add(remoteLink.RangeReadCloser.Closers)
		remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
		remoteClosers.AddClosers(rrc.GetClosers())
		if err != nil {
			return nil, err
		}
		return remoteReader, nil
	}
	if remoteLink.ReadSeekCloser != nil {
		_, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart)
	if remoteLink.MFile != nil {
		_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
		if err != nil {
			return nil, err
		}
		//remoteClosers.Add(remoteLink.ReadSeekCloser)
		//keep reuse same ReadSeekCloser and close at last.
		return io.NopCloser(remoteLink.ReadSeekCloser), nil
		//remoteClosers.Add(remoteLink.MFile)
		//keep reuse same MFile and close at last.
		remoteClosers.Add(remoteLink.MFile)
		return io.NopCloser(remoteLink.MFile), nil
	}
	if len(remoteLink.URL) > 0 {
		rangedRemoteLink := &model.Link{
			URL:    remoteLink.URL,
			Header: remoteLink.Header,
		}
		response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length)
		//remoteClosers.Add(response.Body)
		if err != nil {
			return nil, fmt.Errorf("remote storage http request failure,status: %d err:%s", response.StatusCode, err)
		}
		if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent {
			return response.Body, nil
		} else if response.StatusCode == http.StatusOK {
			log.Warnf("remote http server not supporting range request, expect low perfromace!")
			readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length)
			if err != nil {
				return nil, err
			}
			return readCloser, nil
		}

		return response.Body, nil
	}
	//if remoteLink.Data != nil {
	//	log.Warnf("remote storage not supporting range request, expect low perfromace!")
	//	readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length)
	//	remoteCloser = remoteLink.Data
	//	if err != nil {
	//		return nil, err
	//	}
	//	return readCloser, nil
	//}
	return nil, errs.NotSupport

}
resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
	readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
	if err != nil {
		return nil, err

@ -306,7 +297,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
	Header:          remoteLink.Header,
	RangeReadCloser: *resultRangeReadCloser,
	RangeReadCloser: resultRangeReadCloser,
	Expiration:      remoteLink.Expiration,
}

@ -370,32 +361,32 @@ func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
	return op.Remove(ctx, d.remoteStorage, remoteActualPath)
}

func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
	dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
	if err != nil {
		return fmt.Errorf("failed to convert path to remote path: %w", err)
	}

	in := stream.GetReadCloser()
	// Encrypt the data into wrappedIn
	wrappedIn, err := d.cipher.EncryptData(in)
	wrappedIn, err := d.cipher.EncryptData(streamer)
	if err != nil {
		return fmt.Errorf("failed to EncryptData: %w", err)
	}

	streamOut := &model.FileStream{
	// doesn't support seekableStream, since rapid-upload is not working for encrypted data
	streamOut := &stream.FileStream{
		Obj: &model.Object{
			ID:       stream.GetID(),
			Path:     stream.GetPath(),
			Name:     d.cipher.EncryptFileName(stream.GetName()),
			Size:     d.cipher.EncryptedSize(stream.GetSize()),
			Modified: stream.ModTime(),
			IsFolder: stream.IsDir(),
			ID:       streamer.GetID(),
			Path:     streamer.GetPath(),
			Name:     d.cipher.EncryptFileName(streamer.GetName()),
			Size:     d.cipher.EncryptedSize(streamer.GetSize()),
			Modified: streamer.ModTime(),
			IsFolder: streamer.IsDir(),
		},
		ReadCloser: io.NopCloser(wrappedIn),
		Reader:   wrappedIn,
		Mimetype: "application/octet-stream",
		WebPutAsTask: stream.NeedStore(),
		Old: stream.GetOld(),
		WebPutAsTask: streamer.NeedStore(),
		Exist:        streamer.GetExist(),
	}
	err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
	if err != nil {

@ -15,16 +15,13 @@ type Addition struct {
	DirNameEnc string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
	RemotePath string `json:"remote_path" required:"true" help:"This is where the encrypted data stores"`

	Password        string `json:"password" required:"true" confidential:"true" help:"the main password"`
	Salt            string `json:"salt" confidential:"true" help:"If you don't know what is salt, treat it as a second password'. Optional but recommended"`
	EncryptedSuffix string `json:"encrypted_suffix" required:"true" default:".bin" help:"encrypted files will have this suffix"`
}
	Password         string `json:"password" required:"true" confidential:"true" help:"the main password"`
	Salt             string `json:"salt" confidential:"true" help:"If you don't know what is salt, treat it as a second password. Optional but recommended"`
	EncryptedSuffix  string `json:"encrypted_suffix" required:"true" default:".bin" help:"for advanced user only! encrypted files will have this suffix"`
	FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced user only!"`

	/*// inMemory contains decrypted confidential info and other temp data. will not persist these info anywhere
	type inMemory struct {
		password string
		salt string
	}*/
	Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"`
}

var config = driver.Config{
	Name: "Crypt",

|
||||
package crypt
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
stdpath "path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/net"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
)
|
||||
|
||||
func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) {
|
||||
header := net.ProcessHeader(&http.Header{}, &link.Header)
|
||||
header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
|
||||
|
||||
return net.RequestHttp("GET", header, link.URL)
|
||||
}
|
||||
|
||||
// will give the best guessing based on the path
|
||||
func guessPath(path string) (isFolder, secondTry bool) {
|
||||
if strings.HasSuffix(path, "/") {
|
||||
|
@ -203,7 +203,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
	_ = res.Body.Close()

	if count > 0 {
		up((i + 1) * 100 / count)
		up(float64(i+1) * 100 / float64(count))
	}

	offset += byteSize
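The progress fix matters because Go's integer division truncates: with, say, 7 chunks, finishing chunk 3 used to report 42 and the bar could only move in whole-percent jumps, while the float form keeps the fraction. A tiny check:

package sketch

import "fmt"

func Progress() {
	fmt.Println(3 * 100 / 7)                   // 42 — integer division truncates
	fmt.Println(float64(3) * 100 / float64(7)) // 42.857142857142854
}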

@ -64,9 +64,9 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
	return nil, err
}

r := NewFTPFileReader(d.conn, file.GetPath())
r := NewFileReader(d.conn, file.GetPath(), file.GetSize())
link := &model.Link{
	ReadSeekCloser: r,
	MFile: r,
}
return link, nil
}

@ -4,6 +4,7 @@ import (
	"io"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"github.com/jlaffaye/ftp"

@ -30,43 +31,59 @@ func (d *FTP) login() error {
	return nil
}

// An FTP file reader that implements io.ReadSeekCloser for seeking.
type FTPFileReader struct {
	conn   *ftp.ServerConn
	resp   *ftp.Response
	offset int64
	mu     sync.Mutex
	path   string
// FileReader An FTP file reader that implements io.MFile for seeking.
type FileReader struct {
	conn         *ftp.ServerConn
	resp         *ftp.Response
	offset       atomic.Int64
	readAtOffset int64
	mu           sync.Mutex
	path         string
	size         int64
}

func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
	return &FTPFileReader{
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
	return &FileReader{
		conn: conn,
		path: path,
		size: size,
	}
}

func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
func (r *FileReader) Read(buf []byte) (n int, err error) {
	n, err = r.ReadAt(buf, r.offset.Load())
	r.offset.Add(int64(n))
	return
}

func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
	if off < 0 {
		return -1, os.ErrInvalid
	}
	r.mu.Lock()
	defer r.mu.Unlock()

	if off != r.readAtOffset {
		//have to restart the connection, to correct offset
		_ = r.resp.Close()
		r.resp = nil
	}

	if r.resp == nil {
		r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
		r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
		r.readAtOffset = off
		if err != nil {
			return 0, err
		}
	}

	n, err = r.resp.Read(buf)
	r.offset += int64(n)
	r.readAtOffset += int64(n)
	return
}

func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	oldOffset := r.offset
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
	oldOffset := r.offset.Load()
	var newOffset int64
	switch whence {
	case io.SeekStart:

@ -74,11 +91,7 @@ func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
	case io.SeekCurrent:
		newOffset = oldOffset + offset
	case io.SeekEnd:
		size, err := r.conn.FileSize(r.path)
		if err != nil {
			return oldOffset, err
		}
		newOffset = offset + int64(size)
		return r.size, nil
	default:
		return -1, os.ErrInvalid
	}

@ -91,17 +104,11 @@ func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
	// offset not changed, so return directly
	return oldOffset, nil
}
r.offset = newOffset

if r.resp != nil {
	// close the existing ftp data connection, otherwise the next read will be blocked
	_ = r.resp.Close() // we do not care about whether it returns an error
	r.resp = nil
}
r.offset.Store(newOffset)
return newOffset, nil
}

func (r *FTPFileReader) Close() error {
func (r *FileReader) Close() error {
	if r.resp != nil {
		return r.resp.Close()
	}
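Reworking the reader around an offset-taking ReadAt (plus a size known up front) is what lets callers carve out independent windows with io.NewSectionReader, the same mechanism the upload paths above apply to cached temp files. A sketch (f stands in for any io.ReaderAt, e.g. *os.File or the FileReader above):

package sketch

import (
	"io"
	"os"
)

// window returns an independent reader over f[offset, offset+length).
func window(f *os.File, offset, length int64) io.Reader {
	return io.NewSectionReader(f, offset, length) // f may be any io.ReaderAt
}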

@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
	obj := stream.GetOld()
	obj := stream.GetExist()
	var (
		e   Error
		url string

@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
	putUrl := res.Header().Get("location")
	if stream.GetSize() < d.ChunkSize*1024*1024 {
		_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
			req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
		}, nil)
	} else {
		err = d.chunkUpload(ctx, stream, putUrl)

@@ -5,6 +5,7 @@ import (
     "time"
 
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
     log "github.com/sirupsen/logrus"
 )
 
@@ -23,12 +24,17 @@ type File struct {
     Name         string    `json:"name"`
     MimeType     string    `json:"mimeType"`
     ModifiedTime time.Time `json:"modifiedTime"`
+    CreatedTime  time.Time `json:"createdTime"`
     Size            string `json:"size"`
     ThumbnailLink   string `json:"thumbnailLink"`
     ShortcutDetails struct {
         TargetId       string `json:"targetId"`
         TargetMimeType string `json:"targetMimeType"`
     } `json:"shortcutDetails"`
+
+    MD5Checksum    string `json:"md5Checksum"`
+    SHA1Checksum   string `json:"sha1Checksum"`
+    SHA256Checksum string `json:"sha256Checksum"`
 }
 
 func fileToObj(f File) *model.ObjThumb {
@@ -39,10 +45,18 @@ func fileToObj(f File) *model.ObjThumb {
         ID:       f.Id,
         Name:     f.Name,
         Size:     size,
+        Ctime:    f.CreatedTime,
         Modified: f.ModifiedTime,
         IsFolder: f.MimeType == "application/vnd.google-apps.folder",
+        HashInfo: utils.NewHashInfoByMap(map[*utils.HashType]string{
+            utils.MD5:    f.MD5Checksum,
+            utils.SHA1:   f.SHA1Checksum,
+            utils.SHA256: f.SHA256Checksum,
+        }),
     },
-    Thumbnail: model.Thumbnail{
-        Thumbnail: f.ThumbnailLink,
-    },
+    Thumbnail: model.Thumbnail{},
 }
 if f.MimeType == "application/vnd.google-apps.shortcut" {
     obj.ID = f.ShortcutDetails.TargetId
 
@@ -5,7 +5,6 @@ import (
     "crypto/x509"
     "encoding/pem"
     "fmt"
     "io"
-    "io/ioutil"
     "net/http"
     "os"
@@ -13,6 +12,8 @@ import (
     "strconv"
     "time"
 
+    "github.com/alist-org/alist/v3/pkg/http_range"
+
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/pkg/utils"
@@ -195,7 +196,7 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
     }
     query := map[string]string{
         "orderBy":  orderBy,
-        "fields":   "files(id,name,mimeType,size,modifiedTime,thumbnailLink,shortcutDetails),nextPageToken",
+        "fields":   "files(id,name,mimeType,size,modifiedTime,createdTime,thumbnailLink,shortcutDetails,md5Checksum,sha1Checksum,sha256Checksum),nextPageToken",
         "pageSize": "1000",
         "q":        fmt.Sprintf("'%s' in parents and trashed = false", id),
         //"includeItemsFromAllDrives": "true",
@@ -216,25 +217,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
 
 func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
     var defaultChunkSize = d.ChunkSize * 1024 * 1024
-    var finish int64 = 0
-    for finish < stream.GetSize() {
+    var offset int64 = 0
+    for offset < stream.GetSize() {
         if utils.IsCanceled(ctx) {
             return ctx.Err()
         }
-        chunkSize := stream.GetSize() - finish
+        chunkSize := stream.GetSize() - offset
         if chunkSize > defaultChunkSize {
             chunkSize = defaultChunkSize
         }
-        _, err := d.request(url, http.MethodPut, func(req *resty.Request) {
+        reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
+        if err != nil {
+            return err
+        }
+        _, err = d.request(url, http.MethodPut, func(req *resty.Request) {
             req.SetHeaders(map[string]string{
                 "Content-Length": strconv.FormatInt(chunkSize, 10),
-                "Content-Range":  fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
-            }).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
+                "Content-Range":  fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
+            }).SetBody(reader).SetContext(ctx)
         }, nil)
         if err != nil {
             return err
         }
-        finish += chunkSize
+        offset += chunkSize
     }
     return nil
 }
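The rewritten chunkUpload stops draining one shared reader through io.LimitReader and instead asks the stream for an independent reader per byte range, driving Content-Range from a single offset variable. A sketch of the same loop against a hypothetical rangeRead callback (illustrative; this is not the project's model.FileStreamer interface):

```go
package uploadsketch

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

// rangeRead returns a fresh reader for the requested byte range, so every
// chunk (and any retry of it) is independent of the previous one.
type rangeRead func(start, length int64) (io.Reader, error)

func putChunks(ctx context.Context, url string, size, chunkSize int64, read rangeRead) error {
	for off := int64(0); off < size; off += chunkSize {
		n := chunkSize
		if size-off < n {
			n = size - off // final, possibly short, chunk
		}
		body, err := read(off, n)
		if err != nil {
			return err
		}
		req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, body)
		if err != nil {
			return err
		}
		req.ContentLength = n
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", off, off+n-1, size))
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		resp.Body.Close()
		if resp.StatusCode >= 400 {
			return fmt.Errorf("chunk at %d: unexpected status %d", off, resp.StatusCode)
		}
	}
	return nil
}
```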
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
 }
 
     resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
-        req.SetBody(stream.GetReadCloser()).SetContext(ctx)
+        req.SetBody(stream).SetContext(ctx)
     }, nil, postHeaders)
 
     if err != nil {
@@ -118,13 +118,102 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // split key-value pairs
 
 // look up a JS variable by key
 func findJSVarFunc(key, data string) string {
-    values := regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
+    var values []string
+    if key != "sasign" {
+        values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
+    } else {
+        matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1)
+        if len(matches) == 3 {
+            values = matches[1]
+        } else {
+            if len(matches) > 0 {
+                values = matches[0]
+            }
+        }
+    }
     if len(values) == 0 {
         return ""
     }
     return values[1]
 }
 
+var findFunction = regexp.MustCompile(`(?ims)^function[^{]+`)
+var findFunctionAll = regexp.MustCompile(`(?is)function[^{]+`)
+
+// find the positions of all functions
+func findJSFunctionIndex(data string, all bool) [][2]int {
+    findFunction := findFunction
+    if all {
+        findFunction = findFunctionAll
+    }
+
+    indexs := findFunction.FindAllStringIndex(data, -1)
+    fIndexs := make([][2]int, 0, len(indexs))
+
+    for _, index := range indexs {
+        if len(index) != 2 {
+            continue
+        }
+        count, data := 0, data[index[1]:]
+        for ii, v := range data {
+            if v == ' ' && count == 0 {
+                continue
+            }
+            if v == '{' {
+                count++
+            }
+
+            if v == '}' {
+                count--
+            }
+            if count == 0 {
+                fIndexs = append(fIndexs, [2]int{index[0], index[1] + ii + 1})
+                break
+            }
+        }
+    }
+    return fIndexs
+}
+
+// remove global JS functions
+func removeJSGlobalFunction(html string) string {
+    indexs := findJSFunctionIndex(html, false)
+    block := make([]string, len(indexs))
+    for i, next := len(indexs)-1, len(html); i >= 0; i-- {
+        index := indexs[i]
+        block[i] = html[index[1]:next]
+        next = index[0]
+    }
+    return strings.Join(block, "")
+}
+
+// get a function by name
+func getJSFunctionByName(html string, name string) (string, error) {
+    indexs := findJSFunctionIndex(html, true)
+    for _, index := range indexs {
+        data := html[index[0]:index[1]]
+        if regexp.MustCompile(`function\s+` + name + `[()\s]+{`).MatchString(data) {
+            return data, nil
+        }
+    }
+    return "", fmt.Errorf("not find %s function", name)
+}
+
+// parse the JSON embedded in the HTML, picking the longest candidate
+func htmlJsonToMap2(html string) (map[string]string, error) {
+    datas := findDataReg.FindAllStringSubmatch(html, -1)
+    var sData string
+    for _, data := range datas {
+        if len(datas) > 0 && len(data[1]) > len(sData) {
+            sData = data[1]
+        }
+    }
+    if sData == "" {
+        return nil, fmt.Errorf("not find data")
+    }
+    return jsonToMap(sData, html), nil
+}
+
 // parse the JSON embedded in the HTML
 func htmlJsonToMap(html string) (map[string]string, error) {
     datas := findDataReg.FindStringSubmatch(html)
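getJSFunctionByName above finds candidate `function` headers with a regexp and then balances braces to locate each function's end. The same technique in a compact, standalone form (naive about braces inside strings and comments, exactly like the original):

```go
package jssketch

import (
	"fmt"
	"regexp"
)

// extractFunction returns the full source of `function <name>(...){...}`
// from src by matching the header, then counting braces to the matching `}`.
func extractFunction(src, name string) (string, error) {
	head := regexp.MustCompile(`function\s+` + regexp.QuoteMeta(name) + `\s*\([^)]*\)\s*{`)
	loc := head.FindStringIndex(src)
	if loc == nil {
		return "", fmt.Errorf("function %s not found", name)
	}
	depth := 0
	for i := loc[1] - 1; i < len(src); i++ { // loc[1]-1 is the opening brace
		switch src[i] {
		case '{':
			depth++
		case '}':
			depth--
			if depth == 0 {
				return src[loc[0] : i+1], nil
			}
		}
	}
	return "", fmt.Errorf("unbalanced braces in %s", name)
}
```

This is also what lets the password branch later in this compare pull just the `down_p` function out of the share page and hand it to htmlJsonToMap.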
@@ -3,6 +3,8 @@ package lanzou
 import (
     "errors"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
     "time"
 )
 
@@ -18,6 +20,9 @@ type RespInfo[T any] struct {
     Info T `json:"info"`
 }
 
+var _ model.Obj = (*FileOrFolder)(nil)
+var _ model.Obj = (*FileOrFolderByShareUrl)(nil)
+
 type FileOrFolder struct {
     Name string `json:"name"`
     //Onof string `json:"onof"` // whether an access code is set
@@ -49,6 +54,14 @@ type FileOrFolder struct {
     shareInfo *FileShare `json:"-"`
 }
 
+func (f *FileOrFolder) CreateTime() time.Time {
+    return f.ModTime()
+}
+
+func (f *FileOrFolder) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
+
 func (f *FileOrFolder) GetID() string {
     if f.IsDir() {
         return f.FolID
@@ -130,6 +143,14 @@ type FileOrFolderByShareUrl struct {
     repairFlag bool `json:"-"`
 }
 
+func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
+    return f.ModTime()
+}
+
+func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
+    return utils.HashInfo{}
+}
+
 func (f *FileOrFolderByShareUrl) GetID() string   { return f.ID }
 func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
 func (f *FileOrFolderByShareUrl) GetPath() string { return "" }
@@ -258,7 +258,7 @@ var sizeFindReg = regexp.MustCompile(`(?i)大小\W*([0-9.]+\s*[bkm]+)`)
 var timeFindReg = regexp.MustCompile(`\d+\s*[秒天分小][钟时]?前|[昨前]天|\d{4}-\d{2}-\d{2}`)
 
 // find the IDs and names of sub-folders inside a shared folder
-var findSubFolaerReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
+var findSubFolderReg = regexp.MustCompile(`(?i)(?:folderlink|mbxfolder).+href="/(.+?)"(?:.+filename")?>(.+?)<`)
 
 // get the download page link
 var findDownPageParamReg = regexp.MustCompile(`<iframe.*?src="(.+?)"`)
@@ -346,7 +346,11 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
 
     // a password is required
     if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
-        param, err := htmlFormToMap(sharePageData)
+        sharePageData, err := getJSFunctionByName(sharePageData, "down_p")
+        if err != nil {
+            return nil, err
+        }
+        param, err := htmlJsonToMap(sharePageData)
         if err != nil {
             return nil, err
         }
@@ -371,7 +375,6 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
             return nil, err
         }
         nextPageData := RemoveNotes(string(data))
-
         param, err = htmlJsonToMap(nextPageData)
         if err != nil {
             return nil, err
@@ -452,7 +455,7 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
 
     files := make([]FileOrFolderByShareUrl, 0)
     // fetch folders (VIP shares)
-    floders := findSubFolaerReg.FindAllStringSubmatch(sharePageData, -1)
+    floders := findSubFolderReg.FindAllStringSubmatch(sharePageData, -1)
     for _, floder := range floders {
         if len(floder) == 3 {
             files = append(files, FileOrFolderByShareUrl{
@@ -473,10 +476,10 @@ func (d *LanZou) getFolderByShareUrl(pwd string, sharePageData string) ([]FileOr
         if err != nil {
             return nil, err
         }
-        /*// files inside the folder are not password-protected either
+        // files inside the folder are password-protected
         for i := 0; i < len(resp.Text); i++ {
             resp.Text[i].Pwd = pwd
-        }*/
+        }
     }
     if len(resp.Text) == 0 {
         break
     }
@@ -12,6 +12,7 @@ import (
     "path/filepath"
     "strconv"
     "strings"
+    "time"
 
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/internal/driver"
@@ -20,6 +21,8 @@ import (
     "github.com/alist-org/alist/v3/internal/sign"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/alist-org/alist/v3/server/common"
+    "github.com/djherbis/times"
     log "github.com/sirupsen/logrus"
+    _ "golang.org/x/image/webp"
 )
 
@@ -101,6 +104,14 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
     if !isFolder {
         size = f.Size()
     }
+    var ctime time.Time
+    t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
+    if err == nil {
+        if t.HasBirthTime() {
+            ctime = t.BirthTime()
+        }
+    }
+
     file := model.ObjThumb{
         Object: model.Object{
             Path: filepath.Join(fullPath, f.Name()),
@@ -108,6 +119,7 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
             Modified: f.ModTime(),
             Size:     size,
             IsFolder: isFolder,
+            Ctime:    ctime,
         },
         Thumbnail: model.Thumbnail{
             Thumbnail: thumb,
@@ -144,10 +156,18 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
     if isFolder {
         size = 0
     }
+    var ctime time.Time
+    t, err := times.Stat(path)
+    if err == nil {
+        if t.HasBirthTime() {
+            ctime = t.BirthTime()
+        }
+    }
     file := model.Object{
         Path:     path,
         Name:     f.Name(),
         Modified: f.ModTime(),
+        Ctime:    ctime,
         Size:     size,
         IsFolder: isFolder,
     }
@@ -170,9 +190,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
         if err != nil {
             return nil, err
         }
-        link.ReadSeekCloser = open
+        link.MFile = open
     } else {
-        link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes()))
+        link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
         //link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
     }
 } else {
@@ -180,7 +200,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
     if err != nil {
         return nil, err
     }
-    link.ReadSeekCloser = open
+    link.MFile = open
 }
 return &link, nil
 }
@@ -264,6 +284,10 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
     if err != nil {
         return err
     }
+    err = os.Chtimes(fullPath, stream.ModTime(), stream.ModTime())
+    if err != nil {
+        log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
+    }
     return nil
 }
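Go's portable os.FileInfo carries no creation time, which is why the Local hunks above pull it from github.com/djherbis/times and quietly fall back to the zero time.Time when the platform or filesystem has no birth time. The pattern in isolation:

```go
package localsketch

import (
	"time"

	"github.com/djherbis/times"
)

// birthTime returns the file's creation time where the OS exposes one
// (macOS, Windows, some Linux filesystems), else the zero time.Time.
func birthTime(path string) time.Time {
	if t, err := times.Stat(path); err == nil && t.HasBirthTime() {
		return t.BirthTime()
	}
	return time.Time{}
}
```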
@@ -7,7 +7,6 @@ import (
     "fmt"
     "io"
     "net/http"
-    "os"
     "strconv"
     "time"
 
@@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
     if err != nil {
         return err
     }
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+    tempFile, err := stream.CacheFullInTempFile()
     if err != nil {
         return err
     }
     defer func() {
         _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
     }()
     uploader := s3manager.NewUploader(s)
     input := &s3manager.UploadInput{
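This is the first of several drivers in the compare (MoPan, PikPak, Quark/UC, and Terabox follow) that swap utils.CreateTempFile(stream.GetReadCloser(), size) for stream.CacheFullInTempFile(). The underlying move is the ordinary spool-to-disk idiom; the sketch below shows only that idiom, under the assumption that AList's version additionally lets the stream remember the cached file so later callers reuse it:

```go
package streamsketch

import (
	"io"
	"os"
)

// cacheFullInTempFile copies r to a temp file and rewinds it, yielding a
// seekable *os.File for APIs (hashing, S3 uploads) that need random access.
func cacheFullInTempFile(r io.Reader) (*os.File, error) {
	f, err := os.CreateTemp("", "cache-*")
	if err != nil {
		return nil, err
	}
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, err
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		f.Close()
		os.Remove(f.Name())
		return nil, err
	}
	return f, nil // ownership of cleanup moves to the caller
}
```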
@@ -4,11 +4,12 @@ import (
     "context"
     "errors"
     "fmt"
-    "github.com/alist-org/alist/v3/pkg/http_range"
-    "github.com/rclone/rclone/lib/readers"
     "io"
     "time"
 
+    "github.com/alist-org/alist/v3/pkg/http_range"
+    "github.com/rclone/rclone/lib/readers"
+
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
@@ -42,7 +43,7 @@ func (d *Mega) Drop(ctx context.Context) error {
 
 func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
     if node, ok := dir.(*MegaNode); ok {
-        nodes, err := d.c.FS.GetChildren(node.Node)
+        nodes, err := d.c.FS.GetChildren(node.n)
         if err != nil {
             return nil, err
         }
@@ -56,7 +57,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
         return res, nil
     }
     log.Errorf("can't convert: %+v", dir)
-    return nil, fmt.Errorf("unable to convert dir to mega node")
+    return nil, fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
@@ -68,21 +69,21 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
 func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
     if node, ok := file.(*MegaNode); ok {
 
-        //down, err := d.c.NewDownload(node.Node)
+        //down, err := d.c.NewDownload(n.Node)
         //if err != nil {
         //	return nil, fmt.Errorf("open download file failed: %w", err)
         //}
 
         size := file.GetSize()
         var finalClosers utils.Closers
-        resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+        resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
             length := httpRange.Length
             if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
                 length = -1
             }
             var down *mega.Download
             err := utils.Retry(3, time.Second, func() (err error) {
-                down, err = d.c.NewDownload(node.Node)
+                down, err = d.c.NewDownload(node.n)
                 return err
             })
             if err != nil {
@@ -97,37 +98,37 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
 
             return readers.NewLimitedReadCloser(oo, length), nil
         }
-        resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers}
+        resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
         resultLink := &model.Link{
-            RangeReadCloser: *resultRangeReadCloser,
+            RangeReadCloser: resultRangeReadCloser,
         }
         return resultLink, nil
     }
-    return nil, fmt.Errorf("unable to convert dir to mega node")
+    return nil, fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
     if parentNode, ok := parentDir.(*MegaNode); ok {
-        _, err := d.c.CreateDir(dirName, parentNode.Node)
+        _, err := d.c.CreateDir(dirName, parentNode.n)
         return err
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
     if srcNode, ok := srcObj.(*MegaNode); ok {
         if dstNode, ok := dstDir.(*MegaNode); ok {
-            return d.c.Move(srcNode.Node, dstNode.Node)
+            return d.c.Move(srcNode.n, dstNode.n)
         }
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
     if srcNode, ok := srcObj.(*MegaNode); ok {
-        return d.c.Rename(srcNode.Node, newName)
+        return d.c.Rename(srcNode.n, newName)
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -136,14 +137,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 
 func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
     if node, ok := obj.(*MegaNode); ok {
-        return d.c.Delete(node.Node, false)
+        return d.c.Delete(node.n, false)
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
     if dstNode, ok := dstDir.(*MegaNode); ok {
-        u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
+        u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
         if err != nil {
             return err
         }
@@ -169,13 +170,13 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
             if err != nil {
                 return err
             }
-            up(id * 100 / u.Chunks())
+            up(float64(id) * 100 / float64(u.Chunks()))
         }
 
         _, err = u.Finish()
         return err
     }
-    return fmt.Errorf("unable to convert dir to mega node")
+    return fmt.Errorf("unable to convert dir to mega n")
 }
 
 //func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -1,6 +1,7 @@
 package mega
 
 import (
+    "github.com/alist-org/alist/v3/pkg/utils"
     "time"
 
     "github.com/alist-org/alist/v3/internal/model"
@@ -8,29 +9,36 @@ import (
 )
 
 type MegaNode struct {
-    *mega.Node
+    n *mega.Node
 }
 
-//func (m *MegaNode) GetSize() int64 {
-//	//TODO implement me
-//	panic("implement me")
-//}
-//
-//func (m *MegaNode) GetName() string {
-//	//TODO implement me
-//	panic("implement me")
-//}
+func (m *MegaNode) GetSize() int64 {
+    return m.n.GetSize()
+}
+
+func (m *MegaNode) GetName() string {
+    return m.n.GetName()
+}
+
+func (m *MegaNode) CreateTime() time.Time {
+    return m.n.GetTimeStamp()
+}
+
+func (m *MegaNode) GetHash() utils.HashInfo {
+    // Mega uses MD5, but the original file's hash can't be obtained, since files are stored encrypted in the cloud
+    return utils.HashInfo{}
+}
 
 func (m *MegaNode) ModTime() time.Time {
-    return m.GetTimeStamp()
+    return m.n.GetTimeStamp()
 }
 
 func (m *MegaNode) IsDir() bool {
-    return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
+    return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
 }
 
 func (m *MegaNode) GetID() string {
-    return m.GetHash()
+    return m.n.GetHash()
 }
 
 func (m *MegaNode) GetPath() string {
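MegaNode stops embedding *mega.Node and hides it behind the private field n. With embedding, every SDK method was promoted onto the driver type — which is how GetID could silently resolve to the SDK's GetHash — whereas a named field forces each model.Obj method to be written out and delegated deliberately. A toy illustration of the difference:

```go
package main

import "fmt"

type sdkNode struct{}

func (sdkNode) GetHash() string { return "sdk-hash" }

// embedded promotes every sdkNode method, whether we meant to expose it or not.
type embedded struct{ sdkNode }

// wrapped promotes nothing; each exposed method is an explicit choice.
type wrapped struct{ n sdkNode }

func (w wrapped) GetID() string { return w.n.GetHash() } // deliberate delegation

func main() {
	fmt.Println(embedded{}.GetHash()) // "sdk-hash" — came along for free
	fmt.Println(wrapped{}.GetID())    // "sdk-hash" — only because we wrote it
}
```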
@@ -6,16 +6,19 @@ import (
     "fmt"
     "io"
     "net/http"
-    "os"
+    "strconv"
+    "strings"
     "time"
 
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/op"
+    "github.com/alist-org/alist/v3/pkg/errgroup"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/avast/retry-go"
     "github.com/foxxorcat/mopan-sdk-go"
     log "github.com/sirupsen/logrus"
 )
 
 type MoPan struct {
@@ -23,7 +26,8 @@ type MoPan struct {
     Addition
     client *mopan.MoClient
 
-    userID string
+    userID       string
+    uploadThread int
 }
 
 func (d *MoPan) Config() driver.Config {
@@ -35,6 +39,10 @@ func (d *MoPan) GetAddition() driver.Additional {
 }
 
 func (d *MoPan) Init(ctx context.Context) error {
+    d.uploadThread, _ = strconv.Atoi(d.UploadThread)
+    if d.uploadThread < 1 || d.uploadThread > 32 {
+        d.uploadThread, d.UploadThread = 3, "3"
+    }
     login := func() error {
         data, err := d.client.Login(d.Phone, d.Password)
         if err != nil {
@@ -47,9 +55,19 @@ func (d *MoPan) Init(ctx context.Context) error {
             return err
         }
         d.userID = info.UserID
+        log.Debugf("[mopan] Phone: %s UserCloudStorageRelations: %+v", d.Phone, data.UserCloudStorageRelations)
+        cloudCircleApp, _ := d.client.QueryAllCloudCircleApp()
+        log.Debugf("[mopan] Phone: %s CloudCircleApp: %+v", d.Phone, cloudCircleApp)
+        if d.RootFolderID == "" {
+            for _, userCloudStorage := range data.UserCloudStorageRelations {
+                if userCloudStorage.Path == "/文件" {
+                    d.RootFolderID = userCloudStorage.FolderID
+                }
+            }
+        }
         return nil
     }
-    d.client = mopan.NewMoClient().
+    d.client = mopan.NewMoClientWithRestyClient(base.NewRestyClient()).
         SetRestyClient(base.RestyClient).
         SetOnAuthorizationExpired(func(_ error) error {
             err := login()
@@ -87,6 +105,7 @@ func (d *MoPan) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
             break
         }
 
+        log.Debugf("[mopan] Phone: %s folder: %+v", d.Phone, data.FileListAO.FolderList)
         files = append(files, utils.MustSliceConvert(data.FileListAO.FolderList, folderToObj)...)
         files = append(files, utils.MustSliceConvert(data.FileListAO.FileList, fileToObj)...)
     }
@@ -99,6 +118,15 @@ func (d *MoPan) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
         return nil, err
     }
 
+    data.DownloadUrl = strings.Replace(strings.ReplaceAll(data.DownloadUrl, "&amp;", "&"), "http://", "https://", 1)
+    res, err := base.NoRedirectClient.R().SetContext(ctx).Head(data.DownloadUrl)
+    if err != nil {
+        return nil, err
+    }
+    if res.StatusCode() == 302 {
+        data.DownloadUrl = res.Header().Get("location")
+    }
+
     return &model.Link{
         URL: data.DownloadUrl,
     }, nil
@@ -212,68 +240,88 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
-    file, err := utils.CreateTempFile(stream, stream.GetSize())
+    file, err := stream.CacheFullInTempFile()
     if err != nil {
         return nil, err
     }
     defer func() {
         _ = file.Close()
-        _ = os.Remove(file.Name())
     }()
 
-    initUpdload, err := d.client.InitMultiUpload(ctx, mopan.UpdloadFileParam{
+    // step.1
+    uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{
         ParentFolderId: dstDir.GetID(),
         FileName:       stream.GetName(),
         FileSize:       stream.GetSize(),
         File:           file,
-    }, mopan.WarpParamOption(
-        mopan.ParamOptionShareFile(d.CloudID),
-    ))
+    })
     if err != nil {
         return nil, err
     }
 
-    if !initUpdload.FileDataExists {
-        parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfo)
+    // try to restore saved upload progress
+    initUpdload, ok := base.GetUploadProgress[*mopan.InitMultiUploadData](d, d.client.Authorization, uploadPartData.FileMd5)
+    if !ok {
+        // step.2
+        initUpdload, err = d.client.InitMultiUpload(ctx, *uploadPartData, mopan.WarpParamOption(
+            mopan.ParamOptionShareFile(d.CloudID),
+        ))
         if err != nil {
             return nil, err
         }
-        d.client.CloudDiskStartBusiness()
+    }
+
+    if !initUpdload.FileDataExists {
+        utils.Log.Error(d.client.CloudDiskStartBusiness())
+
+        threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+            retry.Attempts(3),
+            retry.Delay(time.Second),
+            retry.DelayType(retry.BackOffDelay))
+
+        // step.3
+        parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
+        if err != nil {
+            return nil, err
+        }
+
         for i, part := range parts {
-            if utils.IsCanceled(ctx) {
-                return nil, ctx.Err()
+            if utils.IsCanceled(upCtx) {
+                break
             }
+            i, part, byteSize := i, part, initUpdload.PartSize
+            if part.PartNumber == uploadPartData.PartTotal {
+                byteSize = initUpdload.LastPartSize
+            }
 
-            err := retry.Do(func() error {
-                if _, err := file.Seek(int64(part.PartNumber-1)*int64(initUpdload.PartSize), io.SeekStart); err != nil {
-                    return retry.Unrecoverable(err)
-                }
-
-                req, err := part.NewRequest(ctx, io.LimitReader(file, int64(initUpdload.PartSize)))
+            // step.4
+            threadG.Go(func(ctx context.Context) error {
+                req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
                 if err != nil {
                     return err
                 }
 
                 resp, err := base.HttpClient.Do(req)
                 if err != nil {
                     return err
                 }
 
                 resp.Body.Close()
                 if resp.StatusCode != http.StatusOK {
                     return fmt.Errorf("upload err,code=%d", resp.StatusCode)
                 }
+                up(100 * float64(threadG.Success()) / float64(len(parts)))
+                initUpdload.PartInfos[i] = ""
                 return nil
-            },
-                retry.Context(ctx),
-                retry.Attempts(3),
-                retry.Delay(time.Second),
-                retry.MaxDelay(5*time.Second))
-            if err != nil {
-                return nil, err
-            }
-            up(100 * (i + 1) / len(parts))
+            })
         }
+        if err = threadG.Wait(); err != nil {
+            if errors.Is(err, context.Canceled) {
+                initUpdload.PartInfos = utils.SliceFilter(initUpdload.PartInfos, func(s string) bool { return s != "" })
+                base.SaveUploadProgress(d, initUpdload, d.client.Authorization, uploadPartData.FileMd5)
+            }
+            return nil, err
+        }
     }
+    // step.5
     uFile, err := d.client.CommitMultiUploadFile(initUpdload.UploadFileID, nil)
     if err != nil {
         return nil, err
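The new MoPan Put is organized as numbered steps: probe the part layout (step.1), resume a saved session or initialize one (step.2), fetch the presigned part URLs (step.3), upload parts concurrently while blanking out finished entries so a cancelled transfer can be persisted and resumed (step.4), then commit (step.5). A reduced sketch of the concurrent middle step, using golang.org/x/sync/errgroup in place of the project's pkg/errgroup and a plain slice of part URLs (assumed shapes, not the mopan SDK's types):

```go
package mopansketch

import (
	"context"
	"fmt"
	"io"
	"net/http"

	"golang.org/x/sync/errgroup"
)

// uploadParts PUTs file parts to presigned URLs with bounded concurrency.
// file is any io.ReaderAt (the cached temp file); urls[i] holds part i+1.
func uploadParts(ctx context.Context, file io.ReaderAt, urls []string, partSize, total int64, threads int) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(threads)
	for i, u := range urls {
		i, u := i, u // capture loop variables (pre-Go 1.22 habit)
		g.Go(func() error {
			off := int64(i) * partSize
			n := partSize
			if total-off < n {
				n = total - off // the last part may be short
			}
			req, err := http.NewRequestWithContext(ctx, http.MethodPut, u, io.NewSectionReader(file, off, n))
			if err != nil {
				return err
			}
			req.ContentLength = n
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return err
			}
			resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("part %d: status %d", i+1, resp.StatusCode)
			}
			return nil
		})
	}
	return g.Wait()
}
```

io.NewSectionReader is also what replaced the old Seek-plus-LimitReader pair in the real code: concurrent goroutines cannot share one file cursor, but independent SectionReaders over the same io.ReaderAt can.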
@@ -9,7 +9,7 @@ type Addition struct {
     Phone    string `json:"phone" required:"true"`
     Password string `json:"password" required:"true"`
 
-    RootFolderID string `json:"root_folder_id" default:"-11" required:"true" help:"be careful when using the -11 value, some operations may cause system errors"`
+    RootFolderID string `json:"root_folder_id" default:""`
 
     CloudID string `json:"cloud_id"`
 
@@ -17,6 +17,8 @@ type Addition struct {
     OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
 
     DeviceInfo string `json:"device_info"`
+
+    UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
 }
 
 func (a *Addition) GetRootId() string {
@@ -24,7 +26,7 @@ func (a *Addition) GetRootId() string {
 }
 
 var config = driver.Config{
-    Name: "MoPan",
+    Name:        "MoPan",
     // DefaultRoot: "root, / or other",
     CheckStatus: true,
     Alert:       "warning|This network disk may store your password in clear text. Please set your password carefully",
@@ -4,6 +4,7 @@ import (
     "time"
 
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/foxxorcat/mopan-sdk-go"
 )
 
@@ -14,6 +15,8 @@ func fileToObj(f mopan.File) model.Obj {
         Name:     f.Name,
         Size:     int64(f.Size),
         Modified: time.Time(f.LastOpTime),
+        Ctime:    time.Time(f.CreateDate),
+        HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
     },
     Thumbnail: model.Thumbnail{
         Thumbnail: f.Icon.SmallURL,
@@ -26,6 +29,7 @@ func folderToObj(f mopan.Folder) model.Obj {
         ID:       string(f.ID),
         Name:     f.Name,
         Modified: time.Time(f.LastOpTime),
+        Ctime:    time.Time(f.CreateDate),
         IsFolder: true,
     }
 }
@@ -37,6 +41,7 @@ func CloneObj(o model.Obj, newID, newName string) model.Obj {
         Name:     newName,
         IsFolder: true,
         Modified: o.ModTime(),
+        Ctime:    o.CreateTime(),
     }
 }
 
@@ -50,6 +55,8 @@ func CloneObj(o model.Obj, newID, newName string) model.Obj {
         Name:     newName,
         Size:     o.GetSize(),
         Modified: o.ModTime(),
+        Ctime:    o.CreateTime(),
+        HashInfo: o.GetHash(),
     },
     Thumbnail: model.Thumbnail{
         Thumbnail: thumb,
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "net/http"
+    "net/url"
     "path"
 
     "github.com/alist-org/alist/v3/drivers/base"
@@ -57,8 +58,17 @@ func (d *Onedrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs
     if f.File == nil {
         return nil, errs.NotFile
     }
+    u := f.Url
+    if d.CustomHost != "" {
+        _u, err := url.Parse(f.Url)
+        if err != nil {
+            return nil, err
+        }
+        _u.Host = d.CustomHost
+        u = _u.String()
+    }
     return &model.Link{
-        URL: f.Url,
+        URL: u,
     }, nil
 }
 
@@ -15,6 +15,7 @@ type Addition struct {
     RefreshToken string `json:"refresh_token" required:"true"`
     SiteId       string `json:"site_id"`
     ChunkSize    int64  `json:"chunk_size" type:"number" default:"5"`
+    CustomHost   string `json:"custom_host" help:"Custom host for onedrive download link"`
 }
 
 var config = driver.Config{
 
@@ -196,13 +196,14 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
     if err != nil {
         return err
     }
-    if res.StatusCode != 201 && res.StatusCode != 202 {
+    // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
+    if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
         data, _ := io.ReadAll(res.Body)
         res.Body.Close()
         return errors.New(string(data))
     }
     res.Body.Close()
-    up(int(finish * 100 / stream.GetSize()))
+    up(float64(finish) * 100 / float64(stream.GetSize()))
 }
 return nil
 }
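The CustomHost handling added here (and mirrored for OnedriveAPP just below) only swaps the host component of the pre-signed download URL, leaving path, query, and therefore the signature parameters untouched — useful for routing downloads through a proxy or CDN alias. The whole mechanism reduces to:

```go
package linksketch

import "net/url"

// withHost rewrites only the host of a download URL.
func withHost(raw, host string) (string, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	u.Host = host
	return u.String(), nil
}
```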
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "net/http"
+    "net/url"
     "path"
 
     "github.com/alist-org/alist/v3/drivers/base"
@@ -57,8 +58,17 @@ func (d *OnedriveAPP) Link(ctx context.Context, file model.Obj, args model.LinkA
     if f.File == nil {
         return nil, errs.NotFile
     }
+    u := f.Url
+    if d.CustomHost != "" {
+        _u, err := url.Parse(f.Url)
+        if err != nil {
+            return nil, err
+        }
+        _u.Host = d.CustomHost
+        u = _u.String()
+    }
     return &model.Link{
-        URL: f.Url,
+        URL: u,
     }, nil
 }
 
@@ -13,6 +13,7 @@ type Addition struct {
     TenantID  string `json:"tenant_id"`
     Email     string `json:"email"`
     ChunkSize int64  `json:"chunk_size" type:"number" default:"5"`
+    CustomHost string `json:"custom_host" help:"Custom host for onedrive download link"`
 }
 
 var config = driver.Config{
 
@@ -71,8 +71,8 @@ func (d *OnedriveAPP) _accessToken() error {
     "grant_type":    "client_credentials",
     "client_id":     d.ClientID,
     "client_secret": d.ClientSecret,
-    "resource":      "https://graph.microsoft.com/",
-    "scope":         "https://graph.microsoft.com/.default",
+    "resource":      onedriveHostMap[d.Region].Api + "/",
+    "scope":         onedriveHostMap[d.Region].Api + "/.default",
 }).Post(url)
 if err != nil {
     return err
@@ -187,13 +187,14 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
     if err != nil {
         return err
     }
-    if res.StatusCode != 201 && res.StatusCode != 202 {
+    // https://learn.microsoft.com/zh-cn/onedrive/developer/rest-api/api/driveitem_createuploadsession
+    if res.StatusCode != 201 && res.StatusCode != 202 && res.StatusCode != 200 {
         data, _ := io.ReadAll(res.Body)
         res.Body.Close()
         return errors.New(string(data))
     }
     res.Body.Close()
-    up(int(finish * 100 / stream.GetSize()))
+    up(float64(finish) * 100 / float64(stream.GetSize()))
 }
 return nil
 }
@@ -3,15 +3,14 @@ package pikpak
 
 import (
     "context"
     "fmt"
-    "io"
     "net/http"
-    "os"
     "strings"
 
     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/pkg/utils"
+    hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
     "github.com/aws/aws-sdk-go/aws"
     "github.com/aws/aws-sdk-go/aws/credentials"
     "github.com/aws/aws-sdk-go/aws/session"
@@ -124,23 +123,20 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
-    if err != nil {
-        return err
-    }
-    defer func() {
-        _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
-    }()
-    // cal gcid
-    sha1Str, err := getGcid(tempFile, stream.GetSize())
-    if err != nil {
-        return err
-    }
-    _, err = tempFile.Seek(0, io.SeekStart)
-    if err != nil {
-        return err
+    hi := stream.GetHash()
+    sha1Str := hi.GetHash(hash_extend.GCID)
+    if len(sha1Str) < hash_extend.GCID.Width {
+        tFile, err := stream.CacheFullInTempFile()
+        if err != nil {
+            return err
+        }
+
+        sha1Str, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+        if err != nil {
+            return err
+        }
     }
 
     var resp UploadTaskData
     res, err := d.request("https://api-drive.mypikpak.com/drive/v1/files", http.MethodPost, func(req *resty.Request) {
         req.SetBody(base.Json{
@@ -179,7 +175,7 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     input := &s3manager.UploadInput{
         Bucket: &params.Bucket,
         Key:    &params.Key,
-        Body:   tempFile,
+        Body:   stream,
     }
     _, err = uploader.UploadWithContext(ctx, input)
     return err
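PikPak's Put now trusts a GCID already attached to the stream's HashInfo and only caches and hashes the file when that value is missing or too short — skipping a full disk spool on the happy path. The shape of that fallback, with plain SHA-1 standing in for GCID (which is a block-wise SHA-1 variant, not reproduced here):

```go
package pikpaksketch

import (
	"crypto/sha1"
	"encoding/hex"
	"io"
	"os"
)

// ensureHash reuses a hash already known for the content; otherwise it
// rewinds the cached file and computes one.
func ensureHash(known string, f *os.File) (string, error) {
	if known != "" {
		return known, nil // already carried on the stream's hash info
	}
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return "", err
	}
	h := sha1.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
```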
@@ -5,6 +5,8 @@ import (
     "time"
 
     "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
 )
 
 type RespErr struct {
@@ -21,7 +23,9 @@ type File struct {
     Id   string `json:"id"`
     Kind string `json:"kind"`
     Name string `json:"name"`
+    CreatedTime  time.Time `json:"created_time"`
     ModifiedTime time.Time `json:"modified_time"`
+    Hash string `json:"hash"`
     Size           string `json:"size"`
     ThumbnailLink  string `json:"thumbnail_link"`
     WebContentLink string `json:"web_content_link"`
@@ -35,8 +39,10 @@ func fileToObj(f File) *model.ObjThumb {
         ID:       f.Id,
         Name:     f.Name,
         Size:     size,
+        Ctime:    f.CreatedTime,
         Modified: f.ModifiedTime,
         IsFolder: f.Kind == "drive#folder",
+        HashInfo: utils.NewHashInfo(hash_extend.GCID, f.Hash),
     },
     Thumbnail: model.Thumbnail{
         Thumbnail: f.ThumbnailLink,
@@ -7,7 +7,6 @@ import (
     "encoding/hex"
     "io"
     "net/http"
-    "os"
     "time"
 
     "github.com/alist-org/alist/v3/drivers/base"
@@ -75,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
         "User-Agent": []string{ua},
     },
     Concurrency: 2,
-    PartSize:    10 * 1024 * 1024,
+    PartSize:    10 * utils.MB,
 }, nil
 }
 
@@ -136,13 +135,12 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+    tempFile, err := stream.CacheFullInTempFile()
     if err != nil {
         return err
     }
     defer func() {
         _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
     }()
     m := md5.New()
     _, err = io.Copy(m, tempFile)
@@ -211,7 +209,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
     }
     md5s = append(md5s, m)
     partNumber++
-    up(int(100 * (total - left) / total))
+    up(100 * float64(total-left) / float64(total))
 }
 err = d.upCommit(pre, md5s)
 if err != nil {
@@ -10,6 +10,8 @@ import (
     "strings"
     "time"
 
+    "github.com/alist-org/alist/v3/internal/stream"
+
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/aws/aws-sdk-go/aws/session"
@@ -96,14 +98,14 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo
 func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
     return d.Put(ctx, &model.Object{
         Path: stdpath.Join(parentDir.GetPath(), dirName),
-    }, &model.FileStream{
+    }, &stream.FileStream{
         Obj: &model.Object{
             Name:     getPlaceholderName(d.Placeholder),
             Modified: time.Now(),
         },
-        ReadCloser: io.NopCloser(bytes.NewReader([]byte{})),
-        Mimetype:   "application/octet-stream",
-    }, func(int) {})
+        Reader:   io.NopCloser(bytes.NewReader([]byte{})),
+        Mimetype: "application/octet-stream",
+    }, func(float64) {})
 }
 
 func (d *S3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
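Context for the hunk above: S3 has no real directories, so MakeDir is implemented as a Put of an empty placeholder object under the new prefix; the change only ports it from model.FileStream to the new stream.FileStream and from the int progress callback to float64. The trick itself, reduced to a sketch with a hypothetical putObject helper:

```go
package s3sketch

import (
	"bytes"
	"io"
	"path"
)

// makeDir "creates" a folder on an object store by writing a zero-byte
// marker object under the prefix. putObject stands in for the real uploader
// (e.g. an s3manager call); ".placeholder" mirrors getPlaceholderName above.
func makeDir(prefix string, putObject func(key string, body io.Reader) error) error {
	return putObject(path.Join(prefix, ".placeholder"), bytes.NewReader(nil))
}
```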
@@ -56,7 +56,7 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
     return nil, err
 }
 link := &model.Link{
-    ReadSeekCloser: remoteFile,
+    MFile: remoteFile,
 }
 return link, nil
 }
@@ -61,6 +61,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
     Modified: f.ModTime(),
     Size:     f.Size(),
     IsFolder: f.IsDir(),
+    Ctime:    f.(*smb2.FileStat).CreationTime,
 },
 }
 files = append(files, &file)
@@ -79,7 +80,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
     return nil, err
 }
 link := &model.Link{
-    ReadSeekCloser: remoteFile,
+    MFile: remoteFile,
 }
 d.updateLastConnTime()
 return link, nil
@@ -189,7 +189,7 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
     if err != nil {
         return nil, err
     }
-    up(i * 100 / newChunk.Chunks)
+    up(float64(i) * 100 / float64(newChunk.Chunks))
 }
 _, err = base.RestyClient.R().SetHeader("Authorization", token).Post(
     fmt.Sprintf("https://%s.teambition.net/upload/chunk/%s",
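This one-line Teambition change is the same edit applied to Mega, OneDrive, Quark/UC, and Terabox throughout this compare: driver.UpdateProgress now takes a float64 percentage rather than an int. Integer division truncated small fractions to zero, so on large files the reported progress could sit at 0 for entire chunks:

```go
package main

import "fmt"

func main() {
	total := int64(30) * 1024 * 1024 * 1024 // a 30 GiB file
	done := int64(4) * 1024 * 1024          // one finished 4 MiB chunk

	fmt.Println(int(done * 100 / total))              // 0 — truncated, the bar never moves
	fmt.Println(float64(done) * 100 / float64(total)) // ≈0.013 — smooth progress
}
```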
@@ -41,24 +41,24 @@ func (d *Template) Link(ctx context.Context, file model.Obj, args model.LinkArgs
     return nil, errs.NotImplement
 }
 
-func (d *Template) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+func (d *Template) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
     // TODO create folder, optional
-    return errs.NotImplement
+    return nil, errs.NotImplement
 }
 
-func (d *Template) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Template) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
     // TODO move obj, optional
-    return errs.NotImplement
+    return nil, errs.NotImplement
 }
 
-func (d *Template) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+func (d *Template) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
     // TODO rename obj, optional
-    return errs.NotImplement
+    return nil, errs.NotImplement
 }
 
-func (d *Template) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+func (d *Template) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
     // TODO copy obj, optional
-    return errs.NotImplement
+    return nil, errs.NotImplement
 }
 
 func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
@@ -66,9 +66,9 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
     return errs.NotImplement
 }
 
-func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
     // TODO upload file, optional
-    return errs.NotImplement
+    return nil, errs.NotImplement
 }
 
 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
@@ -1,4 +1,4 @@
-package terbox
+package terabox
 
 import (
     "bytes"
@@ -6,16 +6,16 @@ import (
     "crypto/md5"
     "encoding/hex"
     "fmt"
-    "github.com/alist-org/alist/v3/drivers/base"
-    "github.com/alist-org/alist/v3/pkg/utils"
-    log "github.com/sirupsen/logrus"
     "io"
     "math"
     "os"
     stdpath "path"
     "strconv"
     "strings"
 
+    "github.com/alist-org/alist/v3/drivers/base"
+    "github.com/alist-org/alist/v3/pkg/utils"
+    log "github.com/sirupsen/logrus"
+
     "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/model"
 )
@@ -23,6 +23,7 @@ import (
 type Terabox struct {
     model.Storage
     Addition
+    JsToken string
 }
 
 func (d *Terabox) Config() driver.Config {
@@ -116,14 +117,10 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
 }
 
 func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
+    tempFile, err := stream.CacheFullInTempFile()
     if err != nil {
         return err
     }
-    defer func() {
-        _ = tempFile.Close()
-        _ = os.Remove(tempFile.Name())
-    }()
     var Default int64 = 4 * 1024 * 1024
     defaultByteData := make([]byte, Default)
     count := int(math.Ceil(float64(stream.GetSize()) / float64(Default)))
@@ -170,6 +167,9 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
         return err
     }
     log.Debugf("%+v", precreateResp)
+    if precreateResp.Errno != 0 {
+        return fmt.Errorf("[terabox] failed to precreate file, errno: %d", precreateResp.Errno)
+    }
     if precreateResp.ReturnType == 2 {
         return nil
     }
@@ -213,7 +213,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
     }
     log.Debugln(res.String())
     if len(precreateResp.BlockList) > 0 {
-        up(i * 100 / len(precreateResp.BlockList))
+        up(float64(i) * 100 / float64(len(precreateResp.BlockList)))
     }
 }
 _, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)