Compare commits

46 Commits

Author | SHA1 | Date
---|---|---
| 3c66db9845 |
| f6ab1f7f61 |
| 8e40465e86 |
| 37dffd0fce |
| e7c0d94b44 |
| 8102142007 |
| 7c6dec5d47 |
| dd10c0c5d0 |
| 34fadecc2c |
| cb8867fcc1 |
| 092ed06833 |
| 6308f1c35d |
| ce10c9f120 |
| 6c4736fc8f |
| b301b791c7 |
| 19d34e2eb8 |
| a3748af772 |
| 9b765ef696 |
| 8f493cccc4 |
| 31a033dff1 |
| 8c3337b88b |
| 7238243664 |
| ba2b15ab24 |
| 28dc8822b7 |
| 358c5055e9 |
| b6cd40e6d3 |
| 7d96d8070d |
| d482fb5f26 |
| 60402ce1fc |
| 1e3950c847 |
| ed550594da |
| 3bbae29f93 |
| 3b74f8cd9a |
| e9bdb91e01 |
| 1aa024ed6b |
| 13e8d36e1a |
| 5606c23768 |
| 0b675d6c02 |
| c1db3a36ad |
| c59dbb4f9e |
| df6b306fce |
| 9d45718e5f |
| b91ed7a78a |
| 95386d777b |
| 635809c376 |
| af6bb2a6aa |
.github/FUNDING.yml (vendored, 2 changes)

@@ -3,7 +3,7 @@
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
ko_fi: xhofe # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, 4 changes)

@@ -22,8 +22,8 @@ body:
          I'm sure there are no duplicate issues or discussions.
          我确定没有重复的issue或讨论。
      - label: |
          I'm sure it's due to `AList` and not something else(such as `Dependencies` or `Operational`).
          我确定是`AList`的问题,而不是其他原因(例如`依赖`或`操作`)。
          I'm sure it's due to `AList` and not something else(such as [Network](https://alist.nn.ci/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host) ,`Dependencies` or `Operational`).
          我确定是`AList`的问题,而不是其他原因(例如[网络](https://alist.nn.ci/zh/faq/howto.html#tls-handshake-timeout-read-connection-reset-by-peer-dns-lookup-failed-connect-connection-refused-client-timeout-exceeded-while-awaiting-headers-no-such-host),`依赖`或`操作`)。
      - label: |
          I'm sure this issue is not fixed in the latest version.
          我确定这个问题在最新版本中没有被修复。
.github/workflows/issue_question.yml (vendored, 2 changes)

@@ -10,7 +10,7 @@ jobs:
    if: github.event.label.name == 'question'
    steps:
      - name: Create comment
        uses: actions-cool/issues-helper@v3.5.1
        uses: actions-cool/issues-helper@v3.5.2
        with:
          actions: 'create-comment'
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/release_linux_musl.yml (vendored, new file, 34 additions)

@@ -0,0 +1,34 @@
name: release_linux_musl

on:
  release:
    types: [ published ]

jobs:
  release_linux_musl:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
        go-version: [ '1.20' ]
    name: Release
    runs-on: ${{ matrix.platform }}
    steps:

      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}

      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Build
        run: |
          bash build.sh release linux_musl

      - name: Upload assets
        uses: softprops/action-gh-release@v1
        with:
          files: build/compress/*
.github/workflows/release_linux_musl_arm.yml (vendored, 2 changes)

@@ -5,7 +5,7 @@ on:
    types: [ published ]

jobs:
  release_arm:
  release_linux_musl_arm:
    strategy:
      matrix:
        platform: [ ubuntu-latest ]
build.sh (22 changes)

@@ -89,6 +89,18 @@ BuildDocker() {
}

BuildRelease() {
    rm -rf .git/
    mkdir -p "build"
    BuildWinArm64 ./build/alist-windows-arm64.exe
    xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
    # why? Because some target platforms seem to have issues with upx compression
    upx -9 ./alist-linux-amd64
    cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
    upx -9 ./alist-windows-amd64-upx.exe
    mv alist-* build
}

BuildReleaseLinuxMusl() {
    rm -rf .git/
    mkdir -p "build"
    muslflags="--extldflags '-static -fpic' $ldflags"

@@ -112,13 +124,6 @@ BuildRelease() {
        export CGO_ENABLED=1
        go build -o ./build/$appName-$os_arch -ldflags="$muslflags" -tags=jsoniter .
    done
    BuildWinArm64 ./build/alist-windows-arm64.exe
    xgo -out "$appName" -ldflags="$ldflags" -tags=jsoniter .
    # why? Because some target platforms seem to have issues with upx compression
    upx -9 ./alist-linux-amd64
    cp ./alist-windows-amd64.exe ./alist-windows-amd64-upx.exe
    upx -9 ./alist-windows-amd64-upx.exe
    mv alist-* build
}

BuildReleaseLinuxMuslArm() {

@@ -192,6 +197,9 @@ elif [ "$1" = "release" ]; then
elif [ "$2" = "linux_musl_arm" ]; then
    BuildReleaseLinuxMuslArm
    MakeRelease "md5-linux-musl-arm.txt"
elif [ "$2" = "linux_musl" ]; then
    BuildReleaseLinuxMusl
    MakeRelease "md5-linux-musl.txt"
else
    BuildRelease
    MakeRelease "md5.txt"
cmd/storage.go (151 changes)

@@ -4,8 +4,14 @@ Copyright © 2023 NAME HERE <EMAIL ADDRESS>
package cmd

import (
    "os"
    "strconv"

    "github.com/alist-org/alist/v3/internal/db"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/charmbracelet/bubbles/table"
    tea "github.com/charmbracelet/bubbletea"
    "github.com/charmbracelet/lipgloss"
    "github.com/spf13/cobra"
)

@@ -15,31 +21,134 @@ var storageCmd = &cobra.Command{
    Short: "Manage storage",
}

func init() {
    var mountPath string
    var disable = &cobra.Command{
        Use:   "disable",
        Short: "Disable a storage",
        Run: func(cmd *cobra.Command, args []string) {
            Init()
            storage, err := db.GetStorageByMountPath(mountPath)
var disableStorageCmd = &cobra.Command{
    Use:   "disable",
    Short: "Disable a storage",
    Run: func(cmd *cobra.Command, args []string) {
        if len(args) < 1 {
            utils.Log.Errorf("mount path is required")
            return
        }
        mountPath := args[0]
        Init()
        storage, err := db.GetStorageByMountPath(mountPath)
        if err != nil {
            utils.Log.Errorf("failed to query storage: %+v", err)
        } else {
            storage.Disabled = true
            err = db.UpdateStorage(storage)
            if err != nil {
                utils.Log.Errorf("failed to query storage: %+v", err)
                utils.Log.Errorf("failed to update storage: %+v", err)
            } else {
                storage.Disabled = true
                err = db.UpdateStorage(storage)
                if err != nil {
                    utils.Log.Errorf("failed to update storage: %+v", err)
                } else {
                    utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
                }
                utils.Log.Infof("Storage with mount path [%s] have been disabled", mountPath)
            }
        },
    }
    disable.Flags().StringVarP(&mountPath, "mount-path", "m", "", "The mountPath of storage")
    RootCmd.AddCommand(storageCmd)
    storageCmd.AddCommand(disable)
}
    },
}

var baseStyle = lipgloss.NewStyle().
    BorderStyle(lipgloss.NormalBorder()).
    BorderForeground(lipgloss.Color("240"))

type model struct {
    table table.Model
}

func (m model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    var cmd tea.Cmd
    switch msg := msg.(type) {
    case tea.KeyMsg:
        switch msg.String() {
        case "esc":
            if m.table.Focused() {
                m.table.Blur()
            } else {
                m.table.Focus()
            }
        case "q", "ctrl+c":
            return m, tea.Quit
        //case "enter":
        //	return m, tea.Batch(
        //		tea.Printf("Let's go to %s!", m.table.SelectedRow()[1]),
        //	)
        }
    }
    m.table, cmd = m.table.Update(msg)
    return m, cmd
}

func (m model) View() string {
    return baseStyle.Render(m.table.View()) + "\n"
}

var storageTableHeight int
var listStorageCmd = &cobra.Command{
    Use:   "list",
    Short: "List all storages",
    Run: func(cmd *cobra.Command, args []string) {
        Init()
        storages, _, err := db.GetStorages(1, -1)
        if err != nil {
            utils.Log.Errorf("failed to query storages: %+v", err)
        } else {
            utils.Log.Infof("Found %d storages", len(storages))
            columns := []table.Column{
                {Title: "ID", Width: 4},
                {Title: "Driver", Width: 16},
                {Title: "Mount Path", Width: 30},
                {Title: "Enabled", Width: 7},
            }

            var rows []table.Row
            for i := range storages {
                storage := storages[i]
                enabled := "true"
                if storage.Disabled {
                    enabled = "false"
                }
                rows = append(rows, table.Row{
                    strconv.Itoa(int(storage.ID)),
                    storage.Driver,
                    storage.MountPath,
                    enabled,
                })
            }
            t := table.New(
                table.WithColumns(columns),
                table.WithRows(rows),
                table.WithFocused(true),
                table.WithHeight(storageTableHeight),
            )

            s := table.DefaultStyles()
            s.Header = s.Header.
                BorderStyle(lipgloss.NormalBorder()).
                BorderForeground(lipgloss.Color("240")).
                BorderBottom(true).
                Bold(false)
            s.Selected = s.Selected.
                Foreground(lipgloss.Color("229")).
                Background(lipgloss.Color("57")).
                Bold(false)
            t.SetStyles(s)

            m := model{t}
            if _, err := tea.NewProgram(m).Run(); err != nil {
                utils.Log.Errorf("failed to run program: %+v", err)
                os.Exit(1)
            }
        }
    },
}

func init() {

    RootCmd.AddCommand(storageCmd)
    storageCmd.AddCommand(disableStorageCmd)
    storageCmd.AddCommand(listStorageCmd)
    storageCmd.PersistentFlags().IntVarP(&storageTableHeight, "height", "H", 10, "Table height")
    // Here you will define your flags and configuration settings.

    // Cobra supports Persistent Flags which will work for this command
@@ -2,13 +2,13 @@ package _115

import (
    "context"
    "os"

    driver115 "github.com/SheltonZhu/115driver/pkg/driver"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/pkg/errors"
    "strings"
)

type Pan115 struct {

@@ -38,15 +38,15 @@ func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
    if err != nil && !errors.Is(err, driver115.ErrNotExist) {
        return nil, err
    }
    return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
        return src, nil
    return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
        return &src, nil
    })
}

func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    downloadInfo, err := d.client.
        SetUserAgent(driver115.UA115Browser).
        Download(file.(driver115.File).PickCode)
        Download(file.(*FileObj).PickCode)
    // recover for upload
    d.client.SetUserAgent(driver115.UA115Desktop)
    if err != nil {

@@ -83,15 +83,67 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
    var (
        fastInfo *driver115.UploadInitResp
        dirID    = dstDir.GetID()
    )

    if ok, err := d.client.UploadAvailable(); err != nil || !ok {
        return err
    }
    if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
        return driver115.ErrUploadTooLarge
    }
    //if digest, err = d.client.GetDigestResult(stream); err != nil {
    //	return err
    //}

    const PreHashSize int64 = 128 * utils.KB
    hashSize := PreHashSize
    if stream.GetSize() < PreHashSize {
        hashSize = stream.GetSize()
    }
    reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
    if err != nil {
        return err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()
    return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
    preHash, err := utils.HashReader(utils.SHA1, reader)
    if err != nil {
        return err
    }
    preHash = strings.ToUpper(preHash)
    fullHash := stream.GetHash().GetHash(utils.SHA1)
    if len(fullHash) <= 0 {
        tmpF, err := stream.CacheFullInTempFile()
        if err != nil {
            return err
        }
        fullHash, err = utils.HashFile(utils.SHA1, tmpF)
        if err != nil {
            return err
        }
    }
    fullHash = strings.ToUpper(fullHash)

    // rapid-upload
    // note that 115 add timeout for rapid-upload,
    // and "sig invalid" err is thrown even when the hash is correct after timeout.
    if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
        return err
    }
    if matched, err := fastInfo.Ok(); err != nil {
        return err
    } else if matched {
        return nil
    }

    // 闪传失败,上传
    if stream.GetSize() <= utils.KB { // 文件大小小于1KB,改用普通模式上传
        return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
    }
    // 分片上传
    return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)

}

var _ driver.Driver = (*Pan115)(nil)
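The new Put flow reads only the first 128 KB of the stream to compute 115's SHA-1 "pre-hash" before deciding whether the full-file hash (and a temp-file spill) is needed at all. A minimal standalone sketch of just that step, using only the standard library; the function name and shape here are illustrative, not AList's API:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

const preHashSize = 128 * 1024 // 128 KiB, matching PreHashSize above

// preHash hashes at most the first 128 KiB of r, mirroring the
// RangeRead + HashReader step in Pan115.Put.
func preHash(r io.Reader, size int64) (string, error) {
	n := int64(preHashSize)
	if size < n {
		n = size
	}
	h := sha1.New()
	if _, err := io.CopyN(h, r, n); err != nil && err != io.EOF {
		return "", err
	}
	return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
	s := strings.NewReader("example payload")
	sum, _ := preHash(s, int64(s.Len()))
	fmt.Println(sum)
}
```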
@@ -3,6 +3,20 @@ package _115
import (
    "github.com/SheltonZhu/115driver/pkg/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "time"
)

var _ model.Obj = (*driver.File)(nil)
var _ model.Obj = (*FileObj)(nil)

type FileObj struct {
    driver.File
}

func (f *FileObj) CreateTime() time.Time {
    return f.File.CreateTime
}

func (f *FileObj) GetHash() utils.HashInfo {
    return utils.NewHashInfo(utils.SHA1, f.Sha1)
}
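The two `var _ model.Obj = ...` lines are compile-time assertions: they cost nothing at runtime but force a build error if the type ever stops satisfying the interface. A toy illustration of the idiom, with made-up types for the example:

```go
package main

import "fmt"

type Obj interface {
	GetName() string
}

type FileObj struct{ name string }

func (f *FileObj) GetName() string { return f.name }

// Compile-time check: *FileObj must implement Obj, or the build fails here.
var _ Obj = (*FileObj)(nil)

func main() {
	fmt.Println((&FileObj{name: "a.txt"}).GetName())
}
```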
@@ -1,10 +1,25 @@
package _115

import (
    "bytes"
    "crypto/tls"
    "encoding/json"
    "fmt"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/orzogc/fake115uploader/cipher"
    "io"
    "net/url"
    "path/filepath"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/SheltonZhu/115driver/pkg/driver"
    driver115 "github.com/SheltonZhu/115driver/pkg/driver"
    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/pkg/errors"
)
@@ -41,8 +56,8 @@ func (d *Pan115) login() error {
    return d.client.LoginCheck()
}

func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
    res := make([]driver.File, 0)
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
    res := make([]FileObj, 0)
    if d.PageSize <= 0 {
        d.PageSize = driver.FileListLimit
    }
@@ -51,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
        return nil, err
    }
    for _, file := range *files {
        res = append(res, file)
        res = append(res, FileObj{file})
    }
    return res, nil
}

const (
    appVer = "2.0.3.6"
)

func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
    var (
        ecdhCipher   *cipher.EcdhCipher
        encrypted    []byte
        decrypted    []byte
        encodedToken string
        err          error
        target       = "U_1_" + dirID
        bodyBytes    []byte
        result       = driver115.UploadInitResp{}
        fileSizeStr  = strconv.FormatInt(fileSize, 10)
    )
    if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
        return nil, err
    }

    userID := strconv.FormatInt(d.client.UserID, 10)
    form := url.Values{}
    form.Set("appid", "0")
    form.Set("appversion", appVer)
    form.Set("userid", userID)
    form.Set("filename", fileName)
    form.Set("filesize", fileSizeStr)
    form.Set("fileid", fileID)
    form.Set("target", target)
    form.Set("sig", d.client.GenerateSignature(fileID, target))

    signKey, signVal := "", ""
    for retry := true; retry; {
        t := driver115.Now()

        if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
            return nil, err
        }

        params := map[string]string{
            "k_ec": encodedToken,
        }

        form.Set("t", t.String())
        form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
        if signKey != "" && signVal != "" {
            form.Set("sign_key", signKey)
            form.Set("sign_val", signVal)
        }
        if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
            return nil, err
        }

        req := d.client.NewRequest().
            SetQueryParams(params).
            SetBody(encrypted).
            SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
            SetDoNotParseResponse(true)
        resp, err := req.Post(driver115.ApiUploadInit)
        if err != nil {
            return nil, err
        }
        data := resp.RawBody()
        defer data.Close()
        if bodyBytes, err = io.ReadAll(data); err != nil {
            return nil, err
        }
        if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
            return nil, err
        }
        if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
            return nil, err
        }
        if result.Status == 7 {
            // Update signKey & signVal
            signKey = result.SignKey
            signVal, err = UploadDigestRange(stream, result.SignCheck)
            if err != nil {
                return nil, err
            }
        } else {
            retry = false
        }
        result.SHA1 = fileID
    }

    return &result, nil
}

func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
    var start, end int64
    if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
        return
    }

    length := end - start + 1
    reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
    hashStr, err := utils.HashReader(utils.SHA1, reader)
    if err != nil {
        return "", err
    }
    result = strings.ToUpper(hashStr)
    return
}

// UploadByMultipart upload by mutipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
    var (
        chunks    []oss.FileChunk
        parts     []oss.UploadPart
        imur      oss.InitiateMultipartUploadResult
        ossClient *oss.Client
        bucket    *oss.Bucket
        ossToken  *driver115.UploadOSSTokenResp
        err       error
    )

    tmpF, err := stream.CacheFullInTempFile()
    if err != nil {
        return err
    }

    options := driver115.DefalutUploadMultipartOptions()
    if len(opts) > 0 {
        for _, f := range opts {
            f(options)
        }
    }

    if ossToken, err = d.client.GetOSSToken(); err != nil {
        return err
    }

    if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
        return err
    }

    if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
        return err
    }

    // ossToken一小时后就会失效,所以每50分钟重新获取一次
    ticker := time.NewTicker(options.TokenRefreshTime)
    defer ticker.Stop()
    // 设置超时
    timeout := time.NewTimer(options.Timeout)

    if chunks, err = SplitFile(fileSize); err != nil {
        return err
    }

    if imur, err = bucket.InitiateMultipartUpload(params.Object,
        oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
        oss.UserAgentHeader(driver115.OSSUserAgent),
    ); err != nil {
        return err
    }

    wg := sync.WaitGroup{}
    wg.Add(len(chunks))

    chunksCh := make(chan oss.FileChunk)
    errCh := make(chan error)
    UploadedPartsCh := make(chan oss.UploadPart)
    quit := make(chan struct{})

    // producer
    go chunksProducer(chunksCh, chunks)
    go func() {
        wg.Wait()
        quit <- struct{}{}
    }()

    // consumers
    for i := 0; i < options.ThreadsNum; i++ {
        go func(threadId int) {
            defer func() {
                if r := recover(); r != nil {
                    errCh <- fmt.Errorf("Recovered in %v", r)
                }
            }()
            for chunk := range chunksCh {
                var part oss.UploadPart // 出现错误就继续尝试,共尝试3次
                for retry := 0; retry < 3; retry++ {
                    select {
                    case <-ticker.C:
                        if ossToken, err = d.client.GetOSSToken(); err != nil { // 到时重新获取ossToken
                            errCh <- errors.Wrap(err, "刷新token时出现错误")
                        }
                    default:
                    }

                    buf := make([]byte, chunk.Size)
                    if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
                        continue
                    }

                    b := bytes.NewBuffer(buf)
                    if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
                        break
                    }
                }
                if err != nil {
                    errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
                }
                UploadedPartsCh <- part
            }
        }(i)
    }

    go func() {
        for part := range UploadedPartsCh {
            parts = append(parts, part)
            wg.Done()
        }
    }()
LOOP:
    for {
        select {
        case <-ticker.C:
            // 到时重新获取ossToken
            if ossToken, err = d.client.GetOSSToken(); err != nil {
                return err
            }
        case <-quit:
            break LOOP
        case <-errCh:
            return err
        case <-timeout.C:
            return fmt.Errorf("time out")
        }
    }

    // EOF错误是xml的Unmarshal导致的,响应其实是json格式,所以实际上上传是成功的
    if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
        // 当文件名含有 &< 这两个字符之一时响应的xml解析会出现错误,实际上上传是成功的
        if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
            return err
        }
    }
    return d.checkUploadStatus(dirID, params.SHA1)
}
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
    for _, chunk := range chunks {
        ch <- chunk
    }
}
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
    // 验证上传是否成功
    req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
    opts := []driver115.GetFileOptions{
        driver115.WithOrder(driver115.FileOrderByTime),
        driver115.WithShowDirEnable(false),
        driver115.WithAsc(false),
        driver115.WithLimit(500),
    }
    fResp, err := driver115.GetFiles(req, dirID, opts...)
    if err != nil {
        return err
    }
    for _, fileInfo := range fResp.Files {
        if fileInfo.Sha1 == sha1 {
            return nil
        }
    }
    return driver115.ErrUploadFailed
}

func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
    for i := int64(1); i < 10; i++ {
        if fileSize < i*utils.GB { // 文件大小小于iGB时分为i*1000片
            if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
                return
            }
            break
        }
    }
    if fileSize > 9*utils.GB { // 文件大小大于9GB时分为10000片
        if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
            return
        }
    }
    // 单个分片大小不能小于100KB
    if chunks[0].Size < 100*utils.KB {
        if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
            return
        }
    }
    return
}

// SplitFileByPartNum splits big file into parts by the num of parts.
// Split the file with specified parts count, returns the split result when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
    if chunkNum <= 0 || chunkNum > 10000 {
        return nil, errors.New("chunkNum invalid")
    }

    if int64(chunkNum) > fileSize {
        return nil, errors.New("oss: chunkNum invalid")
    }

    var chunks []oss.FileChunk
    var chunk = oss.FileChunk{}
    var chunkN = (int64)(chunkNum)
    for i := int64(0); i < chunkN; i++ {
        chunk.Number = int(i + 1)
        chunk.Offset = i * (fileSize / chunkN)
        if i == chunkN-1 {
            chunk.Size = fileSize/chunkN + fileSize%chunkN
        } else {
            chunk.Size = fileSize / chunkN
        }
        chunks = append(chunks, chunk)
    }

    return chunks, nil
}

// SplitFileByPartSize splits big file into parts by the size of parts.
// Splits the file by the part size. Returns the FileChunk when error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
    if chunkSize <= 0 {
        return nil, errors.New("chunkSize invalid")
    }

    var chunkN = fileSize / chunkSize
    if chunkN >= 10000 {
        return nil, errors.New("Too many parts, please increase part size")
    }

    var chunks []oss.FileChunk
    var chunk = oss.FileChunk{}
    for i := int64(0); i < chunkN; i++ {
        chunk.Number = int(i + 1)
        chunk.Offset = i * chunkSize
        chunk.Size = chunkSize
        chunks = append(chunks, chunk)
    }

    if fileSize%chunkSize > 0 {
        chunk.Number = len(chunks) + 1
        chunk.Offset = int64(len(chunks)) * chunkSize
        chunk.Size = fileSize % chunkSize
        chunks = append(chunks, chunk)
    }

    return chunks, nil
}
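To see what `SplitFileByPartNum` actually produces, here is a small, self-contained re-implementation of its offset/size arithmetic (equal chunks, remainder folded into the last one), runnable outside the driver; types and names are simplified for the sketch:

```go
package main

import "fmt"

type chunk struct {
	Number int
	Offset int64
	Size   int64
}

// splitByPartNum mirrors SplitFileByPartNum above: equal-sized chunks,
// with the remainder folded into the final one.
func splitByPartNum(fileSize int64, chunkNum int) []chunk {
	n := int64(chunkNum)
	chunks := make([]chunk, 0, chunkNum)
	for i := int64(0); i < n; i++ {
		c := chunk{Number: int(i + 1), Offset: i * (fileSize / n), Size: fileSize / n}
		if i == n-1 {
			c.Size += fileSize % n
		}
		chunks = append(chunks, c)
	}
	return chunks
}

func main() {
	for _, c := range splitByPartNum(10<<20+3, 4) { // 10 MiB + 3 bytes into 4 parts
		fmt.Printf("part %d: offset=%d size=%d\n", c.Number, c.Offset, c.Size)
	}
}
```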
@@ -6,11 +6,6 @@ import (
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "os"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"

@@ -22,6 +17,9 @@ import (
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
    "io"
    "net/http"
    "net/url"
)

type Pan123 struct {

@@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
    // const DEFAULT int64 = 10485760
    h := md5.New()
    // need to calculate md5 of the full content
    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
    tempFile, err := stream.CacheFullInTempFile()
    if err != nil {
        return err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()
    if _, err = io.Copy(h, tempFile); err != nil {
        return err
@@ -1,6 +1,7 @@
package _123

import (
    "github.com/alist-org/alist/v3/pkg/utils"
    "net/url"
    "path"
    "strconv"

@@ -21,6 +22,14 @@ type File struct {
    DownloadUrl string `json:"DownloadUrl"`
}

func (f File) CreateTime() time.Time {
    return f.UpdateAt
}

func (f File) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}

func (f File) GetPath() string {
    return ""
}
@@ -1,6 +1,7 @@
package _123Share

import (
    "github.com/alist-org/alist/v3/pkg/utils"
    "net/url"
    "path"
    "strconv"

@@ -21,6 +22,10 @@ type File struct {
    DownloadUrl string `json:"DownloadUrl"`
}

func (f File) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}

func (f File) GetPath() string {
    return ""
}

@@ -36,6 +41,9 @@ func (f File) GetName() string {
func (f File) ModTime() time.Time {
    return f.UpdateAt
}
func (f File) CreateTime() time.Time {
    return f.UpdateAt
}

func (f File) IsDir() bool {
    return f.Type == 1
@@ -3,6 +3,7 @@ package _189pc
import (
    "context"
    "net/http"
    "strconv"
    "strings"
    "time"

@@ -24,6 +25,8 @@ type Cloud189PC struct {

    loginParam *LoginParam
    tokenInfo  *AppSessionResp

    uploadThread int
}

func (y *Cloud189PC) Config() driver.Config {

@@ -44,6 +47,12 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) {
        y.FamilyID = ""
    }

    // 限制上传线程数
    y.uploadThread, _ = strconv.Atoi(y.UploadThread)
    if y.uploadThread < 1 || y.uploadThread > 32 {
        y.uploadThread, y.UploadThread = 3, "3"
    }

    // 初始化请求客户端
    if y.client == nil {
        y.client = base.NewRestyClient().SetHeaders(map[string]string{
@@ -160,9 +160,8 @@ func toDesc(o string) string {
func ParseHttpHeader(str string) map[string]string {
    header := make(map[string]string)
    for _, value := range strings.Split(str, "&") {
        i := strings.Index(value, "=")
        if i > 0 {
            header[strings.TrimSpace(value[0:i])] = strings.TrimSpace(value[i+1:])
        if k, v, found := strings.Cut(value, "="); found {
            header[k] = v
        }
    }
    return header
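The rewrite swaps the manual `strings.Index` bookkeeping for `strings.Cut` (Go 1.18+), which returns the text before and after the first separator plus a found flag. Note that the new version no longer trims whitespace around keys and values. A quick demonstration on the same `k=v&k=v` shape the RequestHeader strings use:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same shape as the RequestHeader strings parsed above: k=v pairs joined by '&'.
	for _, pair := range strings.Split("Host=example.com&x-oss-date=Tue", "&") {
		if k, v, found := strings.Cut(pair, "="); found {
			fmt.Printf("%s -> %s\n", k, v)
		}
	}
}
```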
@@ -15,6 +15,7 @@ type Addition struct {
    Type         string `json:"type" type:"select" options:"personal,family" default:"personal"`
    FamilyID     string `json:"family_id"`
    UploadMethod string `json:"upload_method" type:"select" options:"stream,rapid,old" default:"stream"`
    UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
    NoUseOcr     bool   `json:"no_use_ocr"`
}
@@ -3,6 +3,7 @@ package _189pc
import (
    "encoding/xml"
    "fmt"
    "github.com/alist-org/alist/v3/pkg/utils"
    "sort"
    "strings"
    "time"

@@ -175,6 +176,14 @@ type Cloud189File struct {
    // StarLabel int64 `json:"starLabel"`
}

func (c *Cloud189File) CreateTime() time.Time {
    return time.Time(c.CreateDate)
}

func (c *Cloud189File) GetHash() utils.HashInfo {
    return utils.NewHashInfo(utils.MD5, c.Md5)
}

func (c *Cloud189File) GetSize() int64     { return c.Size }
func (c *Cloud189File) GetName() string    { return c.Name }
func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }

@@ -199,6 +208,14 @@ type Cloud189Folder struct {
    // StarLabel int64 `json:"starLabel"`
}

func (c *Cloud189Folder) CreateTime() time.Time {
    return time.Time(c.CreateDate)
}

func (c *Cloud189Folder) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}

func (c *Cloud189Folder) GetSize() int64     { return 0 }
func (c *Cloud189Folder) GetName() string    { return c.Name }
func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }

@@ -239,14 +256,25 @@ type InitMultiUploadResp struct {
    } `json:"data"`
}
type UploadUrlsResp struct {
    Code       string          `json:"code"`
    UploadUrls map[string]Part `json:"uploadUrls"`
    Code string                    `json:"code"`
    Data map[string]UploadUrlsData `json:"uploadUrls"`
}
type Part struct {
type UploadUrlsData struct {
    RequestURL    string `json:"requestURL"`
    RequestHeader string `json:"requestHeader"`
}

type UploadUrlInfo struct {
    PartNumber int
    Headers    map[string]string
    UploadUrlsData
}

type UploadProgress struct {
    UploadInfo  InitMultiUploadResp
    UploadParts []string
}

/* 第二种上传方式 */
type CreateUploadFileResp struct {
    // 上传文件请求ID
@@ -13,8 +13,9 @@ import (
    "net/http"
    "net/http/cookiejar"
    "net/url"
    "os"
    "regexp"
    "sort"
    "strconv"
    "strings"
    "time"

@@ -24,6 +25,7 @@ import (
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/setting"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"

    "github.com/avast/retry-go"
@@ -436,14 +438,18 @@ func (y *Cloud189PC) refreshSession() (err error) {
// 普通上传
// 无法上传大小为0的文件
func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    var DEFAULT = partSize(file.GetSize())
    var count = int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
    var sliceSize = partSize(file.GetSize())
    count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
    lastPartSize := file.GetSize() % sliceSize
    if file.GetSize() > 0 && lastPartSize == 0 {
        lastPartSize = sliceSize
    }

    params := Params{
        "parentFolderId": dstDir.GetID(),
        "fileName":       url.QueryEscape(file.GetName()),
        "fileSize":       fmt.Sprint(file.GetSize()),
        "sliceSize":      fmt.Sprint(DEFAULT),
        "sliceSize":      fmt.Sprint(sliceSize),
        "lazyCheck":      "1",
    }
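The new head of StreamUpload sizes the final slice explicitly instead of relying on a short read: ceil-division for the part count, and a remainder that falls back to a full slice when the size divides evenly. The arithmetic in isolation, as a hedged sketch:

```go
package main

import "fmt"

// partCounts mirrors the sliceSize/lastPartSize computation in StreamUpload.
func partCounts(fileSize, sliceSize int64) (count, lastPartSize int64) {
	count = (fileSize + sliceSize - 1) / sliceSize // integer ceil-division
	lastPartSize = fileSize % sliceSize
	if fileSize > 0 && lastPartSize == 0 {
		lastPartSize = sliceSize // even split: the last part is a full slice
	}
	return count, lastPartSize
}

func main() {
	fmt.Println(partCounts(25<<20, 10<<20)) // 3 parts, last = 5 MiB
	fmt.Println(partCounts(20<<20, 10<<20)) // 2 parts, last = 10 MiB
}
```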
@@ -465,61 +471,59 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
        return nil, err
    }

    threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))

    fileMd5 := md5.New()
    silceMd5 := md5.New()
    silceMd5Hexs := make([]string, 0, count)
    byteData := bytes.NewBuffer(make([]byte, DEFAULT))

    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        if utils.IsCanceled(upCtx) {
            break
        }

        byteData := make([]byte, sliceSize)
        if i == count {
            byteData = byteData[:lastPartSize]
        }

        // 读取块
        byteData.Reset()
        silceMd5.Reset()
        _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5, byteData), file, DEFAULT)
        if err != io.EOF && err != io.ErrUnexpectedEOF && err != nil {
        if _, err := io.ReadFull(io.TeeReader(file, io.MultiWriter(fileMd5, silceMd5)), byteData); err != io.EOF && err != nil {
            return nil, err
        }

        // 计算块md5并进行hex和base64编码
        md5Bytes := silceMd5.Sum(nil)
        silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Bytes)))
        silceMd5Base64 := base64.StdEncoding.EncodeToString(md5Bytes)
        partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

        // 获取上传链接
        var uploadUrl UploadUrlsResp
        _, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
            func(req *resty.Request) {
                req.SetContext(ctx)
            }, Params{
                "partInfo":     fmt.Sprintf("%d-%s", i, silceMd5Base64),
                "uploadFileId": initMultiUpload.Data.UploadFileID,
            }, &uploadUrl)
        if err != nil {
            return nil, err
        }
        threadG.Go(func(ctx context.Context) error {
            uploadUrls, err := y.GetMultiUploadUrls(ctx, initMultiUpload.Data.UploadFileID, partInfo)
            if err != nil {
                return err
            }

            // 开始上传
            uploadData := uploadUrl.UploadUrls[fmt.Sprint("partNumber_", i)]

            err = retry.Do(func() error {
                _, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(byteData.Bytes()))
                return err
            },
                retry.Context(ctx),
                retry.Attempts(3),
                retry.Delay(time.Second),
                retry.MaxDelay(5*time.Second))
            if err != nil {
                return nil, err
            }
            up(int(i * 100 / count))
            // step.4 上传切片
            uploadUrl := uploadUrls[0]
            _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, bytes.NewReader(byteData))
            if err != nil {
                return err
            }
            up(int(threadG.Success()) * 100 / count)
            return nil
        })
    }
    if err = threadG.Wait(); err != nil {
        return nil, err
    }

    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
    sliceMd5Hex := fileMd5Hex
    if file.GetSize() > DEFAULT {
    if file.GetSize() > sliceSize {
        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
    }
@@ -545,121 +549,140 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// 快传
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // 需要获取完整文件md5,必须支持 io.Seek
    tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
    tempFile, err := file.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    var DEFAULT = partSize(file.GetSize())
    count := int(math.Ceil(float64(file.GetSize()) / float64(DEFAULT)))
    var sliceSize = partSize(file.GetSize())
    count := int(math.Ceil(float64(file.GetSize()) / float64(sliceSize)))
    lastSliceSize := file.GetSize() % sliceSize
    if file.GetSize() > 0 && lastSliceSize == 0 {
        lastSliceSize = sliceSize
    }

    // 优先计算所需信息
    //step.1 优先计算所需信息
    byteSize := sliceSize
    fileMd5 := md5.New()
    silceMd5 := md5.New()
    silceMd5Hexs := make([]string, 0, count)
    silceMd5Base64s := make([]string, 0, count)
    partInfos := make([]string, 0, count)
    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }

        if i == count {
            byteSize = lastSliceSize
        }

        silceMd5.Reset()
        if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, DEFAULT); err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
        if _, err := io.CopyN(io.MultiWriter(fileMd5, silceMd5), tempFile, byteSize); err != nil && err != io.EOF {
            return nil, err
        }
        md5Byte := silceMd5.Sum(nil)
        silceMd5Hexs = append(silceMd5Hexs, strings.ToUpper(hex.EncodeToString(md5Byte)))
        silceMd5Base64s = append(silceMd5Base64s, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
    }
    if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
        return nil, err
        partInfos = append(partInfos, fmt.Sprint(i, "-", base64.StdEncoding.EncodeToString(md5Byte)))
    }

    fileMd5Hex := strings.ToUpper(hex.EncodeToString(fileMd5.Sum(nil)))
    sliceMd5Hex := fileMd5Hex
    if file.GetSize() > DEFAULT {
    if file.GetSize() > sliceSize {
        sliceMd5Hex = strings.ToUpper(utils.GetMD5EncodeStr(strings.Join(silceMd5Hexs, "\n")))
    }

    // 检测是否支持快传
    params := Params{
        "parentFolderId": dstDir.GetID(),
        "fileName":       url.QueryEscape(file.GetName()),
        "fileSize":       fmt.Sprint(file.GetSize()),
        "fileMd5":        fileMd5Hex,
        "sliceSize":      fmt.Sprint(DEFAULT),
        "sliceMd5":       sliceMd5Hex,
    }

    fullUrl := UPLOAD_URL
    if y.isFamily() {
        params.Set("familyId", y.FamilyID)
        fullUrl += "/family"
    } else {
        //params.Set("extend", `{"opScene":"1","relativepath":"","rootfolderid":""}`)
        fullUrl += "/person"
    }

    var uploadInfo InitMultiUploadResp
    _, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
        req.SetContext(ctx)
    }, params, &uploadInfo)
    if err != nil {
        return nil, err
    }

    // 网盘中不存在该文件,开始上传
    if uploadInfo.Data.FileDataExists != 1 {
        var uploadUrls UploadUrlsResp
        _, err = y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
            func(req *resty.Request) {
                req.SetContext(ctx)
            }, Params{
                "uploadFileId": uploadInfo.Data.UploadFileID,
                "partInfo":     strings.Join(silceMd5Base64s, ","),
            }, &uploadUrls)
    // 尝试恢复进度
    uploadProgress, ok := base.GetUploadProgress[*UploadProgress](y, y.tokenInfo.SessionKey, fileMd5Hex)
    if !ok {
        //step.2 预上传
        params := Params{
            "parentFolderId": dstDir.GetID(),
            "fileName":       url.QueryEscape(file.GetName()),
            "fileSize":       fmt.Sprint(file.GetSize()),
            "fileMd5":        fileMd5Hex,
            "sliceSize":      fmt.Sprint(sliceSize),
            "sliceMd5":       sliceMd5Hex,
        }
        if y.isFamily() {
            params.Set("familyId", y.FamilyID)
        }
        var uploadInfo InitMultiUploadResp
        _, err = y.request(fullUrl+"/initMultiUpload", http.MethodGet, func(req *resty.Request) {
            req.SetContext(ctx)
        }, params, &uploadInfo)
        if err != nil {
            return nil, err
        }

        buf := make([]byte, DEFAULT)
        for i := 1; i <= count; i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            }

            n, err := io.ReadFull(tempFile, buf)
            if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
                return nil, err
            }
            uploadData := uploadUrls.UploadUrls[fmt.Sprint("partNumber_", i)]
            err = retry.Do(func() error {
                _, err := y.put(ctx, uploadData.RequestURL, ParseHttpHeader(uploadData.RequestHeader), false, bytes.NewReader(buf[:n]))
                return err
            },
                retry.Context(ctx),
                retry.Attempts(3),
                retry.Delay(time.Second),
                retry.MaxDelay(5*time.Second))
            if err != nil {
                return nil, err
            }

            up(int(i * 100 / count))
        uploadProgress = &UploadProgress{
            UploadInfo:  uploadInfo,
            UploadParts: partInfos,
        }
    }

    // 提交
    uploadInfo := uploadProgress.UploadInfo.Data
    // 网盘中不存在该文件,开始上传
    if uploadInfo.FileDataExists != 1 {
        threadG, upCtx := errgroup.NewGroupWithContext(ctx, y.uploadThread,
            retry.Attempts(3),
            retry.Delay(time.Second),
            retry.DelayType(retry.BackOffDelay))
        for i, uploadPart := range uploadProgress.UploadParts {
            if utils.IsCanceled(upCtx) {
                break
            }

            i, uploadPart := i, uploadPart
            threadG.Go(func(ctx context.Context) error {
                // step.3 获取上传链接
                uploadUrls, err := y.GetMultiUploadUrls(ctx, uploadInfo.UploadFileID, uploadPart)
                if err != nil {
                    return err
                }
                uploadUrl := uploadUrls[0]

                byteSize, offset := sliceSize, int64(uploadUrl.PartNumber-1)*sliceSize
                if uploadUrl.PartNumber == count {
                    byteSize = lastSliceSize
                }

                // step.4 上传切片
                _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, io.NewSectionReader(tempFile, offset, byteSize))
                if err != nil {
                    return err
                }

                up(int(threadG.Success()) * 100 / len(uploadUrls))
                uploadProgress.UploadParts[i] = ""
                return nil
            })
        }
        if err = threadG.Wait(); err != nil {
            if errors.Is(err, context.Canceled) {
                uploadProgress.UploadParts = utils.SliceFilter(uploadProgress.UploadParts, func(s string) bool { return s != "" })
                base.SaveUploadProgress(y, uploadProgress, y.tokenInfo.SessionKey, fileMd5Hex)
            }
            return nil, err
        }
    }

    // step.5 提交
    var resp CommitMultiUploadFileResp
    _, err = y.request(fullUrl+"/commitMultiUploadFile", http.MethodGet,
        func(req *resty.Request) {
            req.SetContext(ctx)
        }, Params{
            "uploadFileId": uploadInfo.Data.UploadFileID,
            "uploadFileId": uploadInfo.UploadFileID,
            "isLog":        "0",
            "opertype":     "3",
        }, &resp)
@@ -669,16 +692,60 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
    return resp.toFile(), nil
}

// 获取上传切片信息
// 对http body有大小限制,分片信息太多会出错
func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string, partInfo ...string) ([]UploadUrlInfo, error) {
    fullUrl := UPLOAD_URL
    if y.isFamily() {
        fullUrl += "/family"
    } else {
        fullUrl += "/person"
    }

    var uploadUrlsResp UploadUrlsResp
    _, err := y.request(fullUrl+"/getMultiUploadUrls", http.MethodGet,
        func(req *resty.Request) {
            req.SetContext(ctx)
        }, Params{
            "uploadFileId": uploadFileId,
            "partInfo":     strings.Join(partInfo, ","),
        }, &uploadUrlsResp)
    if err != nil {
        return nil, err
    }
    uploadUrls := uploadUrlsResp.Data

    if len(uploadUrls) != len(partInfo) {
        return nil, fmt.Errorf("uploadUrls get error, due to get length %d, real length %d", len(partInfo), len(uploadUrls))
    }

    uploadUrlInfos := make([]UploadUrlInfo, 0, len(uploadUrls))
    for k, uploadUrl := range uploadUrls {
        partNumber, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
        if err != nil {
            return nil, err
        }
        uploadUrlInfos = append(uploadUrlInfos, UploadUrlInfo{
            PartNumber:     partNumber,
            Headers:        ParseHttpHeader(uploadUrl.RequestHeader),
            UploadUrlsData: uploadUrl,
        })
    }
    sort.Slice(uploadUrlInfos, func(i, j int) bool {
        return uploadUrlInfos[i].PartNumber < uploadUrlInfos[j].PartNumber
    })
    return uploadUrlInfos, nil
}

// 旧版本上传,家庭云不支持覆盖
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // 需要获取完整文件md5,必须支持 io.Seek
    tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
    tempFile, err := file.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    // 计算md5
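GetMultiUploadUrls receives a JSON object keyed by `partNumber_N`, so part order has to be recovered from the key names before uploading. A stripped-down sketch of that key-parse-and-sort step; the map contents and URLs here are hypothetical placeholders:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

func main() {
	// Shape of UploadUrlsResp.Data: the map key carries the part number.
	urls := map[string]string{
		"partNumber_2": "https://upload.example/part2",
		"partNumber_1": "https://upload.example/part1",
	}
	type info struct {
		PartNumber int
		URL        string
	}
	infos := make([]info, 0, len(urls))
	for k, u := range urls {
		n, err := strconv.Atoi(strings.TrimPrefix(k, "partNumber_"))
		if err != nil {
			panic(err)
		}
		infos = append(infos, info{PartNumber: n, URL: u})
	}
	// Map iteration order is random in Go, so sort by part number.
	sort.Slice(infos, func(i, j int) bool { return infos[i].PartNumber < infos[j].PartNumber })
	fmt.Println(infos)
}
```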
@@ -3,6 +3,7 @@ package alist_v3
import (
    "context"
    "fmt"
    "io"
    "net/http"
    "path"
    "strconv"

@@ -93,8 +94,10 @@ func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs)
            Object: model.Object{
                Name:     f.Name,
                Modified: f.Modified,
                Ctime:    f.Created,
                Size:     f.Size,
                IsFolder: f.IsDir,
                HashInfo: utils.FromString(f.HashInfo),
            },
            Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
        }

@@ -176,7 +179,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
            SetHeader("Password", d.MetaPassword).
            SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
            SetContentLength(true).
            SetBody(stream.GetReadCloser())
            SetBody(io.ReadCloser(stream))
    })
    return err
}
@@ -18,9 +18,11 @@ type ObjResp struct {
    Size     int64     `json:"size"`
    IsDir    bool      `json:"is_dir"`
    Modified time.Time `json:"modified"`
    Created  time.Time `json:"created"`
    Sign     string    `json:"sign"`
    Thumb    string    `json:"thumb"`
    Type     int       `json:"type"`
    HashInfo string    `json:"hashinfo"`
}

type FsListResp struct {
@@ -7,6 +7,7 @@ import (
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "github.com/alist-org/alist/v3/internal/stream"
    "io"
    "math"
    "math/big"

@@ -67,7 +68,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
        return nil
    }
    // init deviceID
    deviceID := utils.GetSHA256Encode([]byte(d.UserID))
    deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
    // init privateKey
    privateKey, _ := NewPrivateKeyFromHex(deviceID)
    state := State{

@@ -163,14 +164,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
    return err
}

func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    file := model.FileStream{
        Obj:        stream,
        ReadCloser: stream,
        Mimetype:   stream.GetMimetype(),
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
    file := stream.FileStream{
        Obj:      streamer,
        Reader:   streamer,
        Mimetype: streamer.GetMimetype(),
    }
    const DEFAULT int64 = 10485760
    var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
    var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))

    partInfoList := make([]base.Json, 0, count)
    for i := 1; i <= count; i++ {

@@ -187,25 +188,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
    }

    var localFile *os.File
    if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
        localFile, _ = fileStream.ReadCloser.(*os.File)
    if fileStream, ok := file.Reader.(*stream.FileStream); ok {
        localFile, _ = fileStream.Reader.(*os.File)
    }
    if d.RapidUpload {
        buf := bytes.NewBuffer(make([]byte, 0, 1024))
        io.CopyN(buf, file, 1024)
        reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
        reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
        if localFile != nil {
            if _, err := localFile.Seek(0, io.SeekStart); err != nil {
                return err
            }
        } else {
            // 把头部拼接回去
            file.ReadCloser = struct {
            file.Reader = struct {
                io.Reader
                io.Closer
            }{
                Reader: io.MultiReader(buf, file),
                Closer: file,
                Closer: &file,
            }
        }
    } else {

@@ -281,7 +282,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
        if _, err = localFile.Seek(0, io.SeekStart); err != nil {
            return err
        }
        file.ReadCloser = localFile
        file.Reader = localFile
    }

    for i, partInfo := range resp.PartInfoList {
@@ -107,7 +107,9 @@ func (d *AliyundriveOpen) Link(ctx context.Context, file model.Obj, args model.L
    return d.limitLink(ctx, file)
}

func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    nowTime, _ := getNowTime()
    newDir := File{CreatedAt: nowTime, UpdatedAt: nowTime}
    _, err := d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,

@@ -115,12 +117,16 @@ func (d *AliyundriveOpen) MakeDir(ctx context.Context, parentDir model.Obj, dirN
            "name":            dirName,
            "type":            "folder",
            "check_name_mode": "refuse",
        })
    }).SetResult(&newDir)
    })
    return err
    if err != nil {
        return nil, err
    }
    return fileToObj(newDir), nil
}

func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    var resp MoveOrCopyResp
    _, err := d.request("/adrive/v1.0/openFile/move", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,

@@ -128,20 +134,36 @@ func (d *AliyundriveOpen) Move(ctx context.Context, srcObj, dstDir model.Obj) er
            "to_parent_file_id": dstDir.GetID(),
            "check_name_mode":   "refuse", // optional:ignore,auto_rename,refuse
            //"new_name":          "newName", // The new name to use when a file of the same name exists
        })
    }).SetResult(&resp)
    })
    return err
    if err != nil {
        return nil, err
    }
    if resp.Exist {
        return nil, errors.New("existence of files with the same name")
    }

    if srcObj, ok := srcObj.(*model.ObjThumb); ok {
        srcObj.ID = resp.FileID
        srcObj.Modified = time.Now()
        return srcObj, nil
    }
    return nil, nil
}

func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *AliyundriveOpen) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    var newFile File
    _, err := d.request("/adrive/v1.0/openFile/update", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id": d.DriveId,
            "file_id":  srcObj.GetID(),
            "name":     newName,
        })
    }).SetResult(&newFile)
    })
    return err
    if err != nil {
        return nil, err
    }
    return fileToObj(newFile), nil
}

func (d *AliyundriveOpen) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

@@ -170,7 +192,7 @@ func (d *AliyundriveOpen) Remove(ctx context.Context, obj model.Obj) error {
    return err
}

func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *AliyundriveOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    return d.upload(ctx, dstDir, stream, up)
}

@@ -199,3 +221,7 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte
}

var _ driver.Driver = (*AliyundriveOpen)(nil)
var _ driver.MkdirResult = (*AliyundriveOpen)(nil)
var _ driver.MoveResult = (*AliyundriveOpen)(nil)
var _ driver.RenameResult = (*AliyundriveOpen)(nil)
var _ driver.PutResult = (*AliyundriveOpen)(nil)
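The trailing `var _ driver.MkdirResult = ...` assertions mark this driver as opting into the result-returning variants of each operation; a caller can then detect the capability with a type assertion and fall back otherwise. A hedged sketch of that dispatch pattern, with interface and type names invented for the example rather than taken from AList:

```go
package main

import "fmt"

type Driver interface{ Name() string }

// MkdirResult is the optional, result-returning capability.
type MkdirResult interface {
	MakeDirRes(name string) (string, error)
}

type open struct{}

func (open) Name() string { return "aliyundrive_open" }
func (open) MakeDirRes(name string) (string, error) {
	return "obj:" + name, nil
}

func main() {
	var d Driver = open{}
	// Prefer the result-returning variant when the driver implements it.
	if m, ok := d.(MkdirResult); ok {
		obj, _ := m.MakeDirRes("docs")
		fmt.Println("created", obj)
	} else {
		fmt.Println("fallback path")
	}
}
```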
@@ -1,6 +1,7 @@
package aliyundrive_open

import (
    "github.com/alist-org/alist/v3/pkg/utils"
    "time"

    "github.com/alist-org/alist/v3/internal/model"
@@ -17,22 +18,28 @@ type Files struct {
}

type File struct {
    DriveId       string     `json:"drive_id"`
    FileId        string     `json:"file_id"`
    ParentFileId  string     `json:"parent_file_id"`
    Name          string     `json:"name"`
    Size          int64      `json:"size"`
    FileExtension string     `json:"file_extension"`
    ContentHash   string     `json:"content_hash"`
    Category      string     `json:"category"`
    Type          string     `json:"type"`
    Thumbnail     string     `json:"thumbnail"`
    Url           string     `json:"url"`
    CreatedAt     *time.Time `json:"created_at"`
    UpdatedAt     time.Time  `json:"updated_at"`
    DriveId       string    `json:"drive_id"`
    FileId        string    `json:"file_id"`
    ParentFileId  string    `json:"parent_file_id"`
    Name          string    `json:"name"`
    Size          int64     `json:"size"`
    FileExtension string    `json:"file_extension"`
    ContentHash   string    `json:"content_hash"`
    Category      string    `json:"category"`
    Type          string    `json:"type"`
    Thumbnail     string    `json:"thumbnail"`
    Url           string    `json:"url"`
    CreatedAt     time.Time `json:"created_at"`
    UpdatedAt     time.Time `json:"updated_at"`

    // create only
    FileName string `json:"file_name"`
}

func fileToObj(f File) *model.ObjThumb {
    if f.Name == "" {
        f.Name = f.FileName
    }
    return &model.ObjThumb{
        Object: model.Object{
            ID: f.FileId,
@@ -40,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
            Size:     f.Size,
            Modified: f.UpdatedAt,
            IsFolder: f.Type == "folder",
            Ctime:    f.CreatedAt,
            HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
    }
@@ -67,3 +76,9 @@ type CreateResp struct {
    RapidUpload  bool       `json:"rapid_upload"`
    PartInfoList []PartInfo `json:"part_info_list"`
}

type MoveOrCopyResp struct {
    Exist   bool   `json:"exist"`
    DriveID string `json:"drive_id"`
    FileID  string `json:"file_id"`
}

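Surfacing the API's ContentHash through HashInfo is what lets the upload path below skip re-hashing when the source object already carries a SHA-1. A small hedged illustration of the lookup side, using the getters as they appear in upload.go in this diff:

// Illustration: reuse a hash collected at list time instead of re-reading
// the file; srcObj and createData are assumed context from the Put flow.
hi := srcObj.GetHash()
if sha1sum := hi.GetHash(utils.SHA1); sha1sum != "" {
    createData["content_hash"] = sha1sum // no CacheFullInTempFile needed
}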
@@ -3,14 +3,12 @@ package aliyundrive_open

import (
    "bytes"
    "context"
    "crypto/sha1"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "io"
    "math"
    "net/http"
    "os"
    "strconv"
    "strings"
    "time"
@@ -19,6 +17,7 @@ import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
)
@@ -32,19 +31,19 @@ func makePartInfos(size int) []base.Json {
}

func calPartSize(fileSize int64) int64 {
    var partSize int64 = 20 * 1024 * 1024
    var partSize int64 = 20 * utils.MB
    if fileSize > partSize {
        if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB
            partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
        } else if fileSize > 768*1024*1024*1024 { // over 768GB
        if fileSize > 1*utils.TB { // file Size over 1TB
            partSize = 5 * utils.GB // file part size 5GB
        } else if fileSize > 768*utils.GB { // over 768GB
            partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
        } else if fileSize > 512*1024*1024*1024 { // over 512GB
        } else if fileSize > 512*utils.GB { // over 512GB
            partSize = 82463373 // ≈ 78.6432MB
        } else if fileSize > 384*1024*1024*1024 { // over 384GB
        } else if fileSize > 384*utils.GB { // over 384GB
            partSize = 54975582 // ≈ 52.4288MB
        } else if fileSize > 256*1024*1024*1024 { // over 256GB
        } else if fileSize > 256*utils.GB { // over 256GB
            partSize = 41231687 // ≈ 39.3216MB
        } else if fileSize > 128*1024*1024*1024 { // over 128GB
        } else if fileSize > 128*utils.GB { // over 128GB
            partSize = 27487791 // ≈ 26.2144MB
        }
    }
@@ -65,73 +64,40 @@ func (d *AliyundriveOpen) getUploadUrl(count int, fileId, uploadId string) ([]Pa
    return resp.PartInfoList, err
}

func (d *AliyundriveOpen) uploadPart(ctx context.Context, i, count int, reader *utils.MultiReadable, resp *CreateResp, retry bool) error {
    partInfo := resp.PartInfoList[i-1]
func (d *AliyundriveOpen) uploadPart(ctx context.Context, r io.Reader, partInfo PartInfo) error {
    uploadUrl := partInfo.UploadUrl
    if d.InternalUpload {
        uploadUrl = strings.ReplaceAll(uploadUrl, "https://cn-beijing-data.aliyundrive.net/", "http://ccp-bj29-bj-1592982087.oss-cn-beijing-internal.aliyuncs.com/")
    }
    req, err := http.NewRequest("PUT", uploadUrl, reader)
    req, err := http.NewRequestWithContext(ctx, "PUT", uploadUrl, r)
    if err != nil {
        return err
    }
    req = req.WithContext(ctx)
    res, err := base.HttpClient.Do(req)
    if err != nil {
        if retry {
            reader.Reset()
            return d.uploadPart(ctx, i, count, reader, resp, false)
        }
        return err
    }
    res.Body.Close()
    if retry && res.StatusCode == http.StatusForbidden {
        resp.PartInfoList, err = d.getUploadUrl(count, resp.FileId, resp.UploadId)
        if err != nil {
            return err
        }
        reader.Reset()
        return d.uploadPart(ctx, i, count, reader, resp, false)
    }
    if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusConflict {
        return fmt.Errorf("upload status: %d", res.StatusCode)
    }
    return nil
}

func (d *AliyundriveOpen) normalUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, createResp CreateResp, count int, partSize int64) error {
    log.Debugf("[aliyundrive_open] normal upload")
    // 2. upload
    preTime := time.Now()
    for i := 1; i <= len(createResp.PartInfoList); i++ {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }
        err := d.uploadPart(ctx, i, count, utils.NewMultiReadable(io.LimitReader(stream, partSize)), &createResp, true)
        if err != nil {
            return err
        }
        if count > 0 {
            up(i * 100 / count)
        }
        // refresh upload url if 50 minutes passed
        if time.Since(preTime) > 50*time.Minute {
            createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
            if err != nil {
                return err
            }
            preTime = time.Now()
        }
    }
func (d *AliyundriveOpen) completeUpload(fileId, uploadId string) (model.Obj, error) {
    // 3. complete
    var newFile File
    _, err := d.request("/adrive/v1.0/openFile/complete", http.MethodPost, func(req *resty.Request) {
        req.SetBody(base.Json{
            "drive_id":  d.DriveId,
            "file_id":   createResp.FileId,
            "upload_id": createResp.UploadId,
        })
            "file_id":   fileId,
            "upload_id": uploadId,
        }).SetResult(&newFile)
    })
    return err
    if err != nil {
        return nil, err
    }
    return fileToObj(newFile), nil
}

type ProofRange struct {
@@ -159,110 +125,145 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
    return pr, nil
}

func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
    proofRange, err := getProofRange(d.AccessToken, fileSize)
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
    proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
    if err != nil {
        return "", err
    }
    buf := make([]byte, proofRange.End-proofRange.Start)
    _, err = file.ReadAt(buf, proofRange.Start)
    length := proofRange.End - proofRange.Start
    buf := bytes.NewBuffer(make([]byte, 0, length))
    reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
    if err != nil {
        return "", err
    }
    return base64.StdEncoding.EncodeToString(buf), nil
    _, err = io.CopyN(buf, reader, length)
    if err != nil {
        return "", err
    }
    return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}

func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // 1. create
    // Part Size Unit: Bytes, Default: 20MB,
    // Maximum number of slices 10,000, ≈195.3125GB
    var partSize = calPartSize(stream.GetSize())
    const dateFormat = "2006-01-02T15:04:05.000Z"
    mtimeStr := stream.ModTime().UTC().Format(dateFormat)
    ctimeStr := stream.CreateTime().UTC().Format(dateFormat)

    createData := base.Json{
        "drive_id":        d.DriveId,
        "parent_file_id":  dstDir.GetID(),
        "name":            stream.GetName(),
        "type":            "file",
        "check_name_mode": "ignore",
        "drive_id":          d.DriveId,
        "parent_file_id":    dstDir.GetID(),
        "name":              stream.GetName(),
        "type":              "file",
        "check_name_mode":   "ignore",
        "local_modified_at": mtimeStr,
        "local_created_at":  ctimeStr,
    }
    count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
    createData["part_info_list"] = makePartInfos(count)
    // rapid upload
    rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
    rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
    if rapidUpload {
        log.Debugf("[aliyundrive_open] start cal pre_hash")
        // read 1024 bytes to calculate pre hash
        buf := bytes.NewBuffer(make([]byte, 0, 1024))
        _, err := io.CopyN(buf, stream, 1024)
        reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
        if err != nil {
            return err
            return nil, err
        }
        hash, err := utils.HashReader(utils.SHA1, reader)
        if err != nil {
            return nil, err
        }
        createData["size"] = stream.GetSize()
        createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
        // if support seek, seek to start
        if localFile, ok := stream.(io.Seeker); ok {
            if _, err := localFile.Seek(0, io.SeekStart); err != nil {
                return err
            }
        } else {
            // Put spliced head back to stream
            stream.SetReadCloser(struct {
                io.Reader
                io.Closer
            }{
                Reader: io.MultiReader(buf, stream.GetReadCloser()),
                Closer: stream.GetReadCloser(),
            })
        }
        createData["pre_hash"] = hash
    }
    var createResp CreateResp
    _, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
        req.SetBody(createData).SetResult(&createResp)
    })
    var tmpF model.File
    if err != nil {
        if e.Code != "PreHashMatched" || !rapidUpload {
            return err
            return nil, err
        }
        log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
        // convert to local file
        file, err := utils.CreateTempFile(stream, stream.GetSize())
        if err != nil {
            return err
        }
        _ = stream.GetReadCloser().Close()
        stream.SetReadCloser(file)
        // calculate full hash
        h := sha1.New()
        _, err = io.Copy(h, file)
        if err != nil {
            return err

        hi := stream.GetHash()
        hash := hi.GetHash(utils.SHA1)
        if len(hash) <= 0 {
            tmpF, err = stream.CacheFullInTempFile()
            if err != nil {
                return nil, err
            }
            hash, err = utils.HashFile(utils.SHA1, tmpF)
            if err != nil {
                return nil, err
            }

        }

        delete(createData, "pre_hash")
        createData["proof_version"] = "v1"
        createData["content_hash_name"] = "sha1"
        createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
        // seek to start
        if _, err = file.Seek(0, io.SeekStart); err != nil {
            return err
        }
        createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
        createData["content_hash"] = hash
        createData["proof_code"], err = d.calProofCode(stream)
        if err != nil {
            return fmt.Errorf("cal proof code error: %s", err.Error())
            return nil, fmt.Errorf("cal proof code error: %s", err.Error())
        }
        _, err = d.request("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
            req.SetBody(createData).SetResult(&createResp)
        })
        if err != nil {
            return err
        }
        if createResp.RapidUpload {
            log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
            return nil
        }
        // failed to rapid upload, try normal upload
        if _, err = file.Seek(0, io.SeekStart); err != nil {
            return err
            return nil, err
        }
    }

    if !createResp.RapidUpload {
        // 2. normal upload
        log.Debugf("[aliyundrive_open] normal upload")

        preTime := time.Now()
        var offset, length int64 = 0, partSize
        //var length
        for i := 0; i < len(createResp.PartInfoList); i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            }
            // refresh upload url if 50 minutes passed
            if time.Since(preTime) > 50*time.Minute {
                createResp.PartInfoList, err = d.getUploadUrl(count, createResp.FileId, createResp.UploadId)
                if err != nil {
                    return nil, err
                }
                preTime = time.Now()
            }
            if remain := stream.GetSize() - offset; length > remain {
                length = remain
            }
            //rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
            rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
            if err != nil {
                return nil, err
            }
            err = retry.Do(func() error {
                //rd.Reset()
                return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
            },
                retry.Attempts(3),
                retry.DelayType(retry.BackOffDelay),
                retry.Delay(time.Second))
            if err != nil {
                return nil, err
            }
            offset += partSize
        }
    } else {
        log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)
    }

    log.Debugf("[aliyundrive_open] create file success, resp: %+v", createResp)
    return d.normalUpload(ctx, stream, up, createResp, count, partSize)
    // 3. complete
    return d.completeUpload(createResp.FileId, createResp.UploadId)
}

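The part-size ladder in calPartSize exists because the API caps an upload at 10,000 parts: the 20MB default only covers about 195.3GB, so larger files step up through fixed sizes until the over-1TB tier uses 5GB parts. A standalone sanity check of the arithmetic (byte-unit constants spelled out so it runs without the repo's utils package):

package main

import (
    "fmt"
    "math"
)

const (
    KB int64 = 1 << 10
    MB       = KB << 10
    GB       = MB << 10
    TB       = GB << 10
)

// parts computes how many slices a file needs at a given part size,
// the same ceil-division the upload path performs.
func parts(fileSize, partSize int64) int {
    return int(math.Ceil(float64(fileSize) / float64(partSize)))
}

func main() {
    fmt.Println(parts(195*GB, 20*MB))   // 9984 — just under the 10,000 cap
    fmt.Println(parts(1*TB, 109951163)) // 10000 — the 768GB–1TB tier
    fmt.Println(parts(2*TB, 5*GB))      // 410 — the over-1TB tier
}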
@@ -2,9 +2,12 @@ package aliyundrive_open

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/op"
@@ -15,7 +18,7 @@ import (

// do others that not defined in Driver interface

func (d *AliyundriveOpen) refreshToken() error {
func (d *AliyundriveOpen) _refreshToken() (string, string, error) {
    url := d.base + "/oauth/access_token"
    if d.OauthTokenURL != "" && d.ClientID == "" {
        url = d.OauthTokenURL
@@ -23,7 +26,7 @@ func (d *AliyundriveOpen) refreshToken() error {
    //var resp base.TokenResp
    var e ErrResp
    res, err := base.RestyClient.R().
        ForceContentType("application/json").
        //ForceContentType("application/json").
        SetBody(base.Json{
            "client_id":     d.ClientID,
            "client_secret": d.ClientSecret,
@@ -34,16 +37,56 @@ func (d *AliyundriveOpen) refreshToken() error {
        SetError(&e).
        Post(url)
    if err != nil {
        return err
        return "", "", err
    }
    log.Debugf("[ali_open] refresh token response: %s", res.String())
    if e.Code != "" {
        return fmt.Errorf("failed to refresh token: %s", e.Message)
        return "", "", fmt.Errorf("failed to refresh token: %s", e.Message)
    }
    refresh, access := utils.Json.Get(res.Body(), "refresh_token").ToString(), utils.Json.Get(res.Body(), "access_token").ToString()
    if refresh == "" {
        return errors.New("failed to refresh token: refresh token is empty")
        return "", "", fmt.Errorf("failed to refresh token: refresh token is empty, resp: %s", res.String())
    }
    curSub, err := getSub(d.RefreshToken)
    if err != nil {
        return "", "", err
    }
    newSub, err := getSub(refresh)
    if err != nil {
        return "", "", err
    }
    if curSub != newSub {
        return "", "", errors.New("failed to refresh token: sub not match")
    }
    return refresh, access, nil
}

func getSub(token string) (string, error) {
    segments := strings.Split(token, ".")
    if len(segments) != 3 {
        return "", errors.New("not a jwt token because of invalid segments")
    }
    bs, err := base64.RawStdEncoding.DecodeString(segments[1])
    if err != nil {
        return "", errors.New("failed to decode jwt token")
    }
    return utils.Json.Get(bs, "sub").ToString(), nil
}

func (d *AliyundriveOpen) refreshToken() error {
    refresh, access, err := d._refreshToken()
    for i := 0; i < 3; i++ {
        if err == nil {
            break
        } else {
            log.Errorf("[ali_open] failed to refresh token: %s", err)
        }
        refresh, access, err = d._refreshToken()
    }
    if err != nil {
        return err
    }
    log.Infof("[ali_open] token exchange: %s -> %s", d.RefreshToken, refresh)
    d.RefreshToken, d.AccessToken = refresh, access
    op.MustSaveDriverStorage(d)
    return nil
@@ -127,3 +170,9 @@ func (d *AliyundriveOpen) getFiles(ctx context.Context, fileId string) ([]File,
    }
    return res, nil
}

func getNowTime() (time.Time, string) {
    nowTime := time.Now()
    nowTimeStr := nowTime.Format("2006-01-02T15:04:05.000Z")
    return nowTime, nowTimeStr
}

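getSub compares the `sub` claim of the old and new refresh tokens so a response carrying a token for a different account is rejected before it overwrites the stored credentials. A standalone illustration of the same decoding — the token below is fabricated for the example, and encoding/json stands in for the repo's jsoniter wrapper; note the code mirrors the repo's use of RawStdEncoding for the payload segment:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "strings"
)

// sub decodes the middle JWT segment and extracts the "sub" claim,
// the same way getSub above does.
func sub(token string) (string, error) {
    segments := strings.Split(token, ".")
    if len(segments) != 3 {
        return "", fmt.Errorf("not a jwt token because of invalid segments")
    }
    bs, err := base64.RawStdEncoding.DecodeString(segments[1])
    if err != nil {
        return "", err
    }
    var claims struct {
        Sub string `json:"sub"`
    }
    if err := json.Unmarshal(bs, &claims); err != nil {
        return "", err
    }
    return claims.Sub, nil
}

func main() {
    // header.payload.signature with payload {"sub":"user-123"} (made-up token)
    payload := base64.RawStdEncoding.EncodeToString([]byte(`{"sub":"user-123"}`))
    fmt.Println(sub("eyJhbGciOiJIUzI1NiJ9." + payload + ".sig")) // user-123 <nil>
}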
@@ -44,6 +44,7 @@ func fileToObj(f File) *model.ObjThumb {
            Name:     f.Name,
            Size:     f.Size,
            Modified: f.UpdatedAt,
            Ctime:    f.CreatedAt,
            IsFolder: f.Type == "folder",
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},

@@ -4,29 +4,33 @@ import (
    "context"
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "math"
    "net/url"
    stdpath "path"
    "strconv"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    log "github.com/sirupsen/logrus"
    "io"
    "math"
    "os"
    stdpath "path"
    "strconv"
    "strings"
)

type BaiduNetdisk struct {
    model.Storage
    Addition

    uploadThread int
}

const BaiduFileAPI = "https://d.pcs.baidu.com/rest/2.0/pcs/superfile2"
const DefaultSliceSize int64 = 4 * 1024 * 1024
const DefaultSliceSize int64 = 4 * utils.MB

func (d *BaiduNetdisk) Config() driver.Config {
    return config
@@ -37,6 +41,15 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional {
}

func (d *BaiduNetdisk) Init(ctx context.Context) error {
    d.uploadThread, _ = strconv.Atoi(d.UploadThread)
    if d.uploadThread < 1 || d.uploadThread > 32 {
        d.uploadThread, d.UploadThread = 3, "3"
    }

    if _, err := url.Parse(d.UploadAPI); d.UploadAPI == "" || err != nil {
        d.UploadAPI = "https://d.pcs.baidu.com"
    }

    res, err := d.get("/xpan/nas", map[string]string{
        "method": "uinfo",
    }, nil)
@@ -65,12 +78,16 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
    return d.linkOfficial(file, args)
}

func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
    _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "")
    return err
func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    var newDir File
    _, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
    if err != nil {
        return nil, err
    }
    return fileToObj(newDir), nil
}

func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    data := []base.Json{
        {
            "path": srcObj.GetPath(),
@@ -79,10 +96,18 @@ func (d *BaiduNetdisk) Move(ctx context.Context, srcObj, dstDir model.Obj) error
        },
    }
    _, err := d.manage("move", data)
    return err
    if err != nil {
        return nil, err
    }
    if srcObj, ok := srcObj.(*model.ObjThumb); ok {
        srcObj.SetPath(stdpath.Join(dstDir.GetPath(), srcObj.GetName()))
        srcObj.Modified = time.Now()
        return srcObj, nil
    }
    return nil, nil
}

func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    data := []base.Json{
        {
            "path": srcObj.GetPath(),
@@ -90,7 +115,17 @@ func (d *BaiduNetdisk) Rename(ctx context.Context, srcObj model.Obj, newName str
        },
    }
    _, err := d.manage("rename", data)
    return err
    if err != nil {
        return nil, err
    }

    if srcObj, ok := srcObj.(*model.ObjThumb); ok {
        srcObj.SetPath(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName))
        srcObj.Name = newName
        srcObj.Modified = time.Now()
        return srcObj, nil
    }
    return nil, nil
}

func (d *BaiduNetdisk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -111,118 +146,130 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
    return err
}

func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    streamSize := stream.GetSize()

    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    tempFile, err := stream.CacheFullInTempFile()
    if err != nil {
        return err
        return nil, err
    }

    streamSize := stream.GetSize()
    count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1))
    lastBlockSize := streamSize % DefaultSliceSize
    if streamSize > 0 && lastBlockSize == 0 {
        lastBlockSize = DefaultSliceSize
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    count := int(math.Ceil(float64(streamSize) / float64(DefaultSliceSize)))
    //cal md5 for first 256k data
    const SliceSize int64 = 256 * 1024
    // cal md5
    h1 := md5.New()
    h2 := md5.New()
    blockList := make([]string, 0)
    contentMd5 := ""
    sliceMd5 := ""
    left := streamSize
    for i := 0; i < count; i++ {
        byteSize := DefaultSliceSize
        if left < DefaultSliceSize {
            byteSize = left
    blockList := make([]string, 0, count)
    byteSize := DefaultSliceSize
    fileMd5H := md5.New()
    sliceMd5H := md5.New()
    sliceMd5H2 := md5.New()
    slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)

    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }
        left -= byteSize
        _, err = io.Copy(io.MultiWriter(h1, h2), io.LimitReader(tempFile, byteSize))
        if err != nil {
            return err
        if i == count {
            byteSize = lastBlockSize
        }
        blockList = append(blockList, fmt.Sprintf("\"%s\"", hex.EncodeToString(h2.Sum(nil))))
        h2.Reset()
    }
    contentMd5 = hex.EncodeToString(h1.Sum(nil))
    _, err = tempFile.Seek(0, io.SeekStart)
    if err != nil {
        return err
    }
    if streamSize <= SliceSize {
        sliceMd5 = contentMd5
    } else {
        sliceData := make([]byte, SliceSize)
        _, err = io.ReadFull(tempFile, sliceData)
        if err != nil {
            return err
        _, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
        if err != nil && err != io.EOF {
            return nil, err
        }
        h2.Write(sliceData)
        sliceMd5 = hex.EncodeToString(h2.Sum(nil))
        blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
        sliceMd5H.Reset()
    }
    contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
    sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
    blockListStr, _ := utils.Json.MarshalToString(blockList)

    rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
    path := encodeURIComponent(rawPath)
    block_list_str := fmt.Sprintf("[%s]", strings.Join(blockList, ","))
    data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&block_list=%s&content-md5=%s&slice-md5=%s",
        path, streamSize,
        block_list_str,
        contentMd5, sliceMd5)
    params := map[string]string{
        "method": "precreate",
    }
    log.Debugf("[baidu_netdisk] precreate data: %s", data)
    var precreateResp PrecreateResp
    _, err = d.post("/xpan/file", params, data, &precreateResp)
    if err != nil {
        return err
    }
    log.Debugf("%+v", precreateResp)
    if precreateResp.ReturnType == 2 {
        //rapid upload, since got md5 match from baidu server
        return nil
    }
    params = map[string]string{
        "method":       "upload",
        "access_token": d.AccessToken,
        "type":         "tmpfile",
        "path":         path,
        "uploadid":     precreateResp.Uploadid,
    }
    mtime := stream.ModTime().Unix()
    ctime := stream.CreateTime().Unix()

    var offset int64 = 0
    for i, partseq := range precreateResp.BlockList {
        params["partseq"] = strconv.Itoa(partseq)
        byteSize := int64(math.Min(float64(streamSize-offset), float64(DefaultSliceSize)))
        err := retry.Do(func() error {
            return d.uploadSlice(ctx, &params, stream.GetName(), tempFile, offset, byteSize)
        },
            retry.Context(ctx),
            retry.Attempts(3))
    // step.1 precreate
    // try to resume a previous upload first
    precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
    if !ok {
        data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s&local_mtime=%d&local_ctime=%d",
            path, streamSize, blockListStr, contentMd5, sliceMd5, mtime, ctime)
        params := map[string]string{
            "method": "precreate",
        }
        log.Debugf("[baidu_netdisk] precreate data: %s", data)
        _, err = d.post("/xpan/file", params, data, &precreateResp)
        if err != nil {
            return err
            return nil, err
        }
        offset += byteSize

        if len(precreateResp.BlockList) > 0 {
            up(i * 100 / len(precreateResp.BlockList))
        log.Debugf("%+v", precreateResp)
        if precreateResp.ReturnType == 2 {
            //rapid upload, since got md5 match from baidu server
            if err != nil {
                return nil, err
            }
            return fileToObj(precreateResp.File), nil
        }
    }
    _, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, block_list_str)
    return err
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params *map[string]string, fileName string, file *os.File, offset int64, byteSize int64) error {
    _, err := file.Seek(offset, io.SeekStart)
    // step.2 upload the slices
    threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
        retry.Attempts(3),
        retry.Delay(time.Second),
        retry.DelayType(retry.BackOffDelay))
    for i, partseq := range precreateResp.BlockList {
        if utils.IsCanceled(upCtx) {
            break
        }

        i, partseq, offset, byteSize := i, partseq, int64(partseq)*DefaultSliceSize, DefaultSliceSize
        if partseq+1 == count {
            byteSize = lastBlockSize
        }
        threadG.Go(func(ctx context.Context) error {
            params := map[string]string{
                "method":       "upload",
                "access_token": d.AccessToken,
                "type":         "tmpfile",
                "path":         path,
                "uploadid":     precreateResp.Uploadid,
                "partseq":      strconv.Itoa(partseq),
            }
            err := d.uploadSlice(ctx, params, stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
            if err != nil {
                return err
            }
            up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
            precreateResp.BlockList[i] = -1
            return nil
        })
    }
    if err = threadG.Wait(); err != nil {
        // if the user cancelled, save the upload progress
        if errors.Is(err, context.Canceled) {
            precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
            base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
        }
        return nil, err
    }

    // step.3 create the file
    var newFile File
    _, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
    if err != nil {
        return err
        return nil, err
    }

    return fileToObj(newFile), nil
}
func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string, fileName string, file io.Reader) error {
    res, err := base.RestyClient.R().
        SetContext(ctx).
        SetQueryParams(*params).
        SetFileReader("file", fileName, io.LimitReader(file, byteSize)).
        Post(BaiduFileAPI)
        SetQueryParams(params).
        SetFileReader("file", fileName, file).
        Post(d.UploadAPI + "/rest/2.0/pcs/superfile2")
    if err != nil {
        return err
    }

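The precreate form needs three digests at once: content-md5 over the whole file, slice-md5 over the first 256KB, and an md5 per 4MB block. The rewritten loop computes all of them in a single pass by fanning each chunk through io.MultiWriter, with a size-capped writer so the slice digest only ever sees the head. A standalone sketch of the same idea — limitWriter here is a hand-rolled stand-in for the repo's utils.LimitWriter:

package main

import (
    "crypto/md5"
    "crypto/rand"
    "encoding/hex"
    "fmt"
    "io"
)

// limitWriter forwards only the first n bytes and silently discards the
// rest, so the slice digest stops accumulating after the 256KB head while
// the same pass keeps feeding the other hashes.
type limitWriter struct {
    w io.Writer
    n int64
}

func (l *limitWriter) Write(p []byte) (int, error) {
    if l.n > 0 {
        keep := int64(len(p))
        if keep > l.n {
            keep = l.n
        }
        if _, err := l.w.Write(p[:keep]); err != nil {
            return 0, err
        }
        l.n -= keep
    }
    return len(p), nil
}

func main() {
    const blockSize int64 = 4 << 20  // 4MB upload blocks
    const sliceSize int64 = 256 << 10

    size := int64(5<<20 + 123) // demo payload: 5MB plus a ragged tail
    src := io.LimitReader(rand.Reader, size)

    fileH, blockH, sliceH := md5.New(), md5.New(), md5.New()
    head := &limitWriter{w: sliceH, n: sliceSize}

    count := int((size + blockSize - 1) / blockSize)
    blocks := make([]string, 0, count)
    byteSize := blockSize
    for i := 1; i <= count; i++ {
        if i == count {
            byteSize = size - int64(count-1)*blockSize // short last block
        }
        if _, err := io.CopyN(io.MultiWriter(fileH, blockH, head), src, byteSize); err != nil {
            panic(err)
        }
        blocks = append(blocks, hex.EncodeToString(blockH.Sum(nil)))
        blockH.Reset()
    }
    fmt.Println("content-md5:", hex.EncodeToString(fileH.Sum(nil)))
    fmt.Println("slice-md5:  ", hex.EncodeToString(sliceH.Sum(nil)))
    fmt.Println("blocks:     ", len(blocks)) // 2
}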
@@ -15,6 +15,8 @@ type Addition struct {
    ClientSecret  string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
    CustomCrackUA string `json:"custom_crack_ua" required:"true" default:"netdisk"`
    AccessToken   string
    UploadThread  string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
    UploadAPI     string `json:"upload_api" default:"https://d.pcs.baidu.com"`
}

var config = driver.Config{

@@ -1,6 +1,8 @@
package baidu_netdisk

import (
    "github.com/alist-org/alist/v3/pkg/utils"
    "path"
    "strconv"
    "time"

@@ -17,10 +19,8 @@ type File struct {
    //OwnerType int `json:"owner_type"`
    //Category int `json:"category"`
    //RealCategory string `json:"real_category"`
    FsId        int64 `json:"fs_id"`
    ServerMtime int64 `json:"server_mtime"`
    FsId int64 `json:"fs_id"`
    //OperId int `json:"oper_id"`
    //ServerCtime int `json:"server_ctime"`
    Thumbs struct {
        //Icon string `json:"icon"`
        Url3 string `json:"url3"`
@@ -28,29 +28,50 @@ type File struct {
        //Url1 string `json:"url1"`
    } `json:"thumbs"`
    //Wpfile int `json:"wpfile"`
    //LocalMtime int `json:"local_mtime"`

    Size int64 `json:"size"`
    //ExtentTinyint7 int `json:"extent_tinyint7"`
    Path string `json:"path"`
    //Share int `json:"share"`
    //ServerAtime int `json:"server_atime"`
    //Pl int `json:"pl"`
    //LocalCtime int `json:"local_ctime"`
    ServerFilename string `json:"server_filename"`
    //Md5 string `json:"md5"`
    Md5 string `json:"md5"`
    //OwnerId int `json:"owner_id"`
    //Unlist int `json:"unlist"`
    Isdir int `json:"isdir"`

    // list resp
    ServerCtime int64 `json:"server_ctime"`
    ServerMtime int64 `json:"server_mtime"`
    LocalMtime  int64 `json:"local_mtime"`
    LocalCtime  int64 `json:"local_ctime"`
    //ServerAtime int64 `json:"server_atime"`

    // only create and precreate resp
    Ctime int64 `json:"ctime"`
    Mtime int64 `json:"mtime"`
}

func fileToObj(f File) *model.ObjThumb {
    if f.ServerFilename == "" {
        f.ServerFilename = path.Base(f.Path)
    }
    if f.LocalCtime == 0 {
        f.LocalCtime = f.Ctime
    }
    if f.LocalMtime == 0 {
        f.LocalMtime = f.Mtime
    }
    return &model.ObjThumb{
        Object: model.Object{
            ID:       strconv.FormatInt(f.FsId, 10),
            Path:     f.Path,
            Name:     f.ServerFilename,
            Size:     f.Size,
            Modified: time.Unix(f.ServerMtime, 0),
            Modified: time.Unix(f.LocalMtime, 0),
            Ctime:    time.Unix(f.LocalCtime, 0),
            IsFolder: f.Isdir == 1,
            HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
        },
        Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
    }
@@ -154,10 +175,15 @@ type DownloadResp2 struct {
}

type PrecreateResp struct {
    Path       string `json:"path"`
    Uploadid   string `json:"uploadid"`
    ReturnType int    `json:"return_type"`
    BlockList  []int  `json:"block_list"`
    Errno      int    `json:"errno"`
    RequestId  int64  `json:"request_id"`
    Errno      int   `json:"errno"`
    RequestId  int64 `json:"request_id"`
    ReturnType int   `json:"return_type"`

    // return_type=1
    Path      string `json:"path"`
    Uploadid  string `json:"uploadid"`
    BlockList []int  `json:"block_list"`

    // return_type=2
    File File `json:"info"`
}

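The reshaped PrecreateResp encodes two distinct server answers: with return_type=1 the upload must proceed and BlockList holds the part indexes the server still wants; with return_type=2 the digests matched an existing copy, rapid upload succeeded, and File already describes the finished object. A hedged sketch of the branch a caller makes — uploadRemaining is a hypothetical stand-in for the upload-and-create steps in Put above, not a real helper in the repo:

// Hypothetical dispatch on a precreate answer; mirrors Put's flow above.
switch precreateResp.ReturnType {
case 2:
    // rapid upload: server matched content-md5/slice-md5, file already exists
    return fileToObj(precreateResp.File), nil
default:
    // return_type=1: upload every partseq in BlockList under Uploadid,
    // then commit with create (uploadRemaining stands in for that work)
    return uploadRemaining(precreateResp.Uploadid, precreateResp.BlockList)
}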
@@ -2,17 +2,18 @@ package baidu_netdisk

import (
    "fmt"
    "github.com/avast/retry-go"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
    log "github.com/sirupsen/logrus"
)
@@ -76,12 +77,20 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
                return err2
            }
        }
        return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)

        err2 := fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
        if !utils.SliceContains([]int{2}, errno) {
            err2 = retry.Unrecoverable(err2)
        }
        return err2
    }
    result = res.Body()
    return nil
},
    retry.Attempts(3))
    retry.LastErrorOnly(true),
    retry.Attempts(5),
    retry.Delay(time.Second),
    retry.DelayType(retry.BackOffDelay))
return result, err
}

@@ -179,28 +188,31 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, args model.LinkArgs) (*model.Li
    }, nil
}

func (d *BaiduNetdisk) manage(opera string, filelist interface{}) ([]byte, error) {
func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
    params := map[string]string{
        "method": "filemanager",
        "opera":  opera,
    }
    marshal, err := utils.Json.Marshal(filelist)
    if err != nil {
        return nil, err
    }
    data := fmt.Sprintf("async=0&filelist=%s&ondup=newcopy", string(marshal))
    marshal, _ := utils.Json.MarshalToString(filelist)
    data := fmt.Sprintf("async=0&filelist=%s&ondup=fail", marshal)
    return d.post("/xpan/file", params, data, nil)
}

func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string) ([]byte, error) {
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
    params := map[string]string{
        "method": "create",
    }
    data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
    data := ""
    if mtime == 0 || ctime == 0 {
        data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
    } else {
        data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3&local_mtime=%d&local_ctime=%d", encodeURIComponent(path), size, isdir, mtime, ctime)
    }

    if uploadid != "" {
        data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
    }
    return d.post("/xpan/file", params, data, nil)
    return d.post("/xpan/file", params, data, resp)
}

func encodeURIComponent(str string) string {

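The request retry above now distinguishes error classes: errno 2 (treated as transient here) is returned as a plain error so retry-go re-attempts it with backoff, while every other errno is wrapped in retry.Unrecoverable so the loop stops immediately instead of burning all five attempts. A standalone sketch of that pattern with github.com/avast/retry-go:

package main

import (
    "errors"
    "fmt"

    "github.com/avast/retry-go"
)

var errTransient = errors.New("errno 2")  // the one errno the driver retries
var errPermanent = errors.New("errno -6") // hypothetical non-retryable errno

func main() {
    attempts := 0
    err := retry.Do(
        func() error {
            attempts++
            if attempts < 3 {
                return errTransient // plain error: retry-go tries again
            }
            return nil
        },
        retry.LastErrorOnly(true),
        retry.Attempts(5),
    )
    fmt.Println(attempts, err) // 3 <nil>

    attempts = 0
    err = retry.Do(
        func() error {
            attempts++
            // wrapping marks the error final: no further attempts are made
            return retry.Unrecoverable(errPermanent)
        },
        retry.LastErrorOnly(true),
        retry.Attempts(5),
    )
    fmt.Println(attempts, err) // 1 errno -6
}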
@@ -4,18 +4,22 @@ import (
    "context"
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "math"
    "os"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/avast/retry-go"
    "github.com/go-resty/resty/v2"
)

@@ -26,6 +30,8 @@ type BaiduPhoto struct {
    AccessToken string
    Uk          int64
    root        model.Obj

    uploadThread int
}

func (d *BaiduPhoto) Config() driver.Config {
@@ -37,6 +43,11 @@ func (d *BaiduPhoto) GetAddition() driver.Additional {
}

func (d *BaiduPhoto) Init(ctx context.Context) error {
    d.uploadThread, _ = strconv.Atoi(d.UploadThread)
    if d.uploadThread < 1 || d.uploadThread > 32 {
        d.uploadThread, d.UploadThread = 3, "3"
    }

    if err := d.refreshToken(); err != nil {
        return err
    }
@@ -211,45 +222,57 @@ func (d *BaiduPhoto) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
    // files of size 0 are not supported
    if stream.GetSize() == 0 {
        return nil, fmt.Errorf("file size cannot be zero")
    }

    // the full-file md5 is needed, so the stream must support io.Seek
    tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
    tempFile, err := stream.CacheFullInTempFile()
    if err != nil {
        return nil, err
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name())
    }()

    // work out the numbers we need
    const DEFAULT = 1 << 22
    const SliceSize = 1 << 18
    count := int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
    const DEFAULT int64 = 1 << 22
    const SliceSize int64 = 1 << 18

    // work out the numbers we need
    streamSize := stream.GetSize()
    count := int(math.Ceil(float64(streamSize) / float64(DEFAULT)))
    lastBlockSize := streamSize % DEFAULT
    if lastBlockSize == 0 {
        lastBlockSize = DEFAULT
    }

    // step.1 compute the MD5s
    sliceMD5List := make([]string, 0, count)
    fileMd5 := md5.New()
    sliceMd5 := md5.New()
    sliceMd52 := md5.New()
    slicemd52Write := utils.LimitWriter(sliceMd52, SliceSize)
    byteSize := int64(DEFAULT)
    fileMd5H := md5.New()
    sliceMd5H := md5.New()
    sliceMd5H2 := md5.New()
    slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
    for i := 1; i <= count; i++ {
        if utils.IsCanceled(ctx) {
            return nil, ctx.Err()
        }

        _, err := io.CopyN(io.MultiWriter(fileMd5, sliceMd5, slicemd52Write), tempFile, DEFAULT)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
        if i == count {
            byteSize = lastBlockSize
        }
        _, err := io.CopyN(io.MultiWriter(fileMd5H, sliceMd5H, slicemd5H2Write), tempFile, byteSize)
        if err != nil && err != io.EOF {
            return nil, err
        }
        sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5.Sum(nil)))
        sliceMd5.Reset()
        sliceMD5List = append(sliceMD5List, hex.EncodeToString(sliceMd5H.Sum(nil)))
        sliceMd5H.Reset()
    }
    if _, err = tempFile.Seek(0, io.SeekStart); err != nil {
        return nil, err
    }
    content_md5 := hex.EncodeToString(fileMd5.Sum(nil))
    slice_md5 := hex.EncodeToString(sliceMd52.Sum(nil))
    contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
    sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
    blockListStr, _ := utils.Json.MarshalToString(sliceMD5List)

    // start the upload
    // step.2 precreate
    params := map[string]string{
        "autoinit": "1",
        "isdir":    "0",
@@ -257,46 +280,69 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
        "ctype":    "11",
        "path":     fmt.Sprintf("/%s", stream.GetName()),
        "size":     fmt.Sprint(stream.GetSize()),
        "slice-md5":   slice_md5,
        "content-md5": content_md5,
        "block_list":  MustString(utils.Json.MarshalToString(sliceMD5List)),
        "slice-md5":   sliceMd5,
        "content-md5": contentMd5,
        "block_list":  blockListStr,
    }

    // precreate
    var precreateResp PrecreateResp
    _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
        r.SetContext(ctx)
        r.SetFormData(params)
    }, &precreateResp)
    if err != nil {
        return nil, err
    // try to resume a previous upload first
    precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
    if !ok {
        _, err = d.Post(FILE_API_URL_V1+"/precreate", func(r *resty.Request) {
            r.SetContext(ctx)
            r.SetFormData(params)
        }, &precreateResp)
        if err != nil {
            return nil, err
        }
    }

    switch precreateResp.ReturnType {
    case 1: // upload the file
        uploadParams := map[string]string{
            "method":   "upload",
            "path":     params["path"],
            "uploadid": precreateResp.UploadID,
        }
    case 1: //step.3 upload the file slices
        threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
            retry.Attempts(3),
            retry.Delay(time.Second),
            retry.DelayType(retry.BackOffDelay))
        for i, partseq := range precreateResp.BlockList {
            if utils.IsCanceled(upCtx) {
                break
            }

        for i := 0; i < count; i++ {
            if utils.IsCanceled(ctx) {
                return nil, ctx.Err()
            i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
            if partseq+1 == count {
                byteSize = lastBlockSize
            }
            uploadParams["partseq"] = fmt.Sprint(i)
            _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
                r.SetContext(ctx)
                r.SetQueryParams(uploadParams)
                r.SetFileReader("file", stream.GetName(), io.LimitReader(tempFile, DEFAULT))
            }, nil)
            if err != nil {
                return nil, err

            threadG.Go(func(ctx context.Context) error {
                uploadParams := map[string]string{
                    "method":   "upload",
                    "path":     params["path"],
                    "partseq":  fmt.Sprint(partseq),
                    "uploadid": precreateResp.UploadID,
                }

                _, err = d.Post("https://c3.pcs.baidu.com/rest/2.0/pcs/superfile2", func(r *resty.Request) {
                    r.SetContext(ctx)
                    r.SetQueryParams(uploadParams)
                    r.SetFileReader("file", stream.GetName(), io.NewSectionReader(tempFile, offset, byteSize))
                }, nil)
                if err != nil {
                    return err
                }
                up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
                precreateResp.BlockList[i] = -1
                return nil
            })
        }
        if err = threadG.Wait(); err != nil {
            if errors.Is(err, context.Canceled) {
                precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
                base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
            }
            up(i * 100 / count)
            return nil, err
        }
        fallthrough
    case 2: // create the file
    case 2: //step.4 create the file
        params["uploadid"] = precreateResp.UploadID
        _, err = d.Post(FILE_API_URL_V1+"/create", func(r *resty.Request) {
            r.SetContext(ctx)
@@ -306,7 +352,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
            return nil, err
        }
        fallthrough
    case 3: // add to the album
    case 3: //step.5 add to the album
        rootfile := precreateResp.Data.toFile()
        if album, ok := dstDir.(*Album); ok {
            return d.AddAlbumFile(ctx, album, rootfile)

@@ -61,12 +61,12 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile {

func renameAlbum(album *Album, newName string) *Album {
    return &Album{
        AlbumID:    album.AlbumID,
        Tid:        album.Tid,
        JoinTime:   album.JoinTime,
        CreateTime: album.CreateTime,
        Title:      newName,
        Mtime:      time.Now().Unix(),
        AlbumID:      album.AlbumID,
        Tid:          album.Tid,
        JoinTime:     album.JoinTime,
        CreationTime: album.CreationTime,
        Title:        newName,
        Mtime:        time.Now().Unix(),
    }
}

@@ -13,6 +13,7 @@ type Addition struct {
    DeleteOrigin bool   `json:"delete_origin"`
    ClientID     string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
    ClientSecret string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
    UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
}

var config = driver.Config{

@@ -2,6 +2,7 @@ package baiduphoto

import (
    "fmt"
    "github.com/alist-org/alist/v3/pkg/utils"
    "time"

    "github.com/alist-org/alist/v3/internal/model"
@@ -73,6 +74,13 @@ func (c *File) Thumb() string {
    }
    return ""
}
func (c *File) CreateTime() time.Time {
    return time.Unix(c.Ctime, 0)
}

func (c *File) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}

/* album section */
type (
@@ -84,12 +92,12 @@ type (
    }

    Album struct {
        AlbumID    string `json:"album_id"`
        Tid        int64  `json:"tid"`
        Title      string `json:"title"`
        JoinTime   int64  `json:"join_time"`
        CreateTime int64  `json:"create_time"`
        Mtime      int64  `json:"mtime"`
        AlbumID      string `json:"album_id"`
        Tid          int64  `json:"tid"`
        Title        string `json:"title"`
        JoinTime     int64  `json:"join_time"`
        CreationTime int64  `json:"create_time"`
        Mtime        int64  `json:"mtime"`

        parseTime *time.Time
    }
@@ -109,6 +117,14 @@ type (
    }
)

func (a *Album) CreateTime() time.Time {
    return time.Unix(a.CreationTime, 0)
}

func (a *Album) GetHash() utils.HashInfo {
    return utils.HashInfo{}
}

func (a *Album) GetSize() int64  { return 0 }
func (a *Album) GetName() string { return a.Title }
func (a *Album) ModTime() time.Time {
@@ -160,9 +176,9 @@ type (
        CreateFileResp

        // returned when the file does not exist
        Path      string  `json:"path"`
        UploadID  string  `json:"uploadid"`
        Blocklist []int64 `json:"block_list"`
        Path      string `json:"path"`
        UploadID  string `json:"uploadid"`
        BlockList []int  `json:"block_list"`
    }
)

31 drivers/base/upload.go (new file)
@@ -0,0 +1,31 @@
package base

import (
    "fmt"
    "strings"
    "time"

    "github.com/Xhofe/go-cache"
    "github.com/alist-org/alist/v3/internal/driver"
)

// storage upload progress, for upload recovery
var UploadStateCache = cache.NewMemCache(cache.WithShards[any](32))

// Save upload progress for 20 minutes
func SaveUploadProgress(driver driver.Driver, state any, keys ...string) bool {
    return UploadStateCache.Set(
        fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")),
        state,
        cache.WithEx[any](time.Minute*20))
}

// An upload progress can only be made by one process alone,
// so here you need to get it and then delete it.
func GetUploadProgress[T any](driver driver.Driver, keys ...string) (state T, ok bool) {
    v, ok := UploadStateCache.GetDel(fmt.Sprint(driver.Config().Name, "-upload-", strings.Join(keys, "-")))
    if ok {
        state, ok = v.(T)
    }
    return
}
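The two helpers are used as a pair around interruptible uploads: on cancel, a driver stores the still-pending BlockList under a key derived from the file's content hash; the next attempt for the same content picks the state up (GetDel also removes it, so only one process can resume) before falling back to a fresh precreate. A condensed view of that round-trip, taken from the Baidu Put flows above:

// Condensed from the Baidu drivers: resume-or-start, then save on cancel.
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
    // no saved progress for this content: ask the server which blocks it wants
    // (precreate request elided)
}

// ... upload slices concurrently; finished entries are marked with -1 ...

if err := threadG.Wait(); err != nil {
    if errors.Is(err, context.Canceled) {
        // keep only the still-pending block indexes, then stash them;
        // the cache entry expires after 20 minutes
        precreateResp.BlockList = utils.SliceFilter(precreateResp.BlockList, func(s int) bool { return s >= 0 })
        base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
    }
}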
@@ -115,7 +115,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
    if stream.GetReadCloser() == http.NoBody {
    if io.ReadCloser(stream) == http.NoBody {
        return d.create(ctx, dstDir, stream)
    }
    var r DirectoryResp

@@ -13,6 +13,7 @@ type Addition struct {
    Username string `json:"username"`
    Password string `json:"password"`
    Cookie   string `json:"cookie"`
    CustomUA string `json:"custom_ua"`
}

var config = driver.Config{

@@ -22,15 +22,18 @@ const loginPath = "/user/session"

func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
    u := d.Address + "/api/v3" + path
    ua := d.CustomUA
    if ua == "" {
        ua = base.UserAgent
    }
    req := base.RestyClient.R()
    req.SetHeaders(map[string]string{
        "Cookie":     "cloudreve-session=" + d.Cookie,
        "Accept":     "application/json, text/plain, */*",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
        "User-Agent": ua,
    })

    var r Resp

    req.SetResult(&r)

    if callback != nil {

@ -3,8 +3,8 @@ package crypt
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"io"
|
||||
"net/http"
|
||||
stdpath "path"
|
||||
"regexp"
|
||||
"strings"
|
||||
@ -13,7 +13,6 @@ import (
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/fs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/internal/net"
|
||||
"github.com/alist-org/alist/v3/internal/op"
|
||||
"github.com/alist-org/alist/v3/pkg/http_range"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
@ -55,6 +54,8 @@ func (d *Crypt) Init(ctx context.Context) error {
|
||||
if !isCryptExt(d.EncryptedSuffix) {
|
||||
return fmt.Errorf("EncryptedSuffix is Illegal")
|
||||
}
|
||||
d.FileNameEncoding = utils.GetNoneEmpty(d.FileNameEncoding, "base64")
|
||||
d.EncryptedSuffix = utils.GetNoneEmpty(d.EncryptedSuffix, ".bin")
|
||||
|
||||
op.MustSaveDriverStorage(d)
|
||||
|
||||
@ -72,7 +73,7 @@ func (d *Crypt) Init(ctx context.Context) error {
|
||||
"password2": p2,
|
||||
"filename_encryption": d.FileNameEnc,
|
||||
"directory_name_encryption": d.DirNameEnc,
|
||||
"filename_encoding": "base64",
|
||||
"filename_encoding": d.FileNameEncoding,
|
||||
"suffix": d.EncryptedSuffix,
|
||||
"pass_bad_blocks": "",
|
||||
}
|
||||
@ -82,7 +83,6 @@ func (d *Crypt) Init(ctx context.Context) error {
|
||||
}
|
||||
d.cipher = c
|
||||
|
||||
//c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -128,6 +128,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
||||
Size: 0,
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
Ctime: obj.CreateTime(),
|
||||
// discarding hash as it's encrypted
|
||||
}
|
||||
result = append(result, &objRes)
|
||||
} else {
|
||||
@ -147,6 +149,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
|
||||
Size: size,
|
||||
Modified: obj.ModTime(),
|
||||
IsFolder: obj.IsDir(),
|
||||
Ctime: obj.CreateTime(),
|
||||
// discarding hash as it's encrypted
|
||||
}
|
||||
if !ok {
|
||||
result = append(result, &objRes)
|
||||
@ -232,70 +236,53 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 {
if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
return nil, fmt.Errorf("the remote storage driver needs to be enhanced to support encryption")
}
remoteFileSize := remoteFile.GetSize()
remoteClosers := utils.NewClosers()
remoteClosers := utils.EmptyClosers()
rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
length := underlyingLength
if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
length = -1
}
if remoteLink.RangeReadCloser.RangeReader != nil {
rrc := remoteLink.RangeReadCloser
if len(remoteLink.URL) > 0 {

rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
if err != nil {
return nil, err
}
rrc = converted
}
if rrc != nil {
//remoteRangeReader, err :=
remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length})
remoteClosers.Add(remoteLink.RangeReadCloser.Closers)
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
remoteClosers.AddClosers(rrc.GetClosers())
if err != nil {
return nil, err
}
return remoteReader, nil
}
if remoteLink.ReadSeekCloser != nil {
_, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart)
if remoteLink.MFile != nil {
_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
if err != nil {
return nil, err
}
//remoteClosers.Add(remoteLink.ReadSeekCloser)
//keep reusing the same ReadSeekCloser and close it at the end.
return io.NopCloser(remoteLink.ReadSeekCloser), nil
//remoteClosers.Add(remoteLink.MFile)
//keep reusing the same MFile and close it at the end.
remoteClosers.Add(remoteLink.MFile)
return io.NopCloser(remoteLink.MFile), nil
}
if len(remoteLink.URL) > 0 {
rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length)
//remoteClosers.Add(response.Body)
if err != nil {
return nil, fmt.Errorf("remote storage http request failure, status: %d err: %s", response.StatusCode, err)
}
if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent {
return response.Body, nil
} else if response.StatusCode == http.StatusOK {
log.Warnf("remote http server not supporting range request, expect low performance!")
readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length)
if err != nil {
return nil, err
}
return readCloser, nil
}

return response.Body, nil
}
//if remoteLink.Data != nil {
// log.Warnf("remote storage not supporting range request, expect low performance!")
// readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length)
// remoteCloser = remoteLink.Data
// if err != nil {
// return nil, err
// }
// return readCloser, nil
//}
return nil, errs.NotSupport

}
resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
if err != nil {
return nil, err
@@ -306,7 +293,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
Header: remoteLink.Header,
RangeReadCloser: *resultRangeReadCloser,
RangeReadCloser: resultRangeReadCloser,
Expiration: remoteLink.Expiration,
}

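Note on the refactor above: model.Link now carries a pointer-typed RangeReadCloser, and Crypt funnels every remote shape (RangeReadCloser, MFile, plain URL) through a single rangeReaderFunc before decryption. A minimal self-contained sketch of that shape, with a pass-through where the real cipher would sit (all names here are illustrative, not the driver's API):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// RangeReadCloserFunc returns a reader over [off, off+length) of the
// underlying (encrypted) object; length < 0 means "to the end".
type RangeReadCloserFunc func(ctx context.Context, off, length int64) (io.ReadCloser, error)

// decryptRange wraps an underlying range reader; the "cipher" here is a
// stand-in pass-through, since the real cipher lives in the crypt driver.
func decryptRange(ctx context.Context, rr RangeReadCloserFunc, off, length int64) (io.ReadCloser, error) {
	rc, err := rr(ctx, off, length)
	if err != nil {
		return nil, err
	}
	return rc, nil // a real implementation would return a decrypting wrapper
}

func main() {
	data := "hello, ranged world"
	rr := func(ctx context.Context, off, length int64) (io.ReadCloser, error) {
		if length < 0 || off+length > int64(len(data)) {
			length = int64(len(data)) - off
		}
		return io.NopCloser(strings.NewReader(data[off : off+length])), nil
	}
	rc, _ := decryptRange(context.Background(), rr, 7, 6)
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b)) // "ranged"
}

Centralizing the range logic this way means the decrypting wrapper only ever sees one reader-factory signature, whichever access method the remote storage actually supports.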
@@ -370,32 +357,32 @@ func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
return op.Remove(ctx, d.remoteStorage, remoteActualPath)
}

func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}

in := stream.GetReadCloser()
// Encrypt the data into wrappedIn
wrappedIn, err := d.cipher.EncryptData(in)
wrappedIn, err := d.cipher.EncryptData(streamer)
if err != nil {
return fmt.Errorf("failed to EncryptData: %w", err)
}

streamOut := &model.FileStream{
// doesn't support seekableStream, since rapid-upload is not working for encrypted data
streamOut := &stream.FileStream{
Obj: &model.Object{
ID: stream.GetID(),
Path: stream.GetPath(),
Name: d.cipher.EncryptFileName(stream.GetName()),
Size: d.cipher.EncryptedSize(stream.GetSize()),
Modified: stream.ModTime(),
IsFolder: stream.IsDir(),
ID: streamer.GetID(),
Path: streamer.GetPath(),
Name: d.cipher.EncryptFileName(streamer.GetName()),
Size: d.cipher.EncryptedSize(streamer.GetSize()),
Modified: streamer.ModTime(),
IsFolder: streamer.IsDir(),
},
ReadCloser: io.NopCloser(wrappedIn),
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: stream.NeedStore(),
Old: stream.GetOld(),
WebPutAsTask: streamer.NeedStore(),
Exist: streamer.GetExist(),
}
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
if err != nil {

@@ -15,17 +15,12 @@ type Addition struct {
DirNameEnc string `json:"directory_name_encryption" type:"select" required:"true" options:"false,true" default:"false"`
RemotePath string `json:"remote_path" required:"true" help:"This is where the encrypted data stores"`

Password string `json:"password" required:"true" confidential:"true" help:"the main password"`
Salt string `json:"salt" confidential:"true" help:"If you don't know what is salt, treat it as a second password'. Optional but recommended"`
EncryptedSuffix string `json:"encrypted_suffix" required:"true" default:".bin" help:"encrypted files will have this suffix"`
Password string `json:"password" required:"true" confidential:"true" help:"the main password"`
Salt string `json:"salt" confidential:"true" help:"If you don't know what is salt, treat it as a second password. Optional but recommended"`
EncryptedSuffix string `json:"encrypted_suffix" required:"true" default:".bin" help:"for advanced user only! encrypted files will have this suffix"`
FileNameEncoding string `json:"filename_encoding" type:"select" required:"true" options:"base64,base32,base32768" default:"base64" help:"for advanced user only!"`
}

/*// inMemory contains decrypted confidential info and other temp data; none of it is persisted anywhere
type inMemory struct {
password string
salt string
}*/

var config = driver.Config{
Name: "Crypt",
LocalSort: true,

@@ -1,24 +1,13 @@
package crypt

import (
"net/http"
stdpath "path"
"path/filepath"
"strings"

"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/http_range"
)

func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) {
header := net.ProcessHeader(&http.Header{}, &link.Header)
header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)

return net.RequestHttp("GET", header, link.URL)
}

// will give the best guess based on the path
func guessPath(path string) (isFolder, secondTry bool) {
if strings.HasSuffix(path, "/") {

@@ -64,9 +64,9 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
return nil, err
}

r := NewFTPFileReader(d.conn, file.GetPath())
r := NewFileReader(d.conn, file.GetPath(), file.GetSize())
link := &model.Link{
ReadSeekCloser: r,
MFile: r,
}
return link, nil
}

@@ -4,6 +4,7 @@ import (
"io"
"os"
"sync"
"sync/atomic"
"time"

"github.com/jlaffaye/ftp"
@@ -30,43 +31,59 @@ func (d *FTP) login() error {
return nil
}

// An FTP file reader that implements io.ReadSeekCloser for seeking.
type FTPFileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset int64
mu sync.Mutex
path string
// FileReader An FTP file reader that implements io.MFile for seeking.
type FileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset atomic.Int64
readAtOffset int64
mu sync.Mutex
path string
size int64
}

func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
return &FTPFileReader{
func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader {
return &FileReader{
conn: conn,
path: path,
size: size,
}
}

func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
func (r *FileReader) Read(buf []byte) (n int, err error) {
n, err = r.ReadAt(buf, r.offset.Load())
r.offset.Add(int64(n))
return
}

func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) {
if off < 0 {
return -1, os.ErrInvalid
}
r.mu.Lock()
defer r.mu.Unlock()

if off != r.readAtOffset {
//have to restart the connection to correct the offset
_ = r.resp.Close()
r.resp = nil
}

if r.resp == nil {
r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
r.readAtOffset = off
if err != nil {
return 0, err
}
}

n, err = r.resp.Read(buf)
r.offset += int64(n)
r.readAtOffset += int64(n)
return
}

func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()

oldOffset := r.offset
func (r *FileReader) Seek(offset int64, whence int) (int64, error) {
oldOffset := r.offset.Load()
var newOffset int64
switch whence {
case io.SeekStart:
@@ -74,11 +91,7 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
case io.SeekCurrent:
newOffset = oldOffset + offset
case io.SeekEnd:
size, err := r.conn.FileSize(r.path)
if err != nil {
return oldOffset, err
}
newOffset = offset + int64(size)
return r.size, nil
default:
return -1, os.ErrInvalid
}
@@ -91,17 +104,11 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
// offset not changed, so return directly
return oldOffset, nil
}
r.offset = newOffset

if r.resp != nil {
// close the existing ftp data connection, otherwise the next read will be blocked
_ = r.resp.Close() // we do not care whether it returns an error
r.resp = nil
}
r.offset.Store(newOffset)
return newOffset, nil
}

func (r *FTPFileReader) Close() error {
func (r *FileReader) Close() error {
if r.resp != nil {
return r.resp.Close()
}

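The new FileReader above adapts FTP's sequential RETR stream to ReadAt semantics: a read at the expected offset reuses the open data connection, while any other offset closes it and reopens with RetrFrom. A rough stand-alone sketch of that idea, with a hypothetical open callback standing in for the FTP client:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

type seqSource struct {
	open func(off int64) io.ReadCloser // restarts the stream at off

	mu     sync.Mutex
	cur    io.ReadCloser
	curOff int64
}

// ReadAt serializes callers and only restarts the stream on an offset
// mismatch; sequential readers therefore keep one connection alive.
// (A sketch: a strict io.ReaderAt would also loop until the buffer fills.)
func (s *seqSource) ReadAt(p []byte, off int64) (int, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cur != nil && off != s.curOff {
		_ = s.cur.Close() // offset mismatch: drop and reopen, like RetrFrom
		s.cur = nil
	}
	if s.cur == nil {
		s.cur = s.open(off)
		s.curOff = off
	}
	n, err := s.cur.Read(p)
	s.curOff += int64(n)
	return n, err
}

func main() {
	data := "0123456789"
	src := &seqSource{open: func(off int64) io.ReadCloser {
		return io.NopCloser(strings.NewReader(data[off:]))
	}}
	buf := make([]byte, 4)
	n, _ := src.ReadAt(buf, 3)
	fmt.Println(string(buf[:n])) // "3456"
}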
@@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
obj := stream.GetOld()
obj := stream.GetExist()
var (
e Error
url string
@@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
putUrl := res.Header().Get("location")
if stream.GetSize() < d.ChunkSize*1024*1024 {
_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)

@@ -5,7 +5,7 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"github.com/alist-org/alist/v3/pkg/http_range"
"io/ioutil"
"net/http"
"os"
@@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {

func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
var finish int64 = 0
for finish < stream.GetSize() {
var offset int64 = 0
for offset < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
chunkSize := stream.GetSize() - finish
chunkSize := stream.GetSize() - offset
if chunkSize > defaultChunkSize {
chunkSize = defaultChunkSize
}
_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
if err != nil {
return err
}
_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Length": strconv.FormatInt(chunkSize, 10),
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
}).SetBody(reader).SetContext(ctx)
}, nil)
if err != nil {
return err
}
finish += chunkSize
offset += chunkSize
}
return nil
}

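The chunkUpload rewrite above stops draining one shared reader with io.LimitReader and instead asks the stream for each range explicitly, advancing a single offset. A sketch of that loop under assumed rangeRead/put stand-ins (the real driver uses stream.RangeRead and a resty PUT with a Content-Range header):

package main

import (
	"fmt"
	"io"
	"strings"
)

// uploadChunks walks the object in chunkSize slices; each iteration asks
// the source for exactly the bytes it needs instead of sharing a cursor.
func uploadChunks(size, chunkSize int64,
	rangeRead func(off, length int64) (io.Reader, error),
	put func(io.Reader, int64, int64) error) error {
	for offset := int64(0); offset < size; {
		length := size - offset
		if length > chunkSize {
			length = chunkSize
		}
		r, err := rangeRead(offset, length)
		if err != nil {
			return err
		}
		if err := put(r, offset, length); err != nil {
			return err
		}
		offset += length
	}
	return nil
}

func main() {
	data := "abcdefghij"
	rangeRead := func(off, l int64) (io.Reader, error) { return strings.NewReader(data[off : off+l]), nil }
	put := func(r io.Reader, off, l int64) error {
		b, _ := io.ReadAll(r)
		fmt.Printf("bytes %d-%d/%d: %s\n", off, off+l-1, len(data), b)
		return nil
	}
	_ = uploadChunks(int64(len(data)), 4, rangeRead, put)
}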
@@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
}

resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(stream.GetReadCloser()).SetContext(ctx)
req.SetBody(stream).SetContext(ctx)
}, nil, postHeaders)

if err != nil {

@@ -118,13 +118,102 @@ var findKVReg = regexp.MustCompile(`'(.+?)':('?([^' },]*)'?)`) // split key/value pairs

// look up a JS variable by key
func findJSVarFunc(key, data string) string {
values := regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
var values []string
if key != "sasign" {
values = regexp.MustCompile(`var ` + key + ` = '(.+?)';`).FindStringSubmatch(data)
} else {
matches := regexp.MustCompile(`var `+key+` = '(.+?)';`).FindAllStringSubmatch(data, -1)
if len(matches) == 3 {
values = matches[1]
} else {
if len(matches) > 0 {
values = matches[0]
}
}
}
if len(values) == 0 {
return ""
}
return values[1]
}

var findFunction = regexp.MustCompile(`(?ims)^function[^{]+`)
var findFunctionAll = regexp.MustCompile(`(?is)function[^{]+`)

// find the positions of all functions
func findJSFunctionIndex(data string, all bool) [][2]int {
findFunction := findFunction
if all {
findFunction = findFunctionAll
}

indexs := findFunction.FindAllStringIndex(data, -1)
fIndexs := make([][2]int, 0, len(indexs))

for _, index := range indexs {
if len(index) != 2 {
continue
}
count, data := 0, data[index[1]:]
for ii, v := range data {
if v == ' ' && count == 0 {
continue
}
if v == '{' {
count++
}

if v == '}' {
count--
}
if count == 0 {
fIndexs = append(fIndexs, [2]int{index[0], index[1] + ii + 1})
break
}
}
}
return fIndexs
}

// remove global JS functions
func removeJSGlobalFunction(html string) string {
indexs := findJSFunctionIndex(html, false)
block := make([]string, len(indexs))
for i, next := len(indexs)-1, len(html); i >= 0; i-- {
index := indexs[i]
block[i] = html[index[1]:next]
next = index[0]
}
return strings.Join(block, "")
}

// get a function by name
func getJSFunctionByName(html string, name string) (string, error) {
indexs := findJSFunctionIndex(html, true)
for _, index := range indexs {
data := html[index[0]:index[1]]
if regexp.MustCompile(`function\s+` + name + `[()\s]+{`).MatchString(data) {
return data, nil
}
}
return "", fmt.Errorf("function %s not found", name)
}

// parse the JSON embedded in the HTML, choosing the longest match
func htmlJsonToMap2(html string) (map[string]string, error) {
datas := findDataReg.FindAllStringSubmatch(html, -1)
var sData string
for _, data := range datas {
if len(datas) > 0 && len(data[1]) > len(sData) {
sData = data[1]
}
}
if sData == "" {
return nil, fmt.Errorf("data not found")
}
return jsonToMap(sData, html), nil
}

// parse the JSON embedded in the HTML
func htmlJsonToMap(html string) (map[string]string, error) {
datas := findDataReg.FindStringSubmatch(html)

@@ -3,6 +3,8 @@ package lanzou
import (
"errors"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)

@@ -18,6 +20,9 @@ type RespInfo[T any] struct {
Info T `json:"info"`
}

var _ model.Obj = (*FileOrFolder)(nil)
var _ model.Obj = (*FileOrFolderByShareUrl)(nil)

type FileOrFolder struct {
Name string `json:"name"`
//Onof string `json:"onof"` // whether an extraction code exists
@@ -49,6 +54,14 @@ type FileOrFolder struct {
shareInfo *FileShare `json:"-"`
}

func (f *FileOrFolder) CreateTime() time.Time {
return f.ModTime()
}

func (f *FileOrFolder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (f *FileOrFolder) GetID() string {
if f.IsDir() {
return f.FolID
@@ -130,6 +143,14 @@ type FileOrFolderByShareUrl struct {
repairFlag bool `json:"-"`
}

func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
return f.ModTime()
}

func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (f *FileOrFolderByShareUrl) GetID() string { return f.ID }
func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
func (f *FileOrFolderByShareUrl) GetPath() string { return "" }

@@ -346,7 +346,11 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (

// password required
if strings.Contains(sharePageData, "pwdload") || strings.Contains(sharePageData, "passwddiv") {
param, err := htmlFormToMap(sharePageData)
sharePageData, err := getJSFunctionByName(sharePageData, "down_p")
if err != nil {
return nil, err
}
param, err := htmlJsonToMap(sharePageData)
if err != nil {
return nil, err
}
@@ -371,7 +375,6 @@ func (d *LanZou) getFilesByShareUrl(shareID, pwd string, sharePageData string) (
return nil, err
}
nextPageData := RemoveNotes(string(data))

param, err = htmlJsonToMap(nextPageData)
if err != nil {
return nil, err

@@ -12,6 +12,7 @@ import (
"path/filepath"
"strconv"
"strings"
"time"

"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
@@ -20,6 +21,8 @@ import (
"github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/djherbis/times"
log "github.com/sirupsen/logrus"
_ "golang.org/x/image/webp"
)

@@ -101,6 +104,14 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
if !isFolder {
size = f.Size()
}
var ctime time.Time
t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
if err == nil {
if t.HasBirthTime() {
ctime = t.BirthTime()
}
}

file := model.ObjThumb{
Object: model.Object{
Path: filepath.Join(fullPath, f.Name()),
@@ -108,6 +119,7 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
Modified: f.ModTime(),
Size: size,
IsFolder: isFolder,
Ctime: ctime,
},
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
@@ -144,10 +156,18 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) {
if isFolder {
size = 0
}
var ctime time.Time
t, err := times.Stat(path)
if err == nil {
if t.HasBirthTime() {
ctime = t.BirthTime()
}
}
file := model.Object{
Path: path,
Name: f.Name(),
Modified: f.ModTime(),
Ctime: ctime,
Size: size,
IsFolder: isFolder,
}
@@ -170,9 +190,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ReadSeekCloser = open
link.MFile = open
} else {
link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes()))
link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
}
} else {
@@ -180,7 +200,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ReadSeekCloser = open
link.MFile = open
}
return &link, nil
}
@@ -264,6 +284,10 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if err != nil {
return err
}
err = os.Chtimes(fullPath, stream.ModTime(), stream.ModTime())
if err != nil {
log.Errorf("[local] failed to change time of %s: %s", fullPath, err)
}
return nil
}

@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"

@@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
uploader := s3manager.NewUploader(s)
input := &s3manager.UploadInput{

@@ -42,7 +42,7 @@ func (d *Mega) Drop(ctx context.Context) error {

func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if node, ok := dir.(*MegaNode); ok {
nodes, err := d.c.FS.GetChildren(node.Node)
nodes, err := d.c.FS.GetChildren(node.n)
if err != nil {
return nil, err
}
@@ -56,7 +56,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
return res, nil
}
log.Errorf("can't convert: %+v", dir)
return nil, fmt.Errorf("unable to convert dir to mega node")
return nil, fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
@@ -68,21 +68,21 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if node, ok := file.(*MegaNode); ok {

//down, err := d.c.NewDownload(node.Node)
//down, err := d.c.NewDownload(n.Node)
//if err != nil {
// return nil, fmt.Errorf("open download file failed: %w", err)
//}

size := file.GetSize()
var finalClosers utils.Closers
resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
length = -1
}
var down *mega.Download
err := utils.Retry(3, time.Second, func() (err error) {
down, err = d.c.NewDownload(node.Node)
down, err = d.c.NewDownload(node.n)
return err
})
if err != nil {
@@ -97,37 +97,37 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*

return readers.NewLimitedReadCloser(oo, length), nil
}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
resultLink := &model.Link{
RangeReadCloser: *resultRangeReadCloser,
RangeReadCloser: resultRangeReadCloser,
}
return resultLink, nil
}
return nil, fmt.Errorf("unable to convert dir to mega node")
return nil, fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if parentNode, ok := parentDir.(*MegaNode); ok {
_, err := d.c.CreateDir(dirName, parentNode.Node)
_, err := d.c.CreateDir(dirName, parentNode.n)
return err
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if srcNode, ok := srcObj.(*MegaNode); ok {
if dstNode, ok := dstDir.(*MegaNode); ok {
return d.c.Move(srcNode.Node, dstNode.Node)
return d.c.Move(srcNode.n, dstNode.n)
}
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if srcNode, ok := srcObj.(*MegaNode); ok {
return d.c.Rename(srcNode.Node, newName)
return d.c.Rename(srcNode.n, newName)
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -136,14 +136,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {

func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
if node, ok := obj.(*MegaNode); ok {
return d.c.Delete(node.Node, false)
return d.c.Delete(node.n, false)
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
if dstNode, ok := dstDir.(*MegaNode); ok {
u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
if err != nil {
return err
}
@@ -175,7 +175,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
_, err = u.Finish()
return err
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}

//func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {

@@ -1,6 +1,7 @@
package mega

import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"

"github.com/alist-org/alist/v3/internal/model"
@@ -8,29 +9,36 @@ import (
)

type MegaNode struct {
*mega.Node
n *mega.Node
}

//func (m *MegaNode) GetSize() int64 {
// //TODO implement me
// panic("implement me")
//}
//
//func (m *MegaNode) GetName() string {
// //TODO implement me
// panic("implement me")
//}
func (m *MegaNode) GetSize() int64 {
return m.n.GetSize()
}

func (m *MegaNode) GetName() string {
return m.n.GetName()
}

func (m *MegaNode) CreateTime() time.Time {
return m.n.GetTimeStamp()
}

func (m *MegaNode) GetHash() utils.HashInfo {
//Mega uses md5, but the original file hash can't be obtained, since it's encrypted in the cloud
return utils.HashInfo{}
}

func (m *MegaNode) ModTime() time.Time {
return m.GetTimeStamp()
return m.n.GetTimeStamp()
}

func (m *MegaNode) IsDir() bool {
return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
}

func (m *MegaNode) GetID() string {
return m.GetHash()
return m.n.GetHash()
}

func (m *MegaNode) GetPath() string {

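The MegaNode change above swaps an embedded *mega.Node for an unexported field. Embedding promoted the SDK's methods, so the SDK's GetHash satisfied model.Obj with the wrong meaning; composition forces every method to be explicit. A tiny illustration of the difference, with made-up types:

package main

import "fmt"

type sdkNode struct{}

// The SDK's notion of a "hash" is an internal handle, not a content hash.
func (sdkNode) GetHash() string { return "sdk-internal-handle" }

type wrapped struct {
	n sdkNode // composition: nothing is promoted, each method is explicit
}

func (w wrapped) GetID() string   { return w.n.GetHash() } // reuse deliberately
func (w wrapped) GetHash() string { return "" }            // driver-level meaning

func main() {
	w := wrapped{}
	fmt.Println(w.GetID(), "|", w.GetHash())
}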
@@ -6,16 +6,18 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"

"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
"github.com/foxxorcat/mopan-sdk-go"
log "github.com/sirupsen/logrus"
)

type MoPan struct {
@@ -23,7 +25,8 @@ type MoPan struct {
Addition
client *mopan.MoClient

userID string
userID string
uploadThread int
}

func (d *MoPan) Config() driver.Config {
@@ -35,6 +38,10 @@ func (d *MoPan) GetAddition() driver.Additional {
}

func (d *MoPan) Init(ctx context.Context) error {
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 1 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 3, "3"
}
login := func() error {
data, err := d.client.Login(d.Phone, d.Password)
if err != nil {
@@ -47,9 +54,19 @@ func (d *MoPan) Init(ctx context.Context) error {
return err
}
d.userID = info.UserID
log.Debugf("[mopan] Phone: %s UserCloudStorageRelations: %+v", d.Phone, data.UserCloudStorageRelations)
cloudCircleApp, _ := d.client.QueryAllCloudCircleApp()
log.Debugf("[mopan] Phone: %s CloudCircleApp: %+v", d.Phone, cloudCircleApp)
if d.RootFolderID == "" {
for _, userCloudStorage := range data.UserCloudStorageRelations {
if userCloudStorage.Path == "/文件" {
d.RootFolderID = userCloudStorage.FolderID
}
}
}
return nil
}
d.client = mopan.NewMoClient().
d.client = mopan.NewMoClientWithRestyClient(base.NewRestyClient()).
SetRestyClient(base.RestyClient).
SetOnAuthorizationExpired(func(_ error) error {
err := login()
@@ -87,6 +104,7 @@ func (d *MoPan) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
break
}

log.Debugf("[mopan] Phone: %s folder: %+v", d.Phone, data.FileListAO.FolderList)
files = append(files, utils.MustSliceConvert(data.FileListAO.FolderList, folderToObj)...)
files = append(files, utils.MustSliceConvert(data.FileListAO.FileList, fileToObj)...)
}
@@ -212,68 +230,88 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
file, err := utils.CreateTempFile(stream, stream.GetSize())
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
_ = os.Remove(file.Name())
}()

initUpdload, err := d.client.InitMultiUpload(ctx, mopan.UpdloadFileParam{
// step.1
uploadPartData, err := mopan.InitUploadPartData(ctx, mopan.UpdloadFileParam{
ParentFolderId: dstDir.GetID(),
FileName: stream.GetName(),
FileSize: stream.GetSize(),
File: file,
}, mopan.WarpParamOption(
mopan.ParamOptionShareFile(d.CloudID),
))
})
if err != nil {
return nil, err
}

if !initUpdload.FileDataExists {
parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfo)
// try to resume saved progress
initUpdload, ok := base.GetUploadProgress[*mopan.InitMultiUploadData](d, d.client.Authorization, uploadPartData.FileMd5)
if !ok {
// step.2
initUpdload, err = d.client.InitMultiUpload(ctx, *uploadPartData, mopan.WarpParamOption(
mopan.ParamOptionShareFile(d.CloudID),
))
if err != nil {
return nil, err
}
d.client.CloudDiskStartBusiness()
}

if !initUpdload.FileDataExists {
fmt.Println(d.client.CloudDiskStartBusiness())

threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))

// step.3
parts, err := d.client.GetAllMultiUploadUrls(initUpdload.UploadFileID, initUpdload.PartInfos)
if err != nil {
return nil, err
}

for i, part := range parts {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
if utils.IsCanceled(upCtx) {
break
}
i, part, byteSize := i, part, initUpdload.PartSize
if part.PartNumber == uploadPartData.PartTotal {
byteSize = initUpdload.LastPartSize
}

err := retry.Do(func() error {
if _, err := file.Seek(int64(part.PartNumber-1)*int64(initUpdload.PartSize), io.SeekStart); err != nil {
return retry.Unrecoverable(err)
}

req, err := part.NewRequest(ctx, io.LimitReader(file, int64(initUpdload.PartSize)))
// step.4
threadG.Go(func(ctx context.Context) error {
req, err := part.NewRequest(ctx, io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize))
if err != nil {
return err
}

resp, err := base.HttpClient.Do(req)
if err != nil {
return err
}

resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("upload err, code=%d", resp.StatusCode)
}
up(100 * int(threadG.Success()) / len(parts))
initUpdload.PartInfos[i] = ""
return nil
},
retry.Context(ctx),
retry.Attempts(3),
retry.Delay(time.Second),
retry.MaxDelay(5*time.Second))
if err != nil {
return nil, err
})
}
if err = threadG.Wait(); err != nil {
if errors.Is(err, context.Canceled) {
initUpdload.PartInfos = utils.SliceFilter(initUpdload.PartInfos, func(s string) bool { return s != "" })
base.SaveUploadProgress(d, initUpdload, d.client.Authorization, uploadPartData.FileMd5)
}
up(100 * (i + 1) / len(parts))
return nil, err
}
}
//step.5
uFile, err := d.client.CommitMultiUploadFile(initUpdload.UploadFileID, nil)
if err != nil {
return nil, err

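The MoPan Put rewrite above uploads parts concurrently and, on cancellation, blanks finished part URLs and saves the remainder keyed by the file's MD5, so a later Put can resume. A simplified sketch of that bookkeeping, with saveProgress/loadProgress as stand-ins for base.SaveUploadProgress/base.GetUploadProgress:

package main

import "fmt"

var store = map[string][]string{}

func saveProgress(key string, parts []string) { store[key] = parts }
func loadProgress(key string) ([]string, bool) {
	p, ok := store[key]
	return p, ok
}

func main() {
	key := "md5-of-file"
	parts := []string{"url1", "url2", "url3"}
	parts[0] = "" // part 1 finished: blank it out, like initUpdload.PartInfos[i] = ""
	remaining := parts[:0:0]
	for _, p := range parts { // equivalent of utils.SliceFilter on cancellation
		if p != "" {
			remaining = append(remaining, p)
		}
	}
	saveProgress(key, remaining) // simulate a cancelled upload persisting state
	if p, ok := loadProgress(key); ok {
		fmt.Println("resume with parts:", p) // [url2 url3]
	}
}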
@@ -9,7 +9,7 @@ type Addition struct {
Phone string `json:"phone" required:"true"`
Password string `json:"password" required:"true"`

RootFolderID string `json:"root_folder_id" default:"-11" required:"true" help:"be careful when using the -11 value, some operations may cause system errors"`
RootFolderID string `json:"root_folder_id" default:""`

CloudID string `json:"cloud_id"`

@@ -17,6 +17,8 @@ type Addition struct {
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`

DeviceInfo string `json:"device_info"`

UploadThread string `json:"upload_thread" default:"3" help:"1<=thread<=32"`
}

func (a *Addition) GetRootId() string {
@@ -24,7 +26,7 @@ func (a *Addition) GetRootId() string {
}

var config = driver.Config{
Name: "MoPan",
Name: "MoPan",
// DefaultRoot: "root, / or other",
CheckStatus: true,
Alert: "warning|This network disk may store your password in clear text. Please set your password carefully",

@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strings"

"github.com/alist-org/alist/v3/drivers/base"
@@ -124,13 +123,12 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
// cal gcid
sha1Str, err := getGcid(tempFile, stream.GetSize())

@@ -7,7 +7,6 @@ import (
"encoding/hex"
"io"
"net/http"
"os"
"time"

"github.com/alist-org/alist/v3/drivers/base"
@@ -75,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
"User-Agent": []string{ua},
},
Concurrency: 2,
PartSize: 10 * 1024 * 1024,
PartSize: 10 * utils.MB,
}, nil
}

@@ -136,13 +135,12 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
m := md5.New()
_, err = io.Copy(m, tempFile)

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
"net/url"
stdpath "path"
@@ -96,13 +97,13 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo
func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.Put(ctx, &model.Object{
Path: stdpath.Join(parentDir.GetPath(), dirName),
}, &model.FileStream{
}, &stream.FileStream{
Obj: &model.Object{
Name: getPlaceholderName(d.Placeholder),
Modified: time.Now(),
},
ReadCloser: io.NopCloser(bytes.NewReader([]byte{})),
Mimetype: "application/octet-stream",
Reader: io.NopCloser(bytes.NewReader([]byte{})),
Mimetype: "application/octet-stream",
}, func(int) {})
}

@@ -56,7 +56,7 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
return nil, err
}
link := &model.Link{
ReadSeekCloser: remoteFile,
MFile: remoteFile,
}
return link, nil
}

@@ -61,6 +61,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
Modified: f.ModTime(),
Size: f.Size(),
IsFolder: f.IsDir(),
Ctime: f.(*smb2.FileStat).CreationTime,
},
}
files = append(files, &file)
@@ -79,7 +80,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
return nil, err
}
link := &model.Link{
ReadSeekCloser: remoteFile,
MFile: remoteFile,
}
d.updateLastConnTime()
return link, nil

@@ -11,7 +11,6 @@ import (
log "github.com/sirupsen/logrus"
"io"
"math"
"os"
stdpath "path"
"strconv"
"strings"
@@ -116,13 +115,12 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
}

func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
var Default int64 = 4 * 1024 * 1024
defaultByteData := make([]byte, Default)

@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strings"

"github.com/alist-org/alist/v3/drivers/base"
@@ -333,13 +332,12 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
}

func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()

gcid, err := getGcid(tempFile, stream.GetSize())

@@ -2,6 +2,8 @@ package thunder

import (
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"strconv"
"time"
)
@@ -84,6 +86,8 @@ type Link struct {
Type string `json:"type"`
}

var _ model.Obj = (*Files)(nil)

type Files struct {
Kind string `json:"kind"`
ID string `json:"id"`
@@ -146,6 +150,14 @@ type Files struct {
//Collection interface{} `json:"collection"`
}

func (c *Files) CreateTime() time.Time {
return c.CreatedTime
}

func (c *Files) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (c *Files) GetSize() int64 { size, _ := strconv.ParseInt(c.Size, 10, 64); return size }
func (c *Files) GetName() string { return c.Name }
func (c *Files) ModTime() time.Time { return c.ModifiedTime }

@@ -80,7 +80,11 @@ func (d *USS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
downExp := time.Hour * time.Duration(d.SignURLExpire)
expireAt := time.Now().Add(downExp).Unix()
upd := url.QueryEscape(path.Base(file.GetPath()))
signStr := strings.Join([]string{d.OperatorPassword, fmt.Sprint(expireAt), fmt.Sprintf("/%s", key)}, "&")
tokenOrPassword := d.AntiTheftChainToken
if tokenOrPassword == "" {
tokenOrPassword = d.OperatorPassword
}
signStr := strings.Join([]string{tokenOrPassword, fmt.Sprint(expireAt), fmt.Sprintf("/%s", key)}, "&")
upt := utils.GetMD5EncodeStr(signStr)[12:20] + fmt.Sprint(expireAt)
link := fmt.Sprintf("%s?_upd=%s&_upt=%s", u, upd, upt)
return &model.Link{URL: link}, nil

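The USS change above prefers the dedicated anti-theft-chain token and falls back to the operator password when it is empty. A sketch of the _upt signature exactly as computed in the code, assuming utils.GetMD5EncodeStr is a hex-encoded MD5:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// upt builds md5(token_or_password & expireAt & /key), takes hex characters
// 12..20, and appends the expiry timestamp — matching the driver's formula.
func upt(tokenOrPassword, key string, expireAt int64) string {
	signStr := strings.Join([]string{tokenOrPassword, fmt.Sprint(expireAt), "/" + key}, "&")
	sum := md5.Sum([]byte(signStr))
	return hex.EncodeToString(sum[:])[12:20] + fmt.Sprint(expireAt)
}

func main() {
	fmt.Println(upt("secret-token", "bucket/file.txt", 1700000000))
}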
@@ -7,10 +7,11 @@ import (

type Addition struct {
driver.RootPath
Bucket string `json:"bucket" required:"true"`
Endpoint string `json:"endpoint" required:"true"`
OperatorName string `json:"operator_name" required:"true"`
OperatorPassword string `json:"operator_password" required:"true"`
Bucket string `json:"bucket" required:"true"`
Endpoint string `json:"endpoint" required:"true"`
OperatorName string `json:"operator_name" required:"true"`
OperatorPassword string `json:"operator_password" required:"true"`
AntiTheftChainToken string `json:"anti_theft_chain_token" required:"false" default:""`
//CustomHost string `json:"custom_host"` // Endpoint serves the same purpose as CustomHost; removed
SignURLExpire int `json:"sign_url_expire" type:"number" default:"4"`
}

@@ -52,18 +52,29 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return res, nil
}

type nopReadSeekCloser struct {
type DummyMFile struct {
io.Reader
}

func (nopReadSeekCloser) Seek(offset int64, whence int) (int64, error) {
func (f DummyMFile) Read(p []byte) (n int, err error) {
return f.Reader.Read(p)
}

func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return f.Reader.Read(p)
}

func (f DummyMFile) Close() error {
return nil
}

func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
return offset, nil
}
func (nopReadSeekCloser) Close() error { return nil }

func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
ReadSeekCloser: nopReadSeekCloser{io.LimitReader(random.Rand, file.GetSize())},
MFile: DummyMFile{Reader: random.Rand},
}, nil
}

@@ -2,11 +2,11 @@ package weiyun

import (
"context"
"fmt"
"io"
"math"
"net/http"
"os"
"sync"
"strconv"
"time"

"github.com/alist-org/alist/v3/drivers/base"
@@ -15,7 +15,9 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/cron"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go"
)

@@ -26,6 +28,8 @@ type WeiYun struct {
client *weiyunsdkgo.WeiYunClient
cron *cron.Cron
rootFolder *Folder

uploadThread int
}

func (d *WeiYun) Config() driver.Config {
@@ -37,7 +41,13 @@ func (d *WeiYun) GetAddition() driver.Additional {
}

func (d *WeiYun) Init(ctx context.Context) error {
d.client = weiyunsdkgo.NewWeiYunClientWithRestyClient(base.RestyClient)
// limit the number of upload threads
d.uploadThread, _ = strconv.Atoi(d.UploadThread)
if d.uploadThread < 4 || d.uploadThread > 32 {
d.uploadThread, d.UploadThread = 4, "4"
}

d.client = weiyunsdkgo.NewWeiYunClientWithRestyClient(base.NewRestyClient())
err := d.client.SetCookiesStr(d.Cookies).RefreshCtoken()
if err != nil {
return err
@@ -77,6 +87,10 @@ func (d *WeiYun) Init(ctx context.Context) error {
if err != nil {
return err
}
if len(folders) == 0 {
return fmt.Errorf("invalid directory ID")
}

folder := folders[len(folders)-1]
d.rootFolder = &Folder{
PFolder: &Folder{
@@ -187,6 +201,7 @@ func (d *WeiYun) MakeDir(ctx context.Context, parentDir model.Obj, dirName strin
}

func (d *WeiYun) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO: the default strategy is rename, and using the cache may produce conflicts. The Weiyun app has the same conflict; it is unclear how Tencent handles it.
if dstDir, ok := dstDir.(*Folder); ok {
dstParam := weiyunsdkgo.FolderParam{
PdirKey: dstDir.GetPKey(),
@@ -204,7 +219,6 @@ func (d *WeiYun) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
if err != nil {
return nil, err
}

return &File{
PFolder: dstDir,
File: srcObj.File,
@@ -219,7 +233,6 @@ func (d *WeiYun) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj,
if err != nil {
return nil, err
}

return &Folder{
PFolder: dstDir,
Folder: srcObj.Folder,
@@ -271,7 +284,6 @@ func (d *WeiYun) Rename(ctx context.Context, srcObj model.Obj, newName string) (
}

func (d *WeiYun) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// TODO copy obj, optional
return errs.NotImplement
}

@@ -292,19 +304,17 @@ func (d *WeiYun) Remove(ctx context.Context, obj model.Obj) error {
DirName: obj.GetName(),
})
}
// TODO remove obj, optional
return errs.NotSupport
}

func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if folder, ok := dstDir.(*Folder); ok {
file, err := utils.CreateTempFile(stream, stream.GetSize())
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
_ = os.Remove(file.Name())
}()

// step 1.
@@ -325,35 +335,44 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr

// fast upload
if !preData.FileExist {
// step 2.
upCtx, cancel := context.WithCancelCause(ctx)
var wg sync.WaitGroup
// step.2 add upload channels
if len(preData.ChannelList) < d.uploadThread {
newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
if err != nil {
return nil, err
}
preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
}
// step.3 upload
threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
retry.Attempts(3),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))

for _, channel := range preData.ChannelList {
wg.Add(1)
go func(channel weiyunsdkgo.UploadChannelData) {
defer wg.Done()
if utils.IsCanceled(upCtx) {
return
}
if utils.IsCanceled(upCtx) {
break
}

var channel = channel
threadG.Go(func(ctx context.Context) error {
for {
channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
if err != nil {
cancel(err)
return
return err
}
// upload finished
if upData.UploadState != 1 {
return
return nil
}
channel = upData.Channel
}
}(channel)
})
}
wg.Wait()
if utils.IsCanceled(upCtx) {
return nil, context.Cause(upCtx)
if err = threadG.Wait(); err != nil {
return nil, err
}
}

@@ -10,6 +10,7 @@ type Addition struct {
Cookies string `json:"cookies" required:"true"`
OrderBy string `json:"order_by" type:"select" options:"name,size,updated_at" default:"name"`
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
UploadThread string `json:"upload_thread" default:"4" help:"4<=thread<=32"`
}

var config = driver.Config{

@@ -1,6 +1,7 @@
package weiyun

import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"

weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go"
@@ -21,12 +22,27 @@ func (f *File) GetPath() string { return "" }
func (f *File) GetPKey() string {
return f.PFolder.DirKey
}
func (f *File) CreateTime() time.Time {
return time.Time(f.FileCtime)
}

func (f *File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.FileSha)
}

type Folder struct {
PFolder *Folder
weiyunsdkgo.Folder
}

func (f *Folder) CreateTime() time.Time {
return time.Time(f.DirCtime)
}

func (f *Folder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}

func (f *Folder) GetID() string { return f.DirKey }
func (f *Folder) GetSize() int64 { return 0 }
func (f *Folder) GetName() string { return f.DirName }

70 go.mod
@@ -3,25 +3,31 @@ module github.com/alist-org/alist/v3
go 1.20

require (
github.com/SheltonZhu/115driver v1.0.14
github.com/SheltonZhu/115driver v1.0.16
github.com/Xhofe/go-cache v0.0.0-20220723083548-714439c8af9a
github.com/Xhofe/rateg v0.0.0-20230728072201-251a4e1adad4
github.com/Xhofe/wopan-sdk-go v0.1.1
github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible
github.com/avast/retry-go v3.0.0+incompatible
github.com/aws/aws-sdk-go v1.44.316
github.com/aws/aws-sdk-go v1.44.327
github.com/blevesearch/bleve/v2 v2.3.9
github.com/caarlos0/env/v9 v9.0.0
github.com/charmbracelet/bubbles v0.16.1
github.com/charmbracelet/bubbletea v0.24.2
github.com/charmbracelet/lipgloss v0.7.1
github.com/coreos/go-oidc v2.2.1+incompatible
github.com/deckarep/golang-set/v2 v2.3.0
github.com/deckarep/golang-set/v2 v2.3.1
github.com/disintegration/imaging v1.6.2
github.com/djherbis/times v1.5.0
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564
github.com/foxxorcat/mopan-sdk-go v0.1.1
github.com/foxxorcat/weiyun-sdk-go v0.1.1
github.com/foxxorcat/mopan-sdk-go v0.1.4
github.com/foxxorcat/weiyun-sdk-go v0.1.2
github.com/gin-contrib/cors v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/go-resty/resty/v2 v2.7.0
github.com/go-webauthn/webauthn v0.8.6
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/google/uuid v1.3.0
github.com/google/uuid v1.3.1
github.com/gorilla/websocket v1.5.0
github.com/hirochachacha/go-smb2 v1.1.0
github.com/ipfs/go-ipfs-api v0.6.1
@@ -29,20 +35,22 @@ require (
github.com/json-iterator/go v1.1.12
github.com/maruel/natural v1.1.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.6-0.20230213180117-971c283182b6
github.com/pquerna/otp v1.4.0
github.com/rclone/rclone v1.63.1
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.7.0
github.com/stretchr/testify v1.8.4
github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca
github.com/u2takey/ffmpeg-go v0.4.1
github.com/u2takey/ffmpeg-go v0.5.0
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0
golang.org/x/crypto v0.11.0
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b
golang.org/x/image v0.10.0
golang.org/x/net v0.13.0
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5
golang.org/x/crypto v0.12.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
golang.org/x/image v0.11.0
golang.org/x/net v0.14.0
golang.org/x/oauth2 v0.10.0
gorm.io/driver/mysql v1.4.7
gorm.io/driver/postgres v1.4.8
@@ -57,8 +65,8 @@ require (
github.com/RoaringBitmap/roaring v1.2.3 // indirect
github.com/abbot/go-http-auth v0.4.0 // indirect
github.com/aead/ecdh v0.2.0 // indirect
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible // indirect
github.com/andreburgaud/crypt2go v1.1.0 // indirect
github.com/andreburgaud/crypt2go v1.2.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.2.0 // indirect
@@ -82,9 +90,12 @@ require (
github.com/bytedance/sonic v1.9.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gaoyb7/115drive-webdav v0.1.8 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
@@ -95,15 +106,18 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-sql-driver/mysql v1.7.0 // indirect
github.com/go-webauthn/x v0.1.4 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/golang-jwt/jwt/v5 v5.0.0 // indirect
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-tpm v0.9.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/ipfs/boxo v0.8.0 // indirect
github.com/ipfs/go-cid v0.4.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgx/v5 v5.3.0 // indirect
@@ -116,30 +130,38 @@ require (
github.com/leodido/go-urn v1.2.4 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.1.0 // indirect
github.com/libp2p/go-libp2p v0.26.3 // indirect
github.com/libp2p/go-libp2p v0.27.8 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/mschoch/smat v0.2.0 // indirect
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/reflow v0.3.0 // indirect
github.com/muesli/termenv v0.15.1 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.8.0 // indirect
github.com/multiformats/go-multibase v0.1.1 // indirect
github.com/multiformats/go-multiaddr v0.9.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.8.1 // indirect
github.com/multiformats/go-multihash v0.2.1 // indirect
github.com/multiformats/go-multistream v0.4.1 // indirect
github.com/multiformats/go-varint v0.0.7 // indirect
github.com/ncw/swift/v2 v2.0.2 // indirect
github.com/orzogc/fake115uploader v0.3.3-0.20221009101310-08b764073b77 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pierrec/lz4/v4 v4.1.18 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
@@ -147,6 +169,7 @@ require (
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/rfjakob/eme v1.1.2 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/shirou/gopsutil/v3 v3.23.7 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e // indirect
@@ -157,13 +180,14 @@ require (
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/u2takey/go-utils v0.3.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
go.etcd.io/bbolt v1.3.7 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.10.0 // indirect
|
||||
golang.org/x/term v0.10.0 // indirect
|
||||
golang.org/x/text v0.11.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/term v0.11.0 // indirect
|
||||
golang.org/x/text v0.12.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/api v0.134.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
|
120
go.sum
@@ -8,8 +8,10 @@ github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/RoaringBitmap/roaring v1.2.3 h1:yqreLINqIrX22ErkKI0vY47/ivtJr6n+kMhVOVmhWBY=
github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
github.com/SheltonZhu/115driver v1.0.14 h1:uW3dl8J9KDMw+3gPxQdhTysoGhw0/uI1484GT9xhfU4=
github.com/SheltonZhu/115driver v1.0.14/go.mod h1:00ixivHH5HqDj4S7kAWbkuUrjtsJTxc7cGv5RMw3RVs=
github.com/SheltonZhu/115driver v1.0.15 h1:RRvgXvXEzvrPwkRno0CUIg7ucEphbsfwct2mQxfNOdQ=
github.com/SheltonZhu/115driver v1.0.15/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4=
github.com/SheltonZhu/115driver v1.0.16 h1:XOhqRtKF9huTCobM5rWVd9DtJyBKLJSnBwWPF3+GM+k=
github.com/SheltonZhu/115driver v1.0.16/go.mod h1:e3fPOBANbH/FsTya8FquJwOR3ErhCQgEab3q6CVY2k4=
github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A=
github.com/Xhofe/go-cache v0.0.0-20220723083548-714439c8af9a h1:RenIAa2q4H8UcS/cqmwdT1WCWIAH5aumP8m8RpbqVsE=
github.com/Xhofe/go-cache v0.0.0-20220723083548-714439c8af9a/go.mod h1:sSBbaOg90XwWKtpT56kVujF0bIeVITnPlssLclogS04=
@@ -23,13 +25,19 @@ github.com/aead/ecdh v0.2.0 h1:pYop54xVaq/CEREFEcukHRZfTdjiWvYIsZDXXrBapQQ=
github.com/aead/ecdh v0.2.0/go.mod h1:a9HHtXuSo8J1Js1MwLQx2mBhkXMT6YwUmVVEY4tTB8U=
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible h1:QoRMR0TCctLDqBCMyOu1eXdZyMw3F7uGA9qPn2J4+R8=
github.com/aliyun/aliyun-oss-go-sdk v2.2.5+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible h1:KpbJFXwhVeuxNtBJ74MCGbIoaBok2uZvkD7QXp2+Wis=
github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andreburgaud/crypt2go v1.1.0 h1:eitZxTPY1krUsxinsng3Qvt/Ud7q/aQmmYRh8p4hyPw=
github.com/andreburgaud/crypt2go v1.1.0/go.mod h1:4qhZPzarj1dCIRmCkpdgCklwp+hBq9yEt0zPe9Ayuhc=
github.com/andreburgaud/crypt2go v1.2.0 h1:oly/ENAodeqTYpUafgd4r3v+VKLQnmOKUyfpj+TxHbE=
github.com/andreburgaud/crypt2go v1.2.0/go.mod h1:kKRqlrX/3Q9Ki7HdUsoh0cX1Urq14/Hcta4l4VrIXrI=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aws/aws-sdk-go v1.38.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.316 h1:UC3alCEyzj2XU13ZFGIOHW3yjCNLGTIGVauyetl9fwE=
github.com/aws/aws-sdk-go v1.44.316/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.327 h1:ZS8oO4+7MOBLhkdwIhgtVeDzCeWOlTfKJS7EgggbIEY=
github.com/aws/aws-sdk-go v1.44.327/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -79,10 +87,18 @@ github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc
github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbles v0.16.1 h1:6uzpAAaT9ZqKssntbvZMlksWHruQLNxg49H5WdeuYSY=
github.com/charmbracelet/bubbles v0.16.1/go.mod h1:2QCp9LFlEsBQMvIYERr7Ww2H2bA7xen1idUDIzm/+Xc=
github.com/charmbracelet/bubbletea v0.24.2 h1:uaQIKx9Ai6Gdh5zpTbGiWpytMU+CfsPp06RaW2cx/SY=
github.com/charmbracelet/bubbletea v0.24.2/go.mod h1:XdrNrV4J8GiyshTtx3DNuYkR1FDaJmO3l2nejekbsgg=
github.com/charmbracelet/lipgloss v0.7.1 h1:17WMwi7N1b1rVWOjMT+rCh7sQkvDU75B2hbZpc5Kc1E=
github.com/charmbracelet/lipgloss v0.7.1/go.mod h1:yG0k3giv8Qj8edTCbbg6AlQ5e8KNWpFujkNawKNhE2c=
github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY=
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
@@ -94,20 +110,26 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g=
github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
github.com/djherbis/times v1.5.0 h1:79myA211VwPhFTqUk8xehWrsEO+zcIZj0zT8mXPVARU=
github.com/djherbis/times v1.5.0/go.mod h1:5q7FDLvbNg1L/KaBmPcWlVR9NmoKo3+ucqUA3ijQhA0=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564 h1:I6KUy4CI6hHjqnyJLNCEi7YHVMkwwtfSr2k9splgdSM=
github.com/dustinxie/ecc v0.0.0-20210511000915-959544187564/go.mod h1:yekO+3ZShy19S+bsmnERmznGy9Rfg6dWWWpiGJjNAz8=
github.com/foxxorcat/mopan-sdk-go v0.1.1 h1:JYMeCu4PFpqgHapvOz4jPMT7CxR6Yebu3aWkgGMDeIU=
github.com/foxxorcat/mopan-sdk-go v0.1.1/go.mod h1:LpBPmwezjQNyhaNo3HGzgFtQbhvxmF5ZybSVuKi7OVA=
github.com/foxxorcat/weiyun-sdk-go v0.1.1 h1:m4qcJk0adr+bpM4es2zCqP3jhMEwEPyTMGICsamygEQ=
github.com/foxxorcat/weiyun-sdk-go v0.1.1/go.mod h1:AKsLFuWhWlClpGrg1zxTdMejugZEZtmhIuElAk3W83s=
github.com/foxxorcat/mopan-sdk-go v0.1.3 h1:6ww0ulyLDh6neXZBqUM2PDbxQ6lfdkQbr0FCh9BTY0Y=
github.com/foxxorcat/mopan-sdk-go v0.1.3/go.mod h1:iWHA2JFhzmKR28ySp1ON0g6DjLaYtvb5jhTqPVTDW9A=
github.com/foxxorcat/mopan-sdk-go v0.1.4 h1:6utvPiBv8KDRDVKB7A4FERdrVxcHKZd2fBFCNuKcXzU=
github.com/foxxorcat/mopan-sdk-go v0.1.4/go.mod h1:iWHA2JFhzmKR28ySp1ON0g6DjLaYtvb5jhTqPVTDW9A=
github.com/foxxorcat/weiyun-sdk-go v0.1.2 h1:waRWIBmjL9GCcndJ8HvOYrrVB4hhoPYzRrn3I/Cnzqw=
github.com/foxxorcat/weiyun-sdk-go v0.1.2/go.mod h1:AKsLFuWhWlClpGrg1zxTdMejugZEZtmhIuElAk3W83s=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88=
github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gaoyb7/115drive-webdav v0.1.8 h1:EJt4PSmcbvBY4KUh2zSo5p6fN9LZFNkIzuKejipubVw=
@@ -142,15 +164,22 @@ github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPr
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-webauthn/webauthn v0.8.6 h1:bKMtL1qzd2WTFkf1mFTVbreYrwn7dsYmEPjTq6QN90E=
github.com/go-webauthn/webauthn v0.8.6/go.mod h1:emwVLMCI5yx9evTTvr0r+aOZCdWJqMfbRhF0MufyUog=
github.com/go-webauthn/x v0.1.4 h1:sGmIFhcY70l6k7JIDfnjVBiAAFEssga5lXIUXe0GtAs=
github.com/go-webauthn/x v0.1.4/go.mod h1:75Ug0oK6KYpANh5hDOanfDI+dvPWHk788naJVG/37H8=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE=
github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo=
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
@@ -162,11 +191,15 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk=
github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM=
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
@@ -183,8 +216,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/ipfs/boxo v0.8.0 h1:UdjAJmHzQHo/j3g3b1bAcAXCj/GM6iTwvSlBDvPBNBs=
github.com/ipfs/boxo v0.8.0/go.mod h1:RIsi4CnTyQ7AUsNn5gXljJYZlQrHBMnJp94p73liFiA=
github.com/ipfs/go-cid v0.4.0 h1:a4pdZq0sx6ZSxbCizebnKiMCx/xI/aBBFlB73IgH4rA=
github.com/ipfs/go-cid v0.4.0/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s=
github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk=
github.com/ipfs/go-ipfs-api v0.6.1 h1:nK5oeFOdMh1ogT+GCOcyBFOOcFGNuudSb1rg9YDyAKE=
github.com/ipfs/go-ipfs-api v0.6.1/go.mod h1:8pl+ZMF2LX42szbqGbpOBEiI1/rYaImvTvJtG0g+rL4=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
@@ -205,9 +238,11 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -233,8 +268,10 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
github.com/libp2p/go-libp2p v0.26.3 h1:6g/psubqwdaBqNNoidbRKSTBEYgaOuKBhHl8Q5tO+PM=
github.com/libp2p/go-libp2p v0.26.3/go.mod h1:x75BN32YbwuY0Awm2Uix4d4KOz+/4piInkp4Wr3yOo8=
github.com/libp2p/go-libp2p v0.27.8 h1:IX5x/4yKwyPQeVS2AXHZ3J4YATM9oHBGH1gBc23jBAI=
github.com/libp2p/go-libp2p v0.27.8/go.mod h1:eCFFtd0s5i/EVKR7+5Ki8bM7qwkNW3TPTTSSW9sz8NE=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik=
github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
@@ -246,6 +283,11 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
@@ -254,6 +296,8 @@ github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -265,14 +309,22 @@ github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
github.com/muesli/termenv v0.15.1 h1:UzuTb/+hhlBugQz28rpzey4ZuKcZ03MeKsoG7IJZIxs=
github.com/muesli/termenv v0.15.1/go.mod h1:HeAQPTzpfs016yGtA4g00CsdYnVLJvxsS4ANqrZs2sQ=
github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE=
github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI=
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
github.com/multiformats/go-multiaddr v0.8.0 h1:aqjksEcqK+iD/Foe1RRFsGZh8+XFiGo7FgUCZlpv3LU=
github.com/multiformats/go-multiaddr v0.8.0/go.mod h1:Fs50eBDWvZu+l3/9S6xAE7ZYj6yhxlvaVZjakWN7xRs=
github.com/multiformats/go-multibase v0.1.1 h1:3ASCDsuLX8+j4kx58qnJ4YFq/JWTJpCyDW27ztsVTOI=
github.com/multiformats/go-multibase v0.1.1/go.mod h1:ZEjHE+IsUrgp5mhlEAYjMtZwK1k4haNkcaPg9aoe1a8=
github.com/multiformats/go-multiaddr v0.9.0 h1:3h4V1LHIk5w4hJHekMKWALPXErDfz/sggzwC/NcqbDQ=
github.com/multiformats/go-multiaddr v0.9.0/go.mod h1:mI67Lb1EeTOYb8GQfL/7wpIZwc46ElrvzhYnoJOmTT0=
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
github.com/multiformats/go-multicodec v0.8.1 h1:ycepHwavHafh3grIbR1jIXnKCsFm0fqsfEOsJ8NtKE8=
github.com/multiformats/go-multicodec v0.8.1/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
github.com/multiformats/go-multihash v0.2.1 h1:aem8ZT0VA2nCHHk7bPJ1BjUbHNciqZC/d16Vve9l108=
@@ -281,18 +333,23 @@ github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3d
github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q=
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk=
github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/orzogc/fake115uploader v0.3.3-0.20221009101310-08b764073b77 h1:dg/EaaJLPIg4xn2kaZil7Ax3wfoxcFXaBwyOTlcz5AI=
github.com/orzogc/fake115uploader v0.3.3-0.20221009101310-08b764073b77/go.mod h1:FD9a09Vw07CSMTdT0Y7ttStOa1WZsnPBslliMw2DkeM=
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831 h1:K3T3eu4h5aYIOzUtLjN08L4Qt4WGaJONMgcaD0ayBJQ=
github.com/orzogc/fake115uploader v0.3.3-0.20230715111618-58f9eb76f831/go.mod h1:lSHD4lC4zlMl+zcoysdJcd5KFzsWwOD8BJbyg1Ws9Ng=
github.com/panjf2000/ants/v2 v2.4.2/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -321,6 +378,10 @@ github.com/rclone/rclone v1.63.1 h1:iITCUNBfAXnguHjRPFq+w/gGIW0L0las78h4H5CH2Ms=
github.com/rclone/rclone v1.63.1/go.mod h1:eUQaKsf1wJfHKB0RDoM8RaPAeRB2eI/Qw+Vc9Ho5FGM=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -372,6 +433,8 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/u2takey/ffmpeg-go v0.4.1 h1:l5ClIwL3N2LaH1zF3xivb3kP2HW95eyG5xhHE1JdZ9Y=
github.com/u2takey/ffmpeg-go v0.4.1/go.mod h1:ruZWkvC1FEiUNjmROowOAps3ZcWxEiOpFoHCvk97kGc=
github.com/u2takey/ffmpeg-go v0.5.0 h1:r7d86XuL7uLWJ5mzSeQ03uvjfIhiJYvsRAJFCW4uklU=
github.com/u2takey/ffmpeg-go v0.5.0/go.mod h1:ruZWkvC1FEiUNjmROowOAps3ZcWxEiOpFoHCvk97kGc=
github.com/u2takey/go-utils v0.3.1 h1:TaQTgmEZZeDHQFYfd+AdUT1cT4QJgJn/XVPELhHw4ys=
github.com/u2takey/go-utils v0.3.1/go.mod h1:6e+v5vEZ/6gu12w/DC2ixZdZtCrNokVxD0JUklcqdCs=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
@@ -381,8 +444,10 @@ github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZ
github.com/upyun/go-sdk/v3 v3.0.4 h1:2DCJa/Yi7/3ZybT9UCPATSzvU3wpPPxhXinNlb1Hi8Q=
github.com/upyun/go-sdk/v3 v3.0.4/go.mod h1:P/SnuuwhrIgAVRd/ZpzDWqCsBAf/oHg7UggbAxyZa0E=
github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc=
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0 h1:j3un8DqYvvAOqKI5OPz+/RRVhDFipbPKI4t2Uk5RBJw=
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXorZG0KzTxbp0Cr1n3FEegfmyd9br1k=
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
@@ -405,11 +470,17 @@ golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b h1:r+vk0EmXNmekl0S0BascoeeoHk/L7wmaW2QF90K+kYI=
golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.10.0 h1:gXjUUtwtx5yOE0VKWq1CH4IJAClq4UGgUA3i+rpON9M=
golang.org/x/image v0.10.0/go.mod h1:jtrku+n79PfroUbvDdeUWMAI+heR786BofxrbiSF+J0=
golang.org/x/image v0.11.0 h1:ds2RoQvBvYTiJkwpSFDwCcDFNX7DqjL2WsUgTNk0Ooo=
golang.org/x/image v0.11.0/go.mod h1:bglhjqbqVuEb9e9+eNR45Jfu7D+T4Qan+NhQk8Ck2P8=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -424,6 +495,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -454,14 +527,17 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -471,6 +547,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -2,6 +2,7 @@ package aria2

import (
    "fmt"
    "github.com/alist-org/alist/v3/internal/stream"
    "os"
    "path"
    "path/filepath"
@@ -162,22 +163,27 @@ func (m *Monitor) Complete() error {
            if err != nil {
                return errors.Wrapf(err, "failed to open file %s", file.Path)
            }
            stream := &model.FileStream{
            s := stream.FileStream{
                Obj: &model.Object{
                    Name:     path.Base(file.Path),
                    Size:     size,
                    Modified: time.Now(),
                    IsFolder: false,
                },
                ReadCloser: f,
                Mimetype:   mimetype,
                Reader:   f,
                Closers:  utils.NewClosers(f),
                Mimetype: mimetype,
            }
            ss, err := stream.NewSeekableStream(s, nil)
            if err != nil {
                return err
            }
            relDir, err := filepath.Rel(m.tempDir, filepath.Dir(file.Path))
            if err != nil {
                log.Errorf("find relation directory error: %v", err)
            }
            newDistDir := filepath.Join(dstDirActualPath, relDir)
            return op.Put(tsk.Ctx, storage, newDistDir, stream, tsk.SetProgress)
            return op.Put(tsk.Ctx, storage, newDistDir, ss, tsk.SetProgress)
        },
    }))
}
25
internal/authn/authn.go
Normal file
@@ -0,0 +1,25 @@
package authn

import (
    "net/http"
    "net/url"

    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/alist-org/alist/v3/internal/setting"
    "github.com/alist-org/alist/v3/server/common"
    "github.com/go-webauthn/webauthn/webauthn"
)

func NewAuthnInstance(r *http.Request) (*webauthn.WebAuthn, error) {
    siteUrl, err := url.Parse(common.GetApiUrl(r))
    if err != nil {
        return nil, err
    }
    return webauthn.New(&webauthn.Config{
        RPDisplayName: setting.GetStr(conf.SiteTitle),
        RPID:          siteUrl.Hostname(),
        //RPOrigin: siteUrl.String(),
        RPOrigins: []string{siteUrl.String()},
        // RPOrigin: "http://localhost:5173"
    })
}
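Note: the helper above builds a fresh webauthn.WebAuthn per request, with the relying-party ID taken from the site URL's hostname. A minimal sketch of how a login route might consume it, assuming the go-webauthn v0.8 BeginRegistration API and that *model.User satisfies webauthn.User via the methods added further down in this diff; the handler name and package are hypothetical:

package handles

import (
    "encoding/json"
    "net/http"

    "github.com/alist-org/alist/v3/internal/authn"
    "github.com/alist-org/alist/v3/internal/model"
)

// beginRegistration is an illustrative handler, not part of this PR.
func beginRegistration(w http.ResponseWriter, r *http.Request, user *model.User) {
    authnInstance, err := authn.NewAuthnInstance(r)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    // options goes to the browser; sessionData must be kept server-side
    // until FinishRegistration verifies the attestation response.
    options, sessionData, err := authnInstance.BeginRegistration(user)
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    _ = sessionData // e.g. cache keyed by user ID
    _ = json.NewEncoder(w).Encode(options)
}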
@@ -139,6 +139,7 @@ func InitialSettings() []model.SettingItem {
    {Key: conf.OcrApi, Value: "https://api.nn.ci/ocr/file/json", Type: conf.TypeString, Group: model.GLOBAL},
    {Key: conf.FilenameCharMapping, Value: `{"/": "|"}`, Type: conf.TypeText, Group: model.GLOBAL},
    {Key: conf.ForwardDirectLinkParams, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL},
    {Key: conf.WebauthnLoginEnabled, Value: "false", Type: conf.TypeBool, Group: model.GLOBAL, Flag: model.PUBLIC},

    // aria2 settings
    {Key: conf.Aria2Uri, Value: "http://localhost:6800/jsonrpc", Type: conf.TypeString, Group: model.ARIA2, Flag: model.PRIVATE},
@@ -83,8 +83,8 @@ func DefaultConfig() *Config {
        Log: LogConfig{
            Enable: true,
            Name:   logPath,
            MaxSize:    10,
            MaxBackups: 5,
            MaxSize:    50,
            MaxBackups: 30,
            MaxAge: 28,
        },
        MaxConnections: 0,
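The change raises the rotation defaults from 10 MB / 5 backups to 50 MB / 30 backups. These field names match natefinch/lumberjack (listed in go.mod above); a hedged sketch of what such a config typically drives, assuming the log file path and the wiring into AList's logger:

package main

import (
    "log"

    "github.com/natefinch/lumberjack"
)

func main() {
    // lumberjack.Logger is an io.Writer that rotates by size and prunes
    // old files; the values mirror the new defaults in this diff.
    log.SetOutput(&lumberjack.Logger{
        Filename:   "data/log/log.log", // assumed path, derived from logPath in AList
        MaxSize:    50,                 // megabytes before rotation (was 10)
        MaxBackups: 30,                 // rotated files to keep (was 5)
        MaxAge:     28,                 // days to keep a rotated file
    })
    log.Println("logging with rotation")
}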
@@ -41,6 +41,7 @@ const (
    OcrApi                  = "ocr_api"
    FilenameCharMapping     = "filename_char_mapping"
    ForwardDirectLinkParams = "forward_direct_link_params"
    WebauthnLoginEnabled    = "webauthn_login_enabled"

    // index
    SearchIndex = "search_index"
@@ -29,5 +29,5 @@ func AutoMigrate(dst ...interface{}) error {
}

func GetDb() *gorm.DB {
    return db;
    return db
}
@@ -1,7 +1,11 @@
package db

import (
    "encoding/base64"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/go-webauthn/webauthn/webauthn"
    "github.com/pkg/errors"
)

@@ -59,3 +63,40 @@ func GetUsers(pageIndex, pageSize int) (users []model.User, count int64, err err
func DeleteUserById(id uint) error {
    return errors.WithStack(db.Delete(&model.User{}, id).Error)
}

func UpdateAuthn(userID uint, authn string) error {
    return db.Model(&model.User{ID: userID}).Update("authn", authn).Error
}

func RegisterAuthn(u *model.User, credential *webauthn.Credential) error {
    if u == nil {
        return errors.New("user is nil")
    }
    exists := u.WebAuthnCredentials()
    if credential != nil {
        exists = append(exists, *credential)
    }
    res, err := utils.Json.Marshal(exists)
    if err != nil {
        return err
    }
    return UpdateAuthn(u.ID, string(res))
}

func RemoveAuthn(u *model.User, id string) error {
    exists := u.WebAuthnCredentials()
    for i := 0; i < len(exists); i++ {
        idEncoded := base64.StdEncoding.EncodeToString(exists[i].ID)
        if idEncoded == id {
            exists[len(exists)-1], exists[i] = exists[i], exists[len(exists)-1]
            exists = exists[:len(exists)-1]
            break
        }
    }

    res, err := utils.Json.Marshal(exists)
    if err != nil {
        return err
    }
    return UpdateAuthn(u.ID, string(res))
}
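RemoveAuthn deletes by swapping the matched credential with the last element and truncating, which is O(1) per removal at the cost of element order; order does not matter for a set-like credential list. A standalone sketch of the same idiom (names here are illustrative, not from the codebase):

package main

import "fmt"

// removeUnordered swaps the element at i with the last element, then
// truncates the slice; order is not preserved.
func removeUnordered[T any](s []T, i int) []T {
    s[len(s)-1], s[i] = s[i], s[len(s)-1]
    return s[:len(s)-1]
}

func main() {
    creds := []string{"a", "b", "c", "d"}
    fmt.Println(removeUnordered(creds, 1)) // [a d c]
}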
@@ -17,6 +17,7 @@ var (
    MetaNotFound     = errors.New("meta not found")
    StorageNotFound  = errors.New("storage not found")
    StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue")
    StreamPeekFail   = errors.New("StreamPeekFail")
)

// NewErr wrap constant error with an extra message
@@ -10,6 +10,7 @@ import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/alist-org/alist/v3/pkg/task"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/pkg/errors"
@@ -94,9 +95,14 @@ func copyFileBetween2Storages(tsk *task.Task[uint64], srcStorage, dstStorage dri
    if err != nil {
        return errors.WithMessagef(err, "failed get [%s] link", srcFilePath)
    }
    stream, err := getFileStreamFromLink(srcFile, link)
    fs := stream.FileStream{
        Obj: srcFile,
        Ctx: tsk.Ctx,
    }
    // any link provided is seekable
    ss, err := stream.NewSeekableStream(fs, link)
    if err != nil {
        return errors.WithMessagef(err, "failed get [%s] stream", srcFilePath)
    }
    return op.Put(tsk.Ctx, dstStorage, dstDirPath, stream, tsk.SetProgress, true)
    return op.Put(tsk.Ctx, dstStorage, dstDirPath, ss, tsk.SetProgress, true)
}
@@ -2,7 +2,6 @@ package fs

import (
    "context"

    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
@@ -93,7 +92,7 @@ func Remove(ctx context.Context, path string) error {
    return err
}

func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error {
func PutDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error {
    err := putDirectly(ctx, dstDirPath, file, lazyCache...)
    if err != nil {
        log.Errorf("failed put %s: %+v", dstDirPath, err)
@@ -101,7 +100,7 @@ func PutDirectly(ctx context.Context, dstDirPath string, file *model.FileStream,
    return err
}

func PutAsTask(dstDirPath string, file *model.FileStream) error {
func PutAsTask(dstDirPath string, file model.FileStreamer) error {
    err := putAsTask(dstDirPath, file)
    if err != nil {
        log.Errorf("failed put %s: %+v", dstDirPath, err)
@@ -3,13 +3,12 @@ package fs
import (
    "context"
    "fmt"
    "github.com/alist-org/alist/v3/internal/model"
    "sync/atomic"

    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/pkg/task"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/pkg/errors"
)

@@ -18,7 +17,7 @@ var UploadTaskManager = task.NewTaskManager(3, func(tid *uint64) {
})

// putAsTask add as a put task and return immediately
func putAsTask(dstDirPath string, file *model.FileStream) error {
func putAsTask(dstDirPath string, file model.FileStreamer) error {
    storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
    if err != nil {
        return errors.WithMessage(err, "failed get storage")
@@ -27,11 +26,12 @@ func putAsTask(dstDirPath string, file *model.FileStream) error {
        return errors.WithStack(errs.UploadNotSupported)
    }
    if file.NeedStore() {
        tempFile, err := utils.CreateTempFile(file, file.GetSize())
        _, err := file.CacheFullInTempFile()
        if err != nil {
            return errors.Wrapf(err, "failed to create temp file")
        }
        file.SetReadCloser(tempFile)
        //file.SetReader(tempFile)
        //file.SetTmpFile(tempFile)
    }
    UploadTaskManager.Submit(task.WithCancelCtx(&task.Task[uint64]{
        Name: fmt.Sprintf("upload %s to [%s](%s)", file.GetName(), storage.GetStorage().MountPath, dstDirActualPath),
@@ -43,7 +43,7 @@ func putAsTask(dstDirPath string, file *model.FileStream) error {
}

// putDirect put the file and return after finish
func putDirectly(ctx context.Context, dstDirPath string, file *model.FileStream, lazyCache ...bool) error {
func putDirectly(ctx context.Context, dstDirPath string, file model.FileStreamer, lazyCache ...bool) error {
    storage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
    if err != nil {
        return errors.WithMessage(err, "failed get storage")
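The refactor replaces the external utils.CreateTempFile + SetReadCloser pair with the stream's own CacheFullInTempFile method. A standalone sketch of what such caching typically does (the real method lives on stream.FileStream; this illustrative version only shows the pattern): drain a non-seekable reader into a temp file so later steps can seek and retry.

package stream

import (
    "io"
    "os"
)

// cacheFullInTempFile copies r into a temp file and rewinds it, so the
// caller gets a seekable handle over the full content.
func cacheFullInTempFile(r io.Reader) (*os.File, error) {
    f, err := os.CreateTemp("", "file-*")
    if err != nil {
        return nil, err
    }
    if _, err = io.Copy(f, r); err != nil {
        f.Close()
        os.Remove(f.Name())
        return nil, err
    }
    // rewind so reads start from the beginning of the cached copy
    if _, err = f.Seek(0, io.SeekStart); err != nil {
        f.Close()
        os.Remove(f.Name())
        return nil, err
    }
    return f, nil
}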
@@ -1,55 +0,0 @@
package fs

import (
    "github.com/alist-org/alist/v3/pkg/http_range"
    "io"
    "net/http"
    "strings"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/alist-org/alist/v3/server/common"
    "github.com/pkg/errors"
)

func getFileStreamFromLink(file model.Obj, link *model.Link) (*model.FileStream, error) {
    var rc io.ReadCloser
    var err error
    mimetype := utils.GetMimeType(file.GetName())
    if link.RangeReadCloser.RangeReader != nil {
        rc, err = link.RangeReadCloser.RangeReader(http_range.Range{Length: -1})
        if err != nil {
            return nil, err
        }
    } else if link.ReadSeekCloser != nil {
        rc = link.ReadSeekCloser
    } else {
        //TODO: add accelerator
        req, err := http.NewRequest(http.MethodGet, link.URL, nil)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to create request for %s", link.URL)
        }
        for h, val := range link.Header {
            req.Header[h] = val
        }
        res, err := common.HttpClient().Do(req)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to get response for %s", link.URL)
        }
        mt := res.Header.Get("Content-Type")
        if mt != "" && strings.ToLower(mt) != "application/octet-stream" {
            mimetype = mt
        }
        rc = res.Body
    }
    // if can't get mimetype, use default application/octet-stream
    if mimetype == "" {
        mimetype = "application/octet-stream"
    }
    stream := &model.FileStream{
        Obj:        file,
        ReadCloser: rc,
        Mimetype:   mimetype,
    }
    return stream, nil
}
@@ -1,11 +1,13 @@
package model

import (
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "context"
    "io"
    "net/http"
    "time"

    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
)

type ListArgs struct {
@@ -21,16 +23,17 @@ type LinkArgs struct {
}

type Link struct {
    URL             string            `json:"url"`
    Header          http.Header       `json:"header"` // needed header (for url) or response header(for data or writer)
    RangeReadCloser RangeReadCloser   // recommended way
    ReadSeekCloser  io.ReadSeekCloser // best for local,smb.. file system, which exposes ReadSeekCloser
    URL             string            `json:"url"`    // most common way
    Header          http.Header       `json:"header"` // needed header (for url)
    RangeReadCloser RangeReadCloserIF `json:"-"`      // recommended way if can't use URL
    MFile           File              `json:"-"`      // best for local,smb... file system, which exposes MFile

    Expiration *time.Duration // local cache expire Duration
    IPCacheKey bool           // add ip to cache key
    IPCacheKey bool           `json:"-"` // add ip to cache key

    //for accelerating request, use multi-thread downloading
    Concurrency int
    PartSize    int
    Concurrency int `json:"concurrency"`
    PartSize    int `json:"part_size"`
}

type OtherArgs struct {
@@ -44,10 +47,23 @@ type FsOtherArgs struct {
    Method string      `json:"method" form:"method"`
    Data   interface{} `json:"data" form:"data"`
}
type RangeReadCloser struct {
    RangeReader RangeReaderFunc
    Closers     *utils.Closers
type RangeReadCloserIF interface {
    RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
    utils.ClosersIF
}

type WriterFunc func(w io.Writer) error
type RangeReaderFunc func(httpRange http_range.Range) (io.ReadCloser, error)
var _ RangeReadCloserIF = (*RangeReadCloser)(nil)

type RangeReadCloser struct {
    RangeReader RangeReaderFunc
    utils.Closers
}

func (r RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
    rc, err := r.RangeReader(ctx, httpRange)
    r.Closers.Add(rc)
    return rc, err
}

// type WriterFunc func(w io.Writer) error
type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
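With the move from a bare struct field to the RangeReadCloserIF interface, every ranged read goes through RangeRead, which also registers the returned reader with the embedded Closers so the whole link can be closed as one unit. A hypothetical driver-side sketch of satisfying the new contract over an in-memory blob, assuming http_range.Range carries Start and Length (as the usages in this diff suggest, with Length < 0 meaning "until EOF") and that utils.NewClosers is variadic as its use in monitor.go above suggests:

package exampledriver

import (
    "bytes"
    "context"
    "io"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
)

// newMemoryRangeReadCloser returns a RangeReadCloser whose RangeReader
// serves exactly the requested byte window of data.
func newMemoryRangeReadCloser(data []byte) *model.RangeReadCloser {
    return &model.RangeReadCloser{
        RangeReader: func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
            end := r.Start + r.Length
            if r.Length < 0 || end > int64(len(data)) {
                end = int64(len(data)) // -1 (or overshoot) means "to the end"
            }
            return io.NopCloser(bytes.NewReader(data[r.Start:end])), nil
        },
        Closers: utils.NewClosers(), // assumed constructor; RangeRead adds each reader
    }
}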
25
internal/model/file.go
Normal file
@@ -0,0 +1,25 @@
package model

import "io"

// File is basic file level accessing interface
type File interface {
    io.Reader
    io.ReaderAt
    io.Seeker
    io.Closer
}

type NopMFileIF interface {
    io.Reader
    io.ReaderAt
    io.Seeker
}
type NopMFile struct {
    NopMFileIF
}

func (NopMFile) Close() error { return nil }
func NewNopMFile(r NopMFileIF) File {
    return NopMFile{r}
}
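NewNopMFile adapts anything with Read/ReadAt/Seek but no Close to the full File interface. A small usage sketch (test-style, hypothetical): bytes.Reader is exactly such a type, so it can back an in-memory MFile.

package model_test

import (
    "bytes"
    "fmt"

    "github.com/alist-org/alist/v3/internal/model"
)

func ExampleNewNopMFile() {
    // bytes.Reader has Read, ReadAt and Seek but no Close; NewNopMFile
    // supplies a no-op Close to satisfy model.File.
    var f model.File = model.NewNopMFile(bytes.NewReader([]byte("hello")))
    buf := make([]byte, 2)
    _, _ = f.ReadAt(buf, 3)
    fmt.Println(string(buf))
    _ = f.Close() // no-op
    // Output: lo
}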
@@ -1,6 +1,8 @@
package model

import (
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "io"
    "regexp"
    "sort"
@@ -20,8 +22,9 @@ type Obj interface {
    GetSize() int64
    GetName() string
    ModTime() time.Time
    CreateTime() time.Time
    IsDir() bool
    //GetHash() (string, string)
    GetHash() utils.HashInfo

    // The internal information of the driver.
    // If you want to use it, please understand what it means
@@ -29,14 +32,20 @@ type Obj interface {
    GetPath() string
}

// FileStreamer ->check FileStream for more comments
type FileStreamer interface {
    io.ReadCloser
    io.Reader
    io.Closer
    Obj
    GetMimetype() string
    SetReadCloser(io.ReadCloser)
    //SetReader(io.Reader)
    NeedStore() bool
    GetReadCloser() io.ReadCloser
    GetOld() Obj
    GetExist() Obj
    SetExist(Obj)
    //for a non-seekable Stream, RangeRead supports peeking some data, and CacheFullInTempFile still works
    RangeRead(http_range.Range) (io.Reader, error)
    //for a non-seekable Stream, if Read is called, this function won't work
    CacheFullInTempFile() (File, error)
}

type URL interface {
@@ -50,9 +59,6 @@ type Thumb interface {
type SetPath interface {
    SetPath(path string)
}
type SetHash interface {
    SetHash(hash string, hashType string)
}

func SortFiles(objs []Obj, orderBy, orderDirection string) {
    if orderBy == "" {
@@ -28,9 +28,9 @@ type Object struct {
    Name     string
    Size     int64
    Modified time.Time
    Ctime    time.Time // file create time
    IsFolder bool
    Hash     string
    HashType string
    HashInfo utils.HashInfo
}

func (o *Object) GetName() string {
@@ -44,6 +44,12 @@ func (o *Object) GetSize() int64 {
func (o *Object) ModTime() time.Time {
    return o.Modified
}
func (o *Object) CreateTime() time.Time {
    if o.Ctime.IsZero() {
        return o.ModTime()
    }
    return o.Ctime
}

func (o *Object) IsDir() bool {
    return o.IsFolder
@@ -61,13 +67,8 @@ func (o *Object) SetPath(path string) {
    o.Path = path
}

func (o *Object) SetHash(hash string, hashType string) {
    o.Hash = hash
    o.HashType = hashType
}

func (o *Object) GetHash() (string, string) {
    return o.Hash, o.HashType
func (o *Object) GetHash() utils.HashInfo {
    return o.HashInfo
}

type Thumbnail struct {
@@ -1,33 +0,0 @@
package model

import (
    "io"
)

type FileStream struct {
    Obj
    io.ReadCloser
    Mimetype     string
    WebPutAsTask bool
    Old          Obj
}

func (f *FileStream) GetMimetype() string {
    return f.Mimetype
}

func (f *FileStream) NeedStore() bool {
    return f.WebPutAsTask
}

func (f *FileStream) GetReadCloser() io.ReadCloser {
    return f.ReadCloser
}

func (f *FileStream) SetReadCloser(rc io.ReadCloser) {
    f.ReadCloser = rc
}

func (f *FileStream) GetOld() Obj {
    return f.Old
}
@@ -1,11 +1,14 @@
package model

import (
    "encoding/binary"
    "encoding/json"
    "fmt"

    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/alist-org/alist/v3/pkg/utils/random"
    "github.com/go-webauthn/webauthn/webauthn"
    "github.com/pkg/errors"
)

@@ -21,10 +24,10 @@ type User struct {
    ID       uint   `json:"id" gorm:"primaryKey"`                      // unique key
    Username string `json:"username" gorm:"unique" binding:"required"` // username
    PwdHash  string `json:"-"`                                         // password hash
    Salt     string // unique salt
    Password string `json:"password"`  // password
    BasePath string `json:"base_path"` // base path
    Role     int    `json:"role"`      // user's role
    Salt     string `json:"-"`         // unique salt
    Password string `json:"password"`  // password
    BasePath string `json:"base_path"` // base path
    Role     int    `json:"role"`      // user's role
    Disabled bool   `json:"disabled"`
    // Determine permissions by bit
    //   0: can see hidden files
@@ -41,6 +44,7 @@ type User struct {
    Permission int32  `json:"permission"`
    OtpSecret  string `json:"-"`
    SsoID      string `json:"sso_id"` // unique by sso platform
    Authn      string `gorm:"type:text" json:"-"`
}

func (u *User) IsGuest() bool {
@@ -120,13 +124,40 @@ func (u *User) JoinPath(reqPath string) (string, error) {
}

func StaticHash(password string) string {
    return utils.GetSHA256Encode([]byte(fmt.Sprintf("%s-%s", password, StaticHashSalt)))
    return utils.HashData(utils.SHA256, []byte(fmt.Sprintf("%s-%s", password, StaticHashSalt)))
}

func HashPwd(static string, salt string) string {
    return utils.GetSHA256Encode([]byte(fmt.Sprintf("%s-%s", static, salt)))
    return utils.HashData(utils.SHA256, []byte(fmt.Sprintf("%s-%s", static, salt)))
}

func TwoHashPwd(password string, salt string) string {
    return HashPwd(StaticHash(password), salt)
}
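The hashing change only swaps the helper (GetSHA256Encode to the generic utils.HashData); the scheme itself is unchanged: two chained salted SHA-256 passes, a static-salt pass followed by a per-user-salt pass. A stdlib sketch of the same computation, assuming utils.HashData with utils.SHA256 produces a hex-encoded SHA-256 digest as GetSHA256Encode did; the salt values below are illustrative only:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// hashHex mirrors one salted pass: SHA-256 over "value-salt", hex-encoded.
func hashHex(s string) string {
    sum := sha256.Sum256([]byte(s))
    return hex.EncodeToString(sum[:])
}

func main() {
    const staticSalt = "static-salt-placeholder" // not the real StaticHashSalt
    staticHash := hashHex(fmt.Sprintf("%s-%s", "p@ssw0rd", staticSalt))
    final := hashHex(fmt.Sprintf("%s-%s", staticHash, "per-user-salt"))
    fmt.Println(final) // what TwoHashPwd would store/compare
}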
func (u *User) WebAuthnID() []byte {
|
||||
bs := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(bs, uint64(u.ID))
|
||||
return bs
|
||||
}
|
||||
|
||||
func (u *User) WebAuthnName() string {
|
||||
return u.Username
|
||||
}
|
||||
|
||||
func (u *User) WebAuthnDisplayName() string {
|
||||
return u.Username
|
||||
}
|
||||
|
||||
func (u *User) WebAuthnCredentials() []webauthn.Credential {
|
||||
var res []webauthn.Credential
|
||||
err := json.Unmarshal([]byte(u.Authn), &res)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (u *User) WebAuthnIcon() string {
|
||||
return "https://alist.nn.ci/logo.svg"
|
||||
}
|
||||
|
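StaticHash and HashPwd now route through utils.HashData instead of the removed utils.GetSHA256Encode, and TwoHashPwd chains them. A standard-library sketch of the two-stage scheme, assuming utils.HashData(utils.SHA256, b) is hex-encoded SHA-256 (the salt constant below is illustrative):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

const staticHashSalt = "some-fixed-app-salt" // assumed; the real StaticHashSalt is defined elsewhere

func hashData(b []byte) string {
    sum := sha256.Sum256(b)
    return hex.EncodeToString(sum[:])
}

// staticHash is stable per password, so PwdHash can be stored without the
// plaintext; hashPwd then mixes in the per-user salt kept server-side.
func staticHash(password string) string {
    return hashData([]byte(fmt.Sprintf("%s-%s", password, staticHashSalt)))
}

func hashPwd(static, salt string) string {
    return hashData([]byte(fmt.Sprintf("%s-%s", static, salt)))
}

// twoHashPwd mirrors model.TwoHashPwd: hash the password statically, then salt it.
func twoHashPwd(password, salt string) string {
    return hashPwd(staticHash(password), salt)
}

func main() {
    fmt.Println(twoHashPwd("secret", "per-user-salt"))
}

The static hash is deterministic per password, so equal passwords still produce distinct database entries only because of the per-user salt applied in the second stage.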
@@ -1,11 +1,9 @@
package net

import (
    "bytes"
    "context"
    "fmt"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    log "github.com/sirupsen/logrus"
    "io"
    "math"
    "net/http"
@@ -13,6 +11,10 @@ import (
    "strings"
    "sync"
    "time"

    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    log "github.com/sirupsen/logrus"
)

// DefaultDownloadPartSize is the default range of bytes to get at a time when
@@ -41,7 +43,7 @@ type Downloader struct {
    //RequestParam HttpRequestParams
    HttpClient HttpRequestFunc
}
type HttpRequestFunc func(params *HttpRequestParams) (*http.Response, error)
type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error)

func NewDownloader(options ...func(*Downloader)) *Downloader {
    d := &Downloader{
@@ -60,7 +62,7 @@ func NewDownloader(options ...func(*Downloader)) *Downloader {
// cache some data, then return Reader with assembled data
// Supports range, do not support unknown FileSize, and will fail if FileSize is incorrect
// memory usage is at about Concurrency*PartSize, use this wisely
func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readCloser *io.ReadCloser, err error) {
func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readCloser io.ReadCloser, err error) {

    var finalP HttpRequestParams
    awsutil.Copy(&finalP, p)
@@ -107,7 +109,7 @@ type downloader struct {
}

// download performs the implementation of the object download across ranged GETs.
func (d *downloader) download() (*io.ReadCloser, error) {
func (d *downloader) download() (io.ReadCloser, error) {
    d.ctx, d.cancel = context.WithCancel(d.ctx)

    pos := d.params.Range.Start
@@ -129,11 +131,11 @@ func (d *downloader) download() (*io.ReadCloser, error) {
    }

    if d.cfg.Concurrency == 1 {
        resp, err := d.cfg.HttpClient(d.params)
        resp, err := d.cfg.HttpClient(d.ctx, d.params)
        if err != nil {
            return nil, err
        }
        return &resp.Body, nil
        return resp.Body, nil
    }

    // workers
@@ -152,7 +154,7 @@ func (d *downloader) download() (*io.ReadCloser, error) {
    var rc io.ReadCloser = NewMultiReadCloser(d.chunks[0].buf, d.interrupt, d.finishBuf)

    // Return error
    return &rc, d.err
    return rc, d.err
}
func (d *downloader) sendChunkTask() *chunk {
    ch := &d.chunks[d.nextChunk]
@@ -201,7 +203,6 @@ func (d *downloader) downloadPart() {
    //defer d.wg.Done()
    for {
        c, ok := <-d.chunkChannel
        log.Debugf("downloadPart tried to get chunk")
        if !ok {
            break
        }
@@ -210,7 +211,7 @@ func (d *downloader) downloadPart() {
            // of download producer.
            continue
        }

        log.Debugf("downloadPart tried to get chunk")
        if err := d.downloadChunk(&c); err != nil {
            d.setErr(err)
        }
@@ -219,7 +220,7 @@ func (d *downloader) downloadPart() {

// downloadChunk downloads the chunk
func (d *downloader) downloadChunk(ch *chunk) error {
    log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.buf.buffer.id)
    log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.id)
    var n int64
    var err error
    params := d.getParamsFromChunk(ch)
@@ -257,10 +258,11 @@ func (d *downloader) downloadChunk(ch *chunk) error {

func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {

    resp, err := d.cfg.HttpClient(params)
    resp, err := d.cfg.HttpClient(d.ctx, params)
    if err != nil {
        return 0, err
    }
    defer resp.Body.Close()
    //only check file size on the first task
    if ch.id == 0 {
        err = d.checkTotalBytes(resp)
@@ -278,7 +280,6 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
        err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n)
        return n, &errReadingBody{err: err}
    }
    defer resp.Body.Close()

    return n, nil
}
@@ -370,10 +371,10 @@ type chunk struct {
    //boundary http_range.Range
}

func DefaultHttpRequestFunc(params *HttpRequestParams) (*http.Response, error) {
func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
    header := http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef)

    res, err := RequestHttp("GET", header, params.URL)
    res, err := RequestHttp(ctx, "GET", header, params.URL)
    if err != nil {
        return nil, err
    }
@@ -384,7 +385,7 @@ type HttpRequestParams struct {
    URL string
    //only want data within this range
    Range http_range.Range
    HeaderRef *http.Header
    HeaderRef http.Header
    //total file size
    Size int64
}
@@ -401,13 +402,8 @@ func (e *errReadingBody) Unwrap() error {
}

type MultiReadCloser struct {
    io.ReadCloser

    //total int //total bufArr
    //wPos int //current reader wPos
    cfg    *cfg
    closer closerFunc
    //getBuf getBufFunc
    finish finishBufFUnc
}

@@ -448,99 +444,26 @@ func (mr MultiReadCloser) Close() error {
    return mr.closer()
}

type Buffer struct {
    data []byte
    wPos int //writer position
    id   int
    rPos int //reader position
    lock sync.Mutex

    once   bool     //combined use with notify & lock, to get notify once
    notify chan int // notifies new writes
}

func (buf *Buffer) Write(p []byte) (n int, err error) {
    inSize := len(p)
    if inSize == 0 {
        return 0, nil
    }

    if inSize > len(buf.data)-buf.wPos {
        return 0, fmt.Errorf("exceeding buffer max size,inSize=%d ,buf.data.len=%d , buf.wPos=%d",
            inSize, len(buf.data), buf.wPos)
    }
    copy(buf.data[buf.wPos:], p)
    buf.wPos += inSize

    //give read a notice if once==true
    buf.lock.Lock()
    if buf.once == true {
        buf.notify <- inSize //struct{}{}
    }
    buf.once = false
    buf.lock.Unlock()

    return inSize, nil
}

func (buf *Buffer) getPos() (n int) {
    return buf.wPos
}
func (buf *Buffer) reset() {
    buf.wPos = 0
    buf.rPos = 0
}

// waitTillNewWrite notifies the caller when a new write happens
func (buf *Buffer) waitTillNewWrite(pos int) error {
    //log.Debugf("waitTillNewWrite, current wPos=%d", pos)
    var err error

    //defer buffer.lock.Unlock()
    if pos >= len(buf.data) {
        err = fmt.Errorf("there will not be any new write")
    } else if pos > buf.wPos {
        err = fmt.Errorf("illegal read position")
    } else if pos == buf.wPos {
        buf.lock.Lock()
        buf.once = true
        //buffer.wg1.Add(1)
        buf.lock.Unlock()
        //wait for write
        log.Debugf("waitTillNewWrite wait for notify")
        writes := <-buf.notify
        log.Debugf("waitTillNewWrite got new write from notify, last writes:%+v", writes)
        //if pos >= buf.wPos {
        //	//wrote 0 bytes
        //	return fmt.Errorf("write has error")
        //}
        return nil
    }
    //only case: wPos < buffer.wPos
    return err
}

type Buf struct {
    buffer *Buffer // Buffer we read from
    size   int     //expected size
    buffer *bytes.Buffer
    size   int //expected size
    ctx    context.Context
    off    int
    rw     sync.RWMutex
    notify chan struct{}
}

// NewBuf is a buffer that can have 1 read & 1 write at the same time.
// when read is faster than write, feed data to the reader immediately after it is written
func NewBuf(ctx context.Context, maxSize int, id int) *Buf {
    d := make([]byte, maxSize)
    buffer := &Buffer{data: d, id: id, notify: make(chan int)}
    buffer.reset()
    return &Buf{ctx: ctx, buffer: buffer, size: maxSize}
    d := make([]byte, 0, maxSize)
    return &Buf{ctx: ctx, buffer: bytes.NewBuffer(d), size: maxSize, notify: make(chan struct{})}

}
func (br *Buf) Reset(size int) {
    br.buffer.reset()
    br.buffer.Reset()
    br.size = size
}
func (br *Buf) GetId() int {
    return br.buffer.id
    br.off = 0
}

func (br *Buf) Read(p []byte) (n int, err error) {
@@ -550,48 +473,49 @@ func (br *Buf) Read(p []byte) (n int, err error) {
    if len(p) == 0 {
        return 0, nil
    }
    if br.buffer.rPos == br.size {
    if br.off >= br.size {
        return 0, io.EOF
    }
    //persist buffer position as another thread is keep increasing it
    bufPos := br.buffer.getPos()
    outSize := bufPos - br.buffer.rPos

    if outSize == 0 {
        //var wg sync.WaitGroup
        err := br.waitTillNewWrite(br.buffer.rPos)
        if err != nil {
            return 0, err
        }
        bufPos = br.buffer.getPos()
        outSize = bufPos - br.buffer.rPos
    br.rw.RLock()
    n, err = br.buffer.Read(p)
    br.rw.RUnlock()
    if err == nil {
        br.off += n
        return n, err
    }

    if len(p) < outSize {
        // p is not big enough
        outSize = len(p)
    if err != io.EOF {
        return n, err
    }
    copy(p, br.buffer.data[br.buffer.rPos:br.buffer.rPos+outSize])
    br.buffer.rPos += outSize
    if br.buffer.rPos == br.size {
        err = io.EOF
    if n != 0 {
        br.off += n
        return n, nil
    }
    // n==0, err==io.EOF
    // wait for new write for 200ms
    select {
    case <-br.ctx.Done():
        return 0, br.ctx.Err()
    case <-br.notify:
        return 0, nil
    case <-time.After(time.Millisecond * 200):
        return 0, nil
    }

    return outSize, err
}

// waitTillNewWrite is expensive, since we just checked that no new data, wait 0.2s
func (br *Buf) waitTillNewWrite(pos int) error {
    time.Sleep(200 * time.Millisecond)
    return br.buffer.waitTillNewWrite(br.buffer.rPos)
}

func (br *Buf) Write(p []byte) (n int, err error) {
    if err := br.ctx.Err(); err != nil {
        return 0, err
    }
    return br.buffer.Write(p)
    br.rw.Lock()
    defer br.rw.Unlock()
    n, err = br.buffer.Write(p)
    select {
    case br.notify <- struct{}{}:
    default:
    }
    return
}

func (br *Buf) Close() {
    close(br.buffer.notify)
    close(br.notify)
}
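Since HttpRequestFunc and Download are now context-aware and Download returns io.ReadCloser instead of *io.ReadCloser, cancellation reaches every in-flight ranged GET. A hypothetical same-package caller, using only names this diff introduces (fetchRange itself is illustrative):

package net

import (
    "context"
    "io"
    "os"

    "github.com/alist-org/alist/v3/pkg/http_range"
)

// fetchRange streams bytes [start, start+length) of url to stdout.
func fetchRange(ctx context.Context, url string, start, length, totalSize int64) error {
    d := NewDownloader(func(d *Downloader) {
        d.Concurrency = 4            // four parallel ranged GETs
        d.PartSize = 4 * 1024 * 1024 // memory use is about Concurrency*PartSize
    })
    rc, err := d.Download(ctx, &HttpRequestParams{
        URL:   url,
        Range: http_range.Range{Start: start, Length: length},
        Size:  totalSize, // Download fails if this does not match the real size
    })
    if err != nil {
        return err
    }
    defer rc.Close()
    _, err = io.Copy(os.Stdout, rc)
    return err
}

Canceling ctx now propagates into each chunk request via d.cfg.HttpClient(d.ctx, ...), which the old context-free HttpRequestFunc signature could not do.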
@@ -7,14 +7,15 @@ import (
    "bytes"
    "context"
    "fmt"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/sirupsen/logrus"
    "golang.org/x/exp/slices"
    "io"
    "io/ioutil"
    "net/http"
    "sync"
    "testing"

    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/sirupsen/logrus"
    "golang.org/x/exp/slices"
)

var buf22MB = make([]byte, 1024*1024*22)
@@ -55,7 +56,7 @@ func TestDownloadOrder(t *testing.T) {
    if err != nil {
        t.Fatalf("expect no error, got %v", err)
    }
    resultBuf, err := io.ReadAll(*readCloser)
    resultBuf, err := io.ReadAll(readCloser)
    if err != nil {
        t.Fatalf("expect no error, got %v", err)
    }
@@ -111,7 +112,7 @@ func TestDownloadSingle(t *testing.T) {
    if err != nil {
        t.Fatalf("expect no error, got %v", err)
    }
    resultBuf, err := io.ReadAll(*readCloser)
    resultBuf, err := io.ReadAll(readCloser)
    if err != nil {
        t.Fatalf("expect no error, got %v", err)
    }
@@ -142,7 +143,7 @@ type downloadCaptureClient struct {
    lock sync.Mutex
}

func (c *downloadCaptureClient) HttpRequest(params *HttpRequestParams) (*http.Response, error) {
func (c *downloadCaptureClient) HttpRequest(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
    c.lock.Lock()
    defer c.lock.Unlock()
@@ -1,14 +1,8 @@
package net

import (
    "context"
    "fmt"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
    "io"
    "mime"
    "mime/multipart"
@@ -18,6 +12,14 @@ import (
    "strings"
    "sync"
    "time"

    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
)

//this file is inspired by GO_SDK net.http.ServeContent
@@ -109,7 +111,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
    }
    switch {
    case len(ranges) == 0:
        reader, err := RangeReaderFunc(http_range.Range{0, -1})
        reader, err := RangeReaderFunc(context.Background(), http_range.Range{Length: -1})
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
@@ -128,7 +130,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
        // does not request multiple parts might not support
        // multipart responses."
        ra := ranges[0]
        sendContent, err = RangeReaderFunc(ra)
        sendContent, err = RangeReaderFunc(context.Background(), ra)
        if err != nil {
            http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
            return
@@ -155,7 +157,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
                pw.CloseWithError(err)
                return
            }
            reader, err := RangeReaderFunc(ra)
            reader, err := RangeReaderFunc(context.Background(), ra)
            if err != nil {
                pw.CloseWithError(err)
                return
@@ -191,37 +193,33 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
    }
    //defer sendContent.Close()
}
func ProcessHeader(origin, override *http.Header) *http.Header {
func ProcessHeader(origin, override http.Header) http.Header {
    result := http.Header{}
    // client header
    for h, val := range *origin {
    for h, val := range origin {
        if utils.SliceContains(conf.SlicesMap[conf.ProxyIgnoreHeaders], strings.ToLower(h)) {
            continue
        }
        result[h] = val
    }
    // needed header
    for h, val := range *override {
    for h, val := range override {
        result[h] = val
    }
    return &result
    return result
}

// RequestHttp deals with the Header properly, then sends the request
func RequestHttp(httpMethod string, headerOverride *http.Header, URL string) (*http.Response, error) {
    req, err := http.NewRequest(httpMethod, URL, nil)
func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Header, URL string) (*http.Response, error) {
    req, err := http.NewRequestWithContext(ctx, httpMethod, URL, nil)
    if err != nil {
        return nil, err
    }
    req.Header = *headerOverride
    log.Debugln("request Header: ", req.Header)
    log.Debugln("request URL: ", URL)
    req.Header = headerOverride
    res, err := HttpClient().Do(req)
    if err != nil {
        return nil, err
    }
    log.Debugf("response status: %d", res.StatusCode)
    log.Debugln("response Header: ", res.Header)
    // TODO clean header with blocklist or passlist
    res.Header.Del("set-cookie")
    if res.StatusCode >= 400 {
@@ -230,7 +228,6 @@ func RequestHttp(httpMethod string, headerOverride *http.Header, URL string) (*h
        log.Debugln(msg)
        return res, errors.New(msg)
    }

    return res, nil
}
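RequestHttp and ProcessHeader now traffic in http.Header values and a context, so a timeout set by the caller cancels the proxied request itself. A sketch of the new call shape (fetchFirstKB and the timeout are illustrative):

package net

import (
    "context"
    "io"
    "net/http"
    "time"
)

// fetchFirstKB issues a ranged GET that is abandoned automatically
// once the ten-second budget runs out.
func fetchFirstKB(url string) ([]byte, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // ProcessHeader merges client and override headers into a fresh value,
    // so neither input header map is mutated through a shared pointer.
    h := ProcessHeader(http.Header{}, http.Header{"Range": {"bytes=0-1023"}})
    res, err := RequestHttp(ctx, "GET", h, url)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()
    return io.ReadAll(res.Body) // the deadline also bounds the body read
}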
@@ -2,7 +2,6 @@ package op

import (
    "context"
    "os"
    stdpath "path"
    "time"

@@ -481,18 +480,10 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
    return errors.WithStack(err)
}

func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *model.FileStream, up driver.UpdateProgress, lazyCache ...bool) error {
func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file model.FileStreamer, up driver.UpdateProgress, lazyCache ...bool) error {
    if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
        return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
    }
    defer func() {
        if f, ok := file.GetReadCloser().(*os.File); ok {
            err := os.RemoveAll(f.Name())
            if err != nil {
                log.Errorf("failed to remove file [%s]", f.Name())
            }
        }
    }()
    defer func() {
        if err := file.Close(); err != nil {
            log.Errorf("failed to close file streamer, %v", err)
@@ -508,7 +499,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *mo
        if fi.GetSize() == 0 {
            err = Remove(ctx, storage, dstPath)
            if err != nil {
                return errors.WithMessagef(err, "failed remove file that exist and have size 0")
                return errors.WithMessagef(err, "while uploading, failed remove existing file which size = 0")
            }
        } else if storage.Config().NoOverwriteUpload {
            // try to rename old obj
@@ -517,7 +508,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *mo
                return err
            }
        } else {
            file.Old = fi
            file.SetExist(fi)
        }
    }
    err = MakeDir(ctx, storage, dstDirPath)
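op.Put now takes the model.FileStreamer interface and always closes it, so temp-file cleanup moves out of Put and into the stream's own Close (which is what the new internal/stream.FileStream.Close below does). A toy sketch of that ownership contract, with an assumed minimal interface:

package main

import (
    "fmt"
    "io"
    "strings"
)

// FileStreamer here is an assumed minimal subset; the real
// model.FileStreamer also carries object metadata and range reads.
type FileStreamer interface {
    io.Reader
    io.Closer
}

// put mirrors the new contract: it consumes the stream and always closes
// it, while any temp file belongs to the streamer's Close, not to put.
func put(file FileStreamer) error {
    defer func() {
        if err := file.Close(); err != nil {
            fmt.Printf("failed to close file streamer, %v\n", err)
        }
    }()
    _, err := io.Copy(io.Discard, file)
    return err
}

type demoStream struct{ io.Reader }

func (demoStream) Close() error { return nil }

func main() {
    if err := put(demoStream{strings.NewReader("payload")}); err != nil {
        fmt.Println(err)
    }
}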
@@ -2,7 +2,7 @@ package qbittorrent

import (
    "fmt"
    "io"
    "github.com/alist-org/alist/v3/internal/stream"
    "os"
    "path/filepath"
    "sync"
@@ -157,17 +157,22 @@ func (m *Monitor) complete() error {
        if err != nil {
            return errors.Wrapf(err, "failed to open file %s", tempPath)
        }
        stream := &model.FileStream{
        s := stream.FileStream{
            Obj: &model.Object{
                Name:     fileName,
                Size:     size,
                Modified: time.Now(),
                IsFolder: false,
            },
            ReadCloser: struct{ io.ReadSeekCloser }{f},
            Mimetype:   mimetype,
            Reader:   f,
            Closers:  utils.NewClosers(f),
            Mimetype: mimetype,
        }
        return op.Put(tsk.Ctx, storage, dstDir, stream, tsk.SetProgress)
        ss, err := stream.NewSeekableStream(s, nil)
        if err != nil {
            return err
        }
        return op.Put(tsk.Ctx, storage, dstDir, ss, tsk.SetProgress)
    },
}))
}
233 internal/stream/stream.go Normal file
@@ -0,0 +1,233 @@
package stream

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "io"
    "os"

    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/http_range"
    "github.com/alist-org/alist/v3/pkg/utils"
)

type FileStream struct {
    Ctx context.Context
    model.Obj
    io.Reader
    Mimetype     string
    WebPutAsTask bool
    Exist        model.Obj //the file existed in the destination, we can reuse some info since we will overwrite it
    utils.Closers
    tmpFile  *os.File //if present, tmpFile has full content, it will be deleted at last
    peekBuff *bytes.Reader
}

func (f *FileStream) GetMimetype() string {
    return f.Mimetype
}

func (f *FileStream) NeedStore() bool {
    return f.WebPutAsTask
}
func (f *FileStream) Close() error {
    var err1, err2 error
    err1 = f.Closers.Close()
    if f.tmpFile != nil {
        err2 = os.RemoveAll(f.tmpFile.Name())
        if err2 != nil {
            err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
        }
    }

    return errors.Join(err1, err2)
}

func (f *FileStream) GetExist() model.Obj {
    return f.Exist
}
func (f *FileStream) SetExist(obj model.Obj) {
    f.Exist = obj
}

// CacheFullInTempFile saves all data into tmpFile. Not recommended since it wears disk,
// and can't start upload until the file is written. It's not thread-safe!
func (f *FileStream) CacheFullInTempFile() (model.File, error) {
    if f.tmpFile != nil {
        return f.tmpFile, nil
    }
    if file, ok := f.Reader.(model.File); ok {
        return file, nil
    }
    tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
    if err != nil {
        return nil, err
    }
    f.Add(tmpF)
    f.tmpFile = tmpF
    f.Reader = tmpF
    return f.tmpFile, nil
}

const InMemoryBufMaxSize = 10 // Megabytes
const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024

// RangeRead has to cache all data first since only Reader is provided.
// also supports a peeking RangeRead at the very start, but won't buffer more than 10MB data in memory
func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
    if httpRange.Length == -1 {
        httpRange.Length = f.GetSize()
    }
    if f.peekBuff != nil && httpRange.Start < int64(f.peekBuff.Len()) && httpRange.Start+httpRange.Length-1 < int64(f.peekBuff.Len()) {
        return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
    }
    if f.tmpFile == nil {
        if httpRange.Start == 0 && httpRange.Length <= InMemoryBufMaxSizeBytes && f.peekBuff == nil {
            bufSize := utils.Min(httpRange.Length, f.GetSize())
            newBuf := bytes.NewBuffer(make([]byte, 0, bufSize))
            n, err := io.CopyN(newBuf, f.Reader, bufSize)
            if err != nil {
                return nil, err
            }
            if n != bufSize {
                return nil, fmt.Errorf("stream RangeRead did not get all data in peek, expect=%d, actual=%d", bufSize, n)
            }
            f.peekBuff = bytes.NewReader(newBuf.Bytes())
            f.Reader = io.MultiReader(f.peekBuff, f.Reader)
            return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil
        } else {
            _, err := f.CacheFullInTempFile()
            if err != nil {
                return nil, err
            }
        }
    }
    return io.NewSectionReader(f.tmpFile, httpRange.Start, httpRange.Length), nil
}

var _ model.FileStreamer = (*SeekableStream)(nil)
var _ model.FileStreamer = (*FileStream)(nil)

//var _ seekableStream = (*FileStream)(nil)

// for most internal streams, which are either RangeReadCloser or MFile
type SeekableStream struct {
    FileStream
    Link *model.Link
    // should have one of the below to support rangeRead
    rangeReadCloser model.RangeReadCloserIF
    mFile           model.File
}

func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error) {
    if len(fs.Mimetype) == 0 {
        fs.Mimetype = utils.GetMimeType(fs.Obj.GetName())
    }
    ss := SeekableStream{FileStream: fs, Link: link}
    if ss.Reader != nil {
        result, ok := ss.Reader.(model.File)
        if ok {
            ss.mFile = result
            ss.Closers.Add(result)
            return &ss, nil
        }
    }
    if ss.Link != nil {
        if ss.Link.MFile != nil {
            ss.mFile = ss.Link.MFile
            ss.Reader = ss.Link.MFile
            ss.Closers.Add(ss.Link.MFile)
            return &ss, nil
        }

        if ss.Link.RangeReadCloser != nil {
            ss.rangeReadCloser = ss.Link.RangeReadCloser
            return &ss, nil
        }
        if len(ss.Link.URL) > 0 {
            rrc, err := GetRangeReadCloserFromLink(ss.GetSize(), link)
            if err != nil {
                return nil, err
            }
            ss.rangeReadCloser = rrc
            return &ss, nil
        }
    }

    return nil, fmt.Errorf("illegal seekableStream")
}

//func (ss *SeekableStream) Peek(length int) {
//
//}

// RangeRead is not thread-safe, please use it in a single thread only.
func (ss *SeekableStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
    if httpRange.Length == -1 {
        httpRange.Length = ss.GetSize()
    }
    if ss.mFile != nil {
        return io.NewSectionReader(ss.mFile, httpRange.Start, httpRange.Length), nil
    }
    if ss.tmpFile != nil {
        return io.NewSectionReader(ss.tmpFile, httpRange.Start, httpRange.Length), nil
    }
    if ss.rangeReadCloser != nil {
        rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, httpRange)
        if err != nil {
            return nil, err
        }
        return rc, nil
    }
    return nil, fmt.Errorf("can't find mFile or rangeReadCloser")
}

//func (f *FileStream) GetReader() io.Reader {
//	return f.Reader
//}

// only provide Reader as full stream when it's demanded. in rapid-upload, we can skip this to save memory
func (ss *SeekableStream) Read(p []byte) (n int, err error) {
    //f.mu.Lock()

    //f.peekedOnce = true
    //defer f.mu.Unlock()
    if ss.Reader == nil {
        if ss.rangeReadCloser == nil {
            return 0, fmt.Errorf("illegal seekableStream")
        }
        rc, err := ss.rangeReadCloser.RangeRead(ss.Ctx, http_range.Range{Length: -1})
        if err != nil {
            return 0, nil
        }
        ss.Reader = io.NopCloser(rc)
        ss.Closers.Add(rc)

    }
    return ss.Reader.Read(p)
}

func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
    if ss.tmpFile != nil {
        return ss.tmpFile, nil
    }
    if ss.mFile != nil {
        return ss.mFile, nil
    }
    tmpF, err := utils.CreateTempFile(ss, ss.GetSize())
    if err != nil {
        return nil, err
    }
    ss.Add(tmpF)
    ss.tmpFile = tmpF
    ss.Reader = tmpF
    return ss.tmpFile, nil
}

func (f *FileStream) SetTmpFile(r *os.File) {
    f.Reader = r
    f.tmpFile = r
}
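The new FileStream answers small head-of-file RangeRead calls from an in-memory peek buffer (at most InMemoryBufMaxSizeBytes) and only spools to a temp file for ranges it cannot peek. An illustrative construction, mirroring the qbittorrent change above (file name and payload are made up):

package main

import (
    "io"
    "os"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/alist-org/alist/v3/pkg/http_range"
)

func main() {
    body := strings.NewReader("hello alist")
    fs := stream.FileStream{
        Obj: &model.Object{
            Name:     "hello.txt",
            Size:     int64(body.Len()),
            Modified: time.Now(),
        },
        Reader:   body,
        Mimetype: "text/plain",
    }
    // Start == 0 and Length far below the 10MB cap: served from the peek
    // buffer, and the stream remains fully readable from the top afterwards.
    r, err := fs.RangeRead(http_range.Range{Start: 0, Length: 5})
    if err != nil {
        panic(err)
    }
    _, _ = io.Copy(os.Stdout, r) // prints "hello"
    // a real caller would hand fs to op.Put, which closes it when done
}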
84 internal/stream/util.go Normal file
@@ -0,0 +1,84 @@
package stream

import (
    "context"
    "fmt"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/net"
    "github.com/alist-org/alist/v3/pkg/http_range"
    log "github.com/sirupsen/logrus"
    "io"
    "net/http"
)

func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCloserIF, error) {
    if len(link.URL) == 0 {
        return nil, fmt.Errorf("can't create RangeReadCloser since URL is empty in link")
    }
    //remoteClosers := utils.EmptyClosers()
    rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
        if link.Concurrency != 0 || link.PartSize != 0 {
            header := net.ProcessHeader(http.Header{}, link.Header)
            down := net.NewDownloader(func(d *net.Downloader) {
                d.Concurrency = link.Concurrency
                d.PartSize = link.PartSize
            })
            req := &net.HttpRequestParams{
                URL:       link.URL,
                Range:     r,
                Size:      size,
                HeaderRef: header,
            }
            rc, err := down.Download(ctx, req)
            if err != nil {
                return nil, errs.NewErr(err, "GetReadCloserFromLink failed")
            }
            return rc, nil

        }
        if len(link.URL) > 0 {
            response, err := RequestRangedHttp(ctx, link, r.Start, r.Length)
            if err != nil {
                return nil, fmt.Errorf("http request failure, status: %d err: %s", response.StatusCode, err)
            }
            if r.Start == 0 && (r.Length == -1 || r.Length == size) || response.StatusCode == http.StatusPartialContent ||
                checkContentRange(&response.Header, size, r.Start) {
                return response.Body, nil
            } else if response.StatusCode == http.StatusOK {
                log.Warnf("remote http server not supporting range request, expect low performance!")
                readCloser, err := net.GetRangedHttpReader(response.Body, r.Start, r.Length)
                if err != nil {
                    return nil, err
                }
                return readCloser, nil

            }

            return response.Body, nil
        }

        return nil, errs.NotSupport
    }
    resultRangeReadCloser := model.RangeReadCloser{RangeReader: rangeReaderFunc}
    return &resultRangeReadCloser, nil
}

func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
    header := net.ProcessHeader(http.Header{}, link.Header)
    header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)

    return net.RequestHttp(ctx, "GET", header, link.URL)
}

// 139 cloud does not properly return 206 http status code, add a hack here
func checkContentRange(header *http.Header, size, offset int64) bool {
    r, err2 := http_range.ParseRange(header.Get("Content-Range"), size)
    if err2 != nil {
        log.Warnf("exception trying to parse Content-Range, will ignore, err=%s", err2)
    }
    if len(r) == 1 && r[0].Start == offset {
        return true
    }
    return false
}
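GetRangeReadCloserFromLink is the bridge from a driver's model.Link to ranged reads: links that specify Concurrency or PartSize go through net.Downloader, plain URLs through a single ranged request (with the Content-Range hack above for servers that mislabel 206). A hypothetical caller (URL and sizes are made up):

package main

import (
    "context"
    "io"
    "os"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/alist-org/alist/v3/pkg/http_range"
)

func main() {
    link := &model.Link{URL: "https://example.com/big.bin"}
    rrc, err := stream.GetRangeReadCloserFromLink(1<<30, link) // declared size: 1 GiB
    if err != nil {
        panic(err)
    }
    // fetch the second mebibyte of the remote object
    rc, err := rrc.RangeRead(context.Background(), http_range.Range{Start: 1 << 20, Length: 1 << 20})
    if err != nil {
        panic(err)
    }
    defer rc.Close()
    _, _ = io.Copy(os.Stdout, rc)
}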
93 pkg/errgroup/errgroup.go Normal file
@@ -0,0 +1,93 @@
package errgroup

import (
    "context"
    "fmt"
    "sync"
    "sync/atomic"

    "github.com/avast/retry-go"
)

type token struct{}
type Group struct {
    cancel func(error)
    ctx    context.Context
    opts   []retry.Option

    success uint64

    wg  sync.WaitGroup
    sem chan token
}

func NewGroupWithContext(ctx context.Context, limit int, retryOpts ...retry.Option) (*Group, context.Context) {
    ctx, cancel := context.WithCancelCause(ctx)
    return (&Group{cancel: cancel, ctx: ctx, opts: append(retryOpts, retry.Context(ctx))}).SetLimit(limit), ctx
}

func (g *Group) done() {
    if g.sem != nil {
        <-g.sem
    }
    g.wg.Done()
    atomic.AddUint64(&g.success, 1)
}

func (g *Group) Wait() error {
    g.wg.Wait()
    return context.Cause(g.ctx)
}

func (g *Group) Go(f func(ctx context.Context) error) {
    if g.sem != nil {
        g.sem <- token{}
    }

    g.wg.Add(1)
    go func() {
        defer g.done()
        if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
            g.cancel(err)
        }
    }()
}

func (g *Group) TryGo(f func(ctx context.Context) error) bool {
    if g.sem != nil {
        select {
        case g.sem <- token{}:
        default:
            return false
        }
    }

    g.wg.Add(1)
    go func() {
        defer g.done()
        if err := retry.Do(func() error { return f(g.ctx) }, g.opts...); err != nil {
            g.cancel(err)
        }
    }()
    return true
}

func (g *Group) SetLimit(n int) *Group {
    if len(g.sem) != 0 {
        panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
    }
    if n > 0 {
        g.sem = make(chan token, n)
    } else {
        g.sem = nil
    }
    return g
}

func (g *Group) Success() uint64 {
    return atomic.LoadUint64(&g.success)
}

func (g *Group) Err() error {
    return context.Cause(g.ctx)
}
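The new errgroup combines x/sync-style group semantics with avast/retry-go: every task runs under retry.Do with the group's options, the first task that exhausts its retries cancels the shared context with its error as the cause, and SetLimit bounds concurrency. A usage sketch (the task body is illustrative):

package main

import (
    "context"
    "fmt"

    "github.com/alist-org/alist/v3/pkg/errgroup"
    "github.com/avast/retry-go"
)

func main() {
    // at most 3 tasks in flight; each is attempted up to 2 times
    g, ctx := errgroup.NewGroupWithContext(context.Background(), 3, retry.Attempts(2))
    for i := 0; i < 10; i++ {
        i := i // capture for Go < 1.22
        g.Go(func(ctx context.Context) error {
            fmt.Println("uploading chunk", i)
            return nil // a persistent error here would cancel ctx for all tasks
        })
    }
    if err := g.Wait(); err != nil {
        // Success() counts goroutines that finished, failed or not,
        // since done() increments the counter unconditionally
        fmt.Println("after", g.Success(), "finished tasks:", err)
    }
    _ = ctx
}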
@@ -109,21 +109,16 @@ func ParseRange(s string, size int64) ([]Range, error) { // nolint:gocognit

func (r Range) MimeHeader(contentType string, size int64) textproto.MIMEHeader {
    return textproto.MIMEHeader{
        "Content-Range": {r.contentRange(size)},
        "Content-Range": {r.ContentRange(size)},
        "Content-Type":  {contentType},
    }
}

// for http response header
func (r Range) contentRange(size int64) string {
    return fmt.Sprintf("bytes %d-%d/%d", r.Start, r.Start+r.Length-1, size)
}

// ApplyRangeToHttpHeader for http request header
func ApplyRangeToHttpHeader(p Range, headerRef *http.Header) *http.Header {
func ApplyRangeToHttpHeader(p Range, headerRef http.Header) http.Header {
    header := headerRef
    if header == nil {
        header = &http.Header{}
        header = http.Header{}
    }
    if p.Start == 0 && p.Length < 0 {
        header.Del("Range")
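With headers passed by value, ApplyRangeToHttpHeader can take a fresh http.Header and hand back the same map with the Range header set. A quick check of the request-side formatting (the printed value assumes the usual bytes=start-end encoding that pairs with the response-side ContentRange above):

package main

import (
    "fmt"
    "net/http"

    "github.com/alist-org/alist/v3/pkg/http_range"
)

func main() {
    h := http_range.ApplyRangeToHttpHeader(http_range.Range{Start: 100, Length: 50}, http.Header{})
    fmt.Println(h.Get("Range")) // expected: bytes=100-149

    // Start == 0 with negative Length means "whole file": the header is removed
    h = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: 0, Length: -1}, h)
    fmt.Println(h.Get("Range") == "") // true
}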
@@ -2,7 +2,6 @@ package utils

import (
    "fmt"
    "github.com/alist-org/alist/v3/internal/errs"
    "io"
    "mime"
    "os"
@@ -10,6 +9,8 @@ import (
    "path/filepath"
    "strings"

    "github.com/alist-org/alist/v3/internal/errs"

    "github.com/alist-org/alist/v3/internal/conf"
    log "github.com/sirupsen/logrus"
)
@@ -112,7 +113,7 @@ func CreateNestedFile(path string) (*os.File, error) {
}

// CreateTempFile create temp file from io.ReadCloser, and seek to 0
func CreateTempFile(r io.ReadCloser, size int64) (*os.File, error) {
func CreateTempFile(r io.Reader, size int64) (*os.File, error) {
    if f, ok := r.(*os.File); ok {
        return f, nil
    }
@@ -127,7 +128,7 @@ func CreateTempFile(r io.ReadCloser, size int64) (*os.File, error) {
    }
    if size != 0 && readBytes != size {
        _ = os.Remove(f.Name())
        return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %s, expect = %s ", readBytes, size)
        return nil, errs.NewErr(err, "CreateTempFile failed, incoming stream actual size= %d, expect = %d ", readBytes, size)
    }
    _, err = f.Seek(0, io.SeekStart)
    if err != nil {
@@ -170,3 +171,10 @@ func GetMimeType(name string) string {
    }
    return "application/octet-stream"
}

const (
    KB = 1 << (10 * (iota + 1))
    MB
    GB
    TB
)
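CreateTempFile now accepts any io.Reader rather than io.ReadCloser and double-checks how many bytes it actually spooled. A minimal sketch; note it assumes an initialized AList configuration, since the temp file is created under the configured temp directory:

package main

import (
    "fmt"
    "strings"

    "github.com/alist-org/alist/v3/pkg/utils"
)

func main() {
    r := strings.NewReader("0123456789")
    f, err := utils.CreateTempFile(r, 10) // a size mismatch now yields a %d-formatted error
    if err != nil {
        panic(err)
    }
    defer f.Close()
    head := make([]byte, 4)
    _, _ = f.Read(head)       // the file comes back seeked to offset 0
    fmt.Println(string(head)) // "0123"
    fmt.Println(utils.MB)     // 1048576, from the new size constants
}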
Some files were not shown because too many files have changed in this diff.