Compare commits: feat/115op ... main (29 commits)

Commits (SHA1):
3375c26c41, ab68faef44, 2e21df0661, af18cb138b, 31c55a2adf, 465dd1703d,
a6304285b6, affd0cecd1, 37640221c0, e4bd223d1c, 0cde4e73d6, 7b62dcb88c,
c38dc6df7c, 5668e4a4ea, 1335f80362, 704d3854df, 44cc71d354, 9a9aee9ac6,
4fcc3a187e, 10a76c701d, 6e13923225, 32890da29f, 758554a40f, 4563aea47e,
35d6f3b8fc, b4e6ab12d9, 3499c4db87, d20f41d687, d16ba65f42
@@ -77,6 +77,7 @@ English | [中文](./README_cn.md) | [日本語](./README_ja.md) | [Contributing
 - [x] [Dropbox](https://www.dropbox.com/)
 - [x] [FeijiPan](https://www.feijipan.com/)
 - [x] [dogecloud](https://www.dogecloud.com/product/oss)
+- [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs)
 - [x] Easy to deploy and out-of-the-box
 - [x] File preview (PDF, markdown, code, plain text, ...)
 - [x] Image preview in gallery mode
@@ -12,6 +12,7 @@ import (
     "strings"

     _ "github.com/alist-org/alist/v3/drivers"
+    "github.com/alist-org/alist/v3/internal/bootstrap"
     "github.com/alist-org/alist/v3/internal/bootstrap/data"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/internal/op"

@@ -137,6 +138,7 @@ var LangCmd = &cobra.Command{
     Use:   "lang",
     Short: "Generate language json file",
     Run: func(cmd *cobra.Command, args []string) {
+        bootstrap.InitConfig()
         err := os.MkdirAll("lang", 0777)
         if err != nil {
             utils.Log.Fatalf("failed create folder: %s", err.Error())
drivers/115_open/driver.go (Normal file, 299 lines)
@@ -0,0 +1,299 @@
package _115_open

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/alist-org/alist/v3/cmd/flags"
    "github.com/alist-org/alist/v3/drivers/base"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/pkg/utils"
    sdk "github.com/xhofe/115-sdk-go"
)

type Open115 struct {
    model.Storage
    Addition
    client *sdk.Client
}

func (d *Open115) Config() driver.Config {
    return config
}

func (d *Open115) GetAddition() driver.Additional {
    return &d.Addition
}

func (d *Open115) Init(ctx context.Context) error {
    d.client = sdk.New(sdk.WithRefreshToken(d.Addition.RefreshToken),
        sdk.WithAccessToken(d.Addition.AccessToken),
        sdk.WithOnRefreshToken(func(s1, s2 string) {
            d.Addition.AccessToken = s1
            d.Addition.RefreshToken = s2
            op.MustSaveDriverStorage(d)
        }))
    if flags.Debug || flags.Dev {
        d.client.SetDebug(true)
    }
    _, err := d.client.UserInfo(ctx)
    if err != nil {
        return err
    }
    return nil
}

func (d *Open115) Drop(ctx context.Context) error {
    return nil
}

func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
    var res []model.Obj
    pageSize := int64(200)
    offset := int64(0)
    for {
        resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
            CID:    dir.GetID(),
            Limit:  pageSize,
            Offset: offset,
            ASC:    d.Addition.OrderDirection == "asc",
            O:      d.Addition.OrderBy,
            // Cur: 1,
            ShowDir: true,
        })
        if err != nil {
            return nil, err
        }
        res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
            obj := Obj(src)
            return &obj
        })...)
        if len(res) >= int(resp.Count) {
            break
        }
        offset += pageSize
    }
    return res, nil
}

func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
    var ua string
    if args.Header != nil {
        ua = args.Header.Get("User-Agent")
    }
    if ua == "" {
        ua = base.UserAgent
    }
    obj, ok := file.(*Obj)
    if !ok {
        return nil, fmt.Errorf("can't convert obj")
    }
    pc := obj.Pc
    resp, err := d.client.DownURL(ctx, pc, ua)
    if err != nil {
        return nil, err
    }
    u, ok := resp[obj.GetID()]
    if !ok {
        return nil, fmt.Errorf("can't get link")
    }
    return &model.Link{
        URL: u.URL.URL,
        Header: http.Header{
            "User-Agent": []string{ua},
        },
    }, nil
}

func (d *Open115) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
    resp, err := d.client.Mkdir(ctx, parentDir.GetID(), dirName)
    if err != nil {
        return nil, err
    }
    return &Obj{
        Fid:  resp.FileID,
        Pid:  parentDir.GetID(),
        Fn:   dirName,
        Fc:   "0",
        Upt:  time.Now().Unix(),
        Uet:  time.Now().Unix(),
        UpPt: time.Now().Unix(),
    }, nil
}

func (d *Open115) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    _, err := d.client.Move(ctx, &sdk.MoveReq{
        FileIDs: srcObj.GetID(),
        ToCid:   dstDir.GetID(),
    })
    if err != nil {
        return nil, err
    }
    return srcObj, nil
}

func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
    _, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
        FileID:  srcObj.GetID(),
        FileNma: newName,
    })
    if err != nil {
        return nil, err
    }
    obj, ok := srcObj.(*Obj)
    if ok {
        obj.Fn = newName
    }
    return srcObj, nil
}

func (d *Open115) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
    _, err := d.client.Copy(ctx, &sdk.CopyReq{
        PID:     dstDir.GetID(),
        FileID:  srcObj.GetID(),
        NoDupli: "1",
    })
    if err != nil {
        return nil, err
    }
    return srcObj, nil
}

func (d *Open115) Remove(ctx context.Context, obj model.Obj) error {
    _obj, ok := obj.(*Obj)
    if !ok {
        return fmt.Errorf("can't convert obj")
    }
    _, err := d.client.DelFile(ctx, &sdk.DelFileReq{
        FileIDs:  _obj.GetID(),
        ParentID: _obj.Pid,
    })
    if err != nil {
        return err
    }
    return nil
}

func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
    tempF, err := file.CacheFullInTempFile()
    if err != nil {
        return err
    }
    // calculate the full-file SHA-1
    sha1, err := utils.HashReader(utils.SHA1, tempF)
    if err != nil {
        return err
    }
    _, err = tempF.Seek(0, io.SeekStart)
    if err != nil {
        return err
    }
    // SHA-1 of the first 128 KB
    sha1128k, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, 128*1024))
    if err != nil {
        return err
    }
    _, err = tempF.Seek(0, io.SeekStart)
    if err != nil {
        return err
    }
    // 1. init upload
    resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
        FileName: file.GetName(),
        FileSize: file.GetSize(),
        Target:   dstDir.GetID(),
        FileID:   strings.ToUpper(sha1),
        PreID:    strings.ToUpper(sha1128k),
    })
    if err != nil {
        return err
    }
    if resp.Status == 2 {
        return nil
    }
    // 2. two-way verify
    if utils.SliceContains([]int{6, 7, 8}, resp.Status) {
        // "sign_check": "2392148-2392298": the SHA-1 of the bytes from offset 2392148 through 2392298, inclusive
        signCheck := strings.Split(resp.SignCheck, "-")
        start, err := strconv.ParseInt(signCheck[0], 10, 64)
        if err != nil {
            return err
        }
        end, err := strconv.ParseInt(signCheck[1], 10, 64)
        if err != nil {
            return err
        }
        _, err = tempF.Seek(start, io.SeekStart)
        if err != nil {
            return err
        }
        signVal, err := utils.HashReader(utils.SHA1, io.LimitReader(tempF, end-start+1))
        if err != nil {
            return err
        }
        _, err = tempF.Seek(0, io.SeekStart)
        if err != nil {
            return err
        }
        resp, err = d.client.UploadInit(ctx, &sdk.UploadInitReq{
            FileName: file.GetName(),
            FileSize: file.GetSize(),
            Target:   dstDir.GetID(),
            FileID:   strings.ToUpper(sha1),
            PreID:    strings.ToUpper(sha1128k),
            SignKey:  resp.SignKey,
            SignVal:  strings.ToUpper(signVal),
        })
        if err != nil {
            return err
        }
        if resp.Status == 2 {
            return nil
        }
    }
    // 3. get upload token
    tokenResp, err := d.client.UploadGetToken(ctx)
    if err != nil {
        return err
    }
    // 4. upload
    err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp)
    if err != nil {
        return err
    }
    return nil
}

// func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// 	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement
// }

// func (d *Open115) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// 	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement
// }

// func (d *Open115) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// 	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
// 	return nil, errs.NotImplement
// }

// func (d *Open115) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// 	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// 	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// 	// return errs.NotImplement to use an internal archive tool
// 	return nil, errs.NotImplement
// }

//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Open115)(nil)
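Note: the two-way verify in step 2 hashes only a byte range that the server names in sign_check. Using the sample value from the comment ("2392148-2392298"), a standalone sketch of just that range-hash step (the file name and contents are made up for illustration; the PR itself does this via utils.HashReader on the cached temp file):

package main

import (
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "io"
    "os"
    "strconv"
    "strings"
)

// rangeSHA1 hashes bytes [start, end] (inclusive) of f, mirroring the
// sign_check handling in Put above.
func rangeSHA1(f *os.File, signCheck string) (string, error) {
    parts := strings.Split(signCheck, "-")
    start, err := strconv.ParseInt(parts[0], 10, 64)
    if err != nil {
        return "", err
    }
    end, err := strconv.ParseInt(parts[1], 10, 64)
    if err != nil {
        return "", err
    }
    if _, err := f.Seek(start, io.SeekStart); err != nil {
        return "", err
    }
    h := sha1.New()
    if _, err := io.Copy(h, io.LimitReader(f, end-start+1)); err != nil {
        return "", err
    }
    // the driver uppercases the digest before sending it as SignVal
    return strings.ToUpper(hex.EncodeToString(h.Sum(nil))), nil
}

func main() {
    f, err := os.Open("big.bin") // hypothetical local file
    if err != nil {
        panic(err)
    }
    defer f.Close()
    sig, err := rangeSHA1(f, "2392148-2392298") // sample range from the comment above
    if err != nil {
        panic(err)
    }
    fmt.Println("SignVal:", sig)
}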
drivers/115_open/meta.go (Normal file, 36 lines)
@@ -0,0 +1,36 @@
package _115_open

import (
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
    // Usually one of two
    driver.RootID
    // define other
    RefreshToken   string `json:"refresh_token" required:"true"`
    OrderBy        string `json:"order_by" type:"select" options:"file_name,file_size,user_utime,file_type"`
    OrderDirection string `json:"order_direction" type:"select" options:"asc,desc"`
    AccessToken    string
}

var config = driver.Config{
    Name:              "115 Open",
    LocalSort:         false,
    OnlyLocal:         false,
    OnlyProxy:         false,
    NoCache:           false,
    NoUpload:          false,
    NeedMs:            false,
    DefaultRoot:       "0",
    CheckStatus:       false,
    Alert:             "",
    NoOverwriteUpload: false,
}

func init() {
    op.RegisterDriver(func() driver.Driver {
        return &Open115{}
    })
}
drivers/115_open/types.go (Normal file, 59 lines)
@@ -0,0 +1,59 @@
package _115_open

import (
    "time"

    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    sdk "github.com/xhofe/115-sdk-go"
)

type Obj sdk.GetFilesResp_File

// Thumb implements model.Thumb.
func (o *Obj) Thumb() string {
    return o.Thumbnail
}

// CreateTime implements model.Obj.
func (o *Obj) CreateTime() time.Time {
    return time.Unix(o.UpPt, 0)
}

// GetHash implements model.Obj.
func (o *Obj) GetHash() utils.HashInfo {
    return utils.NewHashInfo(utils.SHA1, o.Sha1)
}

// GetID implements model.Obj.
func (o *Obj) GetID() string {
    return o.Fid
}

// GetName implements model.Obj.
func (o *Obj) GetName() string {
    return o.Fn
}

// GetPath implements model.Obj.
func (o *Obj) GetPath() string {
    return ""
}

// GetSize implements model.Obj.
func (o *Obj) GetSize() int64 {
    return o.FS
}

// IsDir implements model.Obj.
func (o *Obj) IsDir() bool {
    return o.Fc == "0"
}

// ModTime implements model.Obj.
func (o *Obj) ModTime() time.Time {
    return time.Unix(o.Upt, 0)
}

var _ model.Obj = (*Obj)(nil)
var _ model.Thumb = (*Obj)(nil)
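Note: types.go is a thin adapter: it re-types the SDK's raw file record so it satisfies alist's model.Obj and model.Thumb interfaces. An example-test style sketch of the mapping (field values are made up; it assumes sdk.GetFilesResp_File is constructible with these exported fields, which the methods above imply):

package _115_open

import (
    "fmt"
    "time"

    sdk "github.com/xhofe/115-sdk-go"
)

func ExampleObj() {
    // Made-up record: a folder ("fc" == "0") modified at Unix time 1700000000.
    rec := sdk.GetFilesResp_File{
        Fid: "1234", // file ID
        Pid: "0",    // parent ID (root)
        Fn:  "docs", // name
        Fc:  "0",    // "0" => folder
        Upt: 1700000000,
    }
    obj := Obj(rec) // re-type into the alist adapter
    fmt.Println(obj.IsDir(), obj.GetName(), obj.ModTime().UTC().Format(time.RFC3339))
    // Output: true docs 2023-11-14T22:13:20Z
}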
drivers/115_open/upload.go (Normal file, 140 lines)
@@ -0,0 +1,140 @@
package _115_open

import (
    "context"
    "encoding/base64"
    "io"
    "time"

    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/pkg/utils"
    "github.com/aliyun/aliyun-oss-go-sdk/oss"
    "github.com/avast/retry-go"
    sdk "github.com/xhofe/115-sdk-go"
)

func calPartSize(fileSize int64) int64 {
    var partSize int64 = 20 * utils.MB
    if fileSize > partSize {
        if fileSize > 1*utils.TB { // file size over 1 TB
            partSize = 5 * utils.GB // part size 5 GB
        } else if fileSize > 768*utils.GB { // over 768 GB
            partSize = 109951163 // ≈ 104.8576 MB, splits 1 TB into 10,000 parts
        } else if fileSize > 512*utils.GB { // over 512 GB
            partSize = 82463373 // ≈ 78.6432 MB
        } else if fileSize > 384*utils.GB { // over 384 GB
            partSize = 54975582 // ≈ 52.4288 MB
        } else if fileSize > 256*utils.GB { // over 256 GB
            partSize = 41231687 // ≈ 39.3216 MB
        } else if fileSize > 128*utils.GB { // over 128 GB
            partSize = 27487791 // ≈ 26.2144 MB
        }
    }
    return partSize
}

func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
    ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
    if err != nil {
        return err
    }
    bucket, err := ossClient.Bucket(initResp.Bucket)
    if err != nil {
        return err
    }

    err = bucket.PutObject(initResp.Object, tempF,
        oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
        oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
    )

    return err
}

// type CallbackResult struct {
// 	State   bool   `json:"state"`
// 	Code    int    `json:"code"`
// 	Message string `json:"message"`
// 	Data    struct {
// 		PickCode string `json:"pick_code"`
// 		FileName string `json:"file_name"`
// 		FileSize int64  `json:"file_size"`
// 		FileID   string `json:"file_id"`
// 		ThumbURL string `json:"thumb_url"`
// 		Sha1     string `json:"sha1"`
// 		Aid      int    `json:"aid"`
// 		Cid      string `json:"cid"`
// 	} `json:"data"`
// }

func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
    fileSize := stream.GetSize()
    chunkSize := calPartSize(fileSize)

    ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
    if err != nil {
        return err
    }
    bucket, err := ossClient.Bucket(initResp.Bucket)
    if err != nil {
        return err
    }

    imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential())
    if err != nil {
        return err
    }

    partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
    parts := make([]oss.UploadPart, partNum)
    offset := int64(0)
    for i := int64(1); i <= partNum; i++ {
        if utils.IsCanceled(ctx) {
            return ctx.Err()
        }

        partSize := chunkSize
        if i == partNum {
            partSize = fileSize - (i-1)*chunkSize
        }
        rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
        err = retry.Do(func() error {
            _ = rd.Reset()
            rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
            part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
            if err != nil {
                return err
            }
            parts[i-1] = part
            return nil
        },
            retry.Attempts(3),
            retry.DelayType(retry.BackOffDelay),
            retry.Delay(time.Second))
        if err != nil {
            return err
        }

        if i == partNum {
            offset = fileSize
        } else {
            offset += partSize
        }
        up(float64(offset) / float64(fileSize))
    }

    // callbackRespBytes := make([]byte, 1024)
    _, err = bucket.CompleteMultipartUpload(
        imur,
        parts,
        oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
        oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
        // oss.CallbackResult(&callbackRespBytes),
    )
    if err != nil {
        return err
    }

    return nil
}
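Note: the point of the fixed byte values in calPartSize is the part count: every tier keeps a file at or below OSS's 10,000-part ceiling. A standalone sketch that drives the same tier logic with a few sizes (it assumes utils.MB/GB/TB are binary units, 1 KB = 1024, as in alist's utils package; the tier body is copied from above):

package main

import "fmt"

const (
    MB int64 = 1024 * 1024
    GB       = 1024 * MB
    TB       = 1024 * GB
)

// same tiers as calPartSize above, restated with local constants
func calPartSize(fileSize int64) int64 {
    partSize := int64(20 * MB)
    switch {
    case fileSize <= partSize:
        // small files keep the 20 MB default
    case fileSize > 1*TB:
        partSize = 5 * GB
    case fileSize > 768*GB:
        partSize = 109951163
    case fileSize > 512*GB:
        partSize = 82463373
    case fileSize > 384*GB:
        partSize = 54975582
    case fileSize > 256*GB:
        partSize = 41231687
    case fileSize > 128*GB:
        partSize = 27487791
    }
    return partSize
}

func main() {
    for _, size := range []int64{10 * GB, 200 * GB, 900 * GB} {
        ps := calPartSize(size)
        // ceil(size/ps) parts; always <= 10,000 for these tiers
        fmt.Printf("size=%d partSize=%d parts=%d\n", size, ps, (size+ps-1)/ps)
    }
}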
drivers/115_open/util.go (Normal file, 3 lines)
@@ -0,0 +1,3 @@
package _115_open

// do other things not defined in the Driver interface
@@ -3,6 +3,7 @@ package _139
 import (
     "context"
     "encoding/base64"
+    "encoding/xml"
     "fmt"
     "io"
     "net/http"
@@ -740,14 +741,20 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
             break
         }
     }
+    var reportSize int64
+    if d.ReportRealSize {
+        reportSize = stream.GetSize()
+    } else {
+        reportSize = 0
+    }
     data := base.Json{
         "manualRename": 2,
         "operation":    0,
         "fileCount":    1,
-        "totalSize":    0, // remove the upload size limit
+        "totalSize":    reportSize,
         "uploadContentList": []base.Json{{
             "contentName": stream.GetName(),
-            "contentSize": 0, // remove the upload size limit
+            "contentSize": reportSize,
             // "digest": "5a3231986ce7a6b46e408612d385bafa"
         }},
         "parentCatalogID": dstDir.GetID(),
@@ -765,10 +772,10 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
         "operation": 0,
         "path":      path.Join(dstDir.GetPath(), dstDir.GetID()),
         "seqNo":     random.String(32), // the sequence number must not be empty
-        "totalSize": 0,
+        "totalSize": reportSize,
         "uploadContentList": []base.Json{{
             "contentName": stream.GetName(),
-            "contentSize": 0,
+            "contentSize": reportSize,
             // "digest": "5a3231986ce7a6b46e408612d385bafa"
         }},
     })
@@ -779,6 +786,9 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
     if err != nil {
         return err
     }
+    if resp.Data.Result.ResultCode != "0" {
+        return fmt.Errorf("get file upload url failed with result code: %s, message: %s", resp.Data.Result.ResultCode, resp.Data.Result.ResultDesc)
+    }

     // Progress
     p := driver.NewProgress(stream.GetSize(), up)
@@ -820,13 +830,23 @@ func (d *Yun139) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
         if err != nil {
             return err
         }
-        _ = res.Body.Close()
-        log.Debugf("%+v", res)
         if res.StatusCode != http.StatusOK {
+            res.Body.Close()
             return fmt.Errorf("unexpected status code: %d", res.StatusCode)
         }
+        bodyBytes, err := io.ReadAll(res.Body)
+        if err != nil {
+            return fmt.Errorf("error reading response body: %v", err)
+        }
+        var result InterLayerUploadResult
+        err = xml.Unmarshal(bodyBytes, &result)
+        if err != nil {
+            return fmt.Errorf("error parsing XML: %v", err)
+        }
+        if result.ResultCode != 0 {
+            return fmt.Errorf("upload failed with result code: %d, message: %s", result.ResultCode, result.Msg)
+        }
     }

     return nil
 default:
     return errs.NotImplement
@@ -12,6 +12,7 @@ type Addition struct {
     Type                 string `json:"type" type:"select" options:"personal_new,family,group,personal" default:"personal_new"`
     CloudID              string `json:"cloud_id"`
     CustomUploadPartSize int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
+    ReportRealSize       bool   `json:"report_real_size" type:"bool" default:"true" help:"Enable to report the real file size during upload"`
 }

 var config = driver.Config{
@@ -143,6 +143,13 @@ type UploadResp struct {
     } `json:"data"`
 }

+type InterLayerUploadResult struct {
+    XMLName    xml.Name `xml:"result"`
+    Text       string   `xml:",chardata"`
+    ResultCode int      `xml:"resultCode"`
+    Msg        string   `xml:"msg"`
+}
+
 type CloudContent struct {
     ContentID string `json:"contentID"`
     //Modifier string `json:"modifier"`
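Note: the inter-layer response this struct decodes is the XML body read back in the Put change above. A minimal standalone sketch of the decode (the sample payload is made up to match the struct tags, not captured from the real service):

package main

import (
    "encoding/xml"
    "fmt"
)

type InterLayerUploadResult struct {
    XMLName    xml.Name `xml:"result"`
    Text       string   `xml:",chardata"`
    ResultCode int      `xml:"resultCode"`
    Msg        string   `xml:"msg"`
}

func main() {
    // Made-up payload shaped after the struct tags above.
    payload := []byte(`<result><resultCode>9119</resultCode><msg>upload rejected</msg></result>`)
    var r InterLayerUploadResult
    if err := xml.Unmarshal(payload, &r); err != nil {
        panic(err)
    }
    fmt.Printf("code=%d msg=%q\n", r.ResultCode, r.Msg) // code=9119 msg="upload rejected"
}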
@@ -520,9 +520,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
         if utils.IsCanceled(upCtx) {
             break
         }
-        if err = sem.Acquire(ctx, 1); err != nil {
-            break
-        }
         byteData := make([]byte, sliceSize)
         if i == count {
             byteData = byteData[:lastPartSize]
@@ -541,6 +538,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
         partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))

         threadG.Go(func(ctx context.Context) error {
+            if err = sem.Acquire(ctx, 1); err != nil {
+                return err
+            }
             defer sem.Release(1)
             uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
             if err != nil {
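Note: this change moves the semaphore acquisition out of the producer loop and into the worker, so the spawning loop never blocks on a full semaphore and Acquire/Release are paired in the same goroutine. A minimal standalone sketch of the same pattern, using golang.org/x/sync/semaphore with errgroup as a stand-in for alist's threadG helper (an assumption about its behavior):

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
    "golang.org/x/sync/semaphore"
)

func main() {
    ctx := context.Background()
    sem := semaphore.NewWeighted(3) // at most 3 parts in flight
    g, ctx := errgroup.WithContext(ctx)

    for i := 1; i <= 10; i++ {
        i := i
        g.Go(func() error {
            // Acquire inside the worker: the loop that spawns workers
            // never blocks, and the slot is released by the same goroutine.
            if err := sem.Acquire(ctx, 1); err != nil {
                return err
            }
            defer sem.Release(1)
            fmt.Println("uploading part", i)
            return nil
        })
    }
    if err := g.Wait(); err != nil {
        fmt.Println("upload failed:", err)
    }
}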
@@ -3,6 +3,7 @@ package alias
 import (
     "context"
     "errors"
+    stdpath "path"
     "strings"

     "github.com/alist-org/alist/v3/internal/driver"
@@ -126,8 +127,46 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
     return nil, errs.ObjectNotFound
 }

+func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, parentDir, true)
+    if err == nil {
+        return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName))
+    }
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot make sub-dir")
+    }
+    return err
+}
+
+func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    srcPath, err := d.getReqPath(ctx, srcObj, false)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot be moved")
+    }
+    if err != nil {
+        return err
+    }
+    dstPath, err := d.getReqPath(ctx, dstDir, true)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be moved to")
+    }
+    if err != nil {
+        return err
+    }
+    return fs.Move(ctx, *srcPath, *dstPath)
+}
+
 func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
-    reqPath, err := d.getReqPath(ctx, srcObj)
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, srcObj, false)
     if err == nil {
         return fs.Rename(ctx, *reqPath, newName)
     }
@@ -137,8 +176,33 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
     return err
 }

+func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    srcPath, err := d.getReqPath(ctx, srcObj, false)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot be copied")
+    }
+    if err != nil {
+        return err
+    }
+    dstPath, err := d.getReqPath(ctx, dstDir, true)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be copied to")
+    }
+    if err != nil {
+        return err
+    }
+    _, err = fs.Copy(ctx, *srcPath, *dstPath)
+    return err
+}
+
 func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
-    reqPath, err := d.getReqPath(ctx, obj)
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, obj, false)
     if err == nil {
         return fs.Remove(ctx, *reqPath)
     }
@@ -148,4 +212,110 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }

+func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, dstDir, true)
+    if err == nil {
+        return fs.PutDirectly(ctx, *reqPath, s)
+    }
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be Put")
+    }
+    return err
+}
+
+func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    reqPath, err := d.getReqPath(ctx, dstDir, true)
+    if err == nil {
+        return fs.PutURL(ctx, *reqPath, name, url)
+    }
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot offline download")
+    }
+    return err
+}
+
+func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    root, sub := d.getRootAndPath(obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        meta, err := d.getArchiveMeta(ctx, dst, sub, args)
+        if err == nil {
+            return meta, nil
+        }
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    root, sub := d.getRootAndPath(obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        l, err := d.listArchive(ctx, dst, sub, args)
+        if err == nil {
+            return l, nil
+        }
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
+    // An alias may span two drivers, one that supports driver-side extraction and one that doesn't; how do we stay compatible with both?
+    // If the archive lives in a driver without driver-side extraction, GetArchiveMeta returns errs.NotImplement, the extract URL prefix is /ae, and Extract is never called.
+    // If the archive lives in a driver with driver-side extraction, GetArchiveMeta returns a valid value, the extract URL prefix is /ad, and Extract is called.
+    root, sub := d.getRootAndPath(obj.GetPath())
+    dsts, ok := d.pathMap[root]
+    if !ok {
+        return nil, errs.ObjectNotFound
+    }
+    for _, dst := range dsts {
+        link, err := d.extract(ctx, dst, sub, args)
+        if err == nil {
+            if !args.Redirect && len(link.URL) > 0 {
+                if d.DownloadConcurrency > 0 {
+                    link.Concurrency = d.DownloadConcurrency
+                }
+                if d.DownloadPartSize > 0 {
+                    link.PartSize = d.DownloadPartSize * utils.KB
+                }
+            }
+            return link, nil
+        }
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
+    if !d.Writable {
+        return errs.PermissionDenied
+    }
+    srcPath, err := d.getReqPath(ctx, srcObj, false)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name files cannot be decompressed")
+    }
+    if err != nil {
+        return err
+    }
+    dstPath, err := d.getReqPath(ctx, dstDir, true)
+    if errs.IsNotImplement(err) {
+        return errors.New("same-name dirs cannot be decompressed to")
+    }
+    if err != nil {
+        return err
+    }
+    _, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args)
+    return err
+}
+
 var _ driver.Driver = (*Alias)(nil)
@@ -13,13 +13,14 @@ type Addition struct {
     ProtectSameName     bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"`
     DownloadConcurrency int  `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
     DownloadPartSize    int  `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
+    Writable            bool `json:"writable" type:"bool" default:"false"`
 }

 var config = driver.Config{
     Name:             "Alias",
     LocalSort:        true,
     NoCache:          true,
-    NoUpload:         true,
+    NoUpload:         false,
     DefaultRoot:      "/",
     ProxyRangeOption: true,
 }
@@ -3,9 +3,11 @@ package alias
 import (
     "context"
     "fmt"
+    "net/url"
     stdpath "path"
     "strings"

+    "github.com/alist-org/alist/v3/internal/driver"
     "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/fs"
     "github.com/alist-org/alist/v3/internal/model"
@@ -125,9 +127,9 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs)
     return link, err
 }

-func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) {
+func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) {
     root, sub := d.getRootAndPath(obj.GetPath())
-    if sub == "" {
+    if sub == "" && !isParent {
         return nil, errs.NotSupport
     }
     dsts, ok := d.pathMap[root]
@@ -156,3 +158,68 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error)
     }
     return reqPath, nil
 }
+
+func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    reqPath := stdpath.Join(dst, sub)
+    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
+    if err != nil {
+        return nil, err
+    }
+    if _, ok := storage.(driver.ArchiveReader); ok {
+        return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{
+            ArchiveArgs: args,
+            Refresh:     true,
+        })
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    reqPath := stdpath.Join(dst, sub)
+    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
+    if err != nil {
+        return nil, err
+    }
+    if _, ok := storage.(driver.ArchiveReader); ok {
+        return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{
+            ArchiveInnerArgs: args,
+            Refresh:          true,
+        })
+    }
+    return nil, errs.NotImplement
+}
+
+func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
+    reqPath := stdpath.Join(dst, sub)
+    storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
+    if err != nil {
+        return nil, err
+    }
+    if _, ok := storage.(driver.ArchiveReader); ok {
+        if _, ok := storage.(*Alias); !ok && !args.Redirect {
+            link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
+            return link, err
+        }
+        _, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
+        if err != nil {
+            return nil, err
+        }
+        if common.ShouldProxy(storage, stdpath.Base(sub)) {
+            link := &model.Link{
+                URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
+                    common.GetApiUrl(args.HttpReq),
+                    utils.EncodePath(reqPath, true),
+                    utils.EncodePath(args.InnerPath, true),
+                    url.QueryEscape(args.Password),
+                    sign.SignArchive(reqPath)),
+            }
+            if args.HttpReq != nil && d.ProxyRange {
+                link.RangeReadCloser = common.NoProxyRange
+            }
+            return link, nil
+        }
+        link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
+        return link, err
+    }
+    return nil, errs.NotImplement
+}
@@ -5,12 +5,14 @@ import (
     "fmt"
     "io"
     "net/http"
+    "net/url"
     "path"
     "strings"

     "github.com/alist-org/alist/v3/drivers/base"
     "github.com/alist-org/alist/v3/internal/conf"
     "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/pkg/utils"
     "github.com/alist-org/alist/v3/server/common"
@@ -34,7 +36,7 @@ func (d *AListV3) GetAddition() driver.Additional {
 func (d *AListV3) Init(ctx context.Context) error {
     d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
     var resp common.Resp[MeResp]
-    _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
+    _, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
         req.SetResult(&resp)
     })
     if err != nil {
@@ -48,15 +50,15 @@ func (d *AListV3) Init(ctx context.Context) error {
         }
     }
     // re-get the user info
-    _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
+    _, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
         req.SetResult(&resp)
     })
     if err != nil {
         return err
     }
     if resp.Data.Role == model.GUEST {
-        url := d.Address + "/api/public/settings"
-        res, err := base.RestyClient.R().Get(url)
+        u := d.Address + "/api/public/settings"
+        res, err := base.RestyClient.R().Get(u)
         if err != nil {
             return err
         }
@@ -74,7 +76,7 @@ func (d *AListV3) Drop(ctx context.Context) error {

 func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
     var resp common.Resp[FsListResp]
-    _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
         req.SetResult(&resp).SetBody(ListReq{
             PageReq: model.PageReq{
                 Page: 1,
@@ -116,7 +118,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
             userAgent = base.UserAgent
         }
     }
-    _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
         req.SetResult(&resp).SetBody(FsGetReq{
             Path:     file.GetPath(),
             Password: d.MetaPassword,
@@ -131,7 +133,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
 }

 func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
-    _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
         req.SetBody(MkdirOrLinkReq{
             Path: path.Join(parentDir.GetPath(), dirName),
         })
@@ -140,7 +142,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
 }

 func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
-    _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
         req.SetBody(MoveCopyReq{
             SrcDir: path.Dir(srcObj.GetPath()),
             DstDir: dstDir.GetPath(),
@@ -151,7 +153,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
 }

 func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
-    _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
         req.SetBody(RenameReq{
             Path: srcObj.GetPath(),
             Name: newName,
@@ -161,7 +163,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string)
 }

 func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
-    _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
         req.SetBody(MoveCopyReq{
             SrcDir: path.Dir(srcObj.GetPath()),
             DstDir: dstDir.GetPath(),
@@ -172,7 +174,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
 }

 func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
-    _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
+    _, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
         req.SetBody(RemoveReq{
             Dir:   path.Dir(obj.GetPath()),
             Names: []string{obj.GetName()},
@@ -232,6 +234,127 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
     return nil
 }

+func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    if !d.ForwardArchiveReq {
+        return nil, errs.NotImplement
+    }
+    var resp common.Resp[ArchiveMetaResp]
+    _, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
+        req.SetResult(&resp).SetBody(ArchiveMetaReq{
+            ArchivePass: args.Password,
+            Password:    d.MetaPassword,
+            Path:        obj.GetPath(),
+            Refresh:     false,
+        })
+    })
+    if code == 202 {
+        return nil, errs.WrongArchivePassword
+    }
+    if err != nil {
+        return nil, err
+    }
+    var tree []model.ObjTree
+    if resp.Data.Content != nil {
+        tree = make([]model.ObjTree, 0, len(resp.Data.Content))
+        for _, content := range resp.Data.Content {
+            tree = append(tree, &content)
+        }
+    }
+    return &model.ArchiveMetaInfo{
+        Comment:   resp.Data.Comment,
+        Encrypted: resp.Data.Encrypted,
+        Tree:      tree,
+    }, nil
+}
+
+func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    if !d.ForwardArchiveReq {
+        return nil, errs.NotImplement
+    }
+    var resp common.Resp[ArchiveListResp]
+    _, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
+        req.SetResult(&resp).SetBody(ArchiveListReq{
+            ArchiveMetaReq: ArchiveMetaReq{
+                ArchivePass: args.Password,
+                Password:    d.MetaPassword,
+                Path:        obj.GetPath(),
+                Refresh:     false,
+            },
+            PageReq: model.PageReq{
+                Page:    1,
+                PerPage: 0,
+            },
+            InnerPath: args.InnerPath,
+        })
+    })
+    if code == 202 {
+        return nil, errs.WrongArchivePassword
+    }
+    if err != nil {
+        return nil, err
+    }
+    var files []model.Obj
+    for _, f := range resp.Data.Content {
+        file := model.ObjThumb{
+            Object: model.Object{
+                Name:     f.Name,
+                Modified: f.Modified,
+                Ctime:    f.Created,
+                Size:     f.Size,
+                IsFolder: f.IsDir,
+                HashInfo: utils.FromString(f.HashInfo),
+            },
+            Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
+        }
+        files = append(files, &file)
+    }
+    return files, nil
+}
+
+func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
+    if !d.ForwardArchiveReq {
+        return nil, errs.NotSupport
+    }
+    var resp common.Resp[ArchiveMetaResp]
+    _, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
+        req.SetResult(&resp).SetBody(ArchiveMetaReq{
+            ArchivePass: args.Password,
+            Password:    d.MetaPassword,
+            Path:        obj.GetPath(),
+            Refresh:     false,
+        })
+    })
+    if err != nil {
+        return nil, err
+    }
+    return &model.Link{
+        URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s",
+            resp.Data.RawURL,
+            utils.EncodePath(args.InnerPath, true),
+            url.QueryEscape(args.Password),
+            resp.Data.Sign),
+    }, nil
+}
+
+func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
+    if !d.ForwardArchiveReq {
+        return errs.NotImplement
+    }
+    dir, name := path.Split(srcObj.GetPath())
+    _, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) {
+        req.SetBody(DecompressReq{
+            ArchivePass:   args.Password,
+            CacheFull:     args.CacheFull,
+            DstDir:        dstDir.GetPath(),
+            InnerPath:     args.InnerPath,
+            Name:          []string{name},
+            PutIntoNewDir: args.PutIntoNewDir,
+            SrcDir:        dir,
+        })
+    })
+    return err
+}
+
 //func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
 //	return nil, errs.NotSupport
 //}
@@ -7,12 +7,13 @@ import (

 type Addition struct {
     driver.RootPath
     Address           string `json:"url" required:"true"`
     MetaPassword      string `json:"meta_password"`
     Username          string `json:"username"`
     Password          string `json:"password"`
     Token             string `json:"token"`
     PassUAToUpsteam   bool   `json:"pass_ua_to_upsteam" default:"true"`
+    ForwardArchiveReq bool   `json:"forward_archive_requests" default:"true"`
 }

 var config = driver.Config{
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
|
"github.com/alist-org/alist/v3/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
type ListReq struct {
|
type ListReq struct {
|
||||||
@@ -81,3 +82,89 @@ type MeResp struct
 	SsoId string `json:"sso_id"`
 	Otp   bool   `json:"otp"`
 }
+
+type ArchiveMetaReq struct {
+	ArchivePass string `json:"archive_pass"`
+	Password    string `json:"password"`
+	Path        string `json:"path"`
+	Refresh     bool   `json:"refresh"`
+}
+
+type TreeResp struct {
+	ObjResp
+	Children  []TreeResp `json:"children"`
+	hashCache *utils.HashInfo
+}
+
+func (t *TreeResp) GetSize() int64 {
+	return t.Size
+}
+
+func (t *TreeResp) GetName() string {
+	return t.Name
+}
+
+func (t *TreeResp) ModTime() time.Time {
+	return t.Modified
+}
+
+func (t *TreeResp) CreateTime() time.Time {
+	return t.Created
+}
+
+func (t *TreeResp) IsDir() bool {
+	return t.ObjResp.IsDir
+}
+
+func (t *TreeResp) GetHash() utils.HashInfo {
+	return utils.FromString(t.HashInfo)
+}
+
+func (t *TreeResp) GetID() string {
+	return ""
+}
+
+func (t *TreeResp) GetPath() string {
+	return ""
+}
+
+func (t *TreeResp) GetChildren() []model.ObjTree {
+	ret := make([]model.ObjTree, 0, len(t.Children))
+	for _, child := range t.Children {
+		ret = append(ret, &child)
+	}
+	return ret
+}
+
+func (t *TreeResp) Thumb() string {
+	return t.ObjResp.Thumb
+}
+
+type ArchiveMetaResp struct {
+	Comment   string     `json:"comment"`
+	Encrypted bool       `json:"encrypted"`
+	Content   []TreeResp `json:"content"`
+	RawURL    string     `json:"raw_url"`
+	Sign      string     `json:"sign"`
+}
+
+type ArchiveListReq struct {
+	model.PageReq
+	ArchiveMetaReq
+	InnerPath string `json:"inner_path"`
+}
+
+type ArchiveListResp struct {
+	Content []ObjResp `json:"content"`
+	Total   int64     `json:"total"`
+}
+
+type DecompressReq struct {
+	ArchivePass   string   `json:"archive_pass"`
+	CacheFull     bool     `json:"cache_full"`
+	DstDir        string   `json:"dst_dir"`
+	InnerPath     string   `json:"inner_path"`
+	Name          []string `json:"name"`
+	PutIntoNewDir bool     `json:"put_into_new_dir"`
+	SrcDir        string   `json:"src_dir"`
+}
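The `TreeResp` methods above let archive listings behave like a nested object tree. A minimal, runnable sketch of walking such a tree (the struct here is a trimmed stand-in for the type added above, not the real one):

```go
package main

import "fmt"

// TreeResp is a trimmed stand-in for the archive tree type added in this diff.
type TreeResp struct {
	Name     string
	Children []TreeResp
}

// walk prints the tree with indentation, visiting children recursively.
func walk(t TreeResp, depth int) {
	fmt.Printf("%*s%s\n", depth*2, "", t.Name)
	for _, c := range t.Children {
		walk(c, depth+1)
	}
}

func main() {
	root := TreeResp{Name: "archive.zip", Children: []TreeResp{
		{Name: "docs", Children: []TreeResp{{Name: "readme.md"}}},
		{Name: "main.go"},
	}}
	walk(root, 0)
}
```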
@@ -17,7 +17,7 @@ func (d *AListV3) login() error {
 		return nil
 	}
 	var resp common.Resp[LoginResp]
-	_, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
+	_, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
 		req.SetResult(&resp).SetBody(base.Json{
 			"username": d.Username,
 			"password": d.Password,
@@ -31,7 +31,7 @@ func (d *AListV3) login() error {
 	return nil
 }

-func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
+func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) {
 	url := d.Address + "/api" + api
 	req := base.RestyClient.R()
 	req.SetHeader("Authorization", d.Token)
@@ -40,22 +40,26 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
 	}
 	res, err := req.Execute(method, url)
 	if err != nil {
-		return nil, err
+		code := 0
+		if res != nil {
+			code = res.StatusCode()
+		}
+		return nil, code, err
 	}
 	log.Debugf("[alist_v3] response body: %s", res.String())
 	if res.StatusCode() >= 400 {
-		return nil, fmt.Errorf("request failed, status: %s", res.Status())
+		return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
 	}
 	code := utils.Json.Get(res.Body(), "code").ToInt()
 	if code != 200 {
 		if (code == 401 || code == 403) && !utils.IsBool(retry...) {
 			err = d.login()
 			if err != nil {
-				return nil, err
+				return nil, code, err
 			}
 			return d.request(api, method, callback, true)
 		}
-		return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
+		return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
 	}
-	return res.Body(), nil
+	return res.Body(), 200, nil
 }
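With the extra return value, callers of `request` can now branch on the upstream HTTP status instead of parsing error strings. A self-contained analogue of the new (body, status, error) contract (the mock endpoint and error mapping below are illustrative, not part of the diff):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// request mimics the updated signature: payload, HTTP status code, error.
func request(path string) ([]byte, int, error) {
	if path == "/missing" {
		return nil, 404, fmt.Errorf("request failed, status: 404 Not Found")
	}
	return []byte(`{"code":200}`), 200, nil
}

func main() {
	_, code, err := request("/missing")
	if err != nil && code == 404 {
		err = errNotFound // callers can now map upstream statuses onto typed errors
	}
	fmt.Println(code, err)
}
```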
@@ -2,6 +2,7 @@ package drivers

 import (
 	_ "github.com/alist-org/alist/v3/drivers/115"
+	_ "github.com/alist-org/alist/v3/drivers/115_open"
 	_ "github.com/alist-org/alist/v3/drivers/115_share"
 	_ "github.com/alist-org/alist/v3/drivers/123"
 	_ "github.com/alist-org/alist/v3/drivers/123_link"
@@ -15,12 +16,14 @@ import (
 	_ "github.com/alist-org/alist/v3/drivers/aliyundrive"
 	_ "github.com/alist-org/alist/v3/drivers/aliyundrive_open"
 	_ "github.com/alist-org/alist/v3/drivers/aliyundrive_share"
+	_ "github.com/alist-org/alist/v3/drivers/azure_blob"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_netdisk"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_photo"
 	_ "github.com/alist-org/alist/v3/drivers/baidu_share"
 	_ "github.com/alist-org/alist/v3/drivers/chaoxing"
 	_ "github.com/alist-org/alist/v3/drivers/cloudreve"
 	_ "github.com/alist-org/alist/v3/drivers/crypt"
+	_ "github.com/alist-org/alist/v3/drivers/doubao"
 	_ "github.com/alist-org/alist/v3/drivers/dropbox"
 	_ "github.com/alist-org/alist/v3/drivers/febbox"
 	_ "github.com/alist-org/alist/v3/drivers/ftp"
drivers/azure_blob/driver.go (new file, 313 lines)
@@ -0,0 +1,313 @@
package azure_blob

import (
	"context"
	"fmt"
	"io"
	"path"
	"regexp"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/model"
)

// Azure Blob Storage driver based on the blob APIs
// Link: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api
type AzureBlob struct {
	model.Storage
	Addition
	client          *azblob.Client
	containerClient *container.Client
	config          driver.Config
}

// Config returns the driver configuration.
func (d *AzureBlob) Config() driver.Config {
	return d.config
}

// GetAddition returns additional settings specific to Azure Blob Storage.
func (d *AzureBlob) GetAddition() driver.Additional {
	return &d.Addition
}

// Init initializes the Azure Blob Storage client using shared key authentication.
func (d *AzureBlob) Init(ctx context.Context) error {
	// Validate the endpoint URL
	accountName := extractAccountName(d.Addition.Endpoint)
	if !regexp.MustCompile(`^[a-z0-9]+$`).MatchString(accountName) {
		return fmt.Errorf("invalid storage account name: must be chars of lowercase letters or numbers only")
	}

	credential, err := azblob.NewSharedKeyCredential(accountName, d.Addition.AccessKey)
	if err != nil {
		return fmt.Errorf("failed to create credential: %w", err)
	}

	// Check if Endpoint is just the account name
	endpoint := d.Addition.Endpoint
	if accountName == endpoint {
		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
	}
	// Initialize the Azure Blob client with a retry policy
	client, err := azblob.NewClientWithSharedKeyCredential(endpoint, credential,
		&azblob.ClientOptions{ClientOptions: azcore.ClientOptions{
			Retry: policy.RetryOptions{
				MaxRetries: MaxRetries,
				RetryDelay: RetryDelay,
			},
		}})
	if err != nil {
		return fmt.Errorf("failed to create client: %w", err)
	}
	d.client = client

	// Ensure the container exists or create it
	containerName := strings.Trim(d.Addition.ContainerName, "/ \\")
	if containerName == "" {
		return fmt.Errorf("container name cannot be empty")
	}
	return d.createContainerIfNotExists(ctx, containerName)
}

// Drop releases resources associated with the Azure Blob client.
func (d *AzureBlob) Drop(ctx context.Context) error {
	d.client = nil
	return nil
}

// List retrieves blobs and directories under the specified path.
func (d *AzureBlob) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	prefix := ensureTrailingSlash(dir.GetPath())

	pager := d.containerClient.NewListBlobsHierarchyPager("/", &container.ListBlobsHierarchyOptions{
		Prefix: &prefix,
	})

	var objs []model.Obj
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to list blobs: %w", err)
		}

		// Process directories
		for _, blobPrefix := range page.Segment.BlobPrefixes {
			objs = append(objs, &model.Object{
				Name:     path.Base(strings.TrimSuffix(*blobPrefix.Name, "/")),
				Path:     *blobPrefix.Name,
				Modified: *blobPrefix.Properties.LastModified,
				Ctime:    *blobPrefix.Properties.CreationTime,
				IsFolder: true,
			})
		}

		// Process files
		for _, blob := range page.Segment.BlobItems {
			if strings.HasSuffix(*blob.Name, "/") {
				continue
			}
			objs = append(objs, &model.Object{
				Name:     path.Base(*blob.Name),
				Path:     *blob.Name,
				Size:     *blob.Properties.ContentLength,
				Modified: *blob.Properties.LastModified,
				Ctime:    *blob.Properties.CreationTime,
				IsFolder: false,
			})
		}
	}
	return objs, nil
}

// Link generates a temporary SAS URL for accessing a blob.
func (d *AzureBlob) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	blobClient := d.containerClient.NewBlobClient(file.GetPath())
	expireDuration := time.Hour * time.Duration(d.SignURLExpire)

	sasURL, err := blobClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
	if err != nil {
		return nil, fmt.Errorf("failed to generate SAS URL: %w", err)
	}
	return &model.Link{URL: sasURL}, nil
}

// MakeDir creates a virtual directory by uploading an empty blob as a marker.
func (d *AzureBlob) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
	dirPath := path.Join(parentDir.GetPath(), dirName)
	if err := d.mkDir(ctx, dirPath); err != nil {
		return nil, fmt.Errorf("failed to create directory marker: %w", err)
	}

	return &model.Object{
		Path:     dirPath,
		Name:     dirName,
		IsFolder: true,
	}, nil
}

// Move relocates an object (file or directory) to a new directory.
func (d *AzureBlob) Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	srcPath := srcObj.GetPath()
	dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())

	if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
		return nil, fmt.Errorf("move operation failed: %w", err)
	}

	return &model.Object{
		Path:     dstPath,
		Name:     srcObj.GetName(),
		Modified: time.Now(),
		IsFolder: srcObj.IsDir(),
		Size:     srcObj.GetSize(),
	}, nil
}

// Rename changes the name of an existing object.
func (d *AzureBlob) Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error) {
	srcPath := srcObj.GetPath()
	dstPath := path.Join(path.Dir(srcPath), newName)

	if err := d.moveOrRename(ctx, srcPath, dstPath, srcObj.IsDir(), srcObj.GetSize()); err != nil {
		return nil, fmt.Errorf("rename operation failed: %w", err)
	}

	return &model.Object{
		Path:     dstPath,
		Name:     newName,
		Modified: time.Now(),
		IsFolder: srcObj.IsDir(),
		Size:     srcObj.GetSize(),
	}, nil
}

// Copy duplicates an object (file or directory) to a specified destination directory.
func (d *AzureBlob) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	dstPath := path.Join(dstDir.GetPath(), srcObj.GetName())

	// Handle directory copying using flat listing
	if srcObj.IsDir() {
		srcPrefix := srcObj.GetPath()
		srcPrefix = ensureTrailingSlash(srcPrefix)

		// Get all blobs under the source directory
		blobs, err := d.flattenListBlobs(ctx, srcPrefix)
		if err != nil {
			return nil, fmt.Errorf("failed to list source directory contents: %w", err)
		}

		// Process each blob - copy to destination
		for _, blob := range blobs {
			// Skip the directory marker itself
			if *blob.Name == srcPrefix {
				continue
			}

			// Calculate relative path from source
			relPath := strings.TrimPrefix(*blob.Name, srcPrefix)
			itemDstPath := path.Join(dstPath, relPath)

			if strings.HasSuffix(itemDstPath, "/") || (blob.Metadata["hdi_isfolder"] != nil && *blob.Metadata["hdi_isfolder"] == "true") {
				// Create directory marker at destination
				err := d.mkDir(ctx, itemDstPath)
				if err != nil {
					return nil, fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
				}
			} else {
				// Copy the blob
				if err := d.copyFile(ctx, *blob.Name, itemDstPath); err != nil {
					return nil, fmt.Errorf("failed to copy %s: %w", *blob.Name, err)
				}
			}
		}

		// Create directory marker at destination if needed
		if len(blobs) == 0 {
			err := d.mkDir(ctx, dstPath)
			if err != nil {
				return nil, fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
			}
		}

		return &model.Object{
			Path:     dstPath,
			Name:     srcObj.GetName(),
			Modified: time.Now(),
			IsFolder: true,
		}, nil
	}

	// Copy a single file
	if err := d.copyFile(ctx, srcObj.GetPath(), dstPath); err != nil {
		return nil, fmt.Errorf("failed to copy blob: %w", err)
	}
	return &model.Object{
		Path:     dstPath,
		Name:     srcObj.GetName(),
		Size:     srcObj.GetSize(),
		Modified: time.Now(),
		IsFolder: false,
	}, nil
}

// Remove deletes a specified blob or recursively deletes a directory and its contents.
func (d *AzureBlob) Remove(ctx context.Context, obj model.Obj) error {
	path := obj.GetPath()

	// Handle recursive directory deletion
	if obj.IsDir() {
		return d.deleteFolder(ctx, path)
	}

	// Delete a single file
	return d.deleteFile(ctx, path, false)
}

// Put uploads a file stream to Azure Blob Storage with progress tracking.
func (d *AzureBlob) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	blobPath := path.Join(dstDir.GetPath(), stream.GetName())
	blobClient := d.containerClient.NewBlockBlobClient(blobPath)

	// Determine optimal upload options based on file size
	options := optimizedUploadOptions(stream.GetSize())

	// Track upload progress
	progressTracker := &progressTracker{
		total:          stream.GetSize(),
		updateProgress: up,
	}

	// Wrap the stream to handle context cancellation and progress tracking
	limitedStream := driver.NewLimitedUploadStream(ctx, io.TeeReader(stream, progressTracker))

	// Upload the stream to Azure Blob Storage
	_, err := blobClient.UploadStream(ctx, limitedStream, options)
	if err != nil {
		return nil, fmt.Errorf("failed to upload file: %w", err)
	}

	return &model.Object{
		Path:     blobPath,
		Name:     stream.GetName(),
		Size:     stream.GetSize(),
		Modified: time.Now(),
		IsFolder: false,
	}, nil
}

// The following methods related to archive handling are not implemented yet.
// func (d *AzureBlob) GetArchiveMeta(...) {...}
// func (d *AzureBlob) ListArchive(...) {...}
// func (d *AzureBlob) Extract(...) {...}
// func (d *AzureBlob) ArchiveDecompress(...) {...}

// Ensure AzureBlob implements the driver.Driver interface.
var _ driver.Driver = (*AzureBlob)(nil)
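A note on the directory convention this driver leans on: Azure Blob Storage has no real folders, so a zero-byte blob carrying `hdi_isfolder=true` metadata stands in for one (the same marker used by Azure tooling with hierarchical namespaces). A minimal, self-contained version of the metadata check `Copy` performs above:

```go
package main

import "fmt"

// isFolderMeta mirrors the hdi_isfolder check used in Copy; metadata values
// come back from the Azure SDK as *string, hence the nil guards.
func isFolderMeta(metadata map[string]*string) bool {
	v, ok := metadata["hdi_isfolder"]
	return ok && v != nil && *v == "true"
}

func main() {
	t := "true"
	fmt.Println(isFolderMeta(map[string]*string{"hdi_isfolder": &t})) // true
	fmt.Println(isFolderMeta(map[string]*string{}))                   // false
}
```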
drivers/azure_blob/meta.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package azure_blob

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	Endpoint      string `json:"endpoint" required:"true" default:"https://<accountname>.blob.core.windows.net/" help:"e.g. https://accountname.blob.core.windows.net/. The full endpoint URL for Azure Storage, including the unique storage account name (3 ~ 24 numbers and lowercase letters only)."`
	AccessKey     string `json:"access_key" required:"true" help:"The access key for Azure Storage, used for authentication. https://learn.microsoft.com/azure/storage/common/storage-account-keys-manage"`
	ContainerName string `json:"container_name" required:"true" help:"The name of the container in Azure Storage (created in the Azure portal). https://learn.microsoft.com/azure/storage/blobs/blob-containers-portal"`
	SignURLExpire int    `json:"sign_url_expire" type:"number" default:"4" help:"The expiration time for SAS URLs, in hours."`
}

var config = driver.Config{
	Name:        "Azure Blob Storage",
	LocalSort:   true,
	CheckStatus: true,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &AzureBlob{
			config: config,
		}
	})
}
drivers/azure_blob/types.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package azure_blob

import "github.com/alist-org/alist/v3/internal/driver"

// progressTracker is used to track upload progress
type progressTracker struct {
	total          int64
	current        int64
	updateProgress driver.UpdateProgress
}

// Write implements io.Writer to track progress
func (pt *progressTracker) Write(p []byte) (n int, err error) {
	n = len(p)
	pt.current += int64(n)
	if pt.updateProgress != nil && pt.total > 0 {
		pt.updateProgress(float64(pt.current) * 100 / float64(pt.total))
	}
	return n, nil
}
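The driver's `Put` wires this tracker in through `io.TeeReader`, so every byte the SDK reads from the stream is also written to the tracker. A standalone, runnable sketch of that pattern (the reader and sizes are made up; `io.Discard` stands in for `UploadStream`):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// tracker is a trimmed stand-in for progressTracker above.
type tracker struct{ total, current int64 }

func (t *tracker) Write(p []byte) (int, error) {
	t.current += int64(len(p))
	fmt.Printf("progress: %.0f%%\n", float64(t.current)*100/float64(t.total))
	return len(p), nil
}

func main() {
	data := bytes.Repeat([]byte("x"), 1024)
	src := bytes.NewReader(data)
	t := &tracker{total: int64(len(data))}
	// Every read from the TeeReader also feeds the tracker.
	_, _ = io.Copy(io.Discard, io.TeeReader(src, t)) // stand-in for UploadStream
}
```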
drivers/azure_blob/util.go (new file, 401 lines)
@@ -0,0 +1,401 @@
package azure_blob

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"sort"
	"strings"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
	log "github.com/sirupsen/logrus"
)

const (
	// MaxRetries defines the maximum number of retry attempts for Azure operations
	MaxRetries = 3
	// RetryDelay defines the base delay between retries
	RetryDelay = 3 * time.Second
	// MaxBatchSize defines the maximum number of operations in a single batch request
	MaxBatchSize = 128
)

// extractAccountName extracts the account name from an Azure storage endpoint
func extractAccountName(endpoint string) string {
	// Strip the protocol prefix
	endpoint = strings.TrimPrefix(endpoint, "https://")
	endpoint = strings.TrimPrefix(endpoint, "http://")

	// Take the part before the first dot (the account name)
	parts := strings.Split(endpoint, ".")
	if len(parts) > 0 {
		// to lower case
		return strings.ToLower(parts[0])
	}
	return ""
}

// isNotFoundError checks if the error is a "not found" type error
func isNotFoundError(err error) bool {
	var storageErr *azcore.ResponseError
	if errors.As(err, &storageErr) {
		return storageErr.StatusCode == 404
	}
	// Fall back to string matching for backwards compatibility
	return err != nil && strings.Contains(err.Error(), "BlobNotFound")
}

// flattenListBlobs - Optimize blob listing to handle pagination better
func (d *AzureBlob) flattenListBlobs(ctx context.Context, prefix string) ([]container.BlobItem, error) {
	// Standardize the prefix format
	prefix = ensureTrailingSlash(prefix)

	var blobItems []container.BlobItem
	pager := d.containerClient.NewListBlobsFlatPager(&container.ListBlobsFlatOptions{
		Prefix: &prefix,
		Include: container.ListBlobsInclude{
			Metadata: true,
		},
	})

	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to list blobs: %w", err)
		}

		for _, blob := range page.Segment.BlobItems {
			blobItems = append(blobItems, *blob)
		}
	}

	return blobItems, nil
}

// batchDeleteBlobs - Simplify batch deletion logic
func (d *AzureBlob) batchDeleteBlobs(ctx context.Context, blobPaths []string) error {
	if len(blobPaths) == 0 {
		return nil
	}

	// Process in batches of MaxBatchSize
	for i := 0; i < len(blobPaths); i += MaxBatchSize {
		end := min(i+MaxBatchSize, len(blobPaths))
		currentBatch := blobPaths[i:end]

		// Create a batch builder
		batchBuilder, err := d.containerClient.NewBatchBuilder()
		if err != nil {
			return fmt.Errorf("failed to create batch builder: %w", err)
		}

		// Add delete operations
		for _, blobPath := range currentBatch {
			if err := batchBuilder.Delete(blobPath, nil); err != nil {
				return fmt.Errorf("failed to add delete operation for %s: %w", blobPath, err)
			}
		}

		// Submit the batch
		responses, err := d.containerClient.SubmitBatch(ctx, batchBuilder, nil)
		if err != nil {
			return fmt.Errorf("batch delete request failed: %w", err)
		}

		// Check responses
		for _, resp := range responses.Responses {
			if resp.Error != nil && !isNotFoundError(resp.Error) {
				// Resolve the blob name for a more helpful error message
				blobName := "unknown"
				if resp.BlobName != nil {
					blobName = *resp.BlobName
				}
				return fmt.Errorf("failed to delete blob %s: %v", blobName, resp.Error)
			}
		}
	}

	return nil
}

// deleteFolder recursively deletes a directory and all its contents
func (d *AzureBlob) deleteFolder(ctx context.Context, prefix string) error {
	// Ensure the directory path ends with a slash
	prefix = ensureTrailingSlash(prefix)

	// Get all blobs under the directory using flattenListBlobs
	globs, err := d.flattenListBlobs(ctx, prefix)
	if err != nil {
		return fmt.Errorf("failed to list blobs for deletion: %w", err)
	}

	// If there are blobs in the directory, delete them
	if len(globs) > 0 {
		// Separate files from directory markers
		var filePaths []string
		var dirPaths []string

		for _, blob := range globs {
			blobName := *blob.Name
			if isDirectory(blob) {
				// Remove the trailing slash for directory names
				dirPaths = append(dirPaths, strings.TrimSuffix(blobName, "/"))
			} else {
				filePaths = append(filePaths, blobName)
			}
		}

		// Delete files first, then directories
		if len(filePaths) > 0 {
			if err := d.batchDeleteBlobs(ctx, filePaths); err != nil {
				return err
			}
		}
		if len(dirPaths) > 0 {
			// Group directories by path depth
			depthMap := make(map[int][]string)
			for _, dir := range dirPaths {
				depth := strings.Count(dir, "/") // directory depth
				depthMap[depth] = append(depthMap[depth], dir)
			}

			// Sort depths from deepest to shallowest
			var depths []int
			for depth := range depthMap {
				depths = append(depths, depth)
			}
			sort.Sort(sort.Reverse(sort.IntSlice(depths)))

			// Batch-delete level by level, deepest first
			for _, depth := range depths {
				batch := depthMap[depth]
				if err := d.batchDeleteBlobs(ctx, batch); err != nil {
					return err
				}
			}
		}
	}

	// Finally, delete the directory marker itself
	return d.deleteEmptyDirectory(ctx, prefix)
}

// deleteFile deletes a single file or blob with better error handling
func (d *AzureBlob) deleteFile(ctx context.Context, path string, isDir bool) error {
	blobClient := d.containerClient.NewBlobClient(path)
	_, err := blobClient.Delete(ctx, nil)
	if err != nil && !(isDir && isNotFoundError(err)) {
		return err
	}
	return nil
}

// copyFile copies a single blob from source path to destination path
func (d *AzureBlob) copyFile(ctx context.Context, srcPath, dstPath string) error {
	srcBlob := d.containerClient.NewBlobClient(srcPath)
	dstBlob := d.containerClient.NewBlobClient(dstPath)

	// Use the configured expiration time for the SAS URL
	expireDuration := time.Hour * time.Duration(d.SignURLExpire)
	srcURL, err := srcBlob.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(expireDuration), nil)
	if err != nil {
		return fmt.Errorf("failed to generate source SAS URL: %w", err)
	}

	_, err = dstBlob.StartCopyFromURL(ctx, srcURL, nil)
	return err
}

// createContainerIfNotExists creates the container if it does not already exist
func (d *AzureBlob) createContainerIfNotExists(ctx context.Context, containerName string) error {
	serviceClient := d.client.ServiceClient()
	containerClient := serviceClient.NewContainerClient(containerName)

	var options = service.CreateContainerOptions{}
	_, err := containerClient.Create(ctx, &options)
	if err != nil {
		var responseErr *azcore.ResponseError
		if errors.As(err, &responseErr) && responseErr.ErrorCode != "ContainerAlreadyExists" {
			return fmt.Errorf("failed to create or access container [%s]: %w", containerName, err)
		}
	}

	d.containerClient = containerClient
	return nil
}

// mkDir creates a virtual directory marker by uploading an empty blob with metadata.
func (d *AzureBlob) mkDir(ctx context.Context, fullDirName string) error {
	dirPath := ensureTrailingSlash(fullDirName)
	blobClient := d.containerClient.NewBlockBlobClient(dirPath)

	// Upload an empty blob with metadata indicating it's a directory
	_, err := blobClient.Upload(ctx, struct {
		*bytes.Reader
		io.Closer
	}{
		Reader: bytes.NewReader([]byte{}),
		Closer: io.NopCloser(nil),
	}, &blockblob.UploadOptions{
		Metadata: map[string]*string{
			"hdi_isfolder": to.Ptr("true"),
		},
	})
	return err
}

// ensureTrailingSlash ensures the provided path ends with a trailing slash.
func ensureTrailingSlash(path string) string {
	if !strings.HasSuffix(path, "/") {
		return path + "/"
	}
	return path
}

// moveOrRename moves or renames blobs or directories from source to destination.
func (d *AzureBlob) moveOrRename(ctx context.Context, srcPath, dstPath string, isDir bool, srcSize int64) error {
	if isDir {
		// Normalize paths for directory operations
		srcPath = ensureTrailingSlash(srcPath)
		dstPath = ensureTrailingSlash(dstPath)

		// List all blobs under the source directory
		blobs, err := d.flattenListBlobs(ctx, srcPath)
		if err != nil {
			return fmt.Errorf("failed to list blobs: %w", err)
		}

		// Iterate and copy each blob to the destination
		for _, item := range blobs {
			srcBlobName := *item.Name
			relPath := strings.TrimPrefix(srcBlobName, srcPath)
			itemDstPath := path.Join(dstPath, relPath)

			if isDirectory(item) {
				// Create a directory marker at the destination
				if err := d.mkDir(ctx, itemDstPath); err != nil {
					return fmt.Errorf("failed to create directory marker [%s]: %w", itemDstPath, err)
				}
			} else {
				// Copy the file blob to the destination
				if err := d.copyFile(ctx, srcBlobName, itemDstPath); err != nil {
					return fmt.Errorf("failed to copy blob [%s]: %w", srcBlobName, err)
				}
			}
		}

		// Handle empty directories by creating a marker at the destination
		if len(blobs) == 0 {
			if err := d.mkDir(ctx, dstPath); err != nil {
				return fmt.Errorf("failed to create directory [%s]: %w", dstPath, err)
			}
		}

		// Delete the source directory and its contents
		if err := d.deleteFolder(ctx, srcPath); err != nil {
			log.Warnf("failed to delete source directory [%s]: %v, trying again", srcPath, err)
			// Retry the deletion once more and ignore the result
			if err := d.deleteFolder(ctx, srcPath); err != nil {
				log.Errorf("Retry deletion of source directory [%s] failed: %v", srcPath, err)
			}
		}

		return nil
	}

	// Single file move or rename operation
	if err := d.copyFile(ctx, srcPath, dstPath); err != nil {
		return fmt.Errorf("failed to copy file: %w", err)
	}

	// Delete the source file after a successful copy
	if err := d.deleteFile(ctx, srcPath, false); err != nil {
		log.Errorf("Error deleting source file [%s]: %v", srcPath, err)
	}
	return nil
}

// optimizedUploadOptions returns the optimal upload options based on file size
func optimizedUploadOptions(fileSize int64) *azblob.UploadStreamOptions {
	options := &azblob.UploadStreamOptions{
		BlockSize:   4 * 1024 * 1024, // 4MB block size
		Concurrency: 4,               // Default concurrency
	}

	// For large files, increase block size and concurrency
	if fileSize > 256*1024*1024 { // For files larger than 256MB
		options.BlockSize = 8 * 1024 * 1024 // 8MB blocks
		options.Concurrency = 8             // More concurrent uploads
	}

	// For very large files (>1GB)
	if fileSize > 1024*1024*1024 {
		options.BlockSize = 16 * 1024 * 1024 // 16MB blocks
		options.Concurrency = 16             // Higher concurrency
	}

	return options
}

// isDirectory determines if a blob represents a directory
// Checks multiple indicators: path suffix, metadata, and content type
func isDirectory(blob container.BlobItem) bool {
	// Check the path suffix
	if strings.HasSuffix(*blob.Name, "/") {
		return true
	}

	// Check metadata for a directory marker
	if blob.Metadata != nil {
		if val, ok := blob.Metadata["hdi_isfolder"]; ok && val != nil && *val == "true" {
			return true
		}
		// Azure Storage Explorer and other tools may use different metadata keys
		if val, ok := blob.Metadata["is_directory"]; ok && val != nil && strings.ToLower(*val) == "true" {
			return true
		}
	}

	// Check the content type (some tools mark directories with specific content types)
	if blob.Properties != nil && blob.Properties.ContentType != nil {
		contentType := strings.ToLower(*blob.Properties.ContentType)
		if blob.Properties.ContentLength != nil && *blob.Properties.ContentLength == 0 && (contentType == "application/directory" || contentType == "directory") {
			return true
		}
	}

	return false
}

// deleteEmptyDirectory deletes a directory only if it's empty
func (d *AzureBlob) deleteEmptyDirectory(ctx context.Context, dirPath string) error {
	// The directory is empty; delete the directory marker
	blobClient := d.containerClient.NewBlobClient(strings.TrimSuffix(dirPath, "/"))
	_, err := blobClient.Delete(ctx, nil)

	// Also try deleting with a trailing slash (for different directory marker formats)
	if err != nil && isNotFoundError(err) {
		blobClient = d.containerClient.NewBlobClient(dirPath)
		_, err = blobClient.Delete(ctx, nil)
	}

	// Ignore "not found" errors
	if err != nil && isNotFoundError(err) {
		log.Infof("Directory [%s] not found during deletion: %v", dirPath, err)
		return nil
	}

	return err
}
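For a quick sanity check of the endpoint parsing above, the helper can be exercised standalone (logic copied verbatim from `extractAccountName`):

```go
package main

import (
	"fmt"
	"strings"
)

// extractAccountName is copied from util.go so the behavior can be checked in isolation.
func extractAccountName(endpoint string) string {
	endpoint = strings.TrimPrefix(endpoint, "https://")
	endpoint = strings.TrimPrefix(endpoint, "http://")
	parts := strings.Split(endpoint, ".")
	if len(parts) > 0 {
		return strings.ToLower(parts[0])
	}
	return ""
}

func main() {
	fmt.Println(extractAccountName("https://MyAccount.blob.core.windows.net/")) // myaccount
	fmt.Println(extractAccountName("myaccount"))                                // bare account names pass through
}
```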
@@ -20,6 +20,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/errgroup"
 	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/avast/retry-go"
 	log "github.com/sirupsen/logrus"
 )

@@ -77,6 +78,8 @@ func (d *BaiduNetdisk) List(ctx context.Context, dir model.Obj, args model.ListA
 func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	if d.DownloadAPI == "crack" {
 		return d.linkCrack(file, args)
+	} else if d.DownloadAPI == "crack_video" {
+		return d.linkCrackVideo(file, args)
 	}
 	return d.linkOfficial(file, args)
 }
@@ -260,21 +263,24 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 		}
 	}
 	// step 2: upload the chunks
-	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread)
+	threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
+		retry.Attempts(1),
+		retry.Delay(time.Second),
+		retry.DelayType(retry.BackOffDelay))
 	sem := semaphore.NewWeighted(3)
 	for i, partseq := range precreateResp.BlockList {
 		if utils.IsCanceled(upCtx) {
 			break
 		}
-		if err = sem.Acquire(ctx, 1); err != nil {
-			break
-		}

 		i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
 		if partseq+1 == count {
 			byteSize = lastBlockSize
 		}
 		threadG.Go(func(ctx context.Context) error {
+			if err = sem.Acquire(ctx, 1); err != nil {
+				return err
+			}
 			defer sem.Release(1)
 			params := map[string]string{
 				"method": "upload",
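The hunk above moves `sem.Acquire` from the dispatch loop into the worker goroutine, so a saturated semaphore no longer blocks task submission. A self-contained sketch of the pattern (plain goroutines and `golang.org/x/sync/semaphore` stand in for the project's errgroup wrapper):

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 parts in flight
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Acquired inside the worker: a full semaphore parks this
			// goroutine instead of stalling the loop that spawns work.
			if err := sem.Acquire(ctx, 1); err != nil {
				return
			}
			defer sem.Release(1)
			fmt.Println("upload part", i)
		}(i)
	}
	wg.Wait()
}
```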
@@ -10,7 +10,7 @@ type Addition struct {
 	driver.RootPath
 	OrderBy        string `json:"order_by" type:"select" options:"name,time,size" default:"name"`
 	OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"`
-	DownloadAPI    string `json:"download_api" type:"select" options:"official,crack" default:"official"`
+	DownloadAPI    string `json:"download_api" type:"select" options:"official,crack,crack_video" default:"official"`
 	ClientID       string `json:"client_id" required:"true" default:"iYCeC9g08h5vuP9UqvPHKKSVrKFXGa1v"`
 	ClientSecret   string `json:"client_secret" required:"true" default:"jXiFMOPVPCWlO2M5CwWQzffpNPaGTRBG"`
 	CustomCrackUA  string `json:"custom_crack_ua" required:"true" default:"netdisk"`
@@ -19,6 +19,7 @@ type Addition struct {
 	UploadAPI             string `json:"upload_api" default:"https://d.pcs.baidu.com"`
 	CustomUploadPartSize  int64  `json:"custom_upload_part_size" type:"number" default:"0" help:"0 for auto"`
 	LowBandwithUploadMode bool   `json:"low_bandwith_upload_mode" default:"false"`
+	OnlyListVideoFile     bool   `json:"only_list_video_file" default:"false"`
 }

 var config = driver.Config{
@@ -17,7 +17,7 @@ type TokenErrResp struct {
 type File struct {
 	//TkbindId int    `json:"tkbind_id"`
 	//OwnerType int    `json:"owner_type"`
-	//Category int    `json:"category"`
+	Category int `json:"category"`
 	//RealCategory string `json:"real_category"`
 	FsId int64 `json:"fs_id"`
 	//OperId int    `json:"oper_id"`
@@ -79,6 +79,12 @@ func (d *BaiduNetdisk) request(furl string, method string, callback base.ReqCall
 					return retry.Unrecoverable(err2)
 				}
 			}
+
+			if 31023 == errno && d.DownloadAPI == "crack_video" {
+				result = res.Body()
+				return nil
+			}
+
 			return fmt.Errorf("req: [%s] ,errno: %d, refer to https://pan.baidu.com/union/doc/", furl, errno)
 		}
 		result = res.Body()
@@ -131,7 +137,16 @@ func (d *BaiduNetdisk) getFiles(dir string) ([]File, error) {
 		if len(resp.List) == 0 {
 			break
 		}
-		res = append(res, resp.List...)
+
+		if d.OnlyListVideoFile {
+			for _, file := range resp.List {
+				if file.Isdir == 1 || file.Category == 1 {
+					res = append(res, file)
+				}
+			}
+		} else {
+			res = append(res, resp.List...)
+		}
 	}
 	return res, nil
 }
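For context: in the Baidu Pan file API, `category` 1 denotes video (see the file metadata tables at https://pan.baidu.com/union/doc/), so the new filter keeps directories and video files only. A minimal standalone version of the filter:

```go
package main

import "fmt"

// file is a trimmed stand-in for the driver's File type.
type file struct {
	Isdir    int
	Category int
	Name     string
}

func main() {
	list := []file{{1, 0, "dir"}, {0, 1, "movie.mp4"}, {0, 3, "pic.jpg"}}
	var res []file
	for _, f := range list {
		if f.Isdir == 1 || f.Category == 1 { // keep folders and videos
			res = append(res, f)
		}
	}
	fmt.Println(res) // [{1 0 dir} {0 1 movie.mp4}]
}
```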
@@ -187,6 +202,34 @@ func (d *BaiduNetdisk) linkCrack(file model.Obj, _ model.LinkArgs) (*model.Link,
 	}, nil
 }

+func (d *BaiduNetdisk) linkCrackVideo(file model.Obj, _ model.LinkArgs) (*model.Link, error) {
+	param := map[string]string{
+		"type":       "VideoURL",
+		"path":       fmt.Sprintf("%s", file.GetPath()),
+		"fs_id":      file.GetID(),
+		"devuid":     "0%1",
+		"clienttype": "1",
+		"channel":    "android_15_25010PN30C_bd-netdisk_1523a",
+		"nom3u8":     "1",
+		"dlink":      "1",
+		"media":      "1",
+		"origin":     "dlna",
+	}
+	resp, err := d.request("https://pan.baidu.com/api/mediainfo", http.MethodGet, func(req *resty.Request) {
+		req.SetQueryParams(param)
+	}, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return &model.Link{
+		URL: utils.Json.Get(resp, "info", "dlink").ToString(),
+		Header: http.Header{
+			"User-Agent": []string{d.CustomCrackUA},
+		},
+	}, nil
+}
+
 func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
 	params := map[string]string{
 		"method": "filemanager",
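The dlink extraction in `linkCrackVideo` relies on jsoniter's path lookup (`utils.Json` wraps `github.com/json-iterator/go`). A runnable sketch with a made-up response body:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Sample body shaped like the mediainfo response; the URL is a placeholder.
	body := []byte(`{"errno":0,"info":{"dlink":"https://d.pcs.baidu.com/file/abc"}}`)
	// Get walks the JSON path without unmarshalling the whole document.
	url := jsoniter.ConfigCompatibleWithStandardLibrary.Get(body, "info", "dlink").ToString()
	fmt.Println(url)
}
```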
@@ -321,9 +321,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		if utils.IsCanceled(upCtx) {
 			break
 		}
-		if err = sem.Acquire(ctx, 1); err != nil {
-			break
-		}

 		i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
 		if partseq+1 == count {
@@ -331,6 +328,9 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		}

 		threadG.Go(func(ctx context.Context) error {
+			if err = sem.Acquire(ctx, 1); err != nil {
+				return err
+			}
 			defer sem.Release(1)
 			uploadParams := map[string]string{
 				"method": "upload",
@@ -1,13 +1,10 @@
 package cloudreve

 import (
-	"bytes"
 	"context"
-	"errors"
 	"io"
 	"net/http"
 	"path"
-	"strconv"
 	"strings"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -165,42 +162,18 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
 	switch r.Policy.Type {
 	case "onedrive":
 		err = d.upOneDrive(ctx, stream, u, up)
+	case "s3":
+		err = d.upS3(ctx, stream, u, up)
 	case "remote": // slave storage
 		err = d.upRemote(ctx, stream, u, up)
 	case "local": // local storage
-		var chunkSize = u.ChunkSize
-		var buf []byte
-		var chunk int
-		for {
-			var n int
-			buf = make([]byte, chunkSize)
-			n, err = io.ReadAtLeast(stream, buf, chunkSize)
-			if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
-				if err == io.EOF {
-					return nil
-				}
-				return err
-			}
-			if n == 0 {
-				break
-			}
-			buf = buf[:n]
-			err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
-				req.SetHeader("Content-Type", "application/octet-stream")
-				req.SetHeader("Content-Length", strconv.Itoa(n))
-				req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)))
-			}, nil)
-			if err != nil {
-				break
-			}
-			chunk++
-		}
+		err = d.upLocal(ctx, stream, u, up)
 	default:
 		err = errs.NotImplement
 	}
 	if err != nil {
 		// delete the failed upload session
-		err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
+		_ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
 		return err
 	}
 	return nil
@@ -21,11 +21,12 @@ type Policy struct {
 }

 type UploadInfo struct {
 	SessionID  string   `json:"sessionID"`
 	ChunkSize  int      `json:"chunkSize"`
 	Expires    int      `json:"expires"`
 	UploadURLs []string `json:"uploadURLs"`
-	Credential string   `json:"credential,omitempty"`
+	Credential  string  `json:"credential,omitempty"`  // local
+	CompleteURL string  `json:"completeURL,omitempty"` // s3
 }

 type DirectoryResp struct {
@@ -27,17 +27,20 @@ import (

 const loginPath = "/user/session"

+func (d *Cloudreve) getUA() string {
+	if d.CustomUA != "" {
+		return d.CustomUA
+	}
+	return base.UserAgent
+}
+
 func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
 	u := d.Address + "/api/v3" + path
-	ua := d.CustomUA
-	if ua == "" {
-		ua = base.UserAgent
-	}
 	req := base.RestyClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie":     "cloudreve-session=" + d.Cookie,
 		"Accept":     "application/json, text/plain, */*",
-		"User-Agent": ua,
+		"User-Agent": d.getUA(),
 	})

 	var r Resp
@@ -161,15 +164,11 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
 	if !d.Addition.EnableThumbAndFolderSize {
 		return model.Thumbnail{}, nil
 	}
-	ua := d.CustomUA
-	if ua == "" {
-		ua = base.UserAgent
-	}
 	req := base.NoRedirectClient.R()
 	req.SetHeaders(map[string]string{
 		"Cookie":     "cloudreve-session=" + d.Cookie,
 		"Accept":     "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
-		"User-Agent": ua,
+		"User-Agent": d.getUA(),
 	})
 	resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
 	if err != nil {
@@ -180,6 +179,43 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
 	}, nil
 }

+func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
+	var finish int64 = 0
+	var chunk int = 0
+	DEFAULT := int64(u.ChunkSize)
+	for finish < stream.GetSize() {
+		if utils.IsCanceled(ctx) {
+			return ctx.Err()
+		}
+		utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
+		var byteSize = DEFAULT
+		left := stream.GetSize() - finish
+		if left < DEFAULT {
+			byteSize = left
+		}
+		byteData := make([]byte, byteSize)
+		n, err := io.ReadFull(stream, byteData)
+		utils.Log.Debug(err, n)
+		if err != nil {
+			return err
+		}
+		err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
+			req.SetHeader("Content-Type", "application/octet-stream")
+			req.SetContentLength(true)
+			req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
+			req.SetHeader("User-Agent", d.getUA())
+			req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
+		}, nil)
+		if err != nil {
+			break
+		}
+		finish += byteSize
+		up(float64(finish) * 100 / float64(stream.GetSize()))
+		chunk++
+	}
+	return nil
+}
+
 func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
 	uploadUrl := u.UploadURLs[0]
 	credential := u.Credential
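`upLocal` sizes the final chunk before calling `io.ReadFull`, which either fills the buffer completely or returns an error. A runnable sketch of that chunking behavior on a short final chunk:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	src := bytes.NewReader([]byte("hello world")) // 11 bytes
	chunk := make([]byte, 4)
	for {
		left := src.Len() // unread bytes remaining
		if left == 0 {
			break
		}
		if left < len(chunk) {
			chunk = chunk[:left] // shrink the last chunk, as upLocal does via byteSize
		}
		n, err := io.ReadFull(src, chunk)
		if err != nil {
			fmt.Println("err:", err)
			return
		}
		fmt.Printf("read %d: %q\n", n, chunk[:n])
	}
}
```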
@@ -211,6 +247,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
 			req.ContentLength = byteSize
 			// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
 			req.Header.Set("Authorization", fmt.Sprint(credential))
+			req.Header.Set("User-Agent", d.getUA())
 			finish += byteSize
 			res, err := base.HttpClient.Do(req)
 			if err != nil {
@@ -251,6 +288,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
 			req.ContentLength = byteSize
 			// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
 			req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
+			req.Header.Set("User-Agent", d.getUA())
 			finish += byteSize
 			res, err := base.HttpClient.Do(req)
 			if err != nil {
@ -274,3 +312,82 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Cloudreve) upS3(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
|
||||||
|
var finish int64 = 0
|
||||||
|
var chunk int = 0
|
||||||
|
var etags []string
|
||||||
|
DEFAULT := int64(u.ChunkSize)
|
||||||
|
for finish < stream.GetSize() {
|
||||||
|
if utils.IsCanceled(ctx) {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
utils.Log.Debugf("[Cloudreve-S3] upload: %d", finish)
|
||||||
|
var byteSize = DEFAULT
|
||||||
|
left := stream.GetSize() - finish
|
||||||
|
if left < DEFAULT {
|
||||||
|
byteSize = left
|
||||||
|
}
|
||||||
|
byteData := make([]byte, byteSize)
|
||||||
|
n, err := io.ReadFull(stream, byteData)
|
||||||
|
utils.Log.Debug(err, n)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest("PUT", u.UploadURLs[chunk],
|
||||||
|
driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
req.ContentLength = byteSize
|
||||||
|
finish += byteSize
|
||||||
|
res, err := base.HttpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_ = res.Body.Close()
|
||||||
|
etags = append(etags, res.Header.Get("ETag"))
|
||||||
|
up(float64(finish) * 100 / float64(stream.GetSize()))
|
||||||
|
chunk++
|
||||||
|
}
|
||||||
|
|
||||||
|
// s3LikeFinishUpload
|
||||||
|
// https://github.com/cloudreve/frontend/blob/b485bf297974cbe4834d2e8e744ae7b7e5b2ad39/src/component/Uploader/core/api/index.ts#L204-L252
|
||||||
|
bodyBuilder := &strings.Builder{}
|
||||||
|
bodyBuilder.WriteString("<CompleteMultipartUpload>")
|
||||||
|
for i, etag := range etags {
|
||||||
|
bodyBuilder.WriteString(fmt.Sprintf(
|
||||||
|
`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`,
|
||||||
|
i+1, // PartNumber 从 1 开始
|
||||||
|
etag,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
bodyBuilder.WriteString("</CompleteMultipartUpload>")
|
||||||
|
req, err := http.NewRequest(
|
||||||
|
"POST",
|
||||||
|
u.CompleteURL,
|
||||||
|
strings.NewReader(bodyBuilder.String()),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req.Header.Set("Content-Type", "application/xml")
|
||||||
|
req.Header.Set("User-Agent", d.getUA())
|
||||||
|
res, err := base.HttpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
if res.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(res.Body)
|
||||||
|
return fmt.Errorf("up status: %d, error: %s", res.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
// 上传成功发送回调请求
|
||||||
|
err = d.request(http.MethodGet, "/callback/s3/"+u.SessionID, nil, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
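A quick way to sanity-check the completion body built by upS3 above: the S3 multipart protocol pairs 1-based part numbers with the ETags returned by each chunk PUT. A minimal standalone sketch of just that XML assembly (the ETag values are placeholders, not real responses):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// ETags as returned by each chunk PUT; values here are made up.
	etags := []string{`"etag-1"`, `"etag-2"`, `"etag-3"`}

	var b strings.Builder
	b.WriteString("<CompleteMultipartUpload>")
	for i, etag := range etags {
		// PartNumber is 1-based, matching the order the chunks were uploaded in.
		b.WriteString(fmt.Sprintf(`<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>`, i+1, etag))
	}
	b.WriteString("</CompleteMultipartUpload>")
	fmt.Println(b.String())
}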
drivers/doubao/driver.go (new file, 174 lines)
@@ -0,0 +1,174 @@
package doubao

import (
	"context"
	"errors"
	"time"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/errs"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/go-resty/resty/v2"
	"github.com/google/uuid"
)

type Doubao struct {
	model.Storage
	Addition
}

func (d *Doubao) Config() driver.Config {
	return config
}

func (d *Doubao) GetAddition() driver.Additional {
	return &d.Addition
}

func (d *Doubao) Init(ctx context.Context) error {
	// TODO login / refresh token
	//op.MustSaveDriverStorage(d)
	return nil
}

func (d *Doubao) Drop(ctx context.Context) error {
	return nil
}

func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
	var files []model.Obj
	var r NodeInfoResp
	_, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_id":        dir.GetID(),
			"need_full_path": false,
		})
	}, &r)
	if err != nil {
		return nil, err
	}

	for _, child := range r.Data.Children {
		files = append(files, &Object{
			Object: model.Object{
				ID:       child.ID,
				Path:     child.ParentID,
				Name:     child.Name,
				Size:     child.Size,
				Modified: time.Unix(child.UpdateTime, 0),
				Ctime:    time.Unix(child.CreateTime, 0),
				IsFolder: child.NodeType == 1,
			},
			Key: child.Key,
		})
	}
	return files, nil
}

func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	if u, ok := file.(*Object); ok {
		var r GetFileUrlResp
		_, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) {
			req.SetBody(base.Json{
				"uris": []string{u.Key},
				"type": "file",
			})
		}, &r)
		if err != nil {
			return nil, err
		}
		return &model.Link{
			URL: r.Data.FileUrls[0].MainURL,
		}, nil
	}
	return nil, errors.New("can't convert obj to URL")
}

func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
	var r UploadNodeResp
	_, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_list": []base.Json{
				{
					"local_id":  uuid.New().String(),
					"name":      dirName,
					"parent_id": parentDir.GetID(),
					"node_type": 1,
				},
			},
		})
	}, &r)
	return err
}

func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
	var r UploadNodeResp
	_, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_list": []base.Json{
				{"id": srcObj.GetID()},
			},
			"current_parent_id": srcObj.GetPath(),
			"target_parent_id":  dstDir.GetID(),
		})
	}, &r)
	return err
}

func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
	var r BaseResp
	_, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) {
		req.SetBody(base.Json{
			"node_id":   srcObj.GetID(),
			"node_name": newName,
		})
	}, &r)
	return err
}

func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
	// TODO copy obj, optional
	return nil, errs.NotImplement
}

func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
	var r BaseResp
	_, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) {
		req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
	}, &r)
	return err
}

func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
	// TODO upload file, optional
	return nil, errs.NotImplement
}

func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
	// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
	// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
	// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
	return nil, errs.NotImplement
}

func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
	// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
	// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
	// return errs.NotImplement to use an internal archive tool
	return nil, errs.NotImplement
}

//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
//	return nil, errs.NotSupport
//}

var _ driver.Driver = (*Doubao)(nil)
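For reference, the JSON body that List above posts to /samantha/aispace/node_info is just a node ID plus a full-path flag. A standalone sketch of the payload as it goes over the wire (the node ID "0" is the driver's default root):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors the base.Json map built in (d *Doubao).List.
	body := map[string]any{
		"node_id":        "0",
		"need_full_path": false,
	}
	buf, _ := json.Marshal(body)
	fmt.Println(string(buf)) // {"need_full_path":false,"node_id":"0"} (Go sorts map keys)
}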
drivers/doubao/meta.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package doubao

import (
	"github.com/alist-org/alist/v3/internal/driver"
	"github.com/alist-org/alist/v3/internal/op"
)

type Addition struct {
	// Usually one of two
	// driver.RootPath
	driver.RootID
	// define other
	Cookie string `json:"cookie" type:"text"`
}

var config = driver.Config{
	Name:              "Doubao",
	LocalSort:         true,
	OnlyLocal:         false,
	OnlyProxy:         false,
	NoCache:           false,
	NoUpload:          true,
	NeedMs:            false,
	DefaultRoot:       "0",
	CheckStatus:       false,
	Alert:             "",
	NoOverwriteUpload: false,
}

func init() {
	op.RegisterDriver(func() driver.Driver {
		return &Doubao{}
	})
}
drivers/doubao/types.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package doubao

import "github.com/alist-org/alist/v3/internal/model"

type BaseResp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

type NodeInfoResp struct {
	BaseResp
	Data struct {
		NodeInfo   NodeInfo   `json:"node_info"`
		Children   []NodeInfo `json:"children"`
		NextCursor string     `json:"next_cursor"`
		HasMore    bool       `json:"has_more"`
	} `json:"data"`
}

type NodeInfo struct {
	ID                  string `json:"id"`
	Name                string `json:"name"`
	Key                 string `json:"key"`
	NodeType            int    `json:"node_type"` // 0: file, 1: folder
	Size                int64  `json:"size"`
	Source              int    `json:"source"`
	NameReviewStatus    int    `json:"name_review_status"`
	ContentReviewStatus int    `json:"content_review_status"`
	RiskReviewStatus    int    `json:"risk_review_status"`
	ConversationID      string `json:"conversation_id"`
	ParentID            string `json:"parent_id"`
	CreateTime          int64  `json:"create_time"`
	UpdateTime          int64  `json:"update_time"`
}

type GetFileUrlResp struct {
	BaseResp
	Data struct {
		FileUrls []struct {
			URI     string `json:"uri"`
			MainURL string `json:"main_url"`
			BackURL string `json:"back_url"`
		} `json:"file_urls"`
	} `json:"data"`
}

type UploadNodeResp struct {
	BaseResp
	Data struct {
		NodeList []struct {
			LocalID  string `json:"local_id"`
			ID       string `json:"id"`
			ParentID string `json:"parent_id"`
			Name     string `json:"name"`
			Key      string `json:"key"`
			NodeType int    `json:"node_type"` // 0: file, 1: folder
		} `json:"node_list"`
	} `json:"data"`
}

type Object struct {
	model.Object
	Key string
}
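The structs above can be exercised against a hand-written sample to confirm the field tags line up. A minimal sketch with an invented response (not captured from the real API):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed copy of the driver's types, just enough to decode a node listing.
type nodeInfo struct {
	ID         string `json:"id"`
	Name       string `json:"name"`
	NodeType   int    `json:"node_type"` // 0: file, 1: folder
	Size       int64  `json:"size"`
	UpdateTime int64  `json:"update_time"`
}

func main() {
	sample := `{"code":0,"data":{"children":[{"id":"n1","name":"docs","node_type":1,"size":0,"update_time":1700000000}]}}`
	var resp struct {
		Code int `json:"code"`
		Data struct {
			Children []nodeInfo `json:"children"`
		} `json:"data"`
	}
	if err := json.Unmarshal([]byte(sample), &resp); err != nil {
		panic(err)
	}
	n := resp.Data.Children[0]
	fmt.Println(n.Name, n.NodeType == 1, time.Unix(n.UpdateTime, 0).UTC())
}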
drivers/doubao/util.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package doubao

import (
	"errors"

	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/pkg/utils"
	log "github.com/sirupsen/logrus"
)

// do others that are not defined in the Driver interface
func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
	url := "https://www.doubao.com" + path
	req := base.RestyClient.R()
	req.SetHeader("Cookie", d.Cookie)
	if callback != nil {
		callback(req)
	}
	var r BaseResp
	req.SetResult(&r)
	res, err := req.Execute(method, url)
	log.Debugln(res.String())
	if err != nil {
		return nil, err
	}

	// business status code check (takes precedence over the HTTP status code)
	if r.Code != 0 {
		return res.Body(), errors.New(r.Msg)
	}
	if resp != nil {
		err = utils.Json.Unmarshal(res.Body(), resp)
		if err != nil {
			return nil, err
		}
	}
	return res.Body(), nil
}
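One detail worth keeping from request above: the endpoint can answer HTTP 200 with a non-zero business code, so the driver checks r.Code before touching the payload. A minimal sketch of that two-layer check (the response literals are invented):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type baseResp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

// checkBiz mirrors the order used in the driver: decode errors first,
// then the business code, and only then the caller-specific payload.
func checkBiz(body []byte) error {
	var r baseResp
	if err := json.Unmarshal(body, &r); err != nil {
		return err
	}
	if r.Code != 0 {
		return errors.New(r.Msg)
	}
	return nil
}

func main() {
	fmt.Println(checkBiz([]byte(`{"code":710012,"msg":"node not found"}`))) // node not found
	fmt.Println(checkBiz([]byte(`{"code":0,"msg":""}`)))                    // <nil>
}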
@@ -4,13 +4,13 @@ import (
 	"context"
 	"fmt"
 	"net/url"
-	stdpath "path"
 	"path/filepath"
 	"strings"
 
+	shell "github.com/ipfs/go-ipfs-api"
+
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
-	shell "github.com/ipfs/go-ipfs-api"
 )
 
 type IPFS struct {
@@ -44,27 +44,32 @@ func (d *IPFS) Drop(ctx context.Context) error {
 
 func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
 	path := dir.GetPath()
-	if path[len(path):] != "/" {
-		path += "/"
+	switch d.Mode {
+	case "ipfs":
+		path, _ = url.JoinPath("/ipfs", path)
+	case "ipns":
+		path, _ = url.JoinPath("/ipns", path)
+	case "mfs":
+		fileStat, err := d.sh.FilesStat(ctx, path)
+		if err != nil {
+			return nil, err
+		}
+		path, _ = url.JoinPath("/ipfs", fileStat.Hash)
+	default:
+		return nil, fmt.Errorf("mode error")
 	}
-
-	path_cid, err := d.sh.FilesStat(ctx, path)
-	if err != nil {
-		return nil, err
-	}
-
-	dirs, err := d.sh.List(path_cid.Hash)
+	dirs, err := d.sh.List(path)
 	if err != nil {
 		return nil, err
 	}
 
 	objlist := []model.Obj{}
 	for _, file := range dirs {
-		gateurl := *d.gateURL
-		gateurl.Path = "ipfs/" + file.Hash
+		gateurl := *d.gateURL.JoinPath("/ipfs/" + file.Hash)
 		gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
 		objlist = append(objlist, &model.ObjectURL{
-			Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
+			Object: model.Object{ID: "/ipfs/" + file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
 			Url:    model.Url{Url: gateurl.String()},
 		})
 	}
@@ -73,11 +78,15 @@ func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
 }
 
 func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName())
-	return &model.Link{URL: link}, nil
+	gateurl := d.gateURL.JoinPath(file.GetID())
+	gateurl.RawQuery = "filename=" + url.PathEscape(file.GetName())
+	return &model.Link{URL: gateurl.String()}, nil
 }
 
 func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
 	path := parentDir.GetPath()
 	if path[len(path):] != "/" {
 		path += "/"
@@ -86,42 +95,48 @@ func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string)
 }
 
 func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
 	return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
 }
 
 func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
 	newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName
 	return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
 }
 
 func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
-	// TODO copy obj, optional
-	fmt.Println(srcObj.GetPath())
-	fmt.Println(dstDir.GetPath())
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
 	newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath())
-	fmt.Println(newFileName)
 	return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
 }
 
 func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
-	// TODO remove obj, optional
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
 	return d.sh.FilesRm(ctx, obj.GetPath(), true)
 }
 
 func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
-	// TODO upload file, optional
-	_, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
+	if d.Mode != "mfs" {
+		return fmt.Errorf("only write in mfs mode")
+	}
+	outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
 		Reader:         s,
 		UpdateProgress: up,
-	}), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
-	return err
-}
-
-func ToFiles(dstDir string) shell.AddOpts {
-	return func(rb *shell.RequestBuilder) error {
-		rb.Option("to-files", dstDir)
-		return nil
-	}
+	}))
+	if err != nil {
+		return err
+	}
+	err = d.sh.FilesCp(ctx, "/ipfs/"+outHash, dstDir.GetPath()+"/"+strings.ReplaceAll(s.GetName(), "\\", "/"))
+	return err
 }
 
 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
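The switch added to List above is the heart of this change: the same driver now resolves a browse path differently per mode, and every branch funnels into an /ipfs/... path for listing. A standalone sketch of that resolution (the mfs branch is faked, since it needs a live node to stat):

package main

import (
	"fmt"
	"net/url"
)

// resolveListPath mimics the mode switch in (d *IPFS).List.
// In the real driver the "mfs" case stats the path via the IPFS API
// and substitutes the resulting CID; here a stub stands in for that call.
func resolveListPath(mode, path string, statHash func(string) (string, error)) (string, error) {
	switch mode {
	case "ipfs":
		return url.JoinPath("/ipfs", path)
	case "ipns":
		return url.JoinPath("/ipns", path)
	case "mfs":
		hash, err := statHash(path)
		if err != nil {
			return "", err
		}
		return url.JoinPath("/ipfs", hash)
	default:
		return "", fmt.Errorf("mode error")
	}
}

func main() {
	fakeStat := func(string) (string, error) { return "QmExampleCID", nil } // placeholder CID
	for _, mode := range []string{"ipfs", "ipns", "mfs"} {
		p, _ := resolveListPath(mode, "/photos", fakeStat)
		fmt.Println(mode, "->", p)
	}
}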
@@ -8,14 +8,16 @@ import (
 type Addition struct {
 	// Usually one of two
 	driver.RootPath
+	Mode     string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"`
 	Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"`
-	Gateway  string `json:"gateway" default:"https://ipfs.io"`
+	Gateway  string `json:"gateway" default:"http://127.0.0.1:8080"`
 }
 
 var config = driver.Config{
 	Name:        "IPFS API",
 	DefaultRoot: "/",
 	LocalSort:   true,
+	OnlyProxy:   false,
 }
 
 func init() {
@@ -315,9 +315,6 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 		if utils.IsCanceled(upCtx) {
 			break
 		}
-		if err = sem.Acquire(ctx, 1); err != nil {
-			break
-		}
 		i, part, byteSize := i, part, initUpdload.PartSize
 		if part.PartNumber == uploadPartData.PartTotal {
 			byteSize = initUpdload.LastPartSize
@@ -325,6 +322,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 
 		// step.4
 		threadG.Go(func(ctx context.Context) error {
+			if err = sem.Acquire(ctx, 1); err != nil {
+				return err
+			}
 			defer sem.Release(1)
 			reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)
 			req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader))
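The MoPan change above moves sem.Acquire from the dispatch loop into the worker itself, so a slow acquire can no longer stall the loop that also checks for cancellation. A minimal sketch of that pattern with a plain weighted semaphore (threadG in the driver is alist's task group; errgroup stands in for it here):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // at most 2 parts in flight
	g, ctx := errgroup.WithContext(ctx)

	for part := 1; part <= 5; part++ {
		part := part
		g.Go(func() error {
			// Acquire inside the worker, as in the patched driver, so the
			// spawning loop never blocks on the semaphore.
			if err := sem.Acquire(ctx, 1); err != nil {
				return err
			}
			defer sem.Release(1)
			fmt.Println("uploading part", part)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}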
@@ -69,7 +69,7 @@ func (d *PikPak) Init(ctx context.Context) (err error) {
 		d.ClientVersion = PCClientVersion
 		d.PackageName = PCPackageName
 		d.Algorithms = PCAlgorithms
-		d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
+		d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
 	}
 
 	if d.Addition.CaptchaToken != "" && d.Addition.RefreshToken == "" {
@@ -28,34 +28,32 @@ import (
 )
 
 var AndroidAlgorithms = []string{
-	"7xOq4Z8s",
-	"QE9/9+IQco",
-	"WdX5J9CPLZp",
-	"NmQ5qFAXqH3w984cYhMeC5TJR8j",
-	"cc44M+l7GDhav",
-	"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
-	"wla81BUVSmDkctHDpUT",
-	"c6wMr1sm1WxiR3i8LDAm3W",
-	"hRLrEQCFNYi0PFPV",
-	"o1J41zIraDtJPNuhBu7Ifb/q3",
-	"U",
-	"RrbZvV0CTu3gaZJ56PVKki4IeP",
-	"NNuRbLckJqUp1Do0YlrKCUP",
-	"UUwnBbipMTvInA0U0E9",
-	"VzGc",
+	"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
+	"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
+	"Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA",
+	"VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz",
+	"u5ujk5sM62gpJOsB/1Gu/zsfgfZO",
+	"dXYIiBOAHZgzSruaQ2Nhrqc2im",
+	"z5jUTBSIpBN9g4qSJGlidNAutX6",
+	"KJE2oveZ34du/g1tiimm",
 }
 
 var WebAlgorithms = []string{
-	"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
-	"uSUX02HYJ1IkyLdhINEFcCf7l2",
-	"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
-	"3binT1s/5a1pu3fGsN",
-	"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
-	"DYS3StqnAEKdGddRP8CJrxUSFh",
-	"crquW+4",
-	"ryKqvW9B9hly+JAymXCIfag5Z",
-	"Hr08T/NDTX1oSJfHk90c",
-	"i",
+	"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
+	"+r6CQVxjzJV6LCV",
+	"F",
+	"pFJRC",
+	"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
+	"/750aCr4lm/Sly/c",
+	"RB+DT/gZCrbV",
+	"",
+	"CyLsf7hdkIRxRm215hl",
+	"7xHvLi2tOYP0Y92b",
+	"ZGTXXxu8E/MIWaEDB+Sm/",
+	"1UI3",
+	"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
+	"ihtqpG6FMt65+Xk+tWUH2",
+	"NhXXU9rg4XXdzo7u5o",
 }
 
 var PCAlgorithms = []string{
@@ -80,17 +78,17 @@ const (
 const (
 	AndroidClientID      = "YNxT9w7GMdWvEOKa"
 	AndroidClientSecret  = "dbw2OtmVEeuUvIptb1Coyg"
-	AndroidClientVersion = "1.49.3"
+	AndroidClientVersion = "1.53.2"
 	AndroidPackageName   = "com.pikcloud.pikpak"
-	AndroidSdkVersion    = "2.0.4.204101"
+	AndroidSdkVersion    = "2.0.6.206003"
 	WebClientID          = "YUMx5nI8ZU8Ap8pm"
 	WebClientSecret      = "dbw2OtmVEeuUvIptb1Coyg"
-	WebClientVersion     = "undefined"
+	WebClientVersion     = "2.0.0"
 	WebPackageName       = "drive.mypikpak.com"
 	WebSdkVersion        = "8.0.3"
 	PCClientID           = "YvtoWO6GNHiuCl7x"
 	PCClientSecret       = "1NIH5R1IEe2pAxZE3hv3uA"
-	PCClientVersion      = "undefined" // 2.5.6.4831
+	PCClientVersion      = "undefined" // 2.6.11.4955
 	PCPackageName        = "mypikpak.com"
 	PCSdkVersion         = "8.0.3"
 )
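The algorithm strings above are opaque salts consumed by the driver's captcha signing, which is why both the lists and the client versions rotate together with each PikPak release. Assuming the chained-MD5 construction this driver family uses (each salt appended in order, digest re-hashed), a sketch with placeholder identity values:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"time"
)

func md5Hex(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:])
}

func main() {
	// Two salts from the list above; the real driver uses the full slice,
	// and feeds its client ID, client version, package name and device ID.
	algorithms := []string{
		"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
		"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
	}
	timestamp := fmt.Sprint(time.Now().UnixMilli())
	str := "clientID" + "clientVersion" + "packageName" + "deviceID" + timestamp
	for _, alg := range algorithms {
		str = md5Hex(str + alg) // append salt, re-hash
	}
	fmt.Println("captcha_sign =", "1."+str)
}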
@@ -66,7 +66,7 @@ func (d *PikPakShare) Init(ctx context.Context) error {
 		d.ClientVersion = PCClientVersion
 		d.PackageName = PCPackageName
 		d.Algorithms = PCAlgorithms
-		d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.5.6.4831 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
+		d.UserAgent = "MainWindow Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) PikPak/2.6.11.4955 Chrome/100.0.4896.160 Electron/18.3.15 Safari/537.36"
 	}
 
 	// get CaptchaToken
@@ -17,34 +17,32 @@ import (
 )
 
 var AndroidAlgorithms = []string{
-	"7xOq4Z8s",
-	"QE9/9+IQco",
-	"WdX5J9CPLZp",
-	"NmQ5qFAXqH3w984cYhMeC5TJR8j",
-	"cc44M+l7GDhav",
-	"KxGjo/wHB+Yx8Lf7kMP+/m9I+",
-	"wla81BUVSmDkctHDpUT",
-	"c6wMr1sm1WxiR3i8LDAm3W",
-	"hRLrEQCFNYi0PFPV",
-	"o1J41zIraDtJPNuhBu7Ifb/q3",
-	"U",
-	"RrbZvV0CTu3gaZJ56PVKki4IeP",
-	"NNuRbLckJqUp1Do0YlrKCUP",
-	"UUwnBbipMTvInA0U0E9",
-	"VzGc",
+	"SOP04dGzk0TNO7t7t9ekDbAmx+eq0OI1ovEx",
+	"nVBjhYiND4hZ2NCGyV5beamIr7k6ifAsAbl",
+	"Ddjpt5B/Cit6EDq2a6cXgxY9lkEIOw4yC1GDF28KrA",
+	"VVCogcmSNIVvgV6U+AochorydiSymi68YVNGiz",
+	"u5ujk5sM62gpJOsB/1Gu/zsfgfZO",
+	"dXYIiBOAHZgzSruaQ2Nhrqc2im",
+	"z5jUTBSIpBN9g4qSJGlidNAutX6",
+	"KJE2oveZ34du/g1tiimm",
 }
 
 var WebAlgorithms = []string{
-	"fyZ4+p77W1U4zcWBUwefAIFhFxvADWtT1wzolCxhg9q7etmGUjXr",
-	"uSUX02HYJ1IkyLdhINEFcCf7l2",
-	"iWt97bqD/qvjIaPXB2Ja5rsBWtQtBZZmaHH2rMR41",
-	"3binT1s/5a1pu3fGsN",
-	"8YCCU+AIr7pg+yd7CkQEY16lDMwi8Rh4WNp5",
-	"DYS3StqnAEKdGddRP8CJrxUSFh",
-	"crquW+4",
-	"ryKqvW9B9hly+JAymXCIfag5Z",
-	"Hr08T/NDTX1oSJfHk90c",
-	"i",
+	"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
+	"+r6CQVxjzJV6LCV",
+	"F",
+	"pFJRC",
+	"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
+	"/750aCr4lm/Sly/c",
+	"RB+DT/gZCrbV",
+	"",
+	"CyLsf7hdkIRxRm215hl",
+	"7xHvLi2tOYP0Y92b",
+	"ZGTXXxu8E/MIWaEDB+Sm/",
+	"1UI3",
+	"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
+	"ihtqpG6FMt65+Xk+tWUH2",
+	"NhXXU9rg4XXdzo7u5o",
 }
 
 var PCAlgorithms = []string{
@@ -63,17 +61,17 @@ var PCAlgorithms = []string{
 const (
 	AndroidClientID      = "YNxT9w7GMdWvEOKa"
 	AndroidClientSecret  = "dbw2OtmVEeuUvIptb1Coyg"
-	AndroidClientVersion = "1.49.3"
+	AndroidClientVersion = "1.53.2"
 	AndroidPackageName   = "com.pikcloud.pikpak"
-	AndroidSdkVersion    = "2.0.4.204101"
+	AndroidSdkVersion    = "2.0.6.206003"
 	WebClientID          = "YUMx5nI8ZU8Ap8pm"
 	WebClientSecret      = "dbw2OtmVEeuUvIptb1Coyg"
-	WebClientVersion     = "undefined"
+	WebClientVersion     = "2.0.0"
 	WebPackageName       = "drive.mypikpak.com"
 	WebSdkVersion        = "8.0.3"
 	PCClientID           = "YvtoWO6GNHiuCl7x"
 	PCClientSecret       = "1NIH5R1IEe2pAxZE3hv3uA"
-	PCClientVersion      = "undefined" // 2.5.6.4831
+	PCClientVersion      = "undefined" // 2.6.11.4955
 	PCPackageName        = "mypikpak.com"
 	PCSdkVersion         = "8.0.3"
 )
@@ -74,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
 			"Referer":    []string{d.conf.referer},
 			"User-Agent": []string{ua},
 		},
-		Concurrency: 2,
+		Concurrency: 3,
 		PartSize:    10 * utils.MB,
 	}, nil
 }
@@ -125,7 +125,6 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs
 }
 
 func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
-	files := &model.Link{}
 	var fileLink FileLink
 	_, err := d.request(ctx, "/file", "GET", func(req *resty.Request) {
 		req.SetQueryParams(map[string]string{
@@ -139,8 +138,12 @@ func (d *QuarkUCTV) Link(ctx context.Context, file model.Obj, args model.LinkArg
 	if err != nil {
 		return nil, err
 	}
-	files.URL = fileLink.Data.DownloadURL
-	return files, nil
+	return &model.Link{
+		URL:         fileLink.Data.DownloadURL,
+		Concurrency: 3,
+		PartSize:    10 * utils.MB,
+	}, nil
 }
 
 func (d *QuarkUCTV) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
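Both Quark changes above tune the same two knobs on model.Link: Concurrency (parallel range requests) and PartSize (bytes per range). A quick sketch of how a 25 MiB file splits under PartSize = 10 MiB, purely illustrative arithmetic:

package main

import "fmt"

func main() {
	const (
		mb       = 1024 * 1024
		size     = int64(25 * mb) // file size
		partSize = int64(10 * mb) // PartSize from the Link
	)
	// Ranges a downloader could request; 3 parts here, so Concurrency: 3
	// keeps them all in flight at once.
	for off := int64(0); off < size; off += partSize {
		end := off + partSize - 1
		if end >= size {
			end = size - 1
		}
		fmt.Printf("bytes=%d-%d\n", off, end)
	}
}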
@@ -316,7 +316,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 	// if the file already exists in Quqi server, there is no need to actually upload it
 	if uploadInitResp.Data.Exist {
 		// the file name returned by Quqi does not include the extension name
-		nodeName, nodeExt := uploadInitResp.Data.NodeName, rawExt(stream.GetName())
+		nodeName, nodeExt := uploadInitResp.Data.NodeName, utils.Ext(stream.GetName())
 		if nodeExt != "" {
 			nodeName = nodeName + "." + nodeExt
 		}
@@ -432,7 +432,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 		return nil, err
 	}
 	// the file name returned by Quqi does not include the extension name
-	nodeName, nodeExt := uploadFinishResp.Data.NodeName, rawExt(stream.GetName())
+	nodeName, nodeExt := uploadFinishResp.Data.NodeName, utils.Ext(stream.GetName())
 	if nodeExt != "" {
 		nodeName = nodeName + "." + nodeExt
 	}
@@ -9,7 +9,6 @@ import (
 	"io"
 	"net/http"
 	"net/url"
-	stdpath "path"
 	"strings"
 	"time"
 
@@ -115,16 +114,6 @@ func (d *Quqi) checkLogin() bool {
 	return true
 }
 
-// rawExt keeps the extension's original letter case
-func rawExt(name string) string {
-	ext := stdpath.Ext(name)
-	if strings.HasPrefix(ext, ".") {
-		ext = ext[1:]
-	}
-
-	return ext
-}
-
 // decryptKey gets the password
 func decryptKey(encodeKey string) []byte {
 	// remove illegal characters
@@ -8,9 +8,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"path"
 	"strconv"
-	"strings"
 
 	"github.com/alist-org/alist/v3/drivers/base"
 	"github.com/alist-org/alist/v3/internal/driver"
@@ -151,7 +149,7 @@ func (d *Vtencent) ApplyUploadUGC(signature string, stream model.FileStreamer) (
 	form := base.Json{
 		"signature": signature,
 		"videoName": stream.GetName(),
-		"videoType": strings.ReplaceAll(path.Ext(stream.GetName()), ".", ""),
+		"videoType": utils.Ext(stream.GetName()),
 		"videoSize": stream.GetSize(),
 	}
 	var resps RspApplyUploadUGC
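Quqi and Vtencent now share utils.Ext instead of each rolling their own extension helper. The behavioral difference to keep in mind: the removed rawExt preserved the extension's letter case, while utils.Ext (assuming it follows the usual alist helper) lower-cases it; both strip the leading dot. A sketch of the two behaviors side by side:

package main

import (
	"fmt"
	stdpath "path"
	"strings"
)

// rawExt is the helper removed from the Quqi driver: extension without the
// dot, case preserved.
func rawExt(name string) string {
	return strings.TrimPrefix(stdpath.Ext(name), ".")
}

// ext mirrors what utils.Ext is assumed to do: the same, but lower-cased.
func ext(name string) string {
	return strings.ToLower(rawExt(name))
}

func main() {
	fmt.Println(rawExt("Report.PDF")) // PDF
	fmt.Println(ext("Report.PDF"))    // pdf
}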
go.mod
@@ -1,8 +1,6 @@
 module github.com/alist-org/alist/v3
 
-go 1.23
+go 1.23.4
 
-toolchain go1.23.1
-
 require (
 	github.com/KirCute/ftpserverlib-pasvportmap v1.25.0
@@ -67,10 +65,10 @@ require (
 	github.com/xhofe/wopan-sdk-go v0.1.3
 	github.com/yeka/zip v0.0.0-20231116150916-03d6312748a9
 	github.com/zzzhr1990/go-common-entity v0.0.0-20221216044934-fd1c571e3a22
-	golang.org/x/crypto v0.31.0
+	golang.org/x/crypto v0.36.0
 	golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e
 	golang.org/x/image v0.19.0
-	golang.org/x/net v0.28.0
+	golang.org/x/net v0.37.0
 	golang.org/x/oauth2 v0.22.0
 	golang.org/x/time v0.8.0
 	google.golang.org/appengine v1.6.8
@@ -81,13 +79,19 @@ require (
 	gorm.io/gorm v1.25.11
 )
 
+require (
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
+)
+
 require (
 	github.com/STARRY-S/zip v0.2.1 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/blevesearch/go-faiss v1.0.20 // indirect
 	github.com/blevesearch/zapx/v16 v16.1.5 // indirect
 	github.com/bodgit/plumbing v1.3.0 // indirect
-	github.com/bodgit/sevenzip v1.6.0 // indirect
+	github.com/bodgit/sevenzip v1.6.0
 	github.com/bodgit/windows v1.0.1 // indirect
 	github.com/bytedance/sonic/loader v0.1.1 // indirect
 	github.com/charmbracelet/x/ansi v0.2.3 // indirect
@@ -107,14 +111,16 @@ require (
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
 	github.com/microcosm-cc/bluemonday v1.0.27
-	github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
+	github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78
 	github.com/sorairolake/lzip-go v0.3.5 // indirect
 	github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
 	github.com/therootcompany/xz v1.0.1 // indirect
 	github.com/ulikunitz/xz v0.5.12 // indirect
-	github.com/yuin/goldmark v1.7.8
-	go4.org v0.0.0-20230225012048-214862532bf5 // indirect
+	github.com/xhofe/115-sdk-go v0.1.5
+	github.com/yuin/goldmark v1.7.8
+	go4.org v0.0.0-20230225012048-214862532bf5
+	resty.dev/v3 v3.0.0-beta.2 // indirect
 )
 
 require (
@@ -246,10 +252,10 @@ require (
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
 	go.etcd.io/bbolt v1.3.8 // indirect
 	golang.org/x/arch v0.8.0 // indirect
-	golang.org/x/sync v0.10.0
-	golang.org/x/sys v0.28.0 // indirect
-	golang.org/x/term v0.27.0 // indirect
-	golang.org/x/text v0.21.0
+	golang.org/x/sync v0.12.0
+	golang.org/x/sys v0.31.0 // indirect
+	golang.org/x/term v0.30.0 // indirect
+	golang.org/x/text v0.23.0
 	golang.org/x/tools v0.24.0 // indirect
 	google.golang.org/api v0.169.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect
@@ -261,3 +267,5 @@ require (
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.1.7 // indirect
 )
+
+// replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go
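The commented-out replace at the bottom of go.mod is the standard trick for developing the 115 SDK against a local checkout. Uncommented it would read:

replace github.com/xhofe/115-sdk-go => ../../xhofe/115-sdk-go

With the directive active, go build resolves the module from that relative path instead of the module proxy, which is exactly why it has to stay commented out in committed code.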
go.sum
@@ -19,6 +19,12 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -606,6 +612,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo
 github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xhofe/115-sdk-go v0.1.5 h1:2+E92l6AX0+ABAkrdmDa9PE5ONN7wVLCaKkK80zETOg=
+github.com/xhofe/115-sdk-go v0.1.5/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
 github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
 github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
 github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=
@@ -663,8 +671,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
-golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
+golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -731,8 +739,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
 golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
-golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
-golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -752,8 +760,8 @@ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
+golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -793,8 +801,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -807,8 +815,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
 golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
-golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
-golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
+golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -825,8 +833,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
@@ -953,6 +961,8 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
 lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0=
 lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
 nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
+resty.dev/v3 v3.0.0-beta.2 h1:xu4mGAdbCLuc3kbk7eddWfWm4JfhwDtdapwss5nCjnQ=
+resty.dev/v3 v3.0.0-beta.2/go.mod h1:OgkqiPvTDtOuV4MGZuUDhwOpkY8enjOsjjMzeOHefy4=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -3,5 +3,7 @@ package archive
 import (
 	_ "github.com/alist-org/alist/v3/internal/archive/archives"
 	_ "github.com/alist-org/alist/v3/internal/archive/iso9660"
+	_ "github.com/alist-org/alist/v3/internal/archive/rardecode"
+	_ "github.com/alist-org/alist/v3/internal/archive/sevenzip"
 	_ "github.com/alist-org/alist/v3/internal/archive/zip"
 )
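The rardecode and sevenzip entries work like every other archive tool here: the blank import runs the package's init, which registers the tool, and nothing else ever references the package directly. The pattern in miniature (names are illustrative, not the real registry):

package main

import "fmt"

// A toy registry standing in for internal/archive/tool.
var registry []string

func register(name string) { registry = append(registry, name) }

// In the real code each tool package does this in its own init(),
// and internal/archive/all.go pulls it in with a blank import.
func init() { register("sevenzip") }

func main() {
	fmt.Println(registry) // [sevenzip]
}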
@@ -16,14 +16,18 @@ import (
 type Archives struct {
 }
 
-func (*Archives) AcceptedExtensions() []string {
+func (Archives) AcceptedExtensions() []string {
 	return []string{
-		".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z",
+		".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar",
 	}
 }
 
-func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
-	fsys, err := getFs(ss, args)
+func (Archives) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+	return map[string]tool.MultipartExtension{}
+}
+
+func (Archives) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+	fsys, err := getFs(ss[0], args)
 	if err != nil {
 		return nil, err
 	}
@@ -47,8 +51,8 @@ func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (mod
 	}, nil
 }
 
-func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
-	fsys, err := getFs(ss, args.ArchiveArgs)
+func (Archives) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+	fsys, err := getFs(ss[0], args.ArchiveArgs)
 	if err != nil {
 		return nil, err
 	}
@@ -69,8 +73,8 @@ func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([
 	})
 }
 
-func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
-	fsys, err := getFs(ss, args.ArchiveArgs)
+func (Archives) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+	fsys, err := getFs(ss[0], args.ArchiveArgs)
 	if err != nil {
 		return nil, 0, err
 	}
@@ -85,8 +89,8 @@ func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs)
 	return file, stat.Size(), nil
 }
 
-func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
-	fsys, err := getFs(ss, args.ArchiveArgs)
+func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+	fsys, err := getFs(ss[0], args.ArchiveArgs)
 	if err != nil {
 		return err
 	}
@@ -133,5 +137,5 @@ func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args m
 var _ tool.Tool = (*Archives)(nil)
 
 func init() {
-	tool.RegisterTool(&Archives{})
+	tool.RegisterTool(Archives{})
 }
@@ -14,19 +14,23 @@ import (
 type ISO9660 struct {
 }
 
-func (t *ISO9660) AcceptedExtensions() []string {
+func (ISO9660) AcceptedExtensions() []string {
     return []string{".iso"}
 }
 
-func (t *ISO9660) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+func (ISO9660) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+    return map[string]tool.MultipartExtension{}
+}
+
+func (ISO9660) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
     return &model.ArchiveMetaInfo{
         Comment:   "",
         Encrypted: false,
     }, nil
 }
 
-func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
-    img, err := getImage(ss)
+func (ISO9660) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    img, err := getImage(ss[0])
     if err != nil {
         return nil, err
     }
@@ -48,8 +52,8 @@ func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (
     return ret, nil
 }
 
-func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
-    img, err := getImage(ss)
+func (ISO9660) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+    img, err := getImage(ss[0])
     if err != nil {
         return nil, 0, err
     }
@@ -63,8 +67,8 @@ func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs
     return io.NopCloser(obj.Reader()), obj.Size(), nil
 }
 
-func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
-    img, err := getImage(ss)
+func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+    img, err := getImage(ss[0])
     if err != nil {
         return err
     }
@@ -92,5 +96,5 @@ func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args
 var _ tool.Tool = (*ISO9660)(nil)
 
 func init() {
-    tool.RegisterTool(&ISO9660{})
+    tool.RegisterTool(ISO9660{})
 }
140	internal/archive/rardecode/rardecode.go	Normal file
@@ -0,0 +1,140 @@
+package rardecode
+
+import (
+    "github.com/alist-org/alist/v3/internal/archive/tool"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/stream"
+    "github.com/nwaples/rardecode/v2"
+    "io"
+    "os"
+    stdpath "path"
+    "strings"
+)
+
+type RarDecoder struct{}
+
+func (RarDecoder) AcceptedExtensions() []string {
+    return []string{".rar"}
+}
+
+func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+    return map[string]tool.MultipartExtension{
+        ".part1.rar": {".part%d.rar", 2},
+    }
+}
+
+func (RarDecoder) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    l, err := list(ss, args.Password)
+    if err != nil {
+        return nil, err
+    }
+    _, tree := tool.GenerateMetaTreeFromFolderTraversal(l)
+    return &model.ArchiveMetaInfo{
+        Comment:   "",
+        Encrypted: false,
+        Tree:      tree,
+    }, nil
+}
+
+func (RarDecoder) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    return nil, errs.NotSupport
+}
+
+func (RarDecoder) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+    reader, err := getReader(ss, args.Password)
+    if err != nil {
+        return nil, 0, err
+    }
+    innerPath := strings.TrimPrefix(args.InnerPath, "/")
+    for {
+        var header *rardecode.FileHeader
+        header, err = reader.Next()
+        if err == io.EOF {
+            break
+        }
+        if err != nil {
+            return nil, 0, err
+        }
+        if header.Name == innerPath {
+            if header.IsDir {
+                break
+            }
+            return io.NopCloser(reader), header.UnPackedSize, nil
+        }
+    }
+    return nil, 0, errs.ObjectNotFound
+}
+
+func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+    reader, err := getReader(ss, args.Password)
+    if err != nil {
+        return err
+    }
+    if args.InnerPath == "/" {
+        for {
+            var header *rardecode.FileHeader
+            header, err = reader.Next()
+            if err == io.EOF {
+                break
+            }
+            if err != nil {
+                return err
+            }
+            name := header.Name
+            if header.IsDir {
+                name = name + "/"
+            }
+            err = decompress(reader, header, name, outputPath)
+            if err != nil {
+                return err
+            }
+        }
+    } else {
+        innerPath := strings.TrimPrefix(args.InnerPath, "/")
+        innerBase := stdpath.Base(innerPath)
+        createdBaseDir := false
+        for {
+            var header *rardecode.FileHeader
+            header, err = reader.Next()
+            if err == io.EOF {
+                break
+            }
+            if err != nil {
+                return err
+            }
+            name := header.Name
+            if header.IsDir {
+                name = name + "/"
+            }
+            if name == innerPath {
+                err = _decompress(reader, header, outputPath, up)
+                if err != nil {
+                    return err
+                }
+                break
+            } else if strings.HasPrefix(name, innerPath+"/") {
+                targetPath := stdpath.Join(outputPath, innerBase)
+                if !createdBaseDir {
+                    err = os.Mkdir(targetPath, 0700)
+                    if err != nil {
+                        return err
+                    }
+                    createdBaseDir = true
+                }
+                restPath := strings.TrimPrefix(name, innerPath+"/")
+                err = decompress(reader, header, restPath, targetPath)
+                if err != nil {
+                    return err
+                }
+            }
+        }
+    }
+    return nil
+}
+
+var _ tool.Tool = (*RarDecoder)(nil)
+
+func init() {
+    tool.RegisterTool(RarDecoder{})
+}
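Note on the multipart pattern RarDecoder registers above: `.part1.rar` maps to `MultipartExtension{".part%d.rar", 2}`, a format string for later volume names plus the index of the second part. A minimal, illustrative sketch of how such a pattern expands; the loop and the `movie` base name are assumptions for illustration, not alist's actual lookup code:

package main

import "fmt"

func main() {
    base := "movie"                 // object name with the ".part1.rar" suffix stripped
    partFileFormat := ".part%d.rar" // MultipartExtension.PartFileFormat
    secondPartIndex := 2            // MultipartExtension.SecondPartIndex

    // the first volume is the registered main file itself
    fmt.Println(base + ".part1.rar")
    // subsequent volumes follow the format string, starting at SecondPartIndex
    for i := secondPartIndex; i <= 4; i++ {
        fmt.Println(base + fmt.Sprintf(partFileFormat, i))
    }
    // prints movie.part1.rar through movie.part4.rar, one per line
}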
225	internal/archive/rardecode/utils.go	Normal file
@@ -0,0 +1,225 @@
+package rardecode
+
+import (
+    "fmt"
+    "github.com/alist-org/alist/v3/internal/archive/tool"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/stream"
+    "github.com/nwaples/rardecode/v2"
+    "io"
+    "io/fs"
+    "os"
+    stdpath "path"
+    "sort"
+    "strings"
+    "time"
+)
+
+type VolumeFile struct {
+    stream.SStreamReadAtSeeker
+    name string
+}
+
+func (v *VolumeFile) Name() string {
+    return v.name
+}
+
+func (v *VolumeFile) Size() int64 {
+    return v.SStreamReadAtSeeker.GetRawStream().GetSize()
+}
+
+func (v *VolumeFile) Mode() fs.FileMode {
+    return 0644
+}
+
+func (v *VolumeFile) ModTime() time.Time {
+    return v.SStreamReadAtSeeker.GetRawStream().ModTime()
+}
+
+func (v *VolumeFile) IsDir() bool {
+    return false
+}
+
+func (v *VolumeFile) Sys() any {
+    return nil
+}
+
+func (v *VolumeFile) Stat() (fs.FileInfo, error) {
+    return v, nil
+}
+
+func (v *VolumeFile) Close() error {
+    return nil
+}
+
+type VolumeFs struct {
+    parts map[string]*VolumeFile
+}
+
+func (v *VolumeFs) Open(name string) (fs.File, error) {
+    file, ok := v.parts[name]
+    if !ok {
+        return nil, fs.ErrNotExist
+    }
+    return file, nil
+}
+
+func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) {
+    if len(ss) == 1 {
+        reader, err := stream.NewReadAtSeeker(ss[0], 0)
+        if err != nil {
+            return "", nil, err
+        }
+        fileName := "file.rar"
+        fsys := &VolumeFs{parts: map[string]*VolumeFile{
+            fileName: {SStreamReadAtSeeker: reader, name: fileName},
+        }}
+        return fileName, rardecode.FileSystem(fsys), nil
+    } else {
+        parts := make(map[string]*VolumeFile, len(ss))
+        for i, s := range ss {
+            reader, err := stream.NewReadAtSeeker(s, 0)
+            if err != nil {
+                return "", nil, err
+            }
+            fileName := fmt.Sprintf("file.part%d.rar", i+1)
+            parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName}
+        }
+        return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil
+    }
+}
+
+type WrapReader struct {
+    files []*rardecode.File
+}
+
+func (r *WrapReader) Files() []tool.SubFile {
+    ret := make([]tool.SubFile, 0, len(r.files))
+    for _, f := range r.files {
+        ret = append(ret, &WrapFile{File: f})
+    }
+    return ret
+}
+
+type WrapFile struct {
+    *rardecode.File
+}
+
+func (f *WrapFile) Name() string {
+    if f.File.IsDir {
+        return f.File.Name + "/"
+    }
+    return f.File.Name
+}
+
+func (f *WrapFile) FileInfo() fs.FileInfo {
+    return &WrapFileInfo{File: f.File}
+}
+
+type WrapFileInfo struct {
+    *rardecode.File
+}
+
+func (f *WrapFileInfo) Name() string {
+    return stdpath.Base(f.File.Name)
+}
+
+func (f *WrapFileInfo) Size() int64 {
+    return f.File.UnPackedSize
+}
+
+func (f *WrapFileInfo) ModTime() time.Time {
+    return f.File.ModificationTime
+}
+
+func (f *WrapFileInfo) IsDir() bool {
+    return f.File.IsDir
+}
+
+func (f *WrapFileInfo) Sys() any {
+    return nil
+}
+
+func list(ss []*stream.SeekableStream, password string) (*WrapReader, error) {
+    fileName, fsOpt, err := makeOpts(ss)
+    if err != nil {
+        return nil, err
+    }
+    opts := []rardecode.Option{fsOpt}
+    if password != "" {
+        opts = append(opts, rardecode.Password(password))
+    }
+    files, err := rardecode.List(fileName, opts...)
+    // rardecode does not guarantee that parent directories appear before their children in the listing
+    // a parent path is always shorter than its child paths, so sorting by length puts parents first
+    sort.Slice(files, func(i, j int) bool {
+        return len(files[i].Name) < len(files[j].Name)
+    })
+    if err != nil {
+        return nil, filterPassword(err)
+    }
+    return &WrapReader{files: files}, nil
+}
+
+func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, error) {
+    fileName, fsOpt, err := makeOpts(ss)
+    if err != nil {
+        return nil, err
+    }
+    opts := []rardecode.Option{fsOpt}
+    if password != "" {
+        opts = append(opts, rardecode.Password(password))
+    }
+    rc, err := rardecode.OpenReader(fileName, opts...)
+    if err != nil {
+        return nil, filterPassword(err)
+    }
+    ss[0].Closers.Add(rc)
+    return &rc.Reader, nil
+}
+
+func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
+    targetPath := outputPath
+    dir, base := stdpath.Split(filePath)
+    if dir != "" {
+        targetPath = stdpath.Join(targetPath, dir)
+        err := os.MkdirAll(targetPath, 0700)
+        if err != nil {
+            return err
+        }
+    }
+    if base != "" {
+        err := _decompress(reader, header, targetPath, func(_ float64) {})
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
+    f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+    if err != nil {
+        return err
+    }
+    defer func() { _ = f.Close() }()
+    _, err = io.Copy(f, &stream.ReaderUpdatingProgress{
+        Reader: &stream.SimpleReaderWithSize{
+            Reader: reader,
+            Size:   header.UnPackedSize,
+        },
+        UpdateProgress: up,
+    })
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+func filterPassword(err error) error {
+    if err != nil && strings.Contains(err.Error(), "password") {
+        return errs.WrongArchivePassword
+    }
+    return err
+}
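The VolumeFile/VolumeFs pair above exists because rardecode/v2 opens volume files by name; alist satisfies that through rardecode's fs.FS hook (`rardecode.FileSystem`) with an in-memory filesystem whose entries wrap seekable streams. A standalone sketch of the same shape using the standard library's fstest.MapFS, where byte slices stand in for the streams; this is not alist code:

package main

import (
    "fmt"
    "testing/fstest"
)

func main() {
    // each key plays the role of one VolumeFile; contents stand in for streams
    fsys := fstest.MapFS{
        "file.part1.rar": {Data: []byte("volume 1 bytes")},
        "file.part2.rar": {Data: []byte("volume 2 bytes")},
    }
    // rardecode.FileSystem(fsys) would receive an fs.FS just like this one
    data, err := fsys.ReadFile("file.part2.rar")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", data) // volume 2 bytes
}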
72	internal/archive/sevenzip/sevenzip.go	Normal file
@@ -0,0 +1,72 @@
+package sevenzip
+
+import (
+    "io"
+    "strings"
+
+    "github.com/alist-org/alist/v3/internal/archive/tool"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/stream"
+)
+
+type SevenZip struct{}
+
+func (SevenZip) AcceptedExtensions() []string {
+    return []string{".7z"}
+}
+
+func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+    return map[string]tool.MultipartExtension{
+        ".7z.001": {".7z.%.3d", 2},
+    }
+}
+
+func (SevenZip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    reader, err := getReader(ss, args.Password)
+    if err != nil {
+        return nil, err
+    }
+    _, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: reader})
+    return &model.ArchiveMetaInfo{
+        Comment:   "",
+        Encrypted: args.Password != "",
+        Tree:      tree,
+    }, nil
+}
+
+func (SevenZip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    return nil, errs.NotSupport
+}
+
+func (SevenZip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+    reader, err := getReader(ss, args.Password)
+    if err != nil {
+        return nil, 0, err
+    }
+    innerPath := strings.TrimPrefix(args.InnerPath, "/")
+    for _, file := range reader.File {
+        if file.Name == innerPath {
+            r, e := file.Open()
+            if e != nil {
+                return nil, 0, e
+            }
+            return r, file.FileInfo().Size(), nil
+        }
+    }
+    return nil, 0, errs.ObjectNotFound
+}
+
+func (SevenZip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+    reader, err := getReader(ss, args.Password)
+    if err != nil {
+        return err
+    }
+    return tool.DecompressFromFolderTraversal(&WrapReader{Reader: reader}, outputPath, args, up)
+}
+
+var _ tool.Tool = (*SevenZip)(nil)
+
+func init() {
+    tool.RegisterTool(SevenZip{})
+}
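A quick, illustrative check of the `.7z.%.3d` pattern registered above: `%.3d` zero-pads the part index to three digits, matching the conventional `.7z.001`, `.7z.002`, ... volume names. The `archive` base name is assumed:

package main

import "fmt"

func main() {
    for i := 2; i <= 3; i++ {
        fmt.Println("archive" + fmt.Sprintf(".7z.%.3d", i))
    }
    // Output:
    // archive.7z.002
    // archive.7z.003
}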
61	internal/archive/sevenzip/utils.go	Normal file
@@ -0,0 +1,61 @@
+package sevenzip
+
+import (
+    "errors"
+    "github.com/alist-org/alist/v3/internal/archive/tool"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/stream"
+    "github.com/bodgit/sevenzip"
+    "io"
+    "io/fs"
+)
+
+type WrapReader struct {
+    Reader *sevenzip.Reader
+}
+
+func (r *WrapReader) Files() []tool.SubFile {
+    ret := make([]tool.SubFile, 0, len(r.Reader.File))
+    for _, f := range r.Reader.File {
+        ret = append(ret, &WrapFile{f: f})
+    }
+    return ret
+}
+
+type WrapFile struct {
+    f *sevenzip.File
+}
+
+func (f *WrapFile) Name() string {
+    return f.f.Name
+}
+
+func (f *WrapFile) FileInfo() fs.FileInfo {
+    return f.f.FileInfo()
+}
+
+func (f *WrapFile) Open() (io.ReadCloser, error) {
+    return f.f.Open()
+}
+
+func getReader(ss []*stream.SeekableStream, password string) (*sevenzip.Reader, error) {
+    readerAt, err := stream.NewMultiReaderAt(ss)
+    if err != nil {
+        return nil, err
+    }
+    sr, err := sevenzip.NewReaderWithPassword(readerAt, readerAt.Size(), password)
+    if err != nil {
+        return nil, filterPassword(err)
+    }
+    return sr, nil
+}
+
+func filterPassword(err error) error {
+    if err != nil {
+        var e *sevenzip.ReadError
+        if errors.As(err, &e) && e.Encrypted {
+            return errs.WrongArchivePassword
+        }
+    }
+    return err
+}
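`stream.NewMultiReaderAt` hands the sevenzip library a single io.ReaderAt spanning every volume. A toy equivalent over byte slices, assuming contiguous, in-order parts; this sketch is not alist's implementation:

package main

import (
    "bytes"
    "fmt"
    "io"
)

type part struct {
    r    io.ReaderAt
    off  int64 // absolute offset of this part's first byte
    size int64
}

type multiReaderAt struct{ parts []part }

func (m *multiReaderAt) ReadAt(p []byte, off int64) (int, error) {
    n := 0
    for _, pt := range m.parts {
        if n == len(p) {
            break
        }
        cur := off + int64(n) // absolute position of the next byte to fill
        if cur >= pt.off+pt.size {
            continue // this part lies entirely before the current position
        }
        k, err := pt.r.ReadAt(p[n:], cur-pt.off)
        n += k
        if err != nil && err != io.EOF {
            return n, err // a short read at a part boundary just moves on
        }
    }
    if n < len(p) {
        return n, io.EOF
    }
    return n, nil
}

func main() {
    a, b := []byte("hello "), []byte("world")
    m := &multiReaderAt{parts: []part{
        {bytes.NewReader(a), 0, int64(len(a))},
        {bytes.NewReader(b), int64(len(a)), int64(len(b))},
    }}
    buf := make([]byte, 8)
    n, err := m.ReadAt(buf, 3)
    fmt.Printf("%q %v\n", buf[:n], err) // "lo world" <nil>
}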
@@ -6,10 +6,16 @@ import (
     "io"
 )
 
+type MultipartExtension struct {
+    PartFileFormat  string
+    SecondPartIndex int
+}
+
 type Tool interface {
     AcceptedExtensions() []string
-    GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
-    List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
-    Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
-    Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
+    AcceptedMultipartExtensions() map[string]MultipartExtension
+    GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
+    List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
+    Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
+    Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
 }
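A reduced model of the interface change above: every method now takes `[]*stream.SeekableStream`, so single-volume tools keep reading `ss[0]` while multi-volume tools see all parts, and each tool reports its multipart naming patterns. The types below are simplified stand-ins, not the real alist definitions:

package main

import "fmt"

// simplified stand-ins for stream.SeekableStream and tool.MultipartExtension
type Stream struct{ Name string }

type MultipartExtension struct {
    PartFileFormat  string
    SecondPartIndex int
}

type Tool interface {
    AcceptedExtensions() []string
    AcceptedMultipartExtensions() map[string]MultipartExtension
}

// a single-volume tool simply reports no multipart patterns
type Iso struct{}

func (Iso) AcceptedExtensions() []string { return []string{".iso"} }
func (Iso) AcceptedMultipartExtensions() map[string]MultipartExtension {
    return map[string]MultipartExtension{}
}

// a multi-volume tool maps its "main" extension to a pattern for later parts
type Rar struct{}

func (Rar) AcceptedExtensions() []string { return []string{".rar"} }
func (Rar) AcceptedMultipartExtensions() map[string]MultipartExtension {
    return map[string]MultipartExtension{".part1.rar": {".part%d.rar", 2}}
}

func main() {
    for _, t := range []Tool{Iso{}, Rar{}} {
        fmt.Println(t.AcceptedExtensions(), t.AcceptedMultipartExtensions())
    }
}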
204	internal/archive/tool/helper.go	Normal file
@@ -0,0 +1,204 @@
+package tool
+
+import (
+    "io"
+    "io/fs"
+    "os"
+    stdpath "path"
+    "strings"
+
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/stream"
+)
+
+type SubFile interface {
+    Name() string
+    FileInfo() fs.FileInfo
+    Open() (io.ReadCloser, error)
+}
+
+type CanEncryptSubFile interface {
+    IsEncrypted() bool
+    SetPassword(password string)
+}
+
+type ArchiveReader interface {
+    Files() []SubFile
+}
+
+func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) {
+    encrypted := false
+    dirMap := make(map[string]*model.ObjectTree)
+    for _, file := range r.Files() {
+        if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
+            encrypted = true
+        }
+
+        name := strings.TrimPrefix(file.Name(), "/")
+        var dir string
+        var dirObj *model.ObjectTree
+        isNewFolder := false
+        if !file.FileInfo().IsDir() {
+            // first, add the file to the folder that contains it
+            dir = stdpath.Dir(name)
+            dirObj = dirMap[dir]
+            if dirObj == nil {
+                isNewFolder = dir != "."
+                dirObj = &model.ObjectTree{}
+                dirObj.IsFolder = true
+                dirObj.Name = stdpath.Base(dir)
+                dirObj.Modified = file.FileInfo().ModTime()
+                dirMap[dir] = dirObj
+            }
+            dirObj.Children = append(
+                dirObj.Children, &model.ObjectTree{
+                    Object: *MakeModelObj(file.FileInfo()),
+                },
+            )
+        } else {
+            dir = strings.TrimSuffix(name, "/")
+            dirObj = dirMap[dir]
+            if dirObj == nil {
+                isNewFolder = dir != "."
+                dirObj = &model.ObjectTree{}
+                dirMap[dir] = dirObj
+            }
+            dirObj.IsFolder = true
+            dirObj.Name = stdpath.Base(dir)
+            dirObj.Modified = file.FileInfo().ModTime()
+        }
+        if isNewFolder {
+            // add the folder to its parent folder;
+            // some archives record only file paths, not folder entries,
+            // so create every missing parent folder in a loop
+            parentDir := stdpath.Dir(dir)
+            for {
+                parentDirObj := dirMap[parentDir]
+                if parentDirObj == nil {
+                    parentDirObj = &model.ObjectTree{}
+                    if parentDir != "." {
+                        parentDirObj.IsFolder = true
+                        parentDirObj.Name = stdpath.Base(parentDir)
+                        parentDirObj.Modified = file.FileInfo().ModTime()
+                    }
+                    dirMap[parentDir] = parentDirObj
+                }
+                parentDirObj.Children = append(parentDirObj.Children, dirObj)
+
+                parentDir = stdpath.Dir(parentDir)
+                if dirMap[parentDir] != nil {
+                    break
+                }
+                dirObj = parentDirObj
+            }
+        }
+    }
+    if len(dirMap) > 0 {
+        return encrypted, dirMap["."].GetChildren()
+    } else {
+        return encrypted, nil
+    }
+}
+
+func MakeModelObj(file os.FileInfo) *model.Object {
+    return &model.Object{
+        Name:     file.Name(),
+        Size:     file.Size(),
+        Modified: file.ModTime(),
+        IsFolder: file.IsDir(),
+    }
+}
+
+type WrapFileInfo struct {
+    model.Obj
+}
+
+func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+    var err error
+    files := r.Files()
+    if args.InnerPath == "/" {
+        for i, file := range files {
+            name := file.Name()
+            err = decompress(file, name, outputPath, args.Password)
+            if err != nil {
+                return err
+            }
+            up(float64(i+1) * 100.0 / float64(len(files)))
+        }
+    } else {
+        innerPath := strings.TrimPrefix(args.InnerPath, "/")
+        innerBase := stdpath.Base(innerPath)
+        createdBaseDir := false
+        for _, file := range files {
+            name := file.Name()
+            if name == innerPath {
+                err = _decompress(file, outputPath, args.Password, up)
+                if err != nil {
+                    return err
+                }
+                break
+            } else if strings.HasPrefix(name, innerPath+"/") {
+                targetPath := stdpath.Join(outputPath, innerBase)
+                if !createdBaseDir {
+                    err = os.Mkdir(targetPath, 0700)
+                    if err != nil {
+                        return err
+                    }
+                    createdBaseDir = true
+                }
+                restPath := strings.TrimPrefix(name, innerPath+"/")
+                err = decompress(file, restPath, targetPath, args.Password)
+                if err != nil {
+                    return err
+                }
+            }
+        }
+    }
+    return nil
+}
+
+func decompress(file SubFile, filePath, outputPath, password string) error {
+    targetPath := outputPath
+    dir, base := stdpath.Split(filePath)
+    if dir != "" {
+        targetPath = stdpath.Join(targetPath, dir)
+        err := os.MkdirAll(targetPath, 0700)
+        if err != nil {
+            return err
+        }
+    }
+    if base != "" {
+        err := _decompress(file, targetPath, password, func(_ float64) {})
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func _decompress(file SubFile, targetPath, password string, up model.UpdateProgress) error {
+    if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
+        encrypt.SetPassword(password)
+    }
+    rc, err := file.Open()
+    if err != nil {
+        return err
+    }
+    defer func() { _ = rc.Close() }()
+    f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
+    if err != nil {
+        return err
+    }
+    defer func() { _ = f.Close() }()
+    _, err = io.Copy(f, &stream.ReaderUpdatingProgress{
+        Reader: &stream.SimpleReaderWithSize{
+            Reader: rc,
+            Size:   file.FileInfo().Size(),
+        },
+        UpdateProgress: up,
+    })
+    if err != nil {
+        return err
+    }
+    return nil
+}
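GenerateMetaTreeFromFolderTraversal above rebuilds a directory tree from a flat list of entry names, materializing parent folders that the archive never recorded as entries. A compact sketch of that idea with a simplified node type, not model.ObjectTree:

package main

import (
    "fmt"
    stdpath "path"
    "sort"
)

type node struct {
    name     string
    children map[string]*node
}

func insert(root *node, p string) {
    cur := root
    // walk each path segment, materializing missing parents as we go
    for _, seg := range splitPath(p) {
        child, ok := cur.children[seg]
        if !ok {
            child = &node{name: seg, children: map[string]*node{}}
            cur.children[seg] = child
        }
        cur = child
    }
}

func splitPath(p string) []string {
    dir, base := stdpath.Split(p)
    if dir == "" {
        return []string{base}
    }
    return append(splitPath(stdpath.Clean(dir)), base)
}

func dump(n *node, indent string) {
    keys := make([]string, 0, len(n.children))
    for k := range n.children {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    for _, k := range keys {
        fmt.Println(indent + k)
        dump(n.children[k], indent+"  ")
    }
}

func main() {
    root := &node{children: map[string]*node{}}
    // an archive that records only file paths, no folder entries
    for _, p := range []string{"docs/a/readme.md", "docs/b.txt", "main.go"} {
        insert(root, p)
    }
    dump(root, "")
}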
@@ -5,19 +5,28 @@ import (
 )
 
 var (
     Tools = make(map[string]Tool)
+    MultipartExtensions = make(map[string]MultipartExtension)
 )
 
 func RegisterTool(tool Tool) {
     for _, ext := range tool.AcceptedExtensions() {
         Tools[ext] = tool
     }
+    for mainFile, ext := range tool.AcceptedMultipartExtensions() {
+        MultipartExtensions[mainFile] = ext
+        Tools[mainFile] = tool
+    }
 }
 
-func GetArchiveTool(ext string) (Tool, error) {
+func GetArchiveTool(ext string) (*MultipartExtension, Tool, error) {
     t, ok := Tools[ext]
     if !ok {
-        return nil, errs.UnknownArchiveFormat
+        return nil, nil, errs.UnknownArchiveFormat
     }
-    return t, nil
+    partExt, ok := MultipartExtensions[ext]
+    if !ok {
+        return nil, t, nil
+    }
+    return &partExt, t, nil
 }
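A self-contained re-run of the registry semantics above: the multipart main file name is registered in Tools as well, so one lookup returns both the tool and, when present, its volume pattern. Stand-in types and tool names, not the real package:

package main

import (
    "errors"
    "fmt"
)

type MultipartExtension struct {
    PartFileFormat  string
    SecondPartIndex int
}

var (
    tools               = map[string]string{".rar": "rar", ".part1.rar": "rar", ".iso": "iso"}
    multipartExtensions = map[string]MultipartExtension{".part1.rar": {".part%d.rar", 2}}
    errUnknownFormat    = errors.New("unknown archive format")
)

func getArchiveTool(ext string) (*MultipartExtension, string, error) {
    t, ok := tools[ext]
    if !ok {
        return nil, "", errUnknownFormat
    }
    partExt, ok := multipartExtensions[ext]
    if !ok {
        return nil, t, nil // single-volume archive
    }
    return &partExt, t, nil // multipart: the caller must collect the other volumes
}

func main() {
    for _, ext := range []string{".iso", ".part1.rar", ".docx"} {
        partExt, t, err := getArchiveTool(ext)
        fmt.Println(ext, t, partExt, err)
    }
}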
@@ -2,8 +2,13 @@ package zip
 
 import (
     "bytes"
+    "io"
+    "io/fs"
+    stdpath "path"
+    "strings"
+
+    "github.com/alist-org/alist/v3/internal/archive/tool"
     "github.com/alist-org/alist/v3/internal/errs"
-    "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/internal/stream"
     "github.com/saintfish/chardet"
     "github.com/yeka/zip"
@@ -16,65 +21,62 @@ import (
     "golang.org/x/text/encoding/unicode"
     "golang.org/x/text/encoding/unicode/utf32"
     "golang.org/x/text/transform"
-    "io"
-    "os"
-    stdpath "path"
-    "strings"
 )
 
-func toModelObj(file os.FileInfo) *model.Object {
-    return &model.Object{
-        Name:     decodeName(file.Name()),
-        Size:     file.Size(),
-        Modified: file.ModTime(),
-        IsFolder: file.IsDir(),
-    }
-}
-
-func decompress(file *zip.File, filePath, outputPath, password string) error {
-    targetPath := outputPath
-    dir, base := stdpath.Split(filePath)
-    if dir != "" {
-        targetPath = stdpath.Join(targetPath, dir)
-        err := os.MkdirAll(targetPath, 0700)
-        if err != nil {
-            return err
-        }
-    }
-    if base != "" {
-        err := _decompress(file, targetPath, password, func(_ float64) {})
-        if err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
-func _decompress(file *zip.File, targetPath, password string, up model.UpdateProgress) error {
-    if file.IsEncrypted() {
-        file.SetPassword(password)
-    }
-    rc, err := file.Open()
-    if err != nil {
-        return err
-    }
-    defer rc.Close()
-    f, err := os.OpenFile(stdpath.Join(targetPath, decodeName(file.FileInfo().Name())), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
-    if err != nil {
-        return err
-    }
-    defer f.Close()
-    _, err = io.Copy(f, &stream.ReaderUpdatingProgress{
-        Reader: &stream.SimpleReaderWithSize{
-            Reader: rc,
-            Size:   file.FileInfo().Size(),
-        },
-        UpdateProgress: up,
-    })
-    if err != nil {
-        return err
-    }
-    return nil
-}
+type WrapReader struct {
+    Reader *zip.Reader
+}
+
+func (r *WrapReader) Files() []tool.SubFile {
+    ret := make([]tool.SubFile, 0, len(r.Reader.File))
+    for _, f := range r.Reader.File {
+        ret = append(ret, &WrapFile{f: f})
+    }
+    return ret
+}
+
+type WrapFileInfo struct {
+    fs.FileInfo
+}
+
+func (f *WrapFileInfo) Name() string {
+    return decodeName(f.FileInfo.Name())
+}
+
+type WrapFile struct {
+    f *zip.File
+}
+
+func (f *WrapFile) Name() string {
+    return decodeName(f.f.Name)
+}
+
+func (f *WrapFile) FileInfo() fs.FileInfo {
+    return &WrapFileInfo{FileInfo: f.f.FileInfo()}
+}
+
+func (f *WrapFile) Open() (io.ReadCloser, error) {
+    return f.f.Open()
+}
+
+func (f *WrapFile) IsEncrypted() bool {
+    return f.f.IsEncrypted()
+}
+
+func (f *WrapFile) SetPassword(password string) {
+    f.f.SetPassword(password)
+}
+
+func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
+    if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
+        // FIXME: Incorrect parsing method for standard multipart zip format
+        ss = append(ss[1:], ss[0])
+    }
+    reader, err := stream.NewMultiReaderAt(ss)
+    if err != nil {
+        return nil, err
+    }
+    return zip.NewReader(reader, reader.Size())
+}
 
 func filterPassword(err error) error {
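The FIXME in getReader above concerns spanned zips (`.z01`, `.z02`, ..., `.zip`): the main `.zip` file is actually the final volume, but alist lists it first, so the streams are rotated before concatenation. The rotation in isolation, with names standing in for streams:

package main

import "fmt"

func main() {
    names := []string{"a.zip", "a.z01", "a.z02"} // order as alist discovers them
    // append(ss[1:], ss[0]) from the diff, applied to names
    rotated := append(names[1:], names[0])
    fmt.Println(rotated) // [a.z01 a.z02 a.zip]
}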
@@ -2,7 +2,6 @@ package zip
 
 import (
     "io"
-    "os"
     stdpath "path"
     "strings"
 
@@ -10,106 +9,37 @@ import (
     "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/internal/stream"
-    "github.com/yeka/zip"
 )
 
 type Zip struct {
 }
 
-func (*Zip) AcceptedExtensions() []string {
-    return []string{".zip"}
+func (Zip) AcceptedExtensions() []string {
+    return []string{}
 }
 
-func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
-    reader, err := stream.NewReadAtSeeker(ss, 0)
-    if err != nil {
-        return nil, err
-    }
-    zipReader, err := zip.NewReader(reader, ss.GetSize())
-    if err != nil {
-        return nil, err
-    }
-    encrypted := false
-    dirMap := make(map[string]*model.ObjectTree)
-    dirMap["."] = &model.ObjectTree{}
-    for _, file := range zipReader.File {
-        if file.IsEncrypted() {
-            encrypted = true
-        }
-
-        name := strings.TrimPrefix(decodeName(file.Name), "/")
-        var dir string
-        var dirObj *model.ObjectTree
-        isNewFolder := false
-        if !file.FileInfo().IsDir() {
-            // first, add the file to the folder that contains it
-            dir = stdpath.Dir(name)
-            dirObj = dirMap[dir]
-            if dirObj == nil {
-                isNewFolder = true
-                dirObj = &model.ObjectTree{}
-                dirObj.IsFolder = true
-                dirObj.Name = stdpath.Base(dir)
-                dirObj.Modified = file.ModTime()
-                dirMap[dir] = dirObj
-            }
-            dirObj.Children = append(
-                dirObj.Children, &model.ObjectTree{
-                    Object: *toModelObj(file.FileInfo()),
-                },
-            )
-        } else {
-            dir = strings.TrimSuffix(name, "/")
-            dirObj = dirMap[dir]
-            if dirObj == nil {
-                isNewFolder = true
-                dirObj = &model.ObjectTree{}
-                dirMap[dir] = dirObj
-            }
-            dirObj.IsFolder = true
-            dirObj.Name = stdpath.Base(dir)
-            dirObj.Modified = file.ModTime()
-            dirObj.Children = make([]model.ObjTree, 0)
-        }
-        if isNewFolder {
-            // add the folder to its parent folder
-            dir = stdpath.Dir(dir)
-            pDirObj := dirMap[dir]
-            if pDirObj != nil {
-                pDirObj.Children = append(pDirObj.Children, dirObj)
-                continue
-            }
-
-            for {
-                // some archives record only file paths, not folder entries
-                pDirObj = &model.ObjectTree{}
-                pDirObj.IsFolder = true
-                pDirObj.Name = stdpath.Base(dir)
-                pDirObj.Modified = file.ModTime()
-                dirMap[dir] = pDirObj
-                pDirObj.Children = append(pDirObj.Children, dirObj)
-                dir = stdpath.Dir(dir)
-                if dirMap[dir] != nil {
-                    break
-                }
-                dirObj = pDirObj
-            }
-        }
-    }
-
+func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
+    return map[string]tool.MultipartExtension{
+        ".zip":     {".z%.2d", 1},
+        ".zip.001": {".zip.%.3d", 2},
+    }
+}
+
+func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    zipReader, err := getReader(ss)
+    if err != nil {
+        return nil, err
+    }
+    encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
     return &model.ArchiveMetaInfo{
         Comment:   zipReader.Comment,
         Encrypted: encrypted,
-        Tree:      dirMap["."].GetChildren(),
+        Tree:      tree,
     }, nil
 }
 
-func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
-    reader, err := stream.NewReadAtSeeker(ss, 0)
-    if err != nil {
-        return nil, err
-    }
-    zipReader, err := zip.NewReader(reader, ss.GetSize())
+func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    zipReader, err := getReader(ss)
     if err != nil {
         return nil, err
     }
@@ -134,13 +64,13 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
             if dir == nil && len(strs) == 2 {
                 dir = &model.Object{
                     Name:     strs[0],
-                    Modified: ss.ModTime(),
+                    Modified: ss[0].ModTime(),
                     IsFolder: true,
                 }
             }
             continue
         }
-        ret = append(ret, toModelObj(file.FileInfo()))
+        ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
     }
     if len(ret) == 0 && dir != nil {
         ret = append(ret, dir)
@@ -157,7 +87,7 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
             continue
         }
         exist = true
-        ret = append(ret, toModelObj(file.FileInfo()))
+        ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
     }
     if !exist {
         return nil, errs.ObjectNotFound
@@ -166,12 +96,8 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
     }
 }
 
-func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
-    reader, err := stream.NewReadAtSeeker(ss, 0)
-    if err != nil {
-        return nil, 0, err
-    }
-    zipReader, err := zip.NewReader(reader, ss.GetSize())
+func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+    zipReader, err := getReader(ss)
     if err != nil {
         return nil, 0, err
     }
@@ -191,58 +117,16 @@ func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.
     return nil, 0, errs.ObjectNotFound
 }
 
-func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
-    reader, err := stream.NewReadAtSeeker(ss, 0)
+func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
+    zipReader, err := getReader(ss)
     if err != nil {
         return err
     }
-    zipReader, err := zip.NewReader(reader, ss.GetSize())
-    if err != nil {
-        return err
-    }
-    if args.InnerPath == "/" {
-        for i, file := range zipReader.File {
-            name := decodeName(file.Name)
-            err = decompress(file, name, outputPath, args.Password)
-            if err != nil {
-                return err
-            }
-            up(float64(i+1) * 100.0 / float64(len(zipReader.File)))
-        }
-    } else {
-        innerPath := strings.TrimPrefix(args.InnerPath, "/")
-        innerBase := stdpath.Base(innerPath)
-        createdBaseDir := false
-        for _, file := range zipReader.File {
-            name := decodeName(file.Name)
-            if name == innerPath {
-                err = _decompress(file, outputPath, args.Password, up)
-                if err != nil {
-                    return err
-                }
-                break
-            } else if strings.HasPrefix(name, innerPath+"/") {
-                targetPath := stdpath.Join(outputPath, innerBase)
-                if !createdBaseDir {
-                    err = os.Mkdir(targetPath, 0700)
-                    if err != nil {
-                        return err
-                    }
-                    createdBaseDir = true
-                }
-                restPath := strings.TrimPrefix(name, innerPath+"/")
-                err = decompress(file, restPath, targetPath, args.Password)
-                if err != nil {
-                    return err
-                }
-            }
-        }
-    }
-    return nil
+    return tool.DecompressFromFolderTraversal(&WrapReader{Reader: zipReader}, outputPath, args, up)
 }
 
 var _ tool.Tool = (*Zip)(nil)
 
 func init() {
-    tool.RegisterTool(&Zip{})
+    tool.RegisterTool(Zip{})
 }
@@ -68,8 +68,13 @@ func InitDB() {
     {
         dsn := database.DSN
         if dsn == "" {
-            dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
-                database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode)
+            if database.Password != "" {
+                dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
+                    database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode)
+            } else {
+                dsn = fmt.Sprintf("host=%s user=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
+                    database.Host, database.User, database.Name, database.Port, database.SSLMode)
+            }
         }
         dB, err = gorm.Open(postgres.Open(dsn), gormConfig)
     }
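The two DSN shapes produced by the change above, shown concretely; host, user, and database values are placeholders, and the assumed motivation is that omitting the `password` key entirely supports passwordless authentication setups:

package main

import "fmt"

func main() {
    host, user, password, name, port, ssl := "localhost", "alist", "", "alist", 5432, "disable"
    var dsn string
    if password != "" {
        dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
            host, user, password, name, port, ssl)
    } else {
        // no password key at all, rather than an empty password value
        dsn = fmt.Sprintf("host=%s user=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
            host, user, name, port, ssl)
    }
    fmt.Println(dsn)
}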
@@ -79,13 +79,13 @@ type Remove interface {
 type Put interface {
     // Put a file (provided as a FileStreamer) into the driver
     // Besides the most basic upload functionality, the following features also need to be implemented:
-    // 1. Canceling (when `<-ctx.Done()` returns), by the following methods:
+    // 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
     //    (1) Use request methods that carry context, such as the following:
     //        a. http.NewRequestWithContext
     //        b. resty.Request.SetContext
     //        c. s3manager.Uploader.UploadWithContext
     //        d. utils.CopyWithCtx
-    //    (2) Use a `driver.ReaderWithCtx` or a `driver.NewLimitedUploadStream`
+    //    (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
     //    (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
     //        this is typically applicable to chunked uploads.
     // 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
@@ -4,17 +4,6 @@ import (
     "context"
     stderrors "errors"
     "fmt"
-    "github.com/alist-org/alist/v3/internal/archive/tool"
-    "github.com/alist-org/alist/v3/internal/conf"
-    "github.com/alist-org/alist/v3/internal/driver"
-    "github.com/alist-org/alist/v3/internal/errs"
-    "github.com/alist-org/alist/v3/internal/model"
-    "github.com/alist-org/alist/v3/internal/op"
-    "github.com/alist-org/alist/v3/internal/stream"
-    "github.com/alist-org/alist/v3/internal/task"
-    "github.com/pkg/errors"
-    log "github.com/sirupsen/logrus"
-    "github.com/xhofe/tache"
     "io"
     "math/rand"
     "mime"
@@ -25,6 +14,17 @@ import (
     "strconv"
     "strings"
     "time"
+
+    "github.com/alist-org/alist/v3/internal/conf"
+    "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
+    "github.com/alist-org/alist/v3/internal/model"
+    "github.com/alist-org/alist/v3/internal/op"
+    "github.com/alist-org/alist/v3/internal/stream"
+    "github.com/alist-org/alist/v3/internal/task"
+    "github.com/pkg/errors"
+    log "github.com/sirupsen/logrus"
+    "github.com/xhofe/tache"
 )
 
 type ArchiveDownloadTask struct {
@@ -37,7 +37,6 @@ type ArchiveDownloadTask struct {
     dstStorage   driver.Driver
     SrcStorageMp string
     DstStorageMp string
-    Tool         tool.Tool
 }
 
 func (t *ArchiveDownloadTask) GetName() string {
@@ -67,33 +66,39 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
     if t.srcStorage == nil {
         t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
     }
-    l, srcObj, err := op.Link(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
+    srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
         Header: http.Header{},
     })
     if err != nil {
         return nil, err
     }
-    fs := stream.FileStream{
-        Obj: srcObj,
-        Ctx: t.Ctx(),
-    }
-    ss, err := stream.NewSeekableStream(fs, l)
-    if err != nil {
-        return nil, err
-    }
     defer func() {
-        if err := ss.Close(); err != nil {
-            log.Errorf("failed to close file streamer, %v", err)
+        var e error
+        for _, s := range ss {
+            e = stderrors.Join(e, s.Close())
+        }
+        if e != nil {
+            log.Errorf("failed to close file streamer, %v", e)
         }
     }()
     var decompressUp model.UpdateProgress
     if t.CacheFull {
-        t.SetTotalBytes(srcObj.GetSize())
-        t.status = "getting src object"
-        _, err = ss.CacheFullInTempFileAndUpdateProgress(t.SetProgress)
-        if err != nil {
-            return nil, err
+        var total, cur int64 = 0, 0
+        for _, s := range ss {
+            total += s.GetSize()
         }
+        t.SetTotalBytes(total)
+        t.status = "getting src object"
+        for _, s := range ss {
+            _, err = s.CacheFullInTempFileAndUpdateProgress(func(p float64) {
+                t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) / float64(total))
+            })
+            cur += s.GetSize()
+            if err != nil {
+                return nil, err
+            }
+        }
+        t.SetProgress(100.0)
         decompressUp = func(_ float64) {}
     } else {
         decompressUp = t.SetProgress
@@ -103,7 +108,7 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
     if err != nil {
         return nil, err
     }
-    err = t.Tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
+    err = tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
     if err != nil {
         return nil, err
     }
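The caching loop above aggregates progress across volumes by weighting each stream's local 0-100 progress by its share of the total size (note that, as written in the diff, the computed value is a 0-1 fraction). The arithmetic in isolation, with fake sizes:

package main

import "fmt"

func main() {
    sizes := []int64{100, 300, 600}
    var total, cur int64
    for _, s := range sizes {
        total += s
    }
    // mirrors the callback in the diff: completed bytes plus this volume's
    // partial progress, divided by the grand total
    report := func(p float64, size int64) float64 {
        return (float64(cur) + float64(size)*p/100.0) / float64(total)
    }
    for _, size := range sizes {
        fmt.Printf("%.3f\n", report(50.0, size)) // halfway through this volume
        cur += size
    }
}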
@@ -344,11 +349,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
         return nil, err
     }
     }
-    ext := stdpath.Ext(srcObjActualPath)
-    t, err := tool.GetArchiveTool(ext)
-    if err != nil {
-        return nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
-    }
     taskCreator, _ := ctx.Value("user").(*model.User)
     tsk := &ArchiveDownloadTask{
         TaskExtension: task.TaskExtension{
@@ -361,7 +361,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
         DstDirPath:   dstDirActualPath,
         SrcStorageMp: srcStorage.GetStorage().MountPath,
         DstStorageMp: dstStorage.GetStorage().MountPath,
-        Tool:         t,
     }
     if ctx.Value(conf.NoTaskKey) != nil {
         uploadTask, err := tsk.RunWithoutPushUploadTask()
@@ -2,12 +2,15 @@ package fs
 
 import (
     "context"
+    log "github.com/sirupsen/logrus"
+    "io"
+
     "github.com/alist-org/alist/v3/internal/driver"
+    "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/internal/op"
     "github.com/alist-org/alist/v3/internal/task"
-    log "github.com/sirupsen/logrus"
-    "io"
+    "github.com/pkg/errors"
 )
 
 // the param named path of functions in this package is a mount path
@@ -168,3 +171,19 @@ func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
     }
     return res, err
 }
+
+func PutURL(ctx context.Context, path, dstName, urlStr string) error {
+    storage, dstDirActualPath, err := op.GetStorageAndActualPath(path)
+    if err != nil {
+        return errors.WithMessage(err, "failed get storage")
+    }
+    if storage.Config().NoUpload {
+        return errors.WithStack(errs.UploadNotSupported)
+    }
+    _, ok := storage.(driver.PutURL)
+    _, okResult := storage.(driver.PutURLResult)
+    if !ok && !okResult {
+        return errs.NotImplement
+    }
+    return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
+}
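A hedged sketch of how handler code might call the new fs.PutURL; the signature and import path match the diff, while the mount path, file name, URL, and the saveFromURL wrapper are hypothetical:

package handler

import (
    "context"

    "github.com/alist-org/alist/v3/internal/fs"
)

func saveFromURL(ctx context.Context) error {
    // capability checks (NoUpload, PutURL/PutURLResult) are delegated to fs.PutURL
    return fs.PutURL(ctx, "/mydrive/downloads", "file.bin", "https://example.com/file.bin")
}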
@@ -619,10 +619,9 @@ type Buf struct {
 // NewBuf is a buffer that can have 1 read & 1 write at the same time.
 // when read is faster than write, data is fed to the reader immediately after it is written
 func NewBuf(ctx context.Context, maxSize int) *Buf {
-    d := make([]byte, 0, maxSize)
     return &Buf{
         ctx:    ctx,
-        buffer: bytes.NewBuffer(d),
+        buffer: bytes.NewBuffer(make([]byte, 0, maxSize)),
         size:   maxSize,
     }
 }
@@ -677,5 +676,5 @@ func (br *Buf) Write(p []byte) (n int, err error) {
 }
 
 func (br *Buf) Close() {
-    br.buffer.Reset()
+    br.buffer = nil
 }
@@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time
 
     // use the request's Context,
     // otherwise sendContent reads no data and CopyBuffer keeps blocking even after the request is disconnected
-    ctx := context.WithValue(r.Context(), "request_header", &r.Header)
+    ctx := context.WithValue(r.Context(), "request_header", r.Header)
     switch {
     case len(ranges) == 0:
         reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1})
@ -2,20 +2,20 @@ package tool
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"net/url"
|
||||||
|
stdpath "path"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
_115 "github.com/alist-org/alist/v3/drivers/115"
|
_115 "github.com/alist-org/alist/v3/drivers/115"
|
||||||
"github.com/alist-org/alist/v3/drivers/pikpak"
|
"github.com/alist-org/alist/v3/drivers/pikpak"
|
||||||
"github.com/alist-org/alist/v3/drivers/thunder"
|
"github.com/alist-org/alist/v3/drivers/thunder"
|
||||||
"github.com/alist-org/alist/v3/internal/driver"
|
|
||||||
"github.com/alist-org/alist/v3/internal/model"
|
|
||||||
"github.com/alist-org/alist/v3/internal/setting"
|
|
||||||
"github.com/alist-org/alist/v3/internal/task"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/alist-org/alist/v3/internal/conf"
|
"github.com/alist-org/alist/v3/internal/conf"
|
||||||
"github.com/alist-org/alist/v3/internal/errs"
|
"github.com/alist-org/alist/v3/internal/errs"
|
||||||
|
"github.com/alist-org/alist/v3/internal/fs"
|
||||||
|
"github.com/alist-org/alist/v3/internal/model"
|
||||||
"github.com/alist-org/alist/v3/internal/op"
|
"github.com/alist-org/alist/v3/internal/op"
|
||||||
|
"github.com/alist-org/alist/v3/internal/setting"
|
||||||
|
"github.com/alist-org/alist/v3/internal/task"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
@@ -59,8 +59,11 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro
 		}
 	}
 	// try putting url
-	if args.Tool == "SimpleHttp" && tryPutUrl(ctx, storage, dstDirActualPath, args.URL) {
-		return nil, nil
+	if args.Tool == "SimpleHttp" {
+		err = tryPutUrl(ctx, args.DstDirPath, args.URL)
+		if err == nil || !errors.Is(err, errs.NotImplement) {
+			return nil, err
+		}
 	}

 	// get tool

@@ -118,17 +121,13 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro
 	return t, nil
 }

-func tryPutUrl(ctx context.Context, storage driver.Driver, dstDirActualPath, urlStr string) bool {
-	_, ok := storage.(driver.PutURL)
-	_, okResult := storage.(driver.PutURLResult)
-	if !ok && !okResult {
-		return false
-	}
+func tryPutUrl(ctx context.Context, path, urlStr string) error {
+	var dstName string
 	u, err := url.Parse(urlStr)
-	if err != nil {
-		return false
+	if err == nil {
+		dstName = stdpath.Base(u.Path)
+	} else {
+		dstName = "UnnamedURL"
 	}
-	dstName := path.Base(u.Path)
-	err = op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
-	return err == nil
+	return fs.PutURL(ctx, path, dstName, urlStr)
 }
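The rewritten tryPutUrl names the destination after the URL's final path segment and falls back to a placeholder only when parsing fails. A standalone sketch of that naming rule; dstNameFor is a hypothetical helper, not code from the repository:

package main

import (
	"fmt"
	"net/url"
	stdpath "path"
)

// dstNameFor mirrors the new tryPutUrl naming logic: use the last path
// segment, or a fixed fallback when the URL cannot be parsed.
func dstNameFor(raw string) string {
	u, err := url.Parse(raw)
	if err == nil {
		return stdpath.Base(u.Path)
	}
	return "UnnamedURL"
}

func main() {
	fmt.Println(dstNameFor("https://example.com/files/video.mp4")) // video.mp4
	fmt.Println(dstNameFor("://bad-url"))                          // UnnamedURL
}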
@@ -3,6 +3,7 @@ package op

 import (
 	"context"
 	stderrors "errors"
+	"fmt"
 	"io"
 	stdpath "path"
 	"strings"
@@ -54,21 +55,76 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	return meta, err
 }

-func getArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, *stream.SeekableStream, error) {
+func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) {
 	l, obj, err := Link(ctx, storage, path, args)
 	if err != nil {
 		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
 	}
-	ext := stdpath.Ext(obj.GetName())
-	t, err := tool.GetArchiveTool(ext)
+	baseName, ext, found := strings.Cut(obj.GetName(), ".")
+	if !found {
+		if l.MFile != nil {
+			_ = l.MFile.Close()
+		}
+		if l.RangeReadCloser != nil {
+			_ = l.RangeReadCloser.Close()
+		}
+		return nil, nil, nil, errors.Errorf("failed get archive tool: the obj does not have an extension.")
+	}
+	partExt, t, err := tool.GetArchiveTool("." + ext)
 	if err != nil {
-		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
+		var e error
+		partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
+		if e != nil {
+			if l.MFile != nil {
+				_ = l.MFile.Close()
+			}
+			if l.RangeReadCloser != nil {
+				_ = l.RangeReadCloser.Close()
+			}
+			return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
+		}
 	}
 	ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
 	if err != nil {
+		if l.MFile != nil {
+			_ = l.MFile.Close()
+		}
+		if l.RangeReadCloser != nil {
+			_ = l.RangeReadCloser.Close()
+		}
 		return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
 	}
-	return obj, t, ss, nil
+	ret := []*stream.SeekableStream{ss}
+	if partExt == nil {
+		return obj, t, ret, nil
+	} else {
+		index := partExt.SecondPartIndex
+		dir := stdpath.Dir(path)
+		for {
+			p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
+			var o model.Obj
+			l, o, err = Link(ctx, storage, p, args)
+			if err != nil {
+				break
+			}
+			ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
+			if err != nil {
+				if l.MFile != nil {
+					_ = l.MFile.Close()
+				}
+				if l.RangeReadCloser != nil {
+					_ = l.RangeReadCloser.Close()
+				}
+				for _, s := range ret {
+					_ = s.Close()
+				}
+				return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
+			}
+			ret = append(ret, ss)
+			index++
+		}
+		return obj, t, ret, nil
+	}
 }

 func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
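The multi-part handling leans on strings.Cut splitting at the first dot, so a name like movie.part1.rar keeps its compound extension, and later volumes are synthesized from the base name plus a per-tool part format. A sketch; the ".part%d.rar" format and start index 2 are hypothetical stand-ins for partExt.PartFileFormat and partExt.SecondPartIndex:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Cut splits on the FIRST dot, so a multi-part archive name
	// keeps its full compound extension, e.g. "part1.rar":
	base, ext, found := strings.Cut("movie.part1.rar", ".")
	fmt.Println(base, ext, found) // movie part1.rar true

	// Hypothetical format/index values, standing in for
	// partExt.PartFileFormat and partExt.SecondPartIndex:
	format, index := ".part%d.rar", 2
	fmt.Println("movie" + fmt.Sprintf(format, index)) // movie.part2.rar
}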
@@ -84,7 +140,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 		meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
 		if !errors.Is(err, errs.NotImplement) {
 			archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}
-			if meta.GetTree() != nil {
+			if meta != nil && meta.GetTree() != nil {
 				archiveMetaProvider.Sort = &storage.GetStorage().Sort
 			}
 			if !storage.Config().NoCache {
@@ -94,13 +150,17 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 			return obj, archiveMetaProvider, err
 		}
 	}
-	obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
+	obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
 	if err != nil {
 		return nil, nil, err
 	}
 	defer func() {
-		if err := ss.Close(); err != nil {
-			log.Errorf("failed to close file streamer, %v", err)
+		var e error
+		for _, s := range ss {
+			e = stderrors.Join(e, s.Close())
+		}
+		if e != nil {
+			log.Errorf("failed to close file streamer, %v", e)
 		}
 	}()
 	meta, err := t.GetMeta(ss, args.ArchiveArgs)

@@ -114,9 +174,9 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
 	if !storage.Config().NoCache {
 		Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
 		archiveMetaProvider.Expiration = &Expiration
-	} else if ss.Link.MFile == nil {
+	} else if ss[0].Link.MFile == nil {
 		// alias and crypt drivers
-		archiveMetaProvider.Expiration = ss.Link.Expiration
+		archiveMetaProvider.Expiration = ss[0].Link.Expiration
 	}
 	return obj, archiveMetaProvider, err
 }
@@ -188,13 +248,17 @@ func _listArchive(ctx context.Context, storage driver.Driver, path string, args
 			return obj, files, err
 		}
 	}
-	obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
+	obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
 	if err != nil {
 		return nil, nil, err
 	}
 	defer func() {
-		if err := ss.Close(); err != nil {
-			log.Errorf("failed to close file streamer, %v", err)
+		var e error
+		for _, s := range ss {
+			e = stderrors.Join(e, s.Close())
+		}
+		if e != nil {
+			log.Errorf("failed to close file streamer, %v", e)
 		}
 	}()
 	files, err := t.List(ss, args.ArchiveInnerArgs)
@@ -378,8 +442,8 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
 }

 type streamWithParent struct {
 	rc      io.ReadCloser
-	parent  *stream.SeekableStream
+	parents []*stream.SeekableStream
 }

 func (s *streamWithParent) Read(p []byte) (int, error) {

@@ -387,24 +451,31 @@ func (s *streamWithParent) Read(p []byte) (int, error) {
 }

 func (s *streamWithParent) Close() error {
-	err1 := s.rc.Close()
-	err2 := s.parent.Close()
-	return stderrors.Join(err1, err2)
+	err := s.rc.Close()
+	for _, ss := range s.parents {
+		err = stderrors.Join(err, ss.Close())
+	}
+	return err
 }

 func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
-	_, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
+	_, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
 	if err != nil {
 		return nil, 0, err
 	}
 	rc, size, err := t.Extract(ss, args)
 	if err != nil {
-		if e := ss.Close(); e != nil {
+		var e error
+		for _, s := range ss {
+			e = stderrors.Join(e, s.Close())
+		}
+		if e != nil {
 			log.Errorf("failed to close file streamer, %v", e)
+			err = stderrors.Join(err, e)
 		}
 		return nil, 0, err
 	}
-	return &streamWithParent{rc: rc, parent: ss}, size, nil
+	return &streamWithParent{rc: rc, parents: ss}, size, nil
 }

 func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {
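streamWithParent ties an extracted entry's reader to the archive volumes behind it: Close tears down the inner reader plus every parent and folds the errors with errors.Join, which yields nil when every input is nil. A self-contained sketch of the same shape:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// readCloserWithParents mirrors streamWithParent: reads are served by rc,
// and Close tears down rc plus every parent stream it depends on, folding
// the individual errors with errors.Join.
type readCloserWithParents struct {
	rc      io.ReadCloser
	parents []io.Closer
}

func (s *readCloserWithParents) Read(p []byte) (int, error) { return s.rc.Read(p) }

func (s *readCloserWithParents) Close() error {
	err := s.rc.Close()
	for _, p := range s.parents {
		err = errors.Join(err, p.Close())
	}
	return err
}

func main() {
	entry := io.NopCloser(strings.NewReader("entry data"))
	parent := io.NopCloser(strings.NewReader("archive volume"))
	rc := &readCloserWithParents{rc: entry, parents: []io.Closer{parent}}
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b), rc.Close()) // entry data <nil>
}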
@@ -17,7 +17,6 @@ func CreateSSHPublicKey(k *model.SSHPublicKey) (error, bool) {
 	if err != nil {
 		return err, false
 	}
-	k.KeyStr = string(pubKey.Marshal())
 	k.Fingerprint = ssh.FingerprintSHA256(pubKey)
 	k.AddedTime = time.Now()
 	k.LastUsedTime = k.AddedTime
@@ -139,7 +139,7 @@ type RateLimitRangeReadCloser struct {
 	Limiter Limiter
 }

-func (rrc RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 	rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange)
 	if err != nil {
 		return nil, err
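With RangeRead moved to a pointer receiver, only *RateLimitRangeReadCloser satisfies the interface, which is exactly why the construction sites further down switch to &RateLimitRangeReadCloser{...}. A minimal illustration with stand-in types:

package main

import "fmt"

type Ranger interface{ RangeRead() string }

type limiter struct{ n int }

// Pointer receiver: only *limiter has RangeRead in its method set.
func (l *limiter) RangeRead() string { l.n++; return fmt.Sprint(l.n) }

func main() {
	// var r Ranger = limiter{} // compile error: limiter does not implement Ranger
	var r Ranger = &limiter{} // mirrors the switch to &RateLimitRangeReadCloser{...}
	fmt.Println(r.RangeRead(), r.RangeRead()) // 1 2 — mutations persist across calls
}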
@@ -14,6 +14,7 @@ import (
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/sirupsen/logrus"
+	"go4.org/readerutil"
 )

 type FileStream struct {
@ -159,6 +160,10 @@ var _ model.FileStreamer = (*FileStream)(nil)
|
|||||||
//var _ seekableStream = (*FileStream)(nil)
|
//var _ seekableStream = (*FileStream)(nil)
|
||||||
|
|
||||||
// for most internal stream, which is either RangeReadCloser or MFile
|
// for most internal stream, which is either RangeReadCloser or MFile
|
||||||
|
// Any functionality implemented based on SeekableStream should implement a Close method,
|
||||||
|
// whose only purpose is to close the SeekableStream object. If such functionality has
|
||||||
|
// additional resources that need to be closed, they should be added to the Closer property of
|
||||||
|
// the SeekableStream object and be closed together when the SeekableStream object is closed.
|
||||||
type SeekableStream struct {
|
type SeekableStream struct {
|
||||||
FileStream
|
FileStream
|
||||||
Link *model.Link
|
Link *model.Link
|
||||||
@ -196,7 +201,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
|
|||||||
return &ss, nil
|
return &ss, nil
|
||||||
}
|
}
|
||||||
if ss.Link.RangeReadCloser != nil {
|
if ss.Link.RangeReadCloser != nil {
|
||||||
ss.rangeReadCloser = RateLimitRangeReadCloser{
|
ss.rangeReadCloser = &RateLimitRangeReadCloser{
|
||||||
RangeReadCloserIF: ss.Link.RangeReadCloser,
|
RangeReadCloserIF: ss.Link.RangeReadCloser,
|
||||||
Limiter: ServerDownloadLimit,
|
Limiter: ServerDownloadLimit,
|
||||||
}
|
}
|
||||||
@ -208,7 +213,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rrc = RateLimitRangeReadCloser{
|
rrc = &RateLimitRangeReadCloser{
|
||||||
RangeReadCloserIF: rrc,
|
RangeReadCloserIF: rrc,
|
||||||
Limiter: ServerDownloadLimit,
|
Limiter: ServerDownloadLimit,
|
||||||
}
|
}
|
||||||
@ -364,7 +369,7 @@ type RangeReadReadAtSeeker struct {
|
|||||||
ss *SeekableStream
|
ss *SeekableStream
|
||||||
masterOff int64
|
masterOff int64
|
||||||
readers []*readerCur
|
readers []*readerCur
|
||||||
*headCache
|
headCache *headCache
|
||||||
}
|
}
|
||||||
|
|
||||||
type headCache struct {
|
type headCache struct {
|
||||||
@ -406,7 +411,7 @@ func (c *headCache) read(p []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (r *headCache) close() error {
|
func (r *headCache) Close() error {
|
||||||
for i := range r.bufs {
|
for i := range r.bufs {
|
||||||
r.bufs[i] = nil
|
r.bufs[i] = nil
|
||||||
}
|
}
|
||||||
@ -419,6 +424,7 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
|
|||||||
reader := r.readers[0]
|
reader := r.readers[0]
|
||||||
r.readers = r.readers[1:]
|
r.readers = r.readers[1:]
|
||||||
r.headCache = &headCache{readerCur: reader}
|
r.headCache = &headCache{readerCur: reader}
|
||||||
|
r.ss.Closers.Add(r.headCache)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -449,6 +455,18 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStr
|
|||||||
return r, nil
|
return r, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
|
||||||
|
readers := make([]readerutil.SizeReaderAt, 0, len(ss))
|
||||||
|
for _, s := range ss {
|
||||||
|
ra, err := NewReadAtSeeker(s, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
readers = append(readers, io.NewSectionReader(ra, 0, s.GetSize()))
|
||||||
|
}
|
||||||
|
return readerutil.NewMultiReaderAt(readers...), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
|
func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
|
||||||
return r.ss
|
return r.ss
|
||||||
}
|
}
|
||||||
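NewMultiReaderAt wraps each part stream in an io.SectionReader (which carries the part's size) and concatenates them with go4.org/readerutil into one random-access view, so a multi-volume archive reads like a single file. A runnable sketch of that composition over in-memory parts:

package main

import (
	"fmt"
	"io"
	"strings"

	"go4.org/readerutil"
)

func main() {
	// Two fixed-size "parts" backed by in-memory readers, composed the same
	// way NewMultiReaderAt composes part streams: SectionReader supplies the
	// per-part size, NewMultiReaderAt concatenates the address spaces.
	p1 := io.NewSectionReader(strings.NewReader("hello "), 0, 6)
	p2 := io.NewSectionReader(strings.NewReader("world"), 0, 5)
	m := readerutil.NewMultiReaderAt(p1, p2)

	buf := make([]byte, 5)
	if _, err := m.ReadAt(buf, 4); err != nil { // read spans the part boundary
		panic(err)
	}
	fmt.Printf("%q %d\n", buf, m.Size()) // "o wor" 11
}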
@ -559,9 +577,6 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *RangeReadReadAtSeeker) Close() error {
|
func (r *RangeReadReadAtSeeker) Close() error {
|
||||||
if r.headCache != nil {
|
|
||||||
_ = r.headCache.close()
|
|
||||||
}
|
|
||||||
return r.ss.Close()
|
return r.ss.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -19,11 +19,7 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
 	}
 	rangeReaderFunc := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
 		if link.Concurrency != 0 || link.PartSize != 0 {
-			requestHeader := ctx.Value("request_header")
-			if requestHeader == nil {
-				requestHeader = &http.Header{}
-			}
-			header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
+			header := net.ProcessHeader(nil, link.Header)
 			down := net.NewDownloader(func(d *net.Downloader) {
 				d.Concurrency = link.Concurrency
 				d.PartSize = link.PartSize

@@ -64,11 +60,7 @@ func GetRangeReadCloserFromLink(size int64, link *model.Link) (model.RangeReadCl
 }

 func RequestRangedHttp(ctx context.Context, link *model.Link, offset, length int64) (*http.Response, error) {
-	requestHeader := ctx.Value("request_header")
-	if requestHeader == nil {
-		requestHeader = &http.Header{}
-	}
-	header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
+	header := net.ProcessHeader(nil, link.Header)
 	header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)

 	return net.RequestHttp(ctx, "GET", header, link.URL)
@@ -45,7 +45,7 @@ func IsSubPath(path string, subPath string) bool {

 func Ext(path string) string {
 	ext := stdpath.Ext(path)
-	if strings.HasPrefix(ext, ".") {
+	if len(ext) > 0 && ext[0] == '.' {
 		ext = ext[1:]
 	}
 	return strings.ToLower(ext)
@@ -1,87 +1,25 @@
 package common

 import (
-	"bytes"
 	"context"
 	"fmt"
 	"io"
 	"net/http"
 	"net/url"
 	"os"
-	"strconv"
 	"strings"

-	"github.com/alist-org/alist/v3/internal/conf"
+	"maps"
+
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/net"
-	"github.com/alist-org/alist/v3/internal/setting"
 	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/http_range"
 	"github.com/alist-org/alist/v3/pkg/utils"
-	"github.com/microcosm-cc/bluemonday"
 	log "github.com/sirupsen/logrus"
-	"github.com/yuin/goldmark"
 )

-func processMarkdown(content []byte) ([]byte, error) {
-	var buf bytes.Buffer
-	if err := goldmark.New().Convert(content, &buf); err != nil {
-		return nil, fmt.Errorf("markdown conversion failed: %w", err)
-	}
-	return bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes()), nil
-}
-
 func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
-
-	// handle md files first
-	if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
-		var markdownContent []byte
-		var err error
-
-		if link.MFile != nil {
-			defer link.MFile.Close()
-			attachHeader(w, file)
-			markdownContent, err = io.ReadAll(link.MFile)
-			if err != nil {
-				return fmt.Errorf("failed to read markdown content: %w", err)
-			}
-		} else {
-			header := net.ProcessHeader(r.Header, link.Header)
-			res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL)
-			if err != nil {
-				return err
-			}
-			defer res.Body.Close()
-			for h, v := range res.Header {
-				w.Header()[h] = v
-			}
-			w.WriteHeader(res.StatusCode)
-			if r.Method == http.MethodHead {
-				return nil
-			}
-			markdownContent, err = io.ReadAll(res.Body)
-			if err != nil {
-				return fmt.Errorf("failed to read markdown content: %w", err)
-			}
-		}
-
-		safeHTML, err := processMarkdown(markdownContent)
-		if err != nil {
-			return err
-		}
-
-		safeHTMLReader := bytes.NewReader(safeHTML)
-		w.Header().Set("Content-Length", strconv.FormatInt(int64(len(safeHTML)), 10))
-		w.Header().Set("Content-Type", "text/html; charset=utf-8")
-		_, err = utils.CopyWithBuffer(w, safeHTMLReader)
-		if err != nil {
-			return err
-		}
-		return nil
-	}
-
 	if link.MFile != nil {
 		defer link.MFile.Close()
 		attachHeader(w, file)

@@ -112,9 +50,9 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
 	rangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
 		requestHeader := ctx.Value("request_header")
 		if requestHeader == nil {
-			requestHeader = &http.Header{}
+			requestHeader = http.Header{}
 		}
-		header := net.ProcessHeader(*(requestHeader.(*http.Header)), link.Header)
+		header := net.ProcessHeader(requestHeader.(http.Header), link.Header)
 		down := net.NewDownloader(func(d *net.Downloader) {
 			d.Concurrency = link.Concurrency
 			d.PartSize = link.PartSize

@@ -142,9 +80,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
 	}
 	defer res.Body.Close()

-	for h, v := range res.Header {
-		w.Header()[h] = v
-	}
+	maps.Copy(w.Header(), res.Header)
 	w.WriteHeader(res.StatusCode)
 	if r.Method == http.MethodHead {
 		return nil
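The header-forwarding loop collapses into a single maps.Copy call (Go 1.21+). Since http.Header's underlying type is map[string][]string, the copy has the same shallow per-key semantics as the loop it replaces:

package main

import (
	"fmt"
	"maps"
	"net/http"
)

func main() {
	dst := http.Header{}
	src := http.Header{"Content-Type": {"text/plain"}, "X-Trace": {"a", "b"}}
	// One call replaces the hand-written for-range copy loop;
	// each key's value slice is copied by reference, like the loop did.
	maps.Copy(dst, src)
	fmt.Println(dst.Get("Content-Type"), dst.Values("X-Trace"))
}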
@@ -1,10 +1,11 @@
 package handles

 import (
+	"encoding/json"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/task"
 	"net/url"
 	stdpath "path"
-	"strings"

 	"github.com/alist-org/alist/v3/internal/archive/tool"
 	"github.com/alist-org/alist/v3/internal/conf"
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type StringOrArray []string
|
||||||
|
|
||||||
|
func (s *StringOrArray) UnmarshalJSON(data []byte) error {
|
||||||
|
var value string
|
||||||
|
if err := json.Unmarshal(data, &value); err == nil {
|
||||||
|
*s = []string{value}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var sliceValue []string
|
||||||
|
if err := json.Unmarshal(data, &sliceValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*s = sliceValue
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
type ArchiveDecompressReq struct {
|
type ArchiveDecompressReq struct {
|
||||||
SrcDir string `json:"src_dir" form:"src_dir"`
|
SrcDir string `json:"src_dir" form:"src_dir"`
|
||||||
DstDir string `json:"dst_dir" form:"dst_dir"`
|
DstDir string `json:"dst_dir" form:"dst_dir"`
|
||||||
Name string `json:"name" form:"name"`
|
Name StringOrArray `json:"name" form:"name"`
|
||||||
ArchivePass string `json:"archive_pass" form:"archive_pass"`
|
ArchivePass string `json:"archive_pass" form:"archive_pass"`
|
||||||
InnerPath string `json:"inner_path" form:"inner_path"`
|
InnerPath string `json:"inner_path" form:"inner_path"`
|
||||||
CacheFull bool `json:"cache_full" form:"cache_full"`
|
CacheFull bool `json:"cache_full" form:"cache_full"`
|
||||||
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
|
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func FsArchiveDecompress(c *gin.Context) {
|
func FsArchiveDecompress(c *gin.Context) {
|
||||||
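StringOrArray keeps the endpoint backward compatible: a legacy scalar "name" and the new array form both decode to []string. A standalone demonstration of the two payload shapes:

package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the handler's type: a []string that also accepts a bare string.
type StringOrArray []string

func (s *StringOrArray) UnmarshalJSON(data []byte) error {
	var value string
	if err := json.Unmarshal(data, &value); err == nil { // scalar form
		*s = []string{value}
		return nil
	}
	var sliceValue []string
	if err := json.Unmarshal(data, &sliceValue); err != nil {
		return err
	}
	*s = sliceValue // array form
	return nil
}

func main() {
	var legacy, multi StringOrArray
	_ = json.Unmarshal([]byte(`"one.zip"`), &legacy)        // old clients
	_ = json.Unmarshal([]byte(`["a.zip","b.zip"]`), &multi) // new clients
	fmt.Println(legacy, multi) // [one.zip] [a.zip b.zip]
}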
@ -229,41 +246,51 @@ func FsArchiveDecompress(c *gin.Context) {
|
|||||||
common.ErrorResp(c, errs.PermissionDenied, 403)
|
common.ErrorResp(c, errs.PermissionDenied, 403)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, req.Name))
|
srcPaths := make([]string, 0, len(req.Name))
|
||||||
if err != nil {
|
for _, name := range req.Name {
|
||||||
common.ErrorResp(c, err, 403)
|
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name))
|
||||||
return
|
if err != nil {
|
||||||
|
common.ErrorResp(c, err, 403)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
srcPaths = append(srcPaths, srcPath)
|
||||||
}
|
}
|
||||||
dstDir, err := user.JoinPath(req.DstDir)
|
dstDir, err := user.JoinPath(req.DstDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
common.ErrorResp(c, err, 403)
|
common.ErrorResp(c, err, 403)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
t, err := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
|
tasks := make([]task.TaskExtensionInfo, 0, len(srcPaths))
|
||||||
ArchiveInnerArgs: model.ArchiveInnerArgs{
|
for _, srcPath := range srcPaths {
|
||||||
ArchiveArgs: model.ArchiveArgs{
|
t, e := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
|
||||||
LinkArgs: model.LinkArgs{
|
ArchiveInnerArgs: model.ArchiveInnerArgs{
|
||||||
Header: c.Request.Header,
|
ArchiveArgs: model.ArchiveArgs{
|
||||||
Type: c.Query("type"),
|
LinkArgs: model.LinkArgs{
|
||||||
HttpReq: c.Request,
|
Header: c.Request.Header,
|
||||||
|
Type: c.Query("type"),
|
||||||
|
HttpReq: c.Request,
|
||||||
|
},
|
||||||
|
Password: req.ArchivePass,
|
||||||
},
|
},
|
||||||
Password: req.ArchivePass,
|
InnerPath: utils.FixAndCleanPath(req.InnerPath),
|
||||||
},
|
},
|
||||||
InnerPath: utils.FixAndCleanPath(req.InnerPath),
|
CacheFull: req.CacheFull,
|
||||||
},
|
PutIntoNewDir: req.PutIntoNewDir,
|
||||||
CacheFull: req.CacheFull,
|
})
|
||||||
PutIntoNewDir: req.PutIntoNewDir,
|
if e != nil {
|
||||||
})
|
if errors.Is(e, errs.WrongArchivePassword) {
|
||||||
if err != nil {
|
common.ErrorResp(c, e, 202)
|
||||||
if errors.Is(err, errs.WrongArchivePassword) {
|
} else {
|
||||||
common.ErrorResp(c, err, 202)
|
common.ErrorResp(c, e, 500)
|
||||||
} else {
|
}
|
||||||
common.ErrorResp(c, err, 500)
|
return
|
||||||
|
}
|
||||||
|
if t != nil {
|
||||||
|
tasks = append(tasks, t)
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
common.SuccessResp(c, gin.H{
|
common.SuccessResp(c, gin.H{
|
||||||
"task": getTaskInfo(t),
|
"task": getTaskInfos(tasks),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -376,7 +403,7 @@ func ArchiveInternalExtract(c *gin.Context) {
|
|||||||
func ArchiveExtensions(c *gin.Context) {
|
func ArchiveExtensions(c *gin.Context) {
|
||||||
var ext []string
|
var ext []string
|
||||||
for key := range tool.Tools {
|
for key := range tool.Tools {
|
||||||
ext = append(ext, strings.TrimPrefix(key, "."))
|
ext = append(ext, key)
|
||||||
}
|
}
|
||||||
common.SuccessResp(c, ext)
|
common.SuccessResp(c, ext)
|
||||||
}
|
}
|
||||||
|
@@ -1,9 +1,12 @@
 package handles

 import (
+	"bytes"
 	"fmt"
 	"io"
+	"net/http"
 	stdpath "path"
+	"strconv"
 	"strings"

 	"github.com/alist-org/alist/v3/internal/conf"

@@ -15,7 +18,9 @@ import (
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/gin-gonic/gin"
+	"github.com/microcosm-cc/bluemonday"
 	log "github.com/sirupsen/logrus"
+	"github.com/yuin/goldmark"
 )

 func Down(c *gin.Context) {
|
|||||||
if proxyRange {
|
if proxyRange {
|
||||||
common.ProxyRange(link, file.GetSize())
|
common.ProxyRange(link, file.GetSize())
|
||||||
}
|
}
|
||||||
err = common.Proxy(c.Writer, c.Request, link, file)
|
|
||||||
|
//优先处理md文件
|
||||||
|
if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
|
||||||
|
w := c.Writer
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, file.GetSize()))
|
||||||
|
err = common.Proxy(&proxyResponseWriter{ResponseWriter: w, Writer: buf}, c.Request, link, file)
|
||||||
|
if err == nil && buf.Len() > 0 {
|
||||||
|
if w.Status() < 200 || w.Status() > 300 {
|
||||||
|
w.Write(buf.Bytes())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var html bytes.Buffer
|
||||||
|
if err = goldmark.Convert(buf.Bytes(), &html); err != nil {
|
||||||
|
err = fmt.Errorf("markdown conversion failed: %w", err)
|
||||||
|
} else {
|
||||||
|
buf.Reset()
|
||||||
|
err = bluemonday.UGCPolicy().SanitizeReaderToWriter(&html, buf)
|
||||||
|
if err == nil {
|
||||||
|
w.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10))
|
||||||
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||||
|
_, err = utils.CopyWithBuffer(c.Writer, buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err = common.Proxy(c.Writer, c.Request, link, file)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
common.ErrorResp(c, err, 500, true)
|
common.ErrorResp(c, err, 500, true)
|
||||||
return
|
return
|
||||||
@ -150,3 +182,12 @@ func canProxy(storage driver.Driver, filename string) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type proxyResponseWriter struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (pw *proxyResponseWriter) Write(p []byte) (int, error) {
|
||||||
|
return pw.Writer.Write(p)
|
||||||
|
}
|
||||||
|
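proxyResponseWriter lets localProxy reuse the ordinary proxy path while intercepting the body: headers and status codes still reach the real ResponseWriter, but Write is diverted into a buffer for the markdown/sanitizer pass. A sketch of that capture trick; captureWriter is an illustrative stand-in:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// Same shape as proxyResponseWriter: headers and status go to the embedded
// ResponseWriter, while the body is diverted into an io.Writer (a buffer).
// The explicit Write method resolves the ambiguity between the two embeds.
type captureWriter struct {
	http.ResponseWriter
	io.Writer
}

func (cw *captureWriter) Write(p []byte) (int, error) { return cw.Writer.Write(p) }

func main() {
	rec := httptest.NewRecorder()
	var buf bytes.Buffer
	w := &captureWriter{ResponseWriter: rec, Writer: &buf}
	fmt.Fprint(w, "# raw markdown") // handler output lands in buf, not rec
	fmt.Println(buf.String(), "| recorded body bytes:", rec.Body.Len())
}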
@@ -33,6 +33,8 @@ type DirReq struct {
 }

 type ObjResp struct {
+	Id    string `json:"id"`
+	Path  string `json:"path"`
 	Name  string `json:"name"`
 	Size  int64  `json:"size"`
 	IsDir bool   `json:"is_dir"`

@@ -210,6 +212,8 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
 	for _, obj := range objs {
 		thumb, _ := model.GetThumb(obj)
 		resp = append(resp, ObjResp{
+			Id:    obj.GetID(),
+			Path:  obj.GetPath(),
 			Name:  obj.GetName(),
 			Size:  obj.GetSize(),
 			IsDir: obj.IsDir(),

@@ -326,6 +330,8 @@ func FsGet(c *gin.Context) {
 	thumb, _ := model.GetThumb(obj)
 	common.SuccessResp(c, FsGetResp{
 		ObjResp: ObjResp{
+			Id:    obj.GetID(),
+			Path:  obj.GetPath(),
 			Name:  obj.GetName(),
 			Size:  obj.GetSize(),
 			IsDir: obj.IsDir(),
@@ -6,6 +6,7 @@ import (
 	"github.com/alist-org/alist/v3/server/common"
 	"github.com/gin-gonic/gin"
 	"strconv"
+	"strings"
 )

 type SSHKeyAddReq struct {

@@ -30,7 +31,7 @@ func AddMyPublicKey(c *gin.Context) {
 	}
 	key := &model.SSHPublicKey{
 		Title:  req.Title,
-		KeyStr: req.Key,
+		KeyStr: strings.TrimSpace(req.Key),
 		UserId: userObj.ID,
 	}
 	err, parsed := op.CreateSSHPublicKey(key)

@@ -113,11 +113,15 @@ func (d *SftpDriver) PublicKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s
 	}
 	marshal := string(key.Marshal())
 	for _, sk := range keys {
-		if marshal == sk.KeyStr {
-			sk.LastUsedTime = time.Now()
-			_ = op.UpdateSSHPublicKey(&sk)
-			return nil, nil
+		if marshal != sk.KeyStr {
+			pubKey, _, _, _, e := ssh.ParseAuthorizedKey([]byte(sk.KeyStr))
+			if e != nil || marshal != string(pubKey.Marshal()) {
+				continue
+			}
 		}
+		sk.LastUsedTime = time.Now()
+		_ = op.UpdateSSHPublicKey(&sk)
+		return nil, nil
 	}
 	return nil, errors.New("public key refused")
 }
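The auth fix rests on the fact that a stored authorized_keys line (base64 text, often with a trailing comment) can never equal key.Marshal(), the binary wire encoding, byte for byte; re-parsing the stored line normalizes both sides before comparing. A runnable sketch using a freshly generated key:

package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Generate a key and render it as an authorized_keys line with a comment.
	pub, _, _ := ed25519.GenerateKey(rand.Reader)
	sshPub, _ := ssh.NewPublicKey(pub)
	line := append(bytes.TrimSpace(ssh.MarshalAuthorizedKey(sshPub)), []byte(" laptop")...)

	// A raw compare of the stored line against key.Marshal() always fails:
	// the line is base64 text plus a comment, Marshal() is the binary blob.
	fmt.Println(string(line) == string(sshPub.Marshal())) // false

	// Re-parsing the stored line recovers the binary form, which is what
	// the fixed PublicKeyAuth compares against.
	parsed, _, _, _, _ := ssh.ParseAuthorizedKey(line)
	fmt.Println(bytes.Equal(parsed.Marshal(), sshPub.Marshal())) // true
}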