Compare commits

...

16 Commits

Author SHA1 Message Date
0cde4e73d6 feat(ipfs): better ipfs support (#8225)
* feat: better ipfs support

fixed MFS CRUD, added IPNS support

* Update driver.go

clean up
2025-03-27 23:25:23 +08:00
7b62dcb88c fix(baidu_netdisk): duplicate retry (#8210 redo #7972, link #8180) 2025-03-27 23:22:55 +08:00
c38dc6df7c fix(115_open): support multipart upload (#8229)
Co-authored-by: neverlee <neverlea@formail.com>
2025-03-27 23:22:08 +08:00
5668e4a4ea feat(doubao): add Doubao driver (#8232 closes #8020 #8206)
* feat(doubao): implement List()

* feat(doubao): implement Link()

* feat(doubao): implement MakeDir()

* refactor(doubao): add type Object to store key

* feat(doubao): implement Move()

* feat(doubao): implement Rename()

* feat(doubao): implement Remove()
2025-03-27 23:21:42 +08:00
1335f80362 feat(archive): support multipart archives (#8184 close #8015)
* feat(archive): multipart support & sevenzip tool

* feat(archive): rardecode tool

* feat(archive): support decompress multi-selected

* fix(archive): decompress response filter internal

* feat(archive): support multipart zip

* fix: more applicable AcceptedMultipartExtensions interface
2025-03-27 23:20:44 +08:00
704d3854df feat(alist_v3): support forward archive requests (#8230)
* feat(alist_v3): support forward archive requests

* fix: encode all inner path
2025-03-27 23:18:34 +08:00
44cc71d354 fix(cloudreve): enable SetContentLength for uploading to local policy (#8228 close #8174)
* fix(cloudreve): upload failure to return error msg instead of deletion success

* fix(cloudreve): enable SetContentLength for uploading to local policy

* refactor(cloudreve): move local policy upload logic to utils for better error handling

* refactor(cloudreve): unified upload code style

* refactor(cloudreve): improve user agent handling
2025-03-27 23:18:15 +08:00
9a9aee9ac6 feat(alias): support writing to non-ambiguous paths (#8216)
* feat(alias): support writing to non-ambiguous paths

* feat(alias): support extract concurrency

* fix(alias): extract url no pass query
2025-03-27 23:17:45 +08:00
4fcc3a187e fix(traffic): duplicate semaphore release when uploading (#8211 close #8180) 2025-03-27 23:15:47 +08:00
10a76c701d fix(db): support postgres trust/peer mode (#8198 close #8066) 2025-03-27 23:15:04 +08:00
6e13923225 fix(sftp-server): postgre cannot store control characters (#8188 close #8186) 2025-03-27 23:14:36 +08:00
32890da29f fix(115_open): upgrade 115-sdk-go dependency to v0.1.4 2025-03-21 19:06:09 +08:00
758554a40f fix(115_open): upgrade 115-sdk-go dependency to v0.1.3 (close #8169) 2025-03-19 21:47:42 +08:00
4563aea47e fix(115_open): rename delay to take effect (close #8156) 2025-03-18 22:25:04 +08:00
35d6f3b8fc fix(115_open): upgrade sdk (close #8151) 2025-03-18 22:21:50 +08:00
b4e6ab12d9 refactor: FilterReadMeScripts (#8154 close #8150)
* refactor: FilterReadMeScripts

* .
2025-03-18 22:02:33 +08:00
55 changed files with 2208 additions and 581 deletions


@ -2,7 +2,6 @@ package _115_open
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
@ -16,7 +15,6 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
sdk "github.com/xhofe/115-sdk-go"
)
@ -149,6 +147,10 @@ func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string)
if err != nil {
return nil, err
}
obj, ok := srcObj.(*Obj)
if ok {
obj.Fn = newName
}
return srcObj, nil
}
@ -261,18 +263,7 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
return err
}
// 4. upload
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(resp.Bucket)
if err != nil {
return err
}
err = bucket.PutObject(resp.Object, tempF,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(resp.Callback.Value.CallbackVar))),
)
err = d.multpartUpload(ctx, tempF, file, up, tokenResp, resp)
if err != nil {
return err
}

drivers/115_open/upload.go Normal file

@ -0,0 +1,140 @@
package _115_open
import (
"context"
"encoding/base64"
"io"
"time"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
sdk "github.com/xhofe/115-sdk-go"
)
func calPartSize(fileSize int64) int64 {
var partSize int64 = 20 * utils.MB
if fileSize > partSize {
if fileSize > 1*utils.TB { // file size over 1TB
partSize = 5 * utils.GB // file part size 5GB
} else if fileSize > 768*utils.GB { // over 768GB
partSize = 109951163 // ≈ 104.8576MB, splits 1TB into 10,000 parts
} else if fileSize > 512*utils.GB { // over 512GB
partSize = 82463373 // ≈ 78.6432MB
} else if fileSize > 384*utils.GB { // over 384GB
partSize = 54975582 // ≈ 52.4288MB
} else if fileSize > 256*utils.GB { // over 256GB
partSize = 41231687 // ≈ 39.3216MB
} else if fileSize > 128*utils.GB { // over 128GB
partSize = 27487791 // ≈ 26.2144MB
}
}
return partSize
}
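As a sanity check on these tiers: at the upper bound of each range the part count lands right at OSS's 10,000-part ceiling, which is what the odd-looking constants are tuned for. A standalone sketch (not part of the diff) that reproduces the arithmetic:

package main

import "fmt"

// Reproduces the calPartSize tier boundaries above: the largest file in
// each tier still splits into at most 10,000 parts.
func main() {
	const GB = int64(1) << 30
	tiers := []struct {
		maxSize  int64 // largest file size served by this tier
		partSize int64 // part size calPartSize picks for it
	}{
		{256 * GB, 27487791},
		{384 * GB, 41231687},
		{512 * GB, 54975582},
		{768 * GB, 82463373},
		{1024 * GB, 109951163},
	}
	for _, t := range tiers {
		parts := (t.maxSize + t.partSize - 1) / t.partSize // ceiling division
		fmt.Printf("%4dGB -> %d parts\n", t.maxSize/GB, parts) // prints 10000 each time
	}
}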
func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(initResp.Bucket)
if err != nil {
return err
}
err = bucket.PutObject(initResp.Object, tempF,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
)
return err
}
// type CallbackResult struct {
// State bool `json:"state"`
// Code int `json:"code"`
// Message string `json:"message"`
// Data struct {
// PickCode string `json:"pick_code"`
// FileName string `json:"file_name"`
// FileSize int64 `json:"file_size"`
// FileID string `json:"file_id"`
// ThumbURL string `json:"thumb_url"`
// Sha1 string `json:"sha1"`
// Aid int `json:"aid"`
// Cid string `json:"cid"`
// } `json:"data"`
// }
func (d *Open115) multpartUpload(ctx context.Context, tempF model.File, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
fileSize := stream.GetSize()
chunkSize := calPartSize(fileSize)
ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
if err != nil {
return err
}
bucket, err := ossClient.Bucket(initResp.Bucket)
if err != nil {
return err
}
imur, err := bucket.InitiateMultipartUpload(initResp.Object, oss.Sequential())
if err != nil {
return err
}
partNum := (stream.GetSize() + chunkSize - 1) / chunkSize
parts := make([]oss.UploadPart, partNum)
offset := int64(0)
for i := int64(1); i <= partNum; i++ {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
partSize := chunkSize
if i == partNum {
partSize = fileSize - (i-1)*chunkSize
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
err = retry.Do(func() error {
_ = rd.Reset()
rateLimitedRd := driver.NewLimitedUploadStream(ctx, rd)
part, err := bucket.UploadPart(imur, rateLimitedRd, partSize, int(i))
if err != nil {
return err
}
parts[i-1] = part
return nil
},
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
if err != nil {
return err
}
if i == partNum {
offset = fileSize
} else {
offset += partSize
}
up(float64(offset) / float64(fileSize))
}
// callbackRespBytes := make([]byte, 1024)
_, err = bucket.CompleteMultipartUpload(
imur,
parts,
oss.Callback(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.Callback))),
oss.CallbackVar(base64.StdEncoding.EncodeToString([]byte(initResp.Callback.Value.CallbackVar))),
// oss.CallbackResult(&callbackRespBytes),
)
if err != nil {
return err
}
return nil
}


@ -520,9 +520,6 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
byteData := make([]byte, sliceSize)
if i == count {
byteData = byteData[:lastPartSize]
@ -541,6 +538,9 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
partInfo := fmt.Sprintf("%d-%s", i, base64.StdEncoding.EncodeToString(md5Bytes))
threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
uploadUrls, err := y.GetMultiUploadUrls(ctx, isFamily, initMultiUpload.Data.UploadFileID, partInfo)
if err != nil {

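Several drivers in this compare (189 PC here, plus Baidu Netdisk, Baidu Photo, and MoPan below) get the same shape of fix: sem.Acquire moves from the producer loop into the worker goroutine, so a slot acquired for a task that never runs can no longer leak, and every Release pairs with exactly one successful Acquire. A standalone sketch of the corrected pattern (using the stock golang.org/x/sync packages; the project uses its own errgroup wrapper):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

func main() {
	sem := semaphore.NewWeighted(3) // at most 3 concurrent part uploads
	g, ctx := errgroup.WithContext(context.Background())
	for i := 0; i < 10; i++ {
		i := i
		g.Go(func() error {
			// Acquire inside the goroutine: if it fails (e.g. the group's
			// context was canceled), we return before the deferred Release
			// is registered, so nothing is released that was never held.
			if err := sem.Acquire(ctx, 1); err != nil {
				return err
			}
			defer sem.Release(1)
			fmt.Println("uploading part", i) // stand-in for the real upload
			return nil
		})
	}
	_ = g.Wait()
}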

@ -3,6 +3,7 @@ package alias
import (
"context"
"errors"
stdpath "path"
"strings"
"github.com/alist-org/alist/v3/internal/driver"
@ -126,8 +127,46 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, errs.ObjectNotFound
}
func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, parentDir, true)
if err == nil {
return fs.MakeDir(ctx, stdpath.Join(*reqPath, dirName))
}
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot make sub-dir")
}
return err
}
func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be moved")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be moved to")
}
if err != nil {
return err
}
return fs.Move(ctx, *srcPath, *dstPath)
}
func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
reqPath, err := d.getReqPath(ctx, srcObj)
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, srcObj, false)
if err == nil {
return fs.Rename(ctx, *reqPath, newName)
}
@ -137,8 +176,33 @@ func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) er
return err
}
func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be copied")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be copied to")
}
if err != nil {
return err
}
_, err = fs.Copy(ctx, *srcPath, *dstPath)
return err
}
func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
reqPath, err := d.getReqPath(ctx, obj)
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, obj, false)
if err == nil {
return fs.Remove(ctx, *reqPath)
}
@ -148,4 +212,110 @@ func (d *Alias) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
return fs.PutDirectly(ctx, *reqPath, s)
}
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be Put")
}
return err
}
func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error {
if !d.Writable {
return errs.PermissionDenied
}
reqPath, err := d.getReqPath(ctx, dstDir, true)
if err == nil {
return fs.PutURL(ctx, *reqPath, name, url)
}
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot offline download")
}
return err
}
func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
meta, err := d.getArchiveMeta(ctx, dst, sub, args)
if err == nil {
return meta, nil
}
}
return nil, errs.NotImplement
}
func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
l, err := d.listArchive(ctx, dst, sub, args)
if err == nil {
return l, nil
}
}
return nil, errs.NotImplement
}
func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// How can the alias stay compatible when one of its two underlying drivers supports driver-level extraction and the other does not?
// If the archive sits in a driver without driver extraction, GetArchiveMeta returns errs.NotImplement, the extraction URL gets the /ae prefix, and Extract is never called.
// If the archive sits in a driver with driver extraction, GetArchiveMeta returns a valid result, the extraction URL gets the /ad prefix, and Extract is called.
root, sub := d.getRootAndPath(obj.GetPath())
dsts, ok := d.pathMap[root]
if !ok {
return nil, errs.ObjectNotFound
}
for _, dst := range dsts {
link, err := d.extract(ctx, dst, sub, args)
if err == nil {
if !args.Redirect && len(link.URL) > 0 {
if d.DownloadConcurrency > 0 {
link.Concurrency = d.DownloadConcurrency
}
if d.DownloadPartSize > 0 {
link.PartSize = d.DownloadPartSize * utils.KB
}
}
return link, nil
}
}
return nil, errs.NotImplement
}
func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
if !d.Writable {
return errs.PermissionDenied
}
srcPath, err := d.getReqPath(ctx, srcObj, false)
if errs.IsNotImplement(err) {
return errors.New("same-name files cannot be decompressed")
}
if err != nil {
return err
}
dstPath, err := d.getReqPath(ctx, dstDir, true)
if errs.IsNotImplement(err) {
return errors.New("same-name dirs cannot be decompressed to")
}
if err != nil {
return err
}
_, err = fs.ArchiveDecompress(ctx, *srcPath, *dstPath, args)
return err
}
var _ driver.Driver = (*Alias)(nil)


@ -13,13 +13,14 @@ type Addition struct {
ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"`
DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"`
DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"`
Writable bool `json:"writable" type:"bool" default:"false"`
}
var config = driver.Config{
Name: "Alias",
LocalSort: true,
NoCache: true,
NoUpload: true,
NoUpload: false,
DefaultRoot: "/",
ProxyRangeOption: true,
}


@ -3,9 +3,11 @@ package alias
import (
"context"
"fmt"
"net/url"
stdpath "path"
"strings"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
@ -125,9 +127,9 @@ func (d *Alias) link(ctx context.Context, dst, sub string, args model.LinkArgs)
return link, err
}
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error) {
func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) (*string, error) {
root, sub := d.getRootAndPath(obj.GetPath())
if sub == "" {
if sub == "" && !isParent {
return nil, errs.NotSupport
}
dsts, ok := d.pathMap[root]
@ -156,3 +158,68 @@ func (d *Alias) getReqPath(ctx context.Context, obj model.Obj) (*string, error)
}
return reqPath, nil
}
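The new isParent flag relaxes the empty-sub-path guard: resolving a write destination may now land on the alias root itself, while resolving a source object still requires a concrete sub-path. A minimal standalone model of that guard (errs.NotSupport replaced by a plain error for the sketch):

package main

import (
	"errors"
	"fmt"
)

// Models the guard at the top of getReqPath: an empty sub-path (the alias
// root) is only acceptable when the object is the parent of a write.
func allow(sub string, isParent bool) error {
	if sub == "" && !isParent {
		return errors.New("not support")
	}
	return nil
}

func main() {
	fmt.Println(allow("", true))  // <nil>: MakeDir/Put into the alias root
	fmt.Println(allow("", false)) // error: Rename/Remove of the root itself
}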
func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
return op.GetArchiveMeta(ctx, storage, reqActualPath, model.ArchiveMetaArgs{
ArchiveArgs: args,
Refresh: true,
})
}
return nil, errs.NotImplement
}
func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
return op.ListArchive(ctx, storage, reqActualPath, model.ArchiveListArgs{
ArchiveInnerArgs: args,
Refresh: true,
})
}
return nil, errs.NotImplement
}
func (d *Alias) extract(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) (*model.Link, error) {
reqPath := stdpath.Join(dst, sub)
storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath)
if err != nil {
return nil, err
}
if _, ok := storage.(driver.ArchiveReader); ok {
if _, ok := storage.(*Alias); !ok && !args.Redirect {
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
_, err = fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true})
if err != nil {
return nil, err
}
if common.ShouldProxy(storage, stdpath.Base(sub)) {
link := &model.Link{
URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s",
common.GetApiUrl(args.HttpReq),
utils.EncodePath(reqPath, true),
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
sign.SignArchive(reqPath)),
}
if args.HttpReq != nil && d.ProxyRange {
link.RangeReadCloser = common.NoProxyRange
}
return link, nil
}
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
return nil, errs.NotImplement
}


@ -5,12 +5,14 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
@ -34,7 +36,7 @@ func (d *AListV3) GetAddition() driver.Additional {
func (d *AListV3) Init(ctx context.Context) error {
d.Addition.Address = strings.TrimSuffix(d.Addition.Address, "/")
var resp common.Resp[MeResp]
_, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
_, _, err := d.request("/me", http.MethodGet, func(req *resty.Request) {
req.SetResult(&resp)
})
if err != nil {
@ -48,15 +50,15 @@ func (d *AListV3) Init(ctx context.Context) error {
}
}
// re-get the user info
_, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
_, _, err = d.request("/me", http.MethodGet, func(req *resty.Request) {
req.SetResult(&resp)
})
if err != nil {
return err
}
if resp.Data.Role == model.GUEST {
url := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(url)
u := d.Address + "/api/public/settings"
res, err := base.RestyClient.R().Get(u)
if err != nil {
return err
}
@ -74,7 +76,7 @@ func (d *AListV3) Drop(ctx context.Context) error {
func (d *AListV3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var resp common.Resp[FsListResp]
_, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ListReq{
PageReq: model.PageReq{
Page: 1,
@ -116,7 +118,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
userAgent = base.UserAgent
}
}
_, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/get", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(FsGetReq{
Path: file.GetPath(),
Password: d.MetaPassword,
@ -131,7 +133,7 @@ func (d *AListV3) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
}
func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
_, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/mkdir", http.MethodPost, func(req *resty.Request) {
req.SetBody(MkdirOrLinkReq{
Path: path.Join(parentDir.GetPath(), dirName),
})
@ -140,7 +142,7 @@ func (d *AListV3) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri
}
func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/move", http.MethodPost, func(req *resty.Request) {
req.SetBody(MoveCopyReq{
SrcDir: path.Dir(srcObj.GetPath()),
DstDir: dstDir.GetPath(),
@ -151,7 +153,7 @@ func (d *AListV3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
_, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/rename", http.MethodPost, func(req *resty.Request) {
req.SetBody(RenameReq{
Path: srcObj.GetPath(),
Name: newName,
@ -161,7 +163,7 @@ func (d *AListV3) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
_, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/copy", http.MethodPost, func(req *resty.Request) {
req.SetBody(MoveCopyReq{
SrcDir: path.Dir(srcObj.GetPath()),
DstDir: dstDir.GetPath(),
@ -172,7 +174,7 @@ func (d *AListV3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
}
func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
_, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/fs/remove", http.MethodPost, func(req *resty.Request) {
req.SetBody(RemoveReq{
Dir: path.Dir(obj.GetPath()),
Names: []string{obj.GetName()},
@ -232,6 +234,127 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreame
return nil
}
func (d *AListV3) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveMetaResp]
_, code, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var tree []model.ObjTree
if resp.Data.Content != nil {
tree = make([]model.ObjTree, 0, len(resp.Data.Content))
for _, content := range resp.Data.Content {
tree = append(tree, &content)
}
}
return &model.ArchiveMetaInfo{
Comment: resp.Data.Comment,
Encrypted: resp.Data.Encrypted,
Tree: tree,
}, nil
}
func (d *AListV3) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotImplement
}
var resp common.Resp[ArchiveListResp]
_, code, err := d.request("/fs/archive/list", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveListReq{
ArchiveMetaReq: ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
},
PageReq: model.PageReq{
Page: 1,
PerPage: 0,
},
InnerPath: args.InnerPath,
})
})
if code == 202 {
return nil, errs.WrongArchivePassword
}
if err != nil {
return nil, err
}
var files []model.Obj
for _, f := range resp.Data.Content {
file := model.ObjThumb{
Object: model.Object{
Name: f.Name,
Modified: f.Modified,
Ctime: f.Created,
Size: f.Size,
IsFolder: f.IsDir,
HashInfo: utils.FromString(f.HashInfo),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumb},
}
files = append(files, &file)
}
return files, nil
}
func (d *AListV3) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
if !d.ForwardArchiveReq {
return nil, errs.NotSupport
}
var resp common.Resp[ArchiveMetaResp]
_, _, err := d.request("/fs/archive/meta", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(ArchiveMetaReq{
ArchivePass: args.Password,
Password: d.MetaPassword,
Path: obj.GetPath(),
Refresh: false,
})
})
if err != nil {
return nil, err
}
return &model.Link{
URL: fmt.Sprintf("%s?inner=%s&pass=%s&sign=%s",
resp.Data.RawURL,
utils.EncodePath(args.InnerPath, true),
url.QueryEscape(args.Password),
resp.Data.Sign),
}, nil
}
func (d *AListV3) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error {
if !d.ForwardArchiveReq {
return errs.NotImplement
}
dir, name := path.Split(srcObj.GetPath())
_, _, err := d.request("/fs/archive/decompress", http.MethodPost, func(req *resty.Request) {
req.SetBody(DecompressReq{
ArchivePass: args.Password,
CacheFull: args.CacheFull,
DstDir: dstDir.GetPath(),
InnerPath: args.InnerPath,
Name: []string{name},
PutIntoNewDir: args.PutIntoNewDir,
SrcDir: dir,
})
})
return err
}
//func (d *AList) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}


@ -7,12 +7,13 @@ import (
type Addition struct {
driver.RootPath
Address string `json:"url" required:"true"`
MetaPassword string `json:"meta_password"`
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
Address string `json:"url" required:"true"`
MetaPassword string `json:"meta_password"`
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
PassUAToUpsteam bool `json:"pass_ua_to_upsteam" default:"true"`
ForwardArchiveReq bool `json:"forward_archive_requests" default:"true"`
}
var config = driver.Config{


@ -4,6 +4,7 @@ import (
"time"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
)
type ListReq struct {
@ -81,3 +82,89 @@ type MeResp struct {
SsoId string `json:"sso_id"`
Otp bool `json:"otp"`
}
type ArchiveMetaReq struct {
ArchivePass string `json:"archive_pass"`
Password string `json:"password"`
Path string `json:"path"`
Refresh bool `json:"refresh"`
}
type TreeResp struct {
ObjResp
Children []TreeResp `json:"children"`
hashCache *utils.HashInfo
}
func (t *TreeResp) GetSize() int64 {
return t.Size
}
func (t *TreeResp) GetName() string {
return t.Name
}
func (t *TreeResp) ModTime() time.Time {
return t.Modified
}
func (t *TreeResp) CreateTime() time.Time {
return t.Created
}
func (t *TreeResp) IsDir() bool {
return t.ObjResp.IsDir
}
func (t *TreeResp) GetHash() utils.HashInfo {
return utils.FromString(t.HashInfo)
}
func (t *TreeResp) GetID() string {
return ""
}
func (t *TreeResp) GetPath() string {
return ""
}
func (t *TreeResp) GetChildren() []model.ObjTree {
ret := make([]model.ObjTree, 0, len(t.Children))
for _, child := range t.Children {
ret = append(ret, &child)
}
return ret
}
func (t *TreeResp) Thumb() string {
return t.ObjResp.Thumb
}
type ArchiveMetaResp struct {
Comment string `json:"comment"`
Encrypted bool `json:"encrypted"`
Content []TreeResp `json:"content"`
RawURL string `json:"raw_url"`
Sign string `json:"sign"`
}
type ArchiveListReq struct {
model.PageReq
ArchiveMetaReq
InnerPath string `json:"inner_path"`
}
type ArchiveListResp struct {
Content []ObjResp `json:"content"`
Total int64 `json:"total"`
}
type DecompressReq struct {
ArchivePass string `json:"archive_pass"`
CacheFull bool `json:"cache_full"`
DstDir string `json:"dst_dir"`
InnerPath string `json:"inner_path"`
Name []string `json:"name"`
PutIntoNewDir bool `json:"put_into_new_dir"`
SrcDir string `json:"src_dir"`
}


@ -17,7 +17,7 @@ func (d *AListV3) login() error {
return nil
}
var resp common.Resp[LoginResp]
_, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
_, _, err := d.request("/auth/login", http.MethodPost, func(req *resty.Request) {
req.SetResult(&resp).SetBody(base.Json{
"username": d.Username,
"password": d.Password,
@ -31,7 +31,7 @@ func (d *AListV3) login() error {
return nil
}
func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, error) {
func (d *AListV3) request(api, method string, callback base.ReqCallback, retry ...bool) ([]byte, int, error) {
url := d.Address + "/api" + api
req := base.RestyClient.R()
req.SetHeader("Authorization", d.Token)
@ -40,22 +40,26 @@ func (d *AListV3) request(api, method string, callback base.ReqCallback, retry .
}
res, err := req.Execute(method, url)
if err != nil {
return nil, err
code := 0
if res != nil {
code = res.StatusCode()
}
return nil, code, err
}
log.Debugf("[alist_v3] response body: %s", res.String())
if res.StatusCode() >= 400 {
return nil, fmt.Errorf("request failed, status: %s", res.Status())
return nil, res.StatusCode(), fmt.Errorf("request failed, status: %s", res.Status())
}
code := utils.Json.Get(res.Body(), "code").ToInt()
if code != 200 {
if (code == 401 || code == 403) && !utils.IsBool(retry...) {
err = d.login()
if err != nil {
return nil, err
return nil, code, err
}
return d.request(api, method, callback, true)
}
return nil, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
return nil, code, fmt.Errorf("request failed,code: %d, message: %s", code, utils.Json.Get(res.Body(), "message").ToString())
}
return res.Body(), nil
return res.Body(), 200, nil
}
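request now returns the status code alongside the body, and reports a best-effort code even when the transport or the login retry fails. That is what lets the archive endpoints above map 202 to errs.WrongArchivePassword. A standalone model of the new call shape (the stub response is illustrative only):

package main

import (
	"errors"
	"fmt"
)

// Models the three-value signature of (d *AListV3) request: body, status
// code, error. The code is meaningful even when err is non-nil.
func request() ([]byte, int, error) {
	return nil, 202, errors.New("request failed, status: 202 Accepted")
}

func main() {
	_, code, err := request()
	if code == 202 {
		fmt.Println("wrong archive password") // what GetArchiveMeta maps this to
		return
	}
	if err != nil {
		fmt.Println(err)
	}
}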


@ -22,6 +22,7 @@ import (
_ "github.com/alist-org/alist/v3/drivers/chaoxing"
_ "github.com/alist-org/alist/v3/drivers/cloudreve"
_ "github.com/alist-org/alist/v3/drivers/crypt"
_ "github.com/alist-org/alist/v3/drivers/doubao"
_ "github.com/alist-org/alist/v3/drivers/dropbox"
_ "github.com/alist-org/alist/v3/drivers/febbox"
_ "github.com/alist-org/alist/v3/drivers/ftp"


@ -20,6 +20,7 @@ import (
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/errgroup"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/avast/retry-go"
log "github.com/sirupsen/logrus"
)
@ -260,21 +261,24 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
}
}
// step.2 upload the parts
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread)
threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
retry.Attempts(1),
retry.Delay(time.Second),
retry.DelayType(retry.BackOffDelay))
sem := semaphore.NewWeighted(3)
for i, partseq := range precreateResp.BlockList {
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*sliceSize, sliceSize
if partseq+1 == count {
byteSize = lastBlockSize
}
threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
params := map[string]string{
"method": "upload",


@ -321,9 +321,6 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
i, partseq, offset, byteSize := i, partseq, int64(partseq)*DEFAULT, DEFAULT
if partseq+1 == count {
@ -331,6 +328,9 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
}
threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
uploadParams := map[string]string{
"method": "upload",


@ -1,13 +1,10 @@
package cloudreve
import (
"bytes"
"context"
"errors"
"io"
"net/http"
"path"
"strconv"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
@ -168,39 +165,13 @@ func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.File
case "remote": // 从机存储
err = d.upRemote(ctx, stream, u, up)
case "local": // 本机存储
var chunkSize = u.ChunkSize
var buf []byte
var chunk int
for {
var n int
buf = make([]byte, chunkSize)
n, err = io.ReadAtLeast(stream, buf, chunkSize)
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
if err == io.EOF {
return nil
}
return err
}
if n == 0 {
break
}
buf = buf[:n]
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetHeader("Content-Length", strconv.Itoa(n))
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewReader(buf)))
}, nil)
if err != nil {
break
}
chunk++
}
err = d.upLocal(ctx, stream, u, up)
default:
err = errs.NotImplement
}
if err != nil {
// delete the failed upload session
err = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
_ = d.request(http.MethodDelete, "/file/upload/"+u.SessionID, nil, nil)
return err
}
return nil


@ -27,17 +27,20 @@ import (
const loginPath = "/user/session"
func (d *Cloudreve) getUA() string {
if d.CustomUA != "" {
return d.CustomUA
}
return base.UserAgent
}
func (d *Cloudreve) request(method string, path string, callback base.ReqCallback, out interface{}) error {
u := d.Address + "/api/v3" + path
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.RestyClient.R()
req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "application/json, text/plain, */*",
"User-Agent": ua,
"User-Agent": d.getUA(),
})
var r Resp
@ -161,15 +164,11 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
if !d.Addition.EnableThumbAndFolderSize {
return model.Thumbnail{}, nil
}
ua := d.CustomUA
if ua == "" {
ua = base.UserAgent
}
req := base.NoRedirectClient.R()
req.SetHeaders(map[string]string{
"Cookie": "cloudreve-session=" + d.Cookie,
"Accept": "image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"User-Agent": ua,
"User-Agent": d.getUA(),
})
resp, err := req.Execute(http.MethodGet, d.Address+"/api/v3/file/thumb/"+file.Id)
if err != nil {
@ -180,6 +179,43 @@ func (d *Cloudreve) GetThumb(file Object) (model.Thumbnail, error) {
}, nil
}
func (d *Cloudreve) upLocal(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
var finish int64 = 0
var chunk int = 0
DEFAULT := int64(u.ChunkSize)
for finish < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
utils.Log.Debugf("[Cloudreve-Local] upload: %d", finish)
var byteSize = DEFAULT
left := stream.GetSize() - finish
if left < DEFAULT {
byteSize = left
}
byteData := make([]byte, byteSize)
n, err := io.ReadFull(stream, byteData)
utils.Log.Debug(err, n)
if err != nil {
return err
}
err = d.request(http.MethodPost, "/file/upload/"+u.SessionID+"/"+strconv.Itoa(chunk), func(req *resty.Request) {
req.SetHeader("Content-Type", "application/octet-stream")
req.SetContentLength(true)
req.SetHeader("Content-Length", strconv.FormatInt(byteSize, 10))
req.SetHeader("User-Agent", d.getUA())
req.SetBody(driver.NewLimitedUploadStream(ctx, bytes.NewBuffer(byteData)))
}, nil)
if err != nil {
break
}
finish += byteSize
up(float64(finish) * 100 / float64(stream.GetSize()))
chunk++
}
return nil
}
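The substantive change for the local policy (commit 44cc71d354) is SetContentLength(true): with a reader body, resty omits the Content-Length header by default, and Cloudreve's local-policy chunk endpoint needs it. A minimal sketch of the request setup (resty v2; the URL and session ID are placeholders):

package main

import (
	"bytes"

	"github.com/go-resty/resty/v2"
)

func main() {
	chunk := make([]byte, 1024) // one buffered chunk of known size
	_, _ = resty.New().R().
		SetContentLength(true). // force the Content-Length header for this body
		SetHeader("Content-Type", "application/octet-stream").
		SetBody(bytes.NewBuffer(chunk)).
		Post("http://localhost/api/v3/file/upload/SESSION_ID/0") // placeholders
}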
func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u UploadInfo, up driver.UpdateProgress) error {
uploadUrl := u.UploadURLs[0]
credential := u.Credential
@ -211,6 +247,7 @@ func (d *Cloudreve) upRemote(ctx context.Context, stream model.FileStreamer, u U
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Authorization", fmt.Sprint(credential))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {
@ -251,6 +288,7 @@ func (d *Cloudreve) upOneDrive(ctx context.Context, stream model.FileStreamer, u
req.ContentLength = byteSize
// req.Header.Set("Content-Length", strconv.Itoa(int(byteSize)))
req.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", finish, finish+byteSize-1, stream.GetSize()))
req.Header.Set("User-Agent", d.getUA())
finish += byteSize
res, err := base.HttpClient.Do(req)
if err != nil {

drivers/doubao/driver.go Normal file

@ -0,0 +1,174 @@
package doubao
import (
"context"
"errors"
"time"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
)
type Doubao struct {
model.Storage
Addition
}
func (d *Doubao) Config() driver.Config {
return config
}
func (d *Doubao) GetAddition() driver.Additional {
return &d.Addition
}
func (d *Doubao) Init(ctx context.Context) error {
// TODO login / refresh token
//op.MustSaveDriverStorage(d)
return nil
}
func (d *Doubao) Drop(ctx context.Context) error {
return nil
}
func (d *Doubao) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
var files []model.Obj
var r NodeInfoResp
_, err := d.request("/samantha/aispace/node_info", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_id": dir.GetID(),
"need_full_path": false,
})
}, &r)
if err != nil {
return nil, err
}
for _, child := range r.Data.Children {
files = append(files, &Object{
Object: model.Object{
ID: child.ID,
Path: child.ParentID,
Name: child.Name,
Size: int64(child.Size),
Modified: time.Unix(int64(child.UpdateTime), 0),
Ctime: time.Unix(int64(child.CreateTime), 0),
IsFolder: child.NodeType == 1,
},
Key: child.Key,
})
}
return files, nil
}
func (d *Doubao) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if u, ok := file.(*Object); ok {
var r GetFileUrlResp
_, err := d.request("/alice/message/get_file_url", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"uris": []string{u.Key},
"type": "file",
})
}, &r)
if err != nil {
return nil, err
}
return &model.Link{
URL: r.Data.FileUrls[0].MainURL,
}, nil
}
return nil, errors.New("can't convert obj to URL")
}
func (d *Doubao) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
var r UploadNodeResp
_, err := d.request("/samantha/aispace/upload_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{
"local_id": uuid.New().String(),
"name": dirName,
"parent_id": parentDir.GetID(),
"node_type": 1,
},
},
})
}, &r)
return err
}
func (d *Doubao) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
var r UploadNodeResp
_, err := d.request("/samantha/aispace/move_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_list": []base.Json{
{"id": srcObj.GetID()},
},
"current_parent_id": srcObj.GetPath(),
"target_parent_id": dstDir.GetID(),
})
}, &r)
return err
}
func (d *Doubao) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
var r BaseResp
_, err := d.request("/samantha/aispace/rename_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{
"node_id": srcObj.GetID(),
"node_name": newName,
})
}, &r)
return err
}
func (d *Doubao) Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error) {
// TODO copy obj, optional
return nil, errs.NotImplement
}
func (d *Doubao) Remove(ctx context.Context, obj model.Obj) error {
var r BaseResp
_, err := d.request("/samantha/aispace/delete_node", "POST", func(req *resty.Request) {
req.SetBody(base.Json{"node_list": []base.Json{{"id": obj.GetID()}}})
}, &r)
return err
}
func (d *Doubao) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// TODO upload file, optional
return nil, errs.NotImplement
}
func (d *Doubao) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
// TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Doubao) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
// TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Doubao) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
// TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
return nil, errs.NotImplement
}
func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
// TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
// a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
// return errs.NotImplement to use an internal archive tool
return nil, errs.NotImplement
}
//func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
// return nil, errs.NotSupport
//}
var _ driver.Driver = (*Doubao)(nil)

drivers/doubao/meta.go Normal file

@ -0,0 +1,34 @@
package doubao
import (
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/op"
)
type Addition struct {
// Usually one of two
// driver.RootPath
driver.RootID
// define other
Cookie string `json:"cookie" type:"text"`
}
var config = driver.Config{
Name: "Doubao",
LocalSort: true,
OnlyLocal: false,
OnlyProxy: false,
NoCache: false,
NoUpload: true,
NeedMs: false,
DefaultRoot: "0",
CheckStatus: false,
Alert: "",
NoOverwriteUpload: false,
}
func init() {
op.RegisterDriver(func() driver.Driver {
return &Doubao{}
})
}

drivers/doubao/types.go Normal file

@ -0,0 +1,64 @@
package doubao
import "github.com/alist-org/alist/v3/internal/model"
type BaseResp struct {
Code int `json:"code"`
Msg string `json:"msg"`
}
type NodeInfoResp struct {
BaseResp
Data struct {
NodeInfo NodeInfo `json:"node_info"`
Children []NodeInfo `json:"children"`
NextCursor string `json:"next_cursor"`
HasMore bool `json:"has_more"`
} `json:"data"`
}
type NodeInfo struct {
ID string `json:"id"`
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"` // 0: file, 1: folder
Size int `json:"size"`
Source int `json:"source"`
NameReviewStatus int `json:"name_review_status"`
ContentReviewStatus int `json:"content_review_status"`
RiskReviewStatus int `json:"risk_review_status"`
ConversationID string `json:"conversation_id"`
ParentID string `json:"parent_id"`
CreateTime int `json:"create_time"`
UpdateTime int `json:"update_time"`
}
type GetFileUrlResp struct {
BaseResp
Data struct {
FileUrls []struct {
URI string `json:"uri"`
MainURL string `json:"main_url"`
BackURL string `json:"back_url"`
} `json:"file_urls"`
} `json:"data"`
}
type UploadNodeResp struct {
BaseResp
Data struct {
NodeList []struct {
LocalID string `json:"local_id"`
ID string `json:"id"`
ParentID string `json:"parent_id"`
Name string `json:"name"`
Key string `json:"key"`
NodeType int `json:"node_type"` // 0: file, 1: folder
} `json:"node_list"`
} `json:"data"`
}
type Object struct {
model.Object
Key string
}

drivers/doubao/util.go Normal file

@ -0,0 +1,38 @@
package doubao
import (
"errors"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/pkg/utils"
log "github.com/sirupsen/logrus"
)
// do others that are not defined in the Driver interface
func (d *Doubao) request(path string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
url := "https://www.doubao.com" + path
req := base.RestyClient.R()
req.SetHeader("Cookie", d.Cookie)
if callback != nil {
callback(req)
}
var r BaseResp
req.SetResult(&r)
res, err := req.Execute(method, url)
log.Debugln(res.String())
if err != nil {
return nil, err
}
// the business status code takes precedence over the HTTP status code
if r.Code != 0 {
return res.Body(), errors.New(r.Msg)
}
if resp != nil {
err = utils.Json.Unmarshal(res.Body(), resp)
if err != nil {
return nil, err
}
}
return res.Body(), nil
}
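Because Doubao can answer HTTP 200 with a failure payload, the helper above checks the body's business code before anything else. A standalone model of that check, reusing the BaseResp shape from types.go (the sample payload is made up):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

type BaseResp struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
}

// check mirrors the helper above: a non-zero business code is an error
// regardless of the HTTP status.
func check(body []byte) error {
	var r BaseResp
	if err := json.Unmarshal(body, &r); err != nil {
		return err
	}
	if r.Code != 0 {
		return errors.New(r.Msg)
	}
	return nil
}

func main() {
	fmt.Println(check([]byte(`{"code":1001,"msg":"sample failure"}`))) // sample failure
}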


@ -4,13 +4,13 @@ import (
"context"
"fmt"
"net/url"
stdpath "path"
"path/filepath"
"strings"
shell "github.com/ipfs/go-ipfs-api"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
shell "github.com/ipfs/go-ipfs-api"
)
type IPFS struct {
@ -44,27 +44,32 @@ func (d *IPFS) Drop(ctx context.Context) error {
func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
path := dir.GetPath()
if path[len(path):] != "/" {
path += "/"
switch d.Mode {
case "ipfs":
path, _ = url.JoinPath("/ipfs", path)
case "ipns":
path, _ = url.JoinPath("/ipns", path)
case "mfs":
fileStat, err := d.sh.FilesStat(ctx, path)
if err != nil {
return nil, err
}
path, _ = url.JoinPath("/ipfs", fileStat.Hash)
default:
return nil, fmt.Errorf("mode error")
}
path_cid, err := d.sh.FilesStat(ctx, path)
if err != nil {
return nil, err
}
dirs, err := d.sh.List(path_cid.Hash)
dirs, err := d.sh.List(path)
if err != nil {
return nil, err
}
objlist := []model.Obj{}
for _, file := range dirs {
gateurl := *d.gateURL
gateurl.Path = "ipfs/" + file.Hash
gateurl := *d.gateURL.JoinPath("/ipfs/" + file.Hash)
gateurl.RawQuery = "filename=" + url.PathEscape(file.Name)
objlist = append(objlist, &model.ObjectURL{
Object: model.Object{ID: file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
Object: model.Object{ID: "/ipfs/" + file.Hash, Name: file.Name, Size: int64(file.Size), IsFolder: file.Type == 1},
Url: model.Url{Url: gateurl.String()},
})
}
@ -73,11 +78,15 @@ func (d *IPFS) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
}
func (d *IPFS) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
link := d.Gateway + "/ipfs/" + file.GetID() + "/?filename=" + url.PathEscape(file.GetName())
return &model.Link{URL: link}, nil
gateurl := d.gateURL.JoinPath(file.GetID())
gateurl.RawQuery = "filename=" + url.PathEscape(file.GetName())
return &model.Link{URL: gateurl.String()}, nil
}
func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
path := parentDir.GetPath()
if path[len(path):] != "/" {
path += "/"
@ -86,42 +95,48 @@ func (d *IPFS) MakeDir(ctx context.Context, parentDir model.Obj, dirName string)
}
func (d *IPFS) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
return d.sh.FilesMv(ctx, srcObj.GetPath(), dstDir.GetPath())
}
func (d *IPFS) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
newFileName := filepath.Dir(srcObj.GetPath()) + "/" + newName
return d.sh.FilesMv(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
}
func (d *IPFS) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
// TODO copy obj, optional
fmt.Println(srcObj.GetPath())
fmt.Println(dstDir.GetPath())
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
newFileName := dstDir.GetPath() + "/" + filepath.Base(srcObj.GetPath())
fmt.Println(newFileName)
return d.sh.FilesCp(ctx, srcObj.GetPath(), strings.ReplaceAll(newFileName, "\\", "/"))
}
func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
// TODO remove obj, optional
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
return d.sh.FilesRm(ctx, obj.GetPath(), true)
}
func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
// TODO upload file, optional
_, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
if d.Mode != "mfs" {
return fmt.Errorf("only write in mfs mode")
}
outHash, err := d.sh.Add(driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
}), ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
return err
}
func ToFiles(dstDir string) shell.AddOpts {
return func(rb *shell.RequestBuilder) error {
rb.Option("to-files", dstDir)
return nil
}))
if err != nil {
return err
}
err = d.sh.FilesCp(ctx, "/ipfs/"+outHash, dstDir.GetPath()+"/"+strings.ReplaceAll(s.GetName(), "\\", "/"))
return err
}
//func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {


@ -8,14 +8,16 @@ import (
type Addition struct {
// Usually one of two
driver.RootPath
Mode string `json:"mode" options:"ipfs,ipns,mfs" type:"select" required:"true"`
Endpoint string `json:"endpoint" default:"http://127.0.0.1:5001"`
Gateway string `json:"gateway" default:"https://ipfs.io"`
Gateway string `json:"gateway" default:"http://127.0.0.1:8080"`
}
var config = driver.Config{
Name: "IPFS API",
DefaultRoot: "/",
LocalSort: true,
OnlyProxy: false,
}
func init() {


@ -315,9 +315,6 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
if utils.IsCanceled(upCtx) {
break
}
if err = sem.Acquire(ctx, 1); err != nil {
break
}
i, part, byteSize := i, part, initUpdload.PartSize
if part.PartNumber == uploadPartData.PartTotal {
byteSize = initUpdload.LastPartSize
@ -325,6 +322,9 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
// step.4
threadG.Go(func(ctx context.Context) error {
if err = sem.Acquire(ctx, 1); err != nil {
return err
}
defer sem.Release(1)
reader := io.NewSectionReader(file, int64(part.PartNumber-1)*initUpdload.PartSize, byteSize)
req, err := part.NewRequest(ctx, driver.NewLimitedUploadStream(ctx, reader))


@ -316,7 +316,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
// if the file already exists in Quqi server, there is no need to actually upload it
if uploadInitResp.Data.Exist {
// the file name returned by Quqi does not include the extension name
nodeName, nodeExt := uploadInitResp.Data.NodeName, rawExt(stream.GetName())
nodeName, nodeExt := uploadInitResp.Data.NodeName, utils.Ext(stream.GetName())
if nodeExt != "" {
nodeName = nodeName + "." + nodeExt
}
@ -432,7 +432,7 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
return nil, err
}
// the file name returned by Quqi does not include the extension name
nodeName, nodeExt := uploadFinishResp.Data.NodeName, rawExt(stream.GetName())
nodeName, nodeExt := uploadFinishResp.Data.NodeName, utils.Ext(stream.GetName())
if nodeExt != "" {
nodeName = nodeName + "." + nodeExt
}


@ -9,7 +9,6 @@ import (
"io"
"net/http"
"net/url"
stdpath "path"
"strings"
"time"
@ -115,16 +114,6 @@ func (d *Quqi) checkLogin() bool {
return true
}
// rawExt keeps the extension's original letter case
func rawExt(name string) string {
ext := stdpath.Ext(name)
if strings.HasPrefix(ext, ".") {
ext = ext[1:]
}
return ext
}
// decryptKey derives the decryption key
func decryptKey(encodeKey string) []byte {
// strip illegal characters


@ -8,9 +8,7 @@ import (
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
@ -151,7 +149,7 @@ func (d *Vtencent) ApplyUploadUGC(signature string, stream model.FileStreamer) (
form := base.Json{
"signature": signature,
"videoName": stream.GetName(),
"videoType": strings.ReplaceAll(path.Ext(stream.GetName()), ".", ""),
"videoType": utils.Ext(stream.GetName()),
"videoSize": stream.GetSize(),
}
var resps RspApplyUploadUGC

go.mod

@ -85,7 +85,7 @@ require (
github.com/blevesearch/go-faiss v1.0.20 // indirect
github.com/blevesearch/zapx/v16 v16.1.5 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.0 // indirect
github.com/bodgit/sevenzip v1.6.0
github.com/bodgit/windows v1.0.1 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/charmbracelet/x/ansi v0.2.3 // indirect
@ -106,14 +106,14 @@ require (
github.com/kr/text v0.2.0 // indirect
github.com/matoous/go-nanoid/v2 v2.1.0 // indirect
github.com/microcosm-cc/bluemonday v1.0.27
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78
github.com/sorairolake/lzip-go v0.3.5 // indirect
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/xhofe/115-sdk-go v0.1.1
github.com/xhofe/115-sdk-go v0.1.4
github.com/yuin/goldmark v1.7.8
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
go4.org v0.0.0-20230225012048-214862532bf5
resty.dev/v3 v3.0.0-beta.2 // indirect
)

go.sum

@ -606,8 +606,8 @@ github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5 h1:jxZvjx8Ve5sOXo
github.com/winfsp/cgofuse v1.5.1-0.20230130140708-f87f5db493b5/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhofe/115-sdk-go v0.1.1 h1:eMQIuCyhWZHQApqdCIt7bTA3S5MYQnANeLJbWYSDv6A=
github.com/xhofe/115-sdk-go v0.1.1/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
github.com/xhofe/115-sdk-go v0.1.4 h1:erIWuWH+kZQOEHM+YZK8Y6sWQ2s/SFJIFh/WeCtjiiY=
github.com/xhofe/115-sdk-go v0.1.4/go.mod h1:MIdpe/4Kw4ODrPld7E11bANc4JsCuXcm5ZZBHSiOI0U=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25 h1:eDfebW/yfq9DtG9RO3KP7BT2dot2CvJGIvrB0NEoDXI=
github.com/xhofe/gsync v0.0.0-20230917091818-2111ceb38a25/go.mod h1:fH4oNm5F9NfI5dLi0oIMtsLNKQOirUDbEMCIBb/7SU0=
github.com/xhofe/tache v0.1.5 h1:ezDcgim7tj7KNMXliQsmf8BJQbaZtitfyQA9Nt+B4WM=


@ -3,5 +3,7 @@ package archive
import (
_ "github.com/alist-org/alist/v3/internal/archive/archives"
_ "github.com/alist-org/alist/v3/internal/archive/iso9660"
_ "github.com/alist-org/alist/v3/internal/archive/rardecode"
_ "github.com/alist-org/alist/v3/internal/archive/sevenzip"
_ "github.com/alist-org/alist/v3/internal/archive/zip"
)


@ -16,14 +16,18 @@ import (
type Archives struct {
}
func (*Archives) AcceptedExtensions() []string {
func (Archives) AcceptedExtensions() []string {
return []string{
".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z",
".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar",
}
}
func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
fsys, err := getFs(ss, args)
func (Archives) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{}
}
func (Archives) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
fsys, err := getFs(ss[0], args)
if err != nil {
return nil, err
}
@ -47,8 +51,8 @@ func (*Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (mod
}, nil
}
func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
fsys, err := getFs(ss, args.ArchiveArgs)
func (Archives) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
fsys, err := getFs(ss[0], args.ArchiveArgs)
if err != nil {
return nil, err
}
@ -69,8 +73,8 @@ func (*Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([
})
}
func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
fsys, err := getFs(ss, args.ArchiveArgs)
func (Archives) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
fsys, err := getFs(ss[0], args.ArchiveArgs)
if err != nil {
return nil, 0, err
}
@ -85,8 +89,8 @@ func (*Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs)
return file, stat.Size(), nil
}
func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
fsys, err := getFs(ss, args.ArchiveArgs)
func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
fsys, err := getFs(ss[0], args.ArchiveArgs)
if err != nil {
return err
}
@ -133,5 +137,5 @@ func (*Archives) Decompress(ss *stream.SeekableStream, outputPath string, args m
var _ tool.Tool = (*Archives)(nil)
func init() {
tool.RegisterTool(&Archives{})
tool.RegisterTool(Archives{})
}


@ -14,19 +14,23 @@ import (
type ISO9660 struct {
}
func (t *ISO9660) AcceptedExtensions() []string {
func (ISO9660) AcceptedExtensions() []string {
return []string{".iso"}
}
func (t *ISO9660) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
func (ISO9660) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{}
}
func (ISO9660) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
return &model.ArchiveMetaInfo{
Comment: "",
Encrypted: false,
}, nil
}
func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
img, err := getImage(ss)
func (ISO9660) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
img, err := getImage(ss[0])
if err != nil {
return nil, err
}
@ -48,8 +52,8 @@ func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (
return ret, nil
}
func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
img, err := getImage(ss)
func (ISO9660) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
img, err := getImage(ss[0])
if err != nil {
return nil, 0, err
}
@ -63,8 +67,8 @@ func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs
return io.NopCloser(obj.Reader()), obj.Size(), nil
}
func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
img, err := getImage(ss)
func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
img, err := getImage(ss[0])
if err != nil {
return err
}
@ -92,5 +96,5 @@ func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args
var _ tool.Tool = (*ISO9660)(nil)
func init() {
tool.RegisterTool(&ISO9660{})
tool.RegisterTool(ISO9660{})
}

View File

@ -0,0 +1,140 @@
package rardecode
import (
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/nwaples/rardecode/v2"
"io"
"os"
stdpath "path"
"strings"
)
type RarDecoder struct{}
func (RarDecoder) AcceptedExtensions() []string {
return []string{".rar"}
}
func (RarDecoder) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".part1.rar": {".part%d.rar", 2},
}
}
func (RarDecoder) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
l, err := list(ss, args.Password)
if err != nil {
return nil, err
}
_, tree := tool.GenerateMetaTreeFromFolderTraversal(l)
return &model.ArchiveMetaInfo{
Comment: "",
Encrypted: false,
Tree: tree,
}, nil
}
func (RarDecoder) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
func (RarDecoder) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
reader, err := getReader(ss, args.Password)
if err != nil {
return nil, 0, err
}
innerPath := strings.TrimPrefix(args.InnerPath, "/")
for {
var header *rardecode.FileHeader
header, err = reader.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, 0, err
}
if header.Name == innerPath {
if header.IsDir {
break
}
return io.NopCloser(reader), header.UnPackedSize, nil
}
}
return nil, 0, errs.ObjectNotFound
}
func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
reader, err := getReader(ss, args.Password)
if err != nil {
return err
}
if args.InnerPath == "/" {
for {
var header *rardecode.FileHeader
header, err = reader.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
name := header.Name
if header.IsDir {
name = name + "/"
}
err = decompress(reader, header, name, outputPath)
if err != nil {
return err
}
}
} else {
innerPath := strings.TrimPrefix(args.InnerPath, "/")
innerBase := stdpath.Base(innerPath)
createdBaseDir := false
for {
var header *rardecode.FileHeader
header, err = reader.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
name := header.Name
if header.IsDir {
name = name + "/"
}
if name == innerPath {
err = _decompress(reader, header, outputPath, up)
if err != nil {
return err
}
break
} else if strings.HasPrefix(name, innerPath+"/") {
targetPath := stdpath.Join(outputPath, innerBase)
if !createdBaseDir {
err = os.Mkdir(targetPath, 0700)
if err != nil {
return err
}
createdBaseDir = true
}
restPath := strings.TrimPrefix(name, innerPath+"/")
err = decompress(reader, header, restPath, targetPath)
if err != nil {
return err
}
}
}
}
return nil
}
var _ tool.Tool = (*RarDecoder)(nil)
func init() {
tool.RegisterTool(RarDecoder{})
}

View File

@ -0,0 +1,225 @@
package rardecode
import (
"fmt"
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/nwaples/rardecode/v2"
"io"
"io/fs"
"os"
stdpath "path"
"sort"
"strings"
"time"
)
type VolumeFile struct {
stream.SStreamReadAtSeeker
name string
}
func (v *VolumeFile) Name() string {
return v.name
}
func (v *VolumeFile) Size() int64 {
return v.SStreamReadAtSeeker.GetRawStream().GetSize()
}
func (v *VolumeFile) Mode() fs.FileMode {
return 0644
}
func (v *VolumeFile) ModTime() time.Time {
return v.SStreamReadAtSeeker.GetRawStream().ModTime()
}
func (v *VolumeFile) IsDir() bool {
return false
}
func (v *VolumeFile) Sys() any {
return nil
}
func (v *VolumeFile) Stat() (fs.FileInfo, error) {
return v, nil
}
func (v *VolumeFile) Close() error {
return nil
}
type VolumeFs struct {
parts map[string]*VolumeFile
}
func (v *VolumeFs) Open(name string) (fs.File, error) {
file, ok := v.parts[name]
if !ok {
return nil, fs.ErrNotExist
}
return file, nil
}
func makeOpts(ss []*stream.SeekableStream) (string, rardecode.Option, error) {
if len(ss) == 1 {
reader, err := stream.NewReadAtSeeker(ss[0], 0)
if err != nil {
return "", nil, err
}
fileName := "file.rar"
fsys := &VolumeFs{parts: map[string]*VolumeFile{
fileName: {SStreamReadAtSeeker: reader, name: fileName},
}}
return fileName, rardecode.FileSystem(fsys), nil
} else {
parts := make(map[string]*VolumeFile, len(ss))
for i, s := range ss {
reader, err := stream.NewReadAtSeeker(s, 0)
if err != nil {
return "", nil, err
}
fileName := fmt.Sprintf("file.part%d.rar", i+1)
parts[fileName] = &VolumeFile{SStreamReadAtSeeker: reader, name: fileName}
}
return "file.part1.rar", rardecode.FileSystem(&VolumeFs{parts: parts}), nil
}
}
type WrapReader struct {
files []*rardecode.File
}
func (r *WrapReader) Files() []tool.SubFile {
ret := make([]tool.SubFile, 0, len(r.files))
for _, f := range r.files {
ret = append(ret, &WrapFile{File: f})
}
return ret
}
type WrapFile struct {
*rardecode.File
}
func (f *WrapFile) Name() string {
if f.File.IsDir {
return f.File.Name + "/"
}
return f.File.Name
}
func (f *WrapFile) FileInfo() fs.FileInfo {
return &WrapFileInfo{File: f.File}
}
type WrapFileInfo struct {
*rardecode.File
}
func (f *WrapFileInfo) Name() string {
return stdpath.Base(f.File.Name)
}
func (f *WrapFileInfo) Size() int64 {
return f.File.UnPackedSize
}
func (f *WrapFileInfo) ModTime() time.Time {
return f.File.ModificationTime
}
func (f *WrapFileInfo) IsDir() bool {
return f.File.IsDir
}
func (f *WrapFileInfo) Sys() any {
return nil
}
func list(ss []*stream.SeekableStream, password string) (*WrapReader, error) {
fileName, fsOpt, err := makeOpts(ss)
if err != nil {
return nil, err
}
opts := []rardecode.Option{fsOpt}
if password != "" {
opts = append(opts, rardecode.Password(password))
}
files, err := rardecode.List(fileName, opts...)
if err != nil {
return nil, filterPassword(err)
}
// rardecode does not guarantee that parent directories precede their
// children in the listing. A parent path is always shorter than any of its
// child paths, so sorting by name length puts parents first.
sort.Slice(files, func(i, j int) bool {
return len(files[i].Name) < len(files[j].Name)
})
return &WrapReader{files: files}, nil
}
func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, error) {
fileName, fsOpt, err := makeOpts(ss)
if err != nil {
return nil, err
}
opts := []rardecode.Option{fsOpt}
if password != "" {
opts = append(opts, rardecode.Password(password))
}
rc, err := rardecode.OpenReader(fileName, opts...)
if err != nil {
return nil, filterPassword(err)
}
ss[0].Closers.Add(rc)
return &rc.Reader, nil
}
func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error {
targetPath := outputPath
dir, base := stdpath.Split(filePath)
if dir != "" {
targetPath = stdpath.Join(targetPath, dir)
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
}
}
if base != "" {
err := _decompress(reader, header, targetPath, func(_ float64) {})
if err != nil {
return err
}
}
return nil
}
func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error {
f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
defer func() { _ = f.Close() }()
_, err = io.Copy(f, &stream.ReaderUpdatingProgress{
Reader: &stream.SimpleReaderWithSize{
Reader: reader,
Size: header.UnPackedSize,
},
UpdateProgress: up,
})
if err != nil {
return err
}
return nil
}
func filterPassword(err error) error {
if err != nil && strings.Contains(err.Error(), "password") {
return errs.WrongArchivePassword
}
return err
}

View File

@ -0,0 +1,72 @@
package sevenzip
import (
"io"
"strings"
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
)
type SevenZip struct{}
func (SevenZip) AcceptedExtensions() []string {
return []string{".7z"}
}
func (SevenZip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".7z.001": {".7z.%.3d", 2},
}
}
func (SevenZip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
reader, err := getReader(ss, args.Password)
if err != nil {
return nil, err
}
_, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: reader})
return &model.ArchiveMetaInfo{
Comment: "",
Encrypted: args.Password != "",
Tree: tree,
}, nil
}
func (SevenZip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
return nil, errs.NotSupport
}
func (SevenZip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
reader, err := getReader(ss, args.Password)
if err != nil {
return nil, 0, err
}
innerPath := strings.TrimPrefix(args.InnerPath, "/")
for _, file := range reader.File {
if file.Name == innerPath {
r, e := file.Open()
if e != nil {
return nil, 0, e
}
return r, file.FileInfo().Size(), nil
}
}
return nil, 0, errs.ObjectNotFound
}
func (SevenZip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
reader, err := getReader(ss, args.Password)
if err != nil {
return err
}
return tool.DecompressFromFolderTraversal(&WrapReader{Reader: reader}, outputPath, args, up)
}
var _ tool.Tool = (*SevenZip)(nil)
func init() {
tool.RegisterTool(SevenZip{})
}

View File

@ -0,0 +1,61 @@
package sevenzip
import (
"errors"
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/bodgit/sevenzip"
"io"
"io/fs"
)
type WrapReader struct {
Reader *sevenzip.Reader
}
func (r *WrapReader) Files() []tool.SubFile {
ret := make([]tool.SubFile, 0, len(r.Reader.File))
for _, f := range r.Reader.File {
ret = append(ret, &WrapFile{f: f})
}
return ret
}
type WrapFile struct {
f *sevenzip.File
}
func (f *WrapFile) Name() string {
return f.f.Name
}
func (f *WrapFile) FileInfo() fs.FileInfo {
return f.f.FileInfo()
}
func (f *WrapFile) Open() (io.ReadCloser, error) {
return f.f.Open()
}
func getReader(ss []*stream.SeekableStream, password string) (*sevenzip.Reader, error) {
readerAt, err := stream.NewMultiReaderAt(ss)
if err != nil {
return nil, err
}
sr, err := sevenzip.NewReaderWithPassword(readerAt, readerAt.Size(), password)
if err != nil {
return nil, filterPassword(err)
}
return sr, nil
}
func filterPassword(err error) error {
if err != nil {
var e *sevenzip.ReadError
if errors.As(err, &e) && e.Encrypted {
return errs.WrongArchivePassword
}
}
return err
}

View File

@ -6,10 +6,16 @@ import (
"io"
)
type MultipartExtension struct {
PartFileFormat string
SecondPartIndex int
}
type Tool interface {
AcceptedExtensions() []string
GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
AcceptedMultipartExtensions() map[string]MultipartExtension
GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
}
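// How these two fields are consumed: PartFileFormat is an fmt.Sprintf pattern
// for the suffix of later volumes, and SecondPartIndex is the first index to
// substitute. An illustrative helper (hypothetical name; assumes "fmt" is imported):
func nthVolumeSuffix(ext MultipartExtension, index int) string {
// {".part%d.rar", 2} yields ".part2.rar", ".part3.rar", ... for index 2, 3, ...
return fmt.Sprintf(ext.PartFileFormat, index)
}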

View File

@ -0,0 +1,201 @@
package tool
import (
"io"
"io/fs"
"os"
stdpath "path"
"strings"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
)
type SubFile interface {
Name() string
FileInfo() fs.FileInfo
Open() (io.ReadCloser, error)
}
type CanEncryptSubFile interface {
IsEncrypted() bool
SetPassword(password string)
}
type ArchiveReader interface {
Files() []SubFile
}
func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree) {
encrypted := false
dirMap := make(map[string]*model.ObjectTree)
dirMap["."] = &model.ObjectTree{}
for _, file := range r.Files() {
if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
encrypted = true
}
name := strings.TrimPrefix(file.Name(), "/")
var dir string
var dirObj *model.ObjectTree
isNewFolder := false
if !file.FileInfo().IsDir() {
// first, add the file to its containing folder
dir = stdpath.Dir(name)
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = true
dirObj = &model.ObjectTree{}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Modified = file.FileInfo().ModTime()
dirMap[dir] = dirObj
}
dirObj.Children = append(
dirObj.Children, &model.ObjectTree{
Object: *MakeModelObj(file.FileInfo()),
},
)
} else {
dir = strings.TrimSuffix(name, "/")
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = true
dirObj = &model.ObjectTree{}
dirMap[dir] = dirObj
}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Modified = file.FileInfo().ModTime()
dirObj.Children = make([]model.ObjTree, 0)
}
if isNewFolder {
// add the folder to its parent folder
dir = stdpath.Dir(dir)
pDirObj := dirMap[dir]
if pDirObj != nil {
pDirObj.Children = append(pDirObj.Children, dirObj)
continue
}
for {
// handle archives that record only file paths, without folder entries
pDirObj = &model.ObjectTree{}
pDirObj.IsFolder = true
pDirObj.Name = stdpath.Base(dir)
pDirObj.Modified = file.FileInfo().ModTime()
dirMap[dir] = pDirObj
pDirObj.Children = append(pDirObj.Children, dirObj)
dir = stdpath.Dir(dir)
if dirMap[dir] != nil {
break
}
dirObj = pDirObj
}
}
}
return encrypted, dirMap["."].GetChildren()
}
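// Illustration: given archive entries "docs/a.txt", "docs/img/b.png" and
// "readme.md", the traversal above synthesizes the missing "docs" and
// "docs/img" folder nodes and returns the tree
//   docs/
//     a.txt
//     img/
//       b.png
//   readme.md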
func MakeModelObj(file os.FileInfo) *model.Object {
return &model.Object{
Name: file.Name(),
Size: file.Size(),
Modified: file.ModTime(),
IsFolder: file.IsDir(),
}
}
type WrapFileInfo struct {
model.Obj
}
func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
var err error
files := r.Files()
if args.InnerPath == "/" {
for i, file := range files {
name := file.Name()
err = decompress(file, name, outputPath, args.Password)
if err != nil {
return err
}
up(float64(i+1) * 100.0 / float64(len(files)))
}
} else {
innerPath := strings.TrimPrefix(args.InnerPath, "/")
innerBase := stdpath.Base(innerPath)
createdBaseDir := false
for _, file := range files {
name := file.Name()
if name == innerPath {
err = _decompress(file, outputPath, args.Password, up)
if err != nil {
return err
}
break
} else if strings.HasPrefix(name, innerPath+"/") {
targetPath := stdpath.Join(outputPath, innerBase)
if !createdBaseDir {
err = os.Mkdir(targetPath, 0700)
if err != nil {
return err
}
createdBaseDir = true
}
restPath := strings.TrimPrefix(name, innerPath+"/")
err = decompress(file, restPath, targetPath, args.Password)
if err != nil {
return err
}
}
}
}
return nil
}
func decompress(file SubFile, filePath, outputPath, password string) error {
targetPath := outputPath
dir, base := stdpath.Split(filePath)
if dir != "" {
targetPath = stdpath.Join(targetPath, dir)
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
}
}
if base != "" {
err := _decompress(file, targetPath, password, func(_ float64) {})
if err != nil {
return err
}
}
return nil
}
func _decompress(file SubFile, targetPath, password string, up model.UpdateProgress) error {
if encrypt, ok := file.(CanEncryptSubFile); ok && encrypt.IsEncrypted() {
encrypt.SetPassword(password)
}
rc, err := file.Open()
if err != nil {
return err
}
defer func() { _ = rc.Close() }()
f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
defer func() { _ = f.Close() }()
_, err = io.Copy(f, &stream.ReaderUpdatingProgress{
Reader: &stream.SimpleReaderWithSize{
Reader: rc,
Size: file.FileInfo().Size(),
},
UpdateProgress: up,
})
if err != nil {
return err
}
return nil
}

View File

@ -5,19 +5,28 @@ import (
)
var (
Tools = make(map[string]Tool)
Tools = make(map[string]Tool)
MultipartExtensions = make(map[string]MultipartExtension)
)
func RegisterTool(tool Tool) {
for _, ext := range tool.AcceptedExtensions() {
Tools[ext] = tool
}
for mainFile, ext := range tool.AcceptedMultipartExtensions() {
MultipartExtensions[mainFile] = ext
Tools[mainFile] = tool
}
}
func GetArchiveTool(ext string) (Tool, error) {
func GetArchiveTool(ext string) (*MultipartExtension, Tool, error) {
t, ok := Tools[ext]
if !ok {
return nil, errs.UnknownArchiveFormat
return nil, nil, errs.UnknownArchiveFormat
}
return t, nil
partExt, ok := MultipartExtensions[ext]
if !ok {
return nil, t, nil
}
return &partExt, t, nil
}
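// Sketch of a caller consuming the new triple (hypothetical function; the
// real consumer is op.GetArchiveToolAndStream):
func lookupExample() {
partExt, t, err := GetArchiveTool(".part1.rar")
if err != nil {
return // errs.UnknownArchiveFormat
}
if partExt != nil {
// multipart archive: later volumes follow partExt.PartFileFormat,
// starting at partExt.SecondPartIndex
}
_ = t
}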

View File

@ -2,8 +2,13 @@ package zip
import (
"bytes"
"io"
"io/fs"
stdpath "path"
"strings"
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/saintfish/chardet"
"github.com/yeka/zip"
@ -16,65 +21,62 @@ import (
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/encoding/unicode/utf32"
"golang.org/x/text/transform"
"io"
"os"
stdpath "path"
"strings"
)
func toModelObj(file os.FileInfo) *model.Object {
return &model.Object{
Name: decodeName(file.Name()),
Size: file.Size(),
Modified: file.ModTime(),
IsFolder: file.IsDir(),
}
type WrapReader struct {
Reader *zip.Reader
}
func decompress(file *zip.File, filePath, outputPath, password string) error {
targetPath := outputPath
dir, base := stdpath.Split(filePath)
if dir != "" {
targetPath = stdpath.Join(targetPath, dir)
err := os.MkdirAll(targetPath, 0700)
if err != nil {
return err
}
func (r *WrapReader) Files() []tool.SubFile {
ret := make([]tool.SubFile, 0, len(r.Reader.File))
for _, f := range r.Reader.File {
ret = append(ret, &WrapFile{f: f})
}
if base != "" {
err := _decompress(file, targetPath, password, func(_ float64) {})
if err != nil {
return err
}
}
return nil
return ret
}
func _decompress(file *zip.File, targetPath, password string, up model.UpdateProgress) error {
if file.IsEncrypted() {
file.SetPassword(password)
type WrapFileInfo struct {
fs.FileInfo
}
func (f *WrapFileInfo) Name() string {
return decodeName(f.FileInfo.Name())
}
type WrapFile struct {
f *zip.File
}
func (f *WrapFile) Name() string {
return decodeName(f.f.Name)
}
func (f *WrapFile) FileInfo() fs.FileInfo {
return &WrapFileInfo{FileInfo: f.f.FileInfo()}
}
func (f *WrapFile) Open() (io.ReadCloser, error) {
return f.f.Open()
}
func (f *WrapFile) IsEncrypted() bool {
return f.f.IsEncrypted()
}
func (f *WrapFile) SetPassword(password string) {
f.f.SetPassword(password)
}
func getReader(ss []*stream.SeekableStream) (*zip.Reader, error) {
if len(ss) > 1 && stdpath.Ext(ss[1].GetName()) == ".z01" {
// FIXME: Incorrect parsing method for standard multipart zip format
ss = append(ss[1:], ss[0])
}
rc, err := file.Open()
reader, err := stream.NewMultiReaderAt(ss)
if err != nil {
return err
return nil, err
}
defer rc.Close()
f, err := os.OpenFile(stdpath.Join(targetPath, decodeName(file.FileInfo().Name())), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(f, &stream.ReaderUpdatingProgress{
Reader: &stream.SimpleReaderWithSize{
Reader: rc,
Size: file.FileInfo().Size(),
},
UpdateProgress: up,
})
if err != nil {
return err
}
return nil
return zip.NewReader(reader, reader.Size())
}
func filterPassword(err error) error {

View File

@ -2,7 +2,6 @@ package zip
import (
"io"
"os"
stdpath "path"
"strings"
@ -10,106 +9,37 @@ import (
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/yeka/zip"
)
type Zip struct {
}
func (*Zip) AcceptedExtensions() []string {
return []string{".zip"}
func (Zip) AcceptedExtensions() []string {
return []string{}
}
func (*Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
reader, err := stream.NewReadAtSeeker(ss, 0)
func (Zip) AcceptedMultipartExtensions() map[string]tool.MultipartExtension {
return map[string]tool.MultipartExtension{
".zip": {".z%.2d", 1},
".zip.001": {".zip.%.3d", 2},
}
}
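// Illustration of the names these patterns expand to via fmt.Sprintf
// (fmt.Sprintf(".z%.2d", 1) == ".z01"; fmt.Sprintf(".zip.%.3d", 2) == ".zip.002"):
//   "a.zip"     -> "a.z01", "a.z02", ...         (SecondPartIndex 1)
//   "a.zip.001" -> "a.zip.002", "a.zip.003", ... (SecondPartIndex 2)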
func (Zip) GetMeta(ss []*stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, err
}
zipReader, err := zip.NewReader(reader, ss.GetSize())
if err != nil {
return nil, err
}
encrypted := false
dirMap := make(map[string]*model.ObjectTree)
dirMap["."] = &model.ObjectTree{}
for _, file := range zipReader.File {
if file.IsEncrypted() {
encrypted = true
}
name := strings.TrimPrefix(decodeName(file.Name), "/")
var dir string
var dirObj *model.ObjectTree
isNewFolder := false
if !file.FileInfo().IsDir() {
// first, add the file to its containing folder
dir = stdpath.Dir(name)
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = true
dirObj = &model.ObjectTree{}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Modified = file.ModTime()
dirMap[dir] = dirObj
}
dirObj.Children = append(
dirObj.Children, &model.ObjectTree{
Object: *toModelObj(file.FileInfo()),
},
)
} else {
dir = strings.TrimSuffix(name, "/")
dirObj = dirMap[dir]
if dirObj == nil {
isNewFolder = true
dirObj = &model.ObjectTree{}
dirMap[dir] = dirObj
}
dirObj.IsFolder = true
dirObj.Name = stdpath.Base(dir)
dirObj.Modified = file.ModTime()
dirObj.Children = make([]model.ObjTree, 0)
}
if isNewFolder {
// add the folder to its parent folder
dir = stdpath.Dir(dir)
pDirObj := dirMap[dir]
if pDirObj != nil {
pDirObj.Children = append(pDirObj.Children, dirObj)
continue
}
for {
// handle archives that record only file paths, without folder entries
pDirObj = &model.ObjectTree{}
pDirObj.IsFolder = true
pDirObj.Name = stdpath.Base(dir)
pDirObj.Modified = file.ModTime()
dirMap[dir] = pDirObj
pDirObj.Children = append(pDirObj.Children, dirObj)
dir = stdpath.Dir(dir)
if dirMap[dir] != nil {
break
}
dirObj = pDirObj
}
}
}
encrypted, tree := tool.GenerateMetaTreeFromFolderTraversal(&WrapReader{Reader: zipReader})
return &model.ArchiveMetaInfo{
Comment: zipReader.Comment,
Encrypted: encrypted,
Tree: dirMap["."].GetChildren(),
Tree: tree,
}, nil
}
func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
reader, err := stream.NewReadAtSeeker(ss, 0)
if err != nil {
return nil, err
}
zipReader, err := zip.NewReader(reader, ss.GetSize())
func (Zip) List(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, err
}
@ -134,13 +64,13 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
if dir == nil && len(strs) == 2 {
dir = &model.Object{
Name: strs[0],
Modified: ss.ModTime(),
Modified: ss[0].ModTime(),
IsFolder: true,
}
}
continue
}
ret = append(ret, toModelObj(file.FileInfo()))
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{FileInfo: file.FileInfo()}))
}
if len(ret) == 0 && dir != nil {
ret = append(ret, dir)
@ -157,7 +87,7 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
continue
}
exist = true
ret = append(ret, toModelObj(file.FileInfo()))
ret = append(ret, tool.MakeModelObj(&WrapFileInfo{file.FileInfo()}))
}
if !exist {
return nil, errs.ObjectNotFound
@ -166,12 +96,8 @@ func (*Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]mode
}
}
func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
reader, err := stream.NewReadAtSeeker(ss, 0)
if err != nil {
return nil, 0, err
}
zipReader, err := zip.NewReader(reader, ss.GetSize())
func (Zip) Extract(ss []*stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
zipReader, err := getReader(ss)
if err != nil {
return nil, 0, err
}
@ -191,58 +117,16 @@ func (*Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.
return nil, 0, errs.ObjectNotFound
}
func (*Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
reader, err := stream.NewReadAtSeeker(ss, 0)
func (Zip) Decompress(ss []*stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
zipReader, err := getReader(ss)
if err != nil {
return err
}
zipReader, err := zip.NewReader(reader, ss.GetSize())
if err != nil {
return err
}
if args.InnerPath == "/" {
for i, file := range zipReader.File {
name := decodeName(file.Name)
err = decompress(file, name, outputPath, args.Password)
if err != nil {
return err
}
up(float64(i+1) * 100.0 / float64(len(zipReader.File)))
}
} else {
innerPath := strings.TrimPrefix(args.InnerPath, "/")
innerBase := stdpath.Base(innerPath)
createdBaseDir := false
for _, file := range zipReader.File {
name := decodeName(file.Name)
if name == innerPath {
err = _decompress(file, outputPath, args.Password, up)
if err != nil {
return err
}
break
} else if strings.HasPrefix(name, innerPath+"/") {
targetPath := stdpath.Join(outputPath, innerBase)
if !createdBaseDir {
err = os.Mkdir(targetPath, 0700)
if err != nil {
return err
}
createdBaseDir = true
}
restPath := strings.TrimPrefix(name, innerPath+"/")
err = decompress(file, restPath, targetPath, args.Password)
if err != nil {
return err
}
}
}
}
return nil
return tool.DecompressFromFolderTraversal(&WrapReader{Reader: zipReader}, outputPath, args, up)
}
var _ tool.Tool = (*Zip)(nil)
func init() {
tool.RegisterTool(&Zip{})
tool.RegisterTool(Zip{})
}

View File

@ -68,8 +68,13 @@ func InitDB() {
{
dsn := database.DSN
if dsn == "" {
dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode)
if database.Password != "" {
dsn = fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
database.Host, database.User, database.Password, database.Name, database.Port, database.SSLMode)
} else {
dsn = fmt.Sprintf("host=%s user=%s dbname=%s port=%d sslmode=%s TimeZone=Asia/Shanghai",
database.Host, database.User, database.Name, database.Port, database.SSLMode)
}
}
dB, err = gorm.Open(postgres.Open(dsn), gormConfig)
}
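// The two DSN shapes this produces (illustrative values): with a password,
//   host=127.0.0.1 user=alist password=secret dbname=alist port=5432 sslmode=disable TimeZone=Asia/Shanghai
// and for trust/peer authentication, where an empty password field in the
// DSN can reportedly break the connection (the issue this change closes),
//   host=127.0.0.1 user=alist dbname=alist port=5432 sslmode=disable TimeZone=Asia/Shanghai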

View File

@ -79,13 +79,13 @@ type Remove interface {
type Put interface {
// Put a file (provided as a FileStreamer) into the driver
// Besides the most basic upload functionality, the following features also need to be implemented:
// 1. Canceling (when `<-ctx.Done()` returns), by the following methods:
// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
// (1) Use request methods that carry context, such as the following:
// a. http.NewRequestWithContext
// b. resty.Request.SetContext
// c. s3manager.Uploader.UploadWithContext
// d. utils.CopyWithCtx
// (2) Use a `driver.ReaderWithCtx` or a `driver.NewLimitedUploadStream`
// (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
// (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
// this is typically applicable to chunked uploads.
// 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:

View File

@ -4,17 +4,6 @@ import (
"context"
stderrors "errors"
"fmt"
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/internal/task"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/xhofe/tache"
"io"
"math/rand"
"mime"
@ -25,6 +14,17 @@ import (
"strconv"
"strings"
"time"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/internal/task"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/xhofe/tache"
)
type ArchiveDownloadTask struct {
@ -37,7 +37,6 @@ type ArchiveDownloadTask struct {
dstStorage driver.Driver
SrcStorageMp string
DstStorageMp string
Tool tool.Tool
}
func (t *ArchiveDownloadTask) GetName() string {
@ -67,33 +66,39 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
if t.srcStorage == nil {
t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
}
l, srcObj, err := op.Link(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
srcObj, tool, ss, err := op.GetArchiveToolAndStream(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
Header: http.Header{},
})
if err != nil {
return nil, err
}
fs := stream.FileStream{
Obj: srcObj,
Ctx: t.Ctx(),
}
ss, err := stream.NewSeekableStream(fs, l)
if err != nil {
return nil, err
}
defer func() {
if err := ss.Close(); err != nil {
log.Errorf("failed to close file streamer, %v", err)
var e error
for _, s := range ss {
e = stderrors.Join(e, s.Close())
}
if e != nil {
log.Errorf("failed to close file streamer, %v", e)
}
}()
var decompressUp model.UpdateProgress
if t.CacheFull {
t.SetTotalBytes(srcObj.GetSize())
t.status = "getting src object"
_, err = ss.CacheFullInTempFileAndUpdateProgress(t.SetProgress)
if err != nil {
return nil, err
var total, cur int64 = 0, 0
for _, s := range ss {
total += s.GetSize()
}
t.SetTotalBytes(total)
t.status = "getting src object"
for _, s := range ss {
_, err = s.CacheFullInTempFileAndUpdateProgress(func(p float64) {
t.SetProgress((float64(cur) + float64(s.GetSize())*p/100.0) * 100.0 / float64(total))
})
cur += s.GetSize()
if err != nil {
return nil, err
}
}
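// Worked example of the weighting above: with volumes of 30 and 70 bytes,
// when the second volume reports p=50 the overall progress is
// (30 + 70*0.5) * 100 / 100 = 65.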
t.SetProgress(100.0)
decompressUp = func(_ float64) {}
} else {
decompressUp = t.SetProgress
@ -103,7 +108,7 @@ func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadT
if err != nil {
return nil, err
}
err = t.Tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
err = tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
if err != nil {
return nil, err
}
@ -344,11 +349,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
return nil, err
}
}
ext := stdpath.Ext(srcObjActualPath)
t, err := tool.GetArchiveTool(ext)
if err != nil {
return nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
}
taskCreator, _ := ctx.Value("user").(*model.User)
tsk := &ArchiveDownloadTask{
TaskExtension: task.TaskExtension{
@ -361,7 +361,6 @@ func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args
DstDirPath: dstDirActualPath,
SrcStorageMp: srcStorage.GetStorage().MountPath,
DstStorageMp: dstStorage.GetStorage().MountPath,
Tool: t,
}
if ctx.Value(conf.NoTaskKey) != nil {
uploadTask, err := tsk.RunWithoutPushUploadTask()

View File

@ -2,12 +2,15 @@ package fs
import (
"context"
log "github.com/sirupsen/logrus"
"io"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/task"
log "github.com/sirupsen/logrus"
"io"
"github.com/pkg/errors"
)
// the param named path of functions in this package is a mount path
@ -168,3 +171,19 @@ func Other(ctx context.Context, args model.FsOtherArgs) (interface{}, error) {
}
return res, err
}
func PutURL(ctx context.Context, path, dstName, urlStr string) error {
storage, dstDirActualPath, err := op.GetStorageAndActualPath(path)
if err != nil {
return errors.WithMessage(err, "failed get storage")
}
if storage.Config().NoUpload {
return errors.WithStack(errs.UploadNotSupported)
}
_, ok := storage.(driver.PutURL)
_, okResult := storage.(driver.PutURLResult)
if !ok && !okResult {
return errs.NotImplement
}
return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
}
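// Hypothetical usage, assuming "/offline" is mounted on a driver that
// implements driver.PutURL or driver.PutURLResult (errs.NotImplement is
// returned otherwise):
func examplePutURL(ctx context.Context) error {
return PutURL(ctx, "/offline", "video.mp4", "https://example.com/v.mp4")
}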

View File

@ -619,10 +619,9 @@ type Buf struct {
// NewBuf is a buffer that can serve one concurrent reader and one writer.
// When the reader outpaces the writer, written data is fed to the reader immediately.
func NewBuf(ctx context.Context, maxSize int) *Buf {
d := make([]byte, 0, maxSize)
return &Buf{
ctx: ctx,
buffer: bytes.NewBuffer(d),
buffer: bytes.NewBuffer(make([]byte, 0, maxSize)),
size: maxSize,
}
}
@ -677,5 +676,5 @@ func (br *Buf) Write(p []byte) (n int, err error) {
}
func (br *Buf) Close() {
br.buffer.Reset()
br.buffer = nil
}

View File

@ -2,20 +2,20 @@ package tool
import (
"context"
"net/url"
stdpath "path"
"path/filepath"
_115 "github.com/alist-org/alist/v3/drivers/115"
"github.com/alist-org/alist/v3/drivers/pikpak"
"github.com/alist-org/alist/v3/drivers/thunder"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/internal/task"
"net/url"
"path"
"path/filepath"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/internal/task"
"github.com/google/uuid"
"github.com/pkg/errors"
)
@ -59,8 +59,11 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro
}
}
// try putting url
if args.Tool == "SimpleHttp" && tryPutUrl(ctx, storage, dstDirActualPath, args.URL) {
return nil, nil
if args.Tool == "SimpleHttp" {
err = tryPutUrl(ctx, args.DstDirPath, args.URL)
if err == nil || !errors.Is(err, errs.NotImplement) {
return nil, err
}
}
// get tool
@ -118,17 +121,13 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro
return t, nil
}
func tryPutUrl(ctx context.Context, storage driver.Driver, dstDirActualPath, urlStr string) bool {
_, ok := storage.(driver.PutURL)
_, okResult := storage.(driver.PutURLResult)
if !ok && !okResult {
return false
}
func tryPutUrl(ctx context.Context, path, urlStr string) error {
var dstName string
u, err := url.Parse(urlStr)
if err != nil {
return false
if err == nil {
dstName = stdpath.Base(u.Path)
} else {
dstName = "UnnamedURL"
}
dstName := path.Base(u.Path)
err = op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr)
return err == nil
return fs.PutURL(ctx, path, dstName, urlStr)
}

View File

@ -3,6 +3,7 @@ package op
import (
"context"
stderrors "errors"
"fmt"
"io"
stdpath "path"
"strings"
@ -54,21 +55,76 @@ func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return meta, err
}
func getArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, *stream.SeekableStream, error) {
func GetArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, []*stream.SeekableStream, error) {
l, obj, err := Link(ctx, storage, path, args)
if err != nil {
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
}
ext := stdpath.Ext(obj.GetName())
t, err := tool.GetArchiveTool(ext)
baseName, ext, found := strings.Cut(obj.GetName(), ".")
if !found {
if l.MFile != nil {
_ = l.MFile.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
return nil, nil, nil, errors.Errorf("failed get archive tool: the object has no extension")
}
partExt, t, err := tool.GetArchiveTool("." + ext)
if err != nil {
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
var e error
partExt, t, e = tool.GetArchiveTool(stdpath.Ext(obj.GetName()))
if e != nil {
if l.MFile != nil {
_ = l.MFile.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
return nil, nil, nil, errors.WithMessagef(stderrors.Join(err, e), "failed get archive tool: %s", ext)
}
}
ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
if err != nil {
if l.MFile != nil {
_ = l.MFile.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
}
return obj, t, ss, nil
ret := []*stream.SeekableStream{ss}
if partExt == nil {
return obj, t, ret, nil
} else {
index := partExt.SecondPartIndex
dir := stdpath.Dir(path)
for {
p := stdpath.Join(dir, baseName+fmt.Sprintf(partExt.PartFileFormat, index))
var o model.Obj
l, o, err = Link(ctx, storage, p, args)
if err != nil {
break
}
ss, err = stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: o}, l)
if err != nil {
if l.MFile != nil {
_ = l.MFile.Close()
}
if l.RangeReadCloser != nil {
_ = l.RangeReadCloser.Close()
}
for _, s := range ret {
_ = s.Close()
}
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
}
ret = append(ret, ss)
index++
}
return obj, t, ret, nil
}
}
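// Illustration: for "/dir/a.part1.rar", strings.Cut yields baseName "a" and
// ext "part1.rar", partExt is {".part%d.rar", 2}, and the loop above probes
// "/dir/a.part2.rar", "/dir/a.part3.rar", ... until Link fails, collecting
// one SeekableStream per volume.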
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
@ -84,7 +140,7 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
if !errors.Is(err, errs.NotImplement) {
archiveMetaProvider := &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}
if meta.GetTree() != nil {
if meta != nil && meta.GetTree() != nil {
archiveMetaProvider.Sort = &storage.GetStorage().Sort
}
if !storage.Config().NoCache {
@ -94,13 +150,17 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
return obj, archiveMetaProvider, err
}
}
obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
if err != nil {
return nil, nil, err
}
defer func() {
if err := ss.Close(); err != nil {
log.Errorf("failed to close file streamer, %v", err)
var e error
for _, s := range ss {
e = stderrors.Join(e, s.Close())
}
if e != nil {
log.Errorf("failed to close file streamer, %v", e)
}
}()
meta, err := t.GetMeta(ss, args.ArchiveArgs)
@ -114,9 +174,9 @@ func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, arg
if !storage.Config().NoCache {
Expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration)
archiveMetaProvider.Expiration = &Expiration
} else if ss.Link.MFile == nil {
} else if ss[0].Link.MFile == nil {
// alias and crypt drivers
archiveMetaProvider.Expiration = ss.Link.Expiration
archiveMetaProvider.Expiration = ss[0].Link.Expiration
}
return obj, archiveMetaProvider, err
}
@ -188,13 +248,17 @@ func _listArchive(ctx context.Context, storage driver.Driver, path string, args
return obj, files, err
}
}
obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
obj, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
if err != nil {
return nil, nil, err
}
defer func() {
if err := ss.Close(); err != nil {
log.Errorf("failed to close file streamer, %v", err)
var e error
for _, s := range ss {
e = stderrors.Join(e, s.Close())
}
if e != nil {
log.Errorf("failed to close file streamer, %v", e)
}
}()
files, err := t.List(ss, args.ArchiveInnerArgs)
@ -378,8 +442,8 @@ func driverExtract(ctx context.Context, storage driver.Driver, path string, args
}
type streamWithParent struct {
rc io.ReadCloser
parent *stream.SeekableStream
rc io.ReadCloser
parents []*stream.SeekableStream
}
func (s *streamWithParent) Read(p []byte) (int, error) {
@ -387,24 +451,31 @@ func (s *streamWithParent) Read(p []byte) (int, error) {
}
func (s *streamWithParent) Close() error {
err1 := s.rc.Close()
err2 := s.parent.Close()
return stderrors.Join(err1, err2)
err := s.rc.Close()
for _, ss := range s.parents {
err = stderrors.Join(err, ss.Close())
}
return err
}
func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
_, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
_, t, ss, err := GetArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
if err != nil {
return nil, 0, err
}
rc, size, err := t.Extract(ss, args)
if err != nil {
if e := ss.Close(); e != nil {
var e error
for _, s := range ss {
e = stderrors.Join(e, s.Close())
}
if e != nil {
log.Errorf("failed to close file streamer, %v", e)
err = stderrors.Join(err, e)
}
return nil, 0, err
}
return &streamWithParent{rc: rc, parent: ss}, size, nil
return &streamWithParent{rc: rc, parents: ss}, size, nil
}
func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {

View File

@ -17,7 +17,6 @@ func CreateSSHPublicKey(k *model.SSHPublicKey) (error, bool) {
if err != nil {
return err, false
}
k.KeyStr = string(pubKey.Marshal())
k.Fingerprint = ssh.FingerprintSHA256(pubKey)
k.AddedTime = time.Now()
k.LastUsedTime = k.AddedTime

View File

@ -139,7 +139,7 @@ type RateLimitRangeReadCloser struct {
Limiter Limiter
}
func (rrc RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
func (rrc *RateLimitRangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
rc, err := rrc.RangeReadCloserIF.RangeRead(ctx, httpRange)
if err != nil {
return nil, err

View File

@ -14,6 +14,7 @@ import (
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/sirupsen/logrus"
"go4.org/readerutil"
)
type FileStream struct {
@ -159,6 +160,10 @@ var _ model.FileStreamer = (*FileStream)(nil)
//var _ seekableStream = (*FileStream)(nil)
// for most internal stream, which is either RangeReadCloser or MFile
// Any functionality implemented based on SeekableStream should implement a Close method,
// whose only purpose is to close the SeekableStream object. If such functionality has
// additional resources that need to be closed, they should be added to the Closer property of
// the SeekableStream object and be closed together when the SeekableStream object is closed.
type SeekableStream struct {
FileStream
Link *model.Link
@ -196,7 +201,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
return &ss, nil
}
if ss.Link.RangeReadCloser != nil {
ss.rangeReadCloser = RateLimitRangeReadCloser{
ss.rangeReadCloser = &RateLimitRangeReadCloser{
RangeReadCloserIF: ss.Link.RangeReadCloser,
Limiter: ServerDownloadLimit,
}
@ -208,7 +213,7 @@ func NewSeekableStream(fs FileStream, link *model.Link) (*SeekableStream, error)
if err != nil {
return nil, err
}
rrc = RateLimitRangeReadCloser{
rrc = &RateLimitRangeReadCloser{
RangeReadCloserIF: rrc,
Limiter: ServerDownloadLimit,
}
@ -364,7 +369,7 @@ type RangeReadReadAtSeeker struct {
ss *SeekableStream
masterOff int64
readers []*readerCur
*headCache
headCache *headCache
}
type headCache struct {
@ -406,7 +411,7 @@ func (c *headCache) read(p []byte) (n int, err error) {
}
return
}
func (r *headCache) close() error {
func (r *headCache) Close() error {
for i := range r.bufs {
r.bufs[i] = nil
}
@ -419,6 +424,7 @@ func (r *RangeReadReadAtSeeker) InitHeadCache() {
reader := r.readers[0]
r.readers = r.readers[1:]
r.headCache = &headCache{readerCur: reader}
r.ss.Closers.Add(r.headCache)
}
}
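// Registering the headCache with the stream's Closers follows the ownership
// contract documented on SeekableStream above: resources added there are
// closed exactly once when the stream is closed, which is why the renamed
// headCache.Close satisfies io.Closer and Close() below no longer closes it by hand.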
@ -449,6 +455,18 @@ func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStr
return r, nil
}
func NewMultiReaderAt(ss []*SeekableStream) (readerutil.SizeReaderAt, error) {
readers := make([]readerutil.SizeReaderAt, 0, len(ss))
for _, s := range ss {
ra, err := NewReadAtSeeker(s, 0)
if err != nil {
return nil, err
}
readers = append(readers, io.NewSectionReader(ra, 0, s.GetSize()))
}
return readerutil.NewMultiReaderAt(readers...), nil
}
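// Illustration of the stitching (hypothetical function; strings.NewReader
// stands in for two volume streams, and assumes "strings" is imported):
func multiReaderExample() {
parts := []readerutil.SizeReaderAt{
io.NewSectionReader(strings.NewReader("hello "), 0, 6),
io.NewSectionReader(strings.NewReader("world"), 0, 5),
}
ra := readerutil.NewMultiReaderAt(parts...)
buf := make([]byte, 5)
_, _ = ra.ReadAt(buf, 6) // reads "world": offset 6 falls in the second part
_ = ra.Size()            // 11: the part sizes summed
}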
func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
return r.ss
}
@ -559,9 +577,6 @@ func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
}
func (r *RangeReadReadAtSeeker) Close() error {
if r.headCache != nil {
_ = r.headCache.close()
}
return r.ss.Close()
}

View File

@ -45,7 +45,7 @@ func IsSubPath(path string, subPath string) bool {
func Ext(path string) string {
ext := stdpath.Ext(path)
if strings.HasPrefix(ext, ".") {
if len(ext) > 0 && ext[0] == '.' {
ext = ext[1:]
}
return strings.ToLower(ext)

View File

@ -1,97 +1,25 @@
package common
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"github.com/alist-org/alist/v3/internal/conf"
"maps"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/internal/setting"
"github.com/alist-org/alist/v3/internal/stream"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/microcosm-cc/bluemonday"
log "github.com/sirupsen/logrus"
"github.com/yuin/goldmark"
)
func processMarkdown(content []byte) ([]byte, error) {
var buf bytes.Buffer
if err := goldmark.New().Convert(content, &buf); err != nil {
return nil, fmt.Errorf("markdown conversion failed: %w", err)
}
return bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes()), nil
}
func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
// handle markdown files first
if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
var markdownContent []byte
var err error
if link.MFile != nil {
defer link.MFile.Close()
attachHeader(w, file)
markdownContent, err = io.ReadAll(link.MFile)
if err != nil {
return fmt.Errorf("failed to read markdown content: %w", err)
}
} else if link.RangeReadCloser != nil {
attachHeader(w, file)
rrc, err := link.RangeReadCloser.RangeRead(r.Context(), http_range.Range{Start: 0, Length: -1})
if err != nil {
return err
}
defer rrc.Close()
markdownContent, err = io.ReadAll(rrc)
if err != nil {
return fmt.Errorf("failed to read markdown content: %w", err)
}
} else {
header := net.ProcessHeader(r.Header, link.Header)
res, err := net.RequestHttp(r.Context(), r.Method, header, link.URL)
if err != nil {
return err
}
defer res.Body.Close()
for h, v := range res.Header {
w.Header()[h] = v
}
w.WriteHeader(res.StatusCode)
if r.Method == http.MethodHead {
return nil
}
markdownContent, err = io.ReadAll(res.Body)
if err != nil {
return fmt.Errorf("failed to read markdown content: %w", err)
}
}
safeHTML, err := processMarkdown(markdownContent)
if err != nil {
return err
}
safeHTMLReader := bytes.NewReader(safeHTML)
w.Header().Set("Content-Length", strconv.FormatInt(int64(len(safeHTML)), 10))
w.Header().Set("Content-Type", "text/html; charset=utf-8")
_, err = utils.CopyWithBuffer(w, safeHTMLReader)
if err != nil {
return err
}
return nil
}
if link.MFile != nil {
defer link.MFile.Close()
attachHeader(w, file)
@ -152,9 +80,7 @@ func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.
}
defer res.Body.Close()
for h, v := range res.Header {
w.Header()[h] = v
}
maps.Copy(w.Header(), res.Header)
w.WriteHeader(res.StatusCode)
if r.Method == http.MethodHead {
return nil

View File

@ -1,10 +1,11 @@
package handles
import (
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/internal/task"
"net/url"
stdpath "path"
"strings"
"github.com/alist-org/alist/v3/internal/archive/tool"
"github.com/alist-org/alist/v3/internal/conf"
@ -208,14 +209,30 @@ func FsArchiveList(c *gin.Context) {
})
}
type StringOrArray []string
func (s *StringOrArray) UnmarshalJSON(data []byte) error {
var value string
if err := json.Unmarshal(data, &value); err == nil {
*s = []string{value}
return nil
}
var sliceValue []string
if err := json.Unmarshal(data, &sliceValue); err != nil {
return err
}
*s = sliceValue
return nil
}
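// Both request shapes now decode to the same slice (sketch; encoding/json is
// already imported in this file):
func exampleNames() {
var req ArchiveDecompressReq
_ = json.Unmarshal([]byte(`{"name":"a.zip"}`), &req)          // req.Name == ["a.zip"]
_ = json.Unmarshal([]byte(`{"name":["a.zip","b.7z"]}`), &req) // req.Name == ["a.zip","b.7z"]
}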
type ArchiveDecompressReq struct {
SrcDir string `json:"src_dir" form:"src_dir"`
DstDir string `json:"dst_dir" form:"dst_dir"`
Name string `json:"name" form:"name"`
ArchivePass string `json:"archive_pass" form:"archive_pass"`
InnerPath string `json:"inner_path" form:"inner_path"`
CacheFull bool `json:"cache_full" form:"cache_full"`
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
SrcDir string `json:"src_dir" form:"src_dir"`
DstDir string `json:"dst_dir" form:"dst_dir"`
Name StringOrArray `json:"name" form:"name"`
ArchivePass string `json:"archive_pass" form:"archive_pass"`
InnerPath string `json:"inner_path" form:"inner_path"`
CacheFull bool `json:"cache_full" form:"cache_full"`
PutIntoNewDir bool `json:"put_into_new_dir" form:"put_into_new_dir"`
}
func FsArchiveDecompress(c *gin.Context) {
@ -229,41 +246,51 @@ func FsArchiveDecompress(c *gin.Context) {
common.ErrorResp(c, errs.PermissionDenied, 403)
return
}
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, req.Name))
if err != nil {
common.ErrorResp(c, err, 403)
return
srcPaths := make([]string, 0, len(req.Name))
for _, name := range req.Name {
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name))
if err != nil {
common.ErrorResp(c, err, 403)
return
}
srcPaths = append(srcPaths, srcPath)
}
dstDir, err := user.JoinPath(req.DstDir)
if err != nil {
common.ErrorResp(c, err, 403)
return
}
t, err := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
ArchiveInnerArgs: model.ArchiveInnerArgs{
ArchiveArgs: model.ArchiveArgs{
LinkArgs: model.LinkArgs{
Header: c.Request.Header,
Type: c.Query("type"),
HttpReq: c.Request,
tasks := make([]task.TaskExtensionInfo, 0, len(srcPaths))
for _, srcPath := range srcPaths {
t, e := fs.ArchiveDecompress(c, srcPath, dstDir, model.ArchiveDecompressArgs{
ArchiveInnerArgs: model.ArchiveInnerArgs{
ArchiveArgs: model.ArchiveArgs{
LinkArgs: model.LinkArgs{
Header: c.Request.Header,
Type: c.Query("type"),
HttpReq: c.Request,
},
Password: req.ArchivePass,
},
Password: req.ArchivePass,
InnerPath: utils.FixAndCleanPath(req.InnerPath),
},
InnerPath: utils.FixAndCleanPath(req.InnerPath),
},
CacheFull: req.CacheFull,
PutIntoNewDir: req.PutIntoNewDir,
})
if err != nil {
if errors.Is(err, errs.WrongArchivePassword) {
common.ErrorResp(c, err, 202)
} else {
common.ErrorResp(c, err, 500)
CacheFull: req.CacheFull,
PutIntoNewDir: req.PutIntoNewDir,
})
if e != nil {
if errors.Is(e, errs.WrongArchivePassword) {
common.ErrorResp(c, e, 202)
} else {
common.ErrorResp(c, e, 500)
}
return
}
if t != nil {
tasks = append(tasks, t)
}
return
}
common.SuccessResp(c, gin.H{
"task": getTaskInfo(t),
"task": getTaskInfos(tasks),
})
}
@ -376,7 +403,7 @@ func ArchiveInternalExtract(c *gin.Context) {
func ArchiveExtensions(c *gin.Context) {
var ext []string
for key := range tool.Tools {
ext = append(ext, strings.TrimPrefix(key, "."))
ext = append(ext, key)
}
common.SuccessResp(c, ext)
}

View File

@ -1,9 +1,12 @@
package handles
import (
"bytes"
"fmt"
"io"
"net/http"
stdpath "path"
"strconv"
"strings"
"github.com/alist-org/alist/v3/internal/conf"
@ -15,7 +18,9 @@ import (
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
"github.com/microcosm-cc/bluemonday"
log "github.com/sirupsen/logrus"
"github.com/yuin/goldmark"
)
func Down(c *gin.Context) {
@ -124,7 +129,34 @@ func localProxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange boo
if proxyRange {
common.ProxyRange(link, file.GetSize())
}
err = common.Proxy(c.Writer, c.Request, link, file)
// handle markdown files first
if utils.Ext(file.GetName()) == "md" && setting.GetBool(conf.FilterReadMeScripts) {
w := c.Writer
buf := bytes.NewBuffer(make([]byte, 0, file.GetSize()))
err = common.Proxy(&proxyResponseWriter{ResponseWriter: w, Writer: buf}, c.Request, link, file)
if err == nil && buf.Len() > 0 {
if w.Status() < 200 || w.Status() >= 300 {
w.Write(buf.Bytes())
return
}
var html bytes.Buffer
if err = goldmark.Convert(buf.Bytes(), &html); err != nil {
err = fmt.Errorf("markdown conversion failed: %w", err)
} else {
buf.Reset()
err = bluemonday.UGCPolicy().SanitizeReaderToWriter(&html, buf)
if err == nil {
w.Header().Set("Content-Length", strconv.FormatInt(int64(buf.Len()), 10))
w.Header().Set("Content-Type", "text/html; charset=utf-8")
_, err = utils.CopyWithBuffer(c.Writer, buf)
}
}
}
} else {
err = common.Proxy(c.Writer, c.Request, link, file)
}
if err != nil {
common.ErrorResp(c, err, 500, true)
return
@ -150,3 +182,12 @@ func canProxy(storage driver.Driver, filename string) bool {
}
return false
}
type proxyResponseWriter struct {
http.ResponseWriter
io.Writer
}
func (pw *proxyResponseWriter) Write(p []byte) (int, error) {
return pw.Writer.Write(p)
}
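// Design note: common.Proxy writes the body through this wrapper into a
// bytes.Buffer while status and headers still reach the real ResponseWriter,
// so localProxy can convert and sanitize the buffered markdown before any
// HTML is sent to the client.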

View File

@ -6,6 +6,7 @@ import (
"github.com/alist-org/alist/v3/server/common"
"github.com/gin-gonic/gin"
"strconv"
"strings"
)
type SSHKeyAddReq struct {
@ -30,7 +31,7 @@ func AddMyPublicKey(c *gin.Context) {
}
key := &model.SSHPublicKey{
Title: req.Title,
KeyStr: req.Key,
KeyStr: strings.TrimSpace(req.Key),
UserId: userObj.ID,
}
err, parsed := op.CreateSSHPublicKey(key)

View File

@ -113,11 +113,15 @@ func (d *SftpDriver) PublicKeyAuth(conn ssh.ConnMetadata, key ssh.PublicKey) (*s
}
marshal := string(key.Marshal())
for _, sk := range keys {
if marshal == sk.KeyStr {
sk.LastUsedTime = time.Now()
_ = op.UpdateSSHPublicKey(&sk)
return nil, nil
if marshal != sk.KeyStr {
pubKey, _, _, _, e := ssh.ParseAuthorizedKey([]byte(sk.KeyStr))
if e != nil || marshal != string(pubKey.Marshal()) {
continue
}
}
sk.LastUsedTime = time.Now()
_ = op.UpdateSSHPublicKey(&sk)
return nil, nil
}
return nil, errors.New("public key refused")
}
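// The extra parse step accepts stored keys in authorized_keys text form
// (type, base64 body, optional comment) as well as raw wire format, since
// re-marshalling normalizes both sides. A minimal equivalent check
// (hypothetical helper, using golang.org/x/crypto/ssh):
func sameKey(wire []byte, authorizedLine string) bool {
pub, _, _, _, err := ssh.ParseAuthorizedKey([]byte(authorizedLine))
return err == nil && string(pub.Marshal()) == string(wire)
}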