* wip: refactor offline download (#5331) * base tool * working: aria2 * refactor: change type of percentage to float64 * wip: adapt aria2 * wip: use items in offline_download * wip: use tool manager * wip: adapt qBittorrent * chore: fix typo * Squashed commit of the following: commit 4fc0a77565702f9bf498485d42336502f2ee9776 Author: Andy Hsu <i@nn.ci> Date: Fri Oct 20 21:06:25 2023 +0800 fix(baidu_netdisk): upload file > 4GB (close #5392) commit aaffaee2b54fc067d240ea0c20ea3c2f39615d6e Author: gmugu <94156510@qq.com> Date: Thu Oct 19 19:17:53 2023 +0800 perf(webdav): support request with cookies (#5391) commit 8ef8023c20bfeee97ec82155b52eae0d80b1410e Author: NewbieOrange <NewbieOrange@users.noreply.github.com> Date: Thu Oct 19 19:17:09 2023 +0800 fix(aliyundrive_open): upload progress for normal upload (#5398) commit cdfbe6dcf2b361e4c93c2703c2f8c9bddeac0ee6 Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Wed Oct 18 16:27:07 2023 +0800 fix: hash gcid empty file (#5394) commit 94d028743abf8e0d736f80c0ec4fb294a1cc064c Author: Andy Hsu <i@nn.ci> Date: Sat Oct 14 13:17:51 2023 +0800 ci: remove `pr-welcome` label when close issue [skip ci] commit 7f7335435c2f32a3eef76fac4c4f783d9d8624fd Author: itsHenry <2671230065@qq.com> Date: Sat Oct 14 13:12:46 2023 +0800 feat(cloudreve): support thumbnail (#5373 close #5348) * feat(cloudreve): support thumbnail * chore: remove unnecessary code commit b9e192b29cffddf14a0dfb2d3885def57a56ce16 Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Thu Oct 12 20:57:12 2023 +0800 fix(115): limit request rate (#5367 close #5275) * fix(115):limit request rate * chore(115): fix unit of `limit_rate` --------- Co-authored-by: Andy Hsu <i@nn.ci> commit 69a98eaef612b58596e5c26c341b6d7cedecdf19 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Wed Oct 11 22:01:55 2023 +0800 fix(deps): update module github.com/aliyun/aliyun-oss-go-sdk to v2.2.9+incompatible (#5141) Co-authored-by: 
renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit 1ebc96a4e5220c979fd581bb3b5640e9436f6665 Author: Andy Hsu <i@nn.ci> Date: Tue Oct 10 18:32:00 2023 +0800 fix(wopan): fatal error concurrent map writes (close #5352) commit 66e2324cac75cb3ef05af45dbdd10b124d534aff Author: Andy Hsu <i@nn.ci> Date: Tue Oct 10 18:23:11 2023 +0800 chore(deps): upgrade dependencies commit 7600dc28df137c439e538b4257731c33a63db9b5 Author: Andy Hsu <i@nn.ci> Date: Tue Oct 10 18:13:58 2023 +0800 fix(aliyundrive_open): change default api to raw server (close #5358) commit 8ef89ad0a496d5acc398794c0afa4f77c67ad371 Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Tue Oct 10 18:08:27 2023 +0800 fix(baidu_netdisk): hash and `error 2` (#5356) * fix(baidu):hash and error:2 * fix:invalid memory address commit 35d672217dde69e65b41b1fcd9786c1cfebcdc45 Author: jeffmingup <1960588251@qq.com> Date: Sun Oct 8 19:29:45 2023 +0800 fix(onedrive_app): incorrect api on `_accessToken` (#5346) commit 1a283bb2720eff6d1b0c1dd6f1667a6449905a9b Author: foxxorcat <95907542+foxxorcat@users.noreply.github.com> Date: Fri Oct 6 16:04:39 2023 +0800 feat(google_drive): add `hash_info`, `ctime`, `thumbnail` (#5334) commit a008f54f4d5eda5738abfd54bf1abf1e18c08430 Author: nkh0472 <67589323+nkh0472@users.noreply.github.com> Date: Thu Oct 5 13:10:51 2023 +0800 docs: minor language improvements (#5329) [skip ci] * fix: adapt update progress type * Squashed commit of the following: commit 65c5ec0c34d5f027a65933fe89af53791747bdd4 Author: itsHenry <2671230065@qq.com> Date: Sat Nov 4 13:35:09 2023 +0800 feat(cloudreve): folder size count and switch (#5457 close #5395) commit a6325967d0de18e6b6c744f06cb1ebaa08ec687e Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Mon Oct 30 15:11:20 2023 +0800 fix(deps): update module github.com/charmbracelet/lipgloss to v0.9.1 (#5234) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit 
4dff49470adce36416d8c56594e84868c04d023b Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Mon Oct 30 15:10:36 2023 +0800 fix(deps): update golang.org/x/exp digest to 7918f67 (#5366) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit cc86d6f3d1ff2120669c9dda719b7faabb922f52 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Sun Oct 29 14:45:55 2023 +0800 fix(deps): update module golang.org/x/net to v0.17.0 [security] (#5370) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit c0f9c8ebafdf8dd2afe5c0b9fba24456819c3155 Author: Andy Hsu <i@nn.ci> Date: Thu Oct 26 19:21:09 2023 +0800 feat: add ignore direct link params (close #5434)
580 lines
17 KiB
Go
580 lines
17 KiB
Go
package op
|
|
|
|
import (
|
|
"context"
|
|
stdpath "path"
|
|
"time"
|
|
|
|
"github.com/Xhofe/go-cache"
|
|
"github.com/alist-org/alist/v3/internal/driver"
|
|
"github.com/alist-org/alist/v3/internal/errs"
|
|
"github.com/alist-org/alist/v3/internal/model"
|
|
"github.com/alist-org/alist/v3/pkg/generic_sync"
|
|
"github.com/alist-org/alist/v3/pkg/singleflight"
|
|
"github.com/alist-org/alist/v3/pkg/utils"
|
|
"github.com/pkg/errors"
|
|
log "github.com/sirupsen/logrus"
|
|
)
|
|
|
|
// In order to facilitate adding some other things before and after file op

// listCache caches directory listings keyed by Key(storage, path),
// sharded 64 ways to reduce lock contention.
var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))

// listG collapses concurrent List calls for the same key into a single
// driver request (singleflight).
var listG singleflight.Group[[]model.Obj]
|
|
|
|
func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
|
|
key := Key(storage, path)
|
|
objs, ok := listCache.Get(key)
|
|
if ok {
|
|
for i, obj := range objs {
|
|
if obj.GetName() == oldObj.GetName() {
|
|
objs[i] = newObj
|
|
break
|
|
}
|
|
}
|
|
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
|
}
|
|
}
|
|
|
|
func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
|
|
key := Key(storage, path)
|
|
objs, ok := listCache.Get(key)
|
|
if ok {
|
|
for i, oldObj := range objs {
|
|
if oldObj.GetName() == obj.GetName() {
|
|
objs = append(objs[:i], objs[i+1:]...)
|
|
break
|
|
}
|
|
}
|
|
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
|
}
|
|
}
|
|
|
|
// addSortDebounceMap holds one debounced trigger per cache key so that a
// burst of addCacheObj calls results in a single deferred re-sort.
var addSortDebounceMap generic_sync.MapOf[string, func(func())]
|
|
|
|
// addCacheObj inserts newObj into the cached listing for path. An existing
// entry with the same name is replaced in place; otherwise the object is
// appended so that files and folders stay grouped, and a debounced re-sort
// is scheduled when the storage sorts locally. It is a no-op when the
// listing is not cached.
func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
	key := Key(storage, path)
	objs, ok := listCache.Get(key)
	if ok {
		// Same-name entry already cached: overwrite and stop.
		for i, obj := range objs {
			if obj.GetName() == newObj.GetName() {
				objs[i] = newObj
				return
			}
		}

		// Simple separation of files and folders
		if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
			objs = append(objs, newObj)
		} else {
			objs = append([]model.Obj{newObj}, objs...)
		}

		if storage.Config().LocalSort {
			debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
			log.Debug("addCacheObj: wait start sort")
			debounce(func() {
				log.Debug("addCacheObj: start sort")
				// NOTE(review): the closure sorts the slice captured at
				// schedule time; if the cached listing is replaced before
				// the debounce fires, this sorts a stale slice — confirm
				// this is acceptable.
				model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
				addSortDebounceMap.Delete(key)
			})
		}

		listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
	}
}
|
|
|
|
func ClearCache(storage driver.Driver, path string) {
|
|
objs, ok := listCache.Get(Key(storage, path))
|
|
if ok {
|
|
for _, obj := range objs {
|
|
if obj.IsDir() {
|
|
ClearCache(storage, stdpath.Join(path, obj.GetName()))
|
|
}
|
|
}
|
|
}
|
|
listCache.Del(Key(storage, path))
|
|
}
|
|
|
|
func Key(storage driver.Driver, path string) string {
|
|
return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
|
|
}
|
|
|
|
// List files in storage, not contains virtual file.
// Results are served from listCache unless refresh is true; concurrent
// listings of the same key are collapsed into one driver call.
func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs, refresh ...bool) ([]model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	log.Debugf("op.List %s", path)
	key := Key(storage, path)
	// Serve from cache unless the caller explicitly asked for a refresh.
	if !utils.IsBool(refresh...) {
		if files, ok := listCache.Get(key); ok {
			log.Debugf("use cache when list %s", path)
			return files, nil
		}
	}
	dir, err := GetUnwrap(ctx, storage, path)
	if err != nil {
		return nil, errors.WithMessage(err, "failed get dir")
	}
	log.Debugf("list dir: %+v", dir)
	if !dir.IsDir() {
		return nil, errors.WithStack(errs.NotFolder)
	}
	// Deduplicate concurrent listings of the same key (singleflight).
	objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
		files, err := storage.List(ctx, dir, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to list objs")
		}
		// set path on objects that support it and came back without one
		for _, f := range files {
			if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && dir.GetPath() != "" {
				s.SetPath(stdpath.Join(dir.GetPath(), f.GetName()))
			}
		}
		// wrap obj names
		model.WrapObjsName(files)
		// call update hooks asynchronously with the full mount-relative path
		go func(reqPath string, files []model.Obj) {
			for _, hook := range objsUpdateHooks {
				hook(reqPath, files)
			}
		}(utils.GetFullPath(storage.GetStorage().MountPath, path), files)

		// sort objs locally when the driver does not guarantee order
		if storage.Config().LocalSort {
			model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
		}
		model.ExtractFolder(files, storage.GetStorage().ExtractFolder)

		if !storage.Config().NoCache {
			if len(files) > 0 {
				log.Debugf("set cache: %s => %+v", key, files)
				listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
			} else {
				// empty listings are not cached; drop any stale entry
				log.Debugf("del cache: %s", key)
				listCache.Del(key)
			}
		}
		return files, nil
	})
	return objs, err
}
|
|
|
|
// Get object from list of files.
// Fast paths: a driver.Getter lookup first, then a synthetic root object
// for "/"; otherwise the parent directory is listed and searched by name.
func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
	path = utils.FixAndCleanPath(path)
	log.Debugf("op.Get %s", path)

	// get the obj directly without list so that we can reduce the io
	if g, ok := storage.(driver.Getter); ok {
		obj, err := g.Get(ctx, path)
		if err == nil {
			return model.WrapObjName(obj), nil
		}
		// on error, fall through to the list-based lookup below
	}

	// is root folder: synthesize a root object depending on how the driver
	// identifies its root (by id, by path, or via GetRooter)
	if utils.PathEqual(path, "/") {
		var rootObj model.Obj
		switch r := storage.GetAddition().(type) {
		case driver.IRootId:
			rootObj = &model.Object{
				ID:       r.GetRootId(),
				Name:     RootName,
				Size:     0,
				Modified: storage.GetStorage().Modified,
				IsFolder: true,
			}
		case driver.IRootPath:
			rootObj = &model.Object{
				Path:     r.GetRootPath(),
				Name:     RootName,
				Size:     0,
				Modified: storage.GetStorage().Modified,
				IsFolder: true,
			}
		default:
			// note: this `storage` shadows the parameter inside the branch
			if storage, ok := storage.(driver.GetRooter); ok {
				obj, err := storage.GetRoot(ctx)
				if err != nil {
					return nil, errors.WithMessage(err, "failed get root obj")
				}
				rootObj = obj
			}
		}
		if rootObj == nil {
			return nil, errors.Errorf("please implement IRootPath or IRootId or GetRooter method")
		}
		return &model.ObjWrapName{
			Name: RootName,
			Obj:  rootObj,
		}, nil
	}

	// not root folder: list the parent directory and search by name
	dir, name := stdpath.Split(path)
	files, err := List(ctx, storage, dir, model.ListArgs{})
	if err != nil {
		return nil, errors.WithMessage(err, "failed get parent list")
	}
	for _, f := range files {
		if f.GetName() == name {
			return f, nil
		}
	}
	log.Debugf("cant find obj with name: %s", name)
	return nil, errors.WithStack(errs.ObjectNotFound)
}
|
|
|
|
func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
|
|
obj, err := Get(ctx, storage, path)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return model.UnwrapObj(obj), err
|
|
}
|
|
|
|
// linkCache caches resolved download links per Key(storage, path)
// (optionally suffixed with the client IP, see Link).
var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))

// linkG collapses concurrent Link resolutions for the same key.
var linkG singleflight.Group[*model.Link]
|
|
|
|
// Link get link, if is an url. should have an expiry time.
// Resolved links with an Expiration are cached until they expire;
// concurrent resolutions of the same key are collapsed via singleflight.
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	file, err := GetUnwrap(ctx, storage, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed to get file")
	}
	if file.IsDir() {
		return nil, nil, errors.WithStack(errs.NotFile)
	}
	key := Key(storage, path)
	if link, ok := linkCache.Get(key); ok {
		return link, file, nil
	}
	fn := func() (*model.Link, error) {
		link, err := storage.Link(ctx, file, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed get link")
		}
		// Only links with an expiry are cached; nil Expiration means the
		// link must be re-resolved on every request.
		if link.Expiration != nil {
			if link.IPCacheKey {
				// NOTE(review): the IP suffix is appended only when storing,
				// while the cache lookup above uses the plain key — confirm
				// this asymmetry is intended.
				key = key + ":" + args.IP
			}
			linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
		}
		return link, nil
	}
	link, err, _ := linkG.Do(key, fn)
	return link, file, err
}
|
|
|
|
// Other api
|
|
func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (interface{}, error) {
|
|
obj, err := GetUnwrap(ctx, storage, args.Path)
|
|
if err != nil {
|
|
return nil, errors.WithMessagef(err, "failed to get obj")
|
|
}
|
|
if o, ok := storage.(driver.Other); ok {
|
|
return o.Other(ctx, model.OtherArgs{
|
|
Obj: obj,
|
|
Method: args.Method,
|
|
Data: args.Data,
|
|
})
|
|
} else {
|
|
return nil, errs.NotImplement
|
|
}
|
|
}
|
|
|
|
// mkdirG collapses concurrent MakeDir calls for the same key.
var mkdirG singleflight.Group[interface{}]
|
|
|
|
// MakeDir creates the directory at path on storage, recursively creating
// missing parents first. Concurrent calls for the same key are collapsed
// via singleflight. When lazyCache is true, the parent's listing cache is
// not cleared after a mkdir that returned no object.
func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	key := Key(storage, path)
	_, err, _ := mkdirG.Do(key, func() (interface{}, error) {
		// check if dir exists
		f, err := GetUnwrap(ctx, storage, path)
		if err != nil {
			if errs.IsObjectNotFound(err) {
				parentPath, dirName := stdpath.Split(path)
				// ensure the whole parent chain exists (recursive call)
				err = MakeDir(ctx, storage, parentPath)
				if err != nil {
					return nil, errors.WithMessagef(err, "failed to make parent dir [%s]", parentPath)
				}
				parentDir, err := GetUnwrap(ctx, storage, parentPath)
				// this should not happen
				if err != nil {
					return nil, errors.WithMessagef(err, "failed to get parent dir [%s]", parentPath)
				}

				// Prefer the MkdirResult variant so the returned object can
				// be inserted into the listing cache directly.
				switch s := storage.(type) {
				case driver.MkdirResult:
					var newObj model.Obj
					newObj, err = s.MakeDir(ctx, parentDir, dirName)
					if err == nil {
						if newObj != nil {
							addCacheObj(storage, parentPath, model.WrapObjName(newObj))
						} else if !utils.IsBool(lazyCache...) {
							ClearCache(storage, parentPath)
						}
					}
				case driver.Mkdir:
					err = s.MakeDir(ctx, parentDir, dirName)
					if err == nil && !utils.IsBool(lazyCache...) {
						ClearCache(storage, parentPath)
					}
				default:
					return nil, errs.NotImplement
				}
				return nil, errors.WithStack(err)
			}
			return nil, errors.WithMessage(err, "failed to check if dir exists")
		}
		// dir exists
		if f.IsDir() {
			return nil, nil
		}
		// dir to make is a file
		return nil, errors.New("file exists")
	})
	return err
}
|
|
|
|
// Move moves the object at srcPath into dstDirPath within the same storage,
// updating the listing caches of both directories on success. When
// lazyCache is true, the destination cache is not cleared after a move
// that returned no object.
func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	// keep the wrapped object for cache bookkeeping, the unwrapped one
	// for the driver call
	srcRawObj, err := Get(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
	}
	srcObj := model.UnwrapObj(srcRawObj)
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get dst dir")
	}
	srcDirPath := stdpath.Dir(srcPath)

	// Prefer the MoveResult variant so the returned object can be inserted
	// into the destination's listing cache directly.
	switch s := storage.(type) {
	case driver.MoveResult:
		var newObj model.Obj
		newObj, err = s.Move(ctx, srcObj, dstDir)
		if err == nil {
			delCacheObj(storage, srcDirPath, srcRawObj)
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				ClearCache(storage, dstDirPath)
			}
		}
	case driver.Move:
		err = s.Move(ctx, srcObj, dstDir)
		if err == nil {
			delCacheObj(storage, srcDirPath, srcRawObj)
			if !utils.IsBool(lazyCache...) {
				ClearCache(storage, dstDirPath)
			}
		}
	default:
		return errs.NotImplement
	}
	return errors.WithStack(err)
}
|
|
|
|
// Rename renames the object at srcPath to dstName in place, updating the
// parent directory's listing cache on success. When lazyCache is true, the
// cache is not cleared after a rename that returned no object.
func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	// wrapped object for cache bookkeeping, unwrapped for the driver call
	srcRawObj, err := Get(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
	}
	srcObj := model.UnwrapObj(srcRawObj)
	srcDirPath := stdpath.Dir(srcPath)

	// Prefer the RenameResult variant so the cache entry can be updated
	// in place with the returned object.
	switch s := storage.(type) {
	case driver.RenameResult:
		var newObj model.Obj
		newObj, err = s.Rename(ctx, srcObj, dstName)
		if err == nil {
			if newObj != nil {
				updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				ClearCache(storage, srcDirPath)
			}
		}
	case driver.Rename:
		err = s.Rename(ctx, srcObj, dstName)
		if err == nil && !utils.IsBool(lazyCache...) {
			ClearCache(storage, srcDirPath)
		}
	default:
		return errs.NotImplement
	}
	return errors.WithStack(err)
}
|
|
|
|
// Copy Just copy file[s] in a storage.
// On success the destination directory's listing cache is updated: the new
// object is inserted when the driver returns it, otherwise the cache is
// cleared (unless lazyCache is true).
func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	srcObj, err := GetUnwrap(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
	}
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get dst dir")
	}

	// Prefer the CopyResult variant so the returned object can be inserted
	// into the destination's listing cache directly.
	switch s := storage.(type) {
	case driver.CopyResult:
		var newObj model.Obj
		newObj, err = s.Copy(ctx, srcObj, dstDir)
		if err == nil {
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				ClearCache(storage, dstDirPath)
			}
		}
	case driver.Copy:
		err = s.Copy(ctx, srcObj, dstDir)
		if err == nil && !utils.IsBool(lazyCache...) {
			ClearCache(storage, dstDirPath)
		}
	default:
		return errs.NotImplement
	}
	return errors.WithStack(err)
}
|
|
|
|
func Remove(ctx context.Context, storage driver.Driver, path string) error {
|
|
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
|
return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
|
}
|
|
path = utils.FixAndCleanPath(path)
|
|
rawObj, err := Get(ctx, storage, path)
|
|
if err != nil {
|
|
// if object not found, it's ok
|
|
if errs.IsObjectNotFound(err) {
|
|
log.Debugf("%s have been removed", path)
|
|
return nil
|
|
}
|
|
return errors.WithMessage(err, "failed to get object")
|
|
}
|
|
dirPath := stdpath.Dir(path)
|
|
|
|
switch s := storage.(type) {
|
|
case driver.Remove:
|
|
err = s.Remove(ctx, model.UnwrapObj(rawObj))
|
|
if err == nil {
|
|
delCacheObj(storage, dirPath, rawObj)
|
|
// clear folder cache recursively
|
|
if rawObj.IsDir() {
|
|
ClearCache(storage, path)
|
|
}
|
|
}
|
|
default:
|
|
return errs.NotImplement
|
|
}
|
|
return errors.WithStack(err)
|
|
}
|
|
|
|
// Put uploads file into dstDirPath on storage. An existing zero-size file
// at the destination is deleted first; with NoOverwriteUpload the existing
// file is renamed to a temporary name and restored on failure (or removed
// on success). The file streamer is always closed before returning. up may
// be nil. When lazyCache is true, the destination cache is not cleared
// after an upload that returned no object.
func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file model.FileStreamer, up driver.UpdateProgress, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	defer func() {
		if err := file.Close(); err != nil {
			log.Errorf("failed to close file streamer, %v", err)
		}
	}()
	// if file exist and size = 0, delete it
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	dstPath := stdpath.Join(dstDirPath, file.GetName())
	tempName := file.GetName() + ".alist_to_delete"
	tempPath := stdpath.Join(dstDirPath, tempName)
	fi, err := GetUnwrap(ctx, storage, dstPath)
	if err == nil {
		if fi.GetSize() == 0 {
			err = Remove(ctx, storage, dstPath)
			if err != nil {
				return errors.WithMessagef(err, "while uploading, failed remove existing file which size = 0")
			}
		} else if storage.Config().NoOverwriteUpload {
			// try to rename old obj out of the way; restored or deleted below
			err = Rename(ctx, storage, dstPath, tempName)
			if err != nil {
				return err
			}
		} else {
			// let the driver know it is overwriting an existing object
			file.SetExist(fi)
		}
	}
	err = MakeDir(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
	}
	parentDir, err := GetUnwrap(ctx, storage, dstDirPath)
	// this should not happen
	if err != nil {
		return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
	}
	// if up is nil, set a default to prevent panic
	if up == nil {
		up = func(p float64) {}
	}

	// Prefer the PutResult variant so the returned object can be inserted
	// into the destination's listing cache directly.
	switch s := storage.(type) {
	case driver.PutResult:
		var newObj model.Obj
		newObj, err = s.Put(ctx, parentDir, file, up)
		if err == nil {
			if newObj != nil {
				addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
			} else if !utils.IsBool(lazyCache...) {
				ClearCache(storage, dstDirPath)
			}
		}
	case driver.Put:
		err = s.Put(ctx, parentDir, file, up)
		if err == nil && !utils.IsBool(lazyCache...) {
			ClearCache(storage, dstDirPath)
		}
	default:
		return errs.NotImplement
	}
	log.Debugf("put file [%s] done", file.GetName())
	// Finish the NoOverwriteUpload dance started above.
	if storage.Config().NoOverwriteUpload && fi != nil && fi.GetSize() > 0 {
		if err != nil {
			// upload failed, recover old obj
			err := Rename(ctx, storage, tempPath, file.GetName())
			if err != nil {
				log.Errorf("failed recover old obj: %+v", err)
			}
		} else {
			// upload success, remove old obj
			err := Remove(ctx, storage, tempPath)
			if err != nil {
				return err
			} else {
				// drop any cached link for the replaced file
				key := Key(storage, stdpath.Join(dstDirPath, file.GetName()))
				linkCache.Del(key)
			}
		}
	}
	return errors.WithStack(err)
}
|