feat(archive): archive management (#7817)

* feat(archive): archive management
* fix(ftp-server): remove duplicate ReadAtSeeker implementation
* fix(archive): bad seeking of SeekableStream
* fix(archive): split internal and driver extraction API
* feat(archive): patch
* fix(shutdown): clear decompress upload tasks
* chore
* feat(archive): support .iso format
* chore

internal/archive/all.go (new file, +7)
@@ -0,0 +1,7 @@
package archive

import (
    _ "github.com/alist-org/alist/v3/internal/archive/archives"
    _ "github.com/alist-org/alist/v3/internal/archive/iso9660"
    _ "github.com/alist-org/alist/v3/internal/archive/zip"
)

internal/archive/archives/archives.go (new file, +126)
@@ -0,0 +1,126 @@
package archives

import (
    "github.com/alist-org/alist/v3/internal/archive/tool"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/alist-org/alist/v3/pkg/utils"
    "io"
    "io/fs"
    "os"
    stdpath "path"
    "strings"
)

type Archives struct {
}

func (_ *Archives) AcceptedExtensions() []string {
    return []string{
        ".br", ".bz2", ".gz", ".lz4", ".lz", ".sz", ".s2", ".xz", ".zz", ".zst", ".tar", ".rar", ".7z",
    }
}

func (_ *Archives) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
    fsys, err := getFs(ss, args)
    if err != nil {
        return nil, err
    }
    _, err = fsys.ReadDir(".")
    if err != nil {
        return nil, filterPassword(err)
    }
    return &model.ArchiveMetaInfo{
        Comment:   "",
        Encrypted: false,
    }, nil
}

func (_ *Archives) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
    fsys, err := getFs(ss, args.ArchiveArgs)
    if err != nil {
        return nil, err
    }
    innerPath := strings.TrimPrefix(args.InnerPath, "/")
    if innerPath == "" {
        innerPath = "."
    }
    obj, err := fsys.ReadDir(innerPath)
    if err != nil {
        return nil, filterPassword(err)
    }
    return utils.SliceConvert(obj, func(src os.DirEntry) (model.Obj, error) {
        info, err := src.Info()
        if err != nil {
            return nil, err
        }
        return toModelObj(info), nil
    })
}

func (_ *Archives) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
    fsys, err := getFs(ss, args.ArchiveArgs)
    if err != nil {
        return nil, 0, err
    }
    file, err := fsys.Open(strings.TrimPrefix(args.InnerPath, "/"))
    if err != nil {
        return nil, 0, filterPassword(err)
    }
    stat, err := file.Stat()
    if err != nil {
        return nil, 0, filterPassword(err)
    }
    return file, stat.Size(), nil
}

func (_ *Archives) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
    fsys, err := getFs(ss, args.ArchiveArgs)
    if err != nil {
        return err
    }
    isDir := false
    path := strings.TrimPrefix(args.InnerPath, "/")
    if path == "" {
        isDir = true
        path = "."
    } else {
        stat, err := fsys.Stat(path)
        if err != nil {
            return filterPassword(err)
        }
        if stat.IsDir() {
            isDir = true
            outputPath = stdpath.Join(outputPath, stat.Name())
            err = os.Mkdir(outputPath, 0700)
            if err != nil {
                return filterPassword(err)
            }
        }
    }
    if isDir {
        err = fs.WalkDir(fsys, path, func(p string, d fs.DirEntry, err error) error {
            if err != nil {
                return err
            }
            relPath := strings.TrimPrefix(p, path+"/")
            dstPath := stdpath.Join(outputPath, relPath)
            if d.IsDir() {
                err = os.MkdirAll(dstPath, 0700)
            } else {
                dir := stdpath.Dir(dstPath)
                err = decompress(fsys, p, dir, func(_ float64) {})
            }
            return err
        })
    } else {
        err = decompress(fsys, path, outputPath, up)
    }
    return filterPassword(err)
}

var _ tool.Tool = (*Archives)(nil)

func init() {
    tool.RegisterTool(&Archives{})
}

internal/archive/archives/utils.go (new file, +80)
@@ -0,0 +1,80 @@
package archives

import (
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/mholt/archives"
    "io"
    fs2 "io/fs"
    "os"
    stdpath "path"
    "strings"
)

func getFs(ss *stream.SeekableStream, args model.ArchiveArgs) (*archives.ArchiveFS, error) {
    reader, err := stream.NewReadAtSeeker(ss, 0)
    if err != nil {
        return nil, err
    }
    format, _, err := archives.Identify(ss.Ctx, ss.GetName(), reader)
    if err != nil {
        return nil, errs.UnknownArchiveFormat
    }
    extractor, ok := format.(archives.Extractor)
    if !ok {
        return nil, errs.UnknownArchiveFormat
    }
    switch f := format.(type) {
    case archives.SevenZip:
        f.Password = args.Password
    case archives.Rar:
        f.Password = args.Password
    }
    return &archives.ArchiveFS{
        Stream:  io.NewSectionReader(reader, 0, ss.GetSize()),
        Format:  extractor,
        Context: ss.Ctx,
    }, nil
}

func toModelObj(file os.FileInfo) *model.Object {
    return &model.Object{
        Name:     file.Name(),
        Size:     file.Size(),
        Modified: file.ModTime(),
        IsFolder: file.IsDir(),
    }
}

func filterPassword(err error) error {
    if err != nil && strings.Contains(err.Error(), "password") {
        return errs.WrongArchivePassword
    }
    return err
}

func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgress) error {
    rc, err := fsys.Open(filePath)
    if err != nil {
        return err
    }
    defer rc.Close()
    stat, err := rc.Stat()
    if err != nil {
        return err
    }
    f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(f, &stream.ReaderUpdatingProgress{
        Reader: &stream.SimpleReaderWithSize{
            Reader: rc,
            Size:   stat.Size(),
        },
        UpdateProgress: up,
    })
    return err
}

internal/archive/iso9660/iso9660.go (new file, +96)
@@ -0,0 +1,96 @@
package iso9660

import (
    "github.com/alist-org/alist/v3/internal/archive/tool"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/kdomanski/iso9660"
    "io"
    "os"
    stdpath "path"
)

type ISO9660 struct {
}

func (t *ISO9660) AcceptedExtensions() []string {
    return []string{".iso"}
}

func (t *ISO9660) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
    return &model.ArchiveMetaInfo{
        Comment:   "",
        Encrypted: false,
    }, nil
}

func (t *ISO9660) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
    img, err := getImage(ss)
    if err != nil {
        return nil, err
    }
    dir, err := getObj(img, args.InnerPath)
    if err != nil {
        return nil, err
    }
    if !dir.IsDir() {
        return nil, errs.NotFolder
    }
    children, err := dir.GetChildren()
    if err != nil {
        return nil, err
    }
    ret := make([]model.Obj, 0, len(children))
    for _, child := range children {
        ret = append(ret, toModelObj(child))
    }
    return ret, nil
}

func (t *ISO9660) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
    img, err := getImage(ss)
    if err != nil {
        return nil, 0, err
    }
    obj, err := getObj(img, args.InnerPath)
    if err != nil {
        return nil, 0, err
    }
    if obj.IsDir() {
        return nil, 0, errs.NotFile
    }
    return io.NopCloser(obj.Reader()), obj.Size(), nil
}

func (t *ISO9660) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
    img, err := getImage(ss)
    if err != nil {
        return err
    }
    obj, err := getObj(img, args.InnerPath)
    if err != nil {
        return err
    }
    if obj.IsDir() {
        if args.InnerPath != "/" {
            outputPath = stdpath.Join(outputPath, obj.Name())
            if err = os.MkdirAll(outputPath, 0700); err != nil {
                return err
            }
        }
        var children []*iso9660.File
        if children, err = obj.GetChildren(); err == nil {
            err = decompressAll(children, outputPath)
        }
    } else {
        err = decompress(obj, outputPath, up)
    }
    return err
}

var _ tool.Tool = (*ISO9660)(nil)

func init() {
    tool.RegisterTool(&ISO9660{})
}

internal/archive/iso9660/utils.go (new file, +100)
@@ -0,0 +1,100 @@
package iso9660

import (
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/kdomanski/iso9660"
    "io"
    "os"
    stdpath "path"
    "strings"
)

func getImage(ss *stream.SeekableStream) (*iso9660.Image, error) {
    reader, err := stream.NewReadAtSeeker(ss, 0)
    if err != nil {
        return nil, err
    }
    return iso9660.OpenImage(reader)
}

func getObj(img *iso9660.Image, path string) (*iso9660.File, error) {
    obj, err := img.RootDir()
    if err != nil {
        return nil, err
    }
    if path == "/" {
        return obj, nil
    }
    paths := strings.Split(strings.TrimPrefix(path, "/"), "/")
    for _, p := range paths {
        if !obj.IsDir() {
            return nil, errs.ObjectNotFound
        }
        children, err := obj.GetChildren()
        if err != nil {
            return nil, err
        }
        exist := false
        for _, child := range children {
            if child.Name() == p {
                obj = child
                exist = true
                break
            }
        }
        if !exist {
            return nil, errs.ObjectNotFound
        }
    }
    return obj, nil
}

func toModelObj(file *iso9660.File) model.Obj {
    return &model.Object{
        Name:     file.Name(),
        Size:     file.Size(),
        Modified: file.ModTime(),
        IsFolder: file.IsDir(),
    }
}

func decompress(f *iso9660.File, path string, up model.UpdateProgress) error {
    file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    if err != nil {
        return err
    }
    defer file.Close()
    _, err = io.Copy(file, &stream.ReaderUpdatingProgress{
        Reader: &stream.SimpleReaderWithSize{
            Reader: f.Reader(),
            Size:   f.Size(),
        },
        UpdateProgress: up,
    })
    return err
}

func decompressAll(children []*iso9660.File, path string) error {
    for _, child := range children {
        if child.IsDir() {
            nextChildren, err := child.GetChildren()
            if err != nil {
                return err
            }
            nextPath := stdpath.Join(path, child.Name())
            if err = os.MkdirAll(nextPath, 0700); err != nil {
                return err
            }
            if err = decompressAll(nextChildren, nextPath); err != nil {
                return err
            }
        } else {
            if err := decompress(child, path, func(_ float64) {}); err != nil {
                return err
            }
        }
    }
    return nil
}

internal/archive/tool/base.go (new file, +15)
@@ -0,0 +1,15 @@
package tool

import (
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "io"
)

type Tool interface {
    AcceptedExtensions() []string
    GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error)
    List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error)
    Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error)
    Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error
}

internal/archive/tool/utils.go (new file, +23)
@@ -0,0 +1,23 @@
package tool

import (
    "github.com/alist-org/alist/v3/internal/errs"
)

var (
    Tools = make(map[string]Tool)
)

func RegisterTool(tool Tool) {
    for _, ext := range tool.AcceptedExtensions() {
        Tools[ext] = tool
    }
}

func GetArchiveTool(ext string) (Tool, error) {
    t, ok := Tools[ext]
    if !ok {
        return nil, errs.UnknownArchiveFormat
    }
    return t, nil
}
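
The registry keys each tool by file extension (including the dot), so call sites resolve a tool with a single map lookup and then drive it through the Tool interface. A minimal sketch of that flow, not part of the commit: it assumes an already-opened *stream.SeekableStream, and the name/password values are placeholders. The blank import of internal/archive runs the init() registrations from all.go above.

package example

import (
    "fmt"
    stdpath "path"

    _ "github.com/alist-org/alist/v3/internal/archive" // registers zip, iso9660, archives tools
    "github.com/alist-org/alist/v3/internal/archive/tool"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
)

func listArchiveRoot(ss *stream.SeekableStream, name, password string) error {
    // The extension is the registry key, e.g. ".zip".
    t, err := tool.GetArchiveTool(stdpath.Ext(name))
    if err != nil {
        return err // errs.UnknownArchiveFormat for unregistered extensions
    }
    objs, err := t.List(ss, model.ArchiveInnerArgs{
        ArchiveArgs: model.ArchiveArgs{Password: password},
        InnerPath:   "/",
    })
    if err != nil {
        return err
    }
    for _, o := range objs {
        fmt.Println(o.GetName(), o.GetSize())
    }
    return nil
}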

internal/archive/zip/utils.go (new file, +156)
@@ -0,0 +1,156 @@
package zip

import (
    "bytes"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/saintfish/chardet"
    "github.com/yeka/zip"
    "golang.org/x/text/encoding"
    "golang.org/x/text/encoding/charmap"
    "golang.org/x/text/encoding/japanese"
    "golang.org/x/text/encoding/korean"
    "golang.org/x/text/encoding/simplifiedchinese"
    "golang.org/x/text/encoding/traditionalchinese"
    "golang.org/x/text/encoding/unicode"
    "golang.org/x/text/encoding/unicode/utf32"
    "golang.org/x/text/transform"
    "io"
    "os"
    stdpath "path"
    "strings"
)

func toModelObj(file os.FileInfo) *model.Object {
    return &model.Object{
        Name:     decodeName(file.Name()),
        Size:     file.Size(),
        Modified: file.ModTime(),
        IsFolder: file.IsDir(),
    }
}

func decompress(file *zip.File, filePath, outputPath, password string) error {
    targetPath := outputPath
    dir, base := stdpath.Split(filePath)
    if dir != "" {
        targetPath = stdpath.Join(targetPath, dir)
        err := os.MkdirAll(targetPath, 0700)
        if err != nil {
            return err
        }
    }
    if base != "" {
        err := _decompress(file, targetPath, password, func(_ float64) {})
        if err != nil {
            return err
        }
    }
    return nil
}

func _decompress(file *zip.File, targetPath, password string, up model.UpdateProgress) error {
    if file.IsEncrypted() {
        file.SetPassword(password)
    }
    rc, err := file.Open()
    if err != nil {
        return err
    }
    defer rc.Close()
    f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(f, &stream.ReaderUpdatingProgress{
        Reader: &stream.SimpleReaderWithSize{
            Reader: rc,
            Size:   file.FileInfo().Size(),
        },
        UpdateProgress: up,
    })
    if err != nil {
        return err
    }
    return nil
}

func filterPassword(err error) error {
    if err != nil && strings.Contains(err.Error(), "password") {
        return errs.WrongArchivePassword
    }
    return err
}

func decodeName(name string) string {
    b := []byte(name)
    detector := chardet.NewTextDetector()
    result, err := detector.DetectBest(b)
    if err != nil {
        return name
    }
    enc := getEncoding(result.Charset)
    if enc == nil {
        return name
    }
    i := bytes.NewReader(b)
    decoder := transform.NewReader(i, enc.NewDecoder())
    content, _ := io.ReadAll(decoder)
    return string(content)
}

func getEncoding(name string) (enc encoding.Encoding) {
    switch name {
    case "UTF-16BE":
        enc = unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)
    case "UTF-16LE":
        enc = unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
    case "UTF-32BE":
        enc = utf32.UTF32(utf32.BigEndian, utf32.IgnoreBOM)
    case "UTF-32LE":
        enc = utf32.UTF32(utf32.LittleEndian, utf32.IgnoreBOM)
    case "ISO-8859-1":
        enc = charmap.ISO8859_1
    case "ISO-8859-2":
        enc = charmap.ISO8859_2
    case "ISO-8859-3":
        enc = charmap.ISO8859_3
    case "ISO-8859-4":
        enc = charmap.ISO8859_4
    case "ISO-8859-5":
        enc = charmap.ISO8859_5
    case "ISO-8859-6":
        enc = charmap.ISO8859_6
    case "ISO-8859-7":
        enc = charmap.ISO8859_7
    case "ISO-8859-8":
        enc = charmap.ISO8859_8
    case "ISO-8859-8-I":
        enc = charmap.ISO8859_8I
    case "ISO-8859-9":
        enc = charmap.ISO8859_9
    case "windows-1251":
        enc = charmap.Windows1251
    case "windows-1256":
        enc = charmap.Windows1256
    case "KOI8-R":
        enc = charmap.KOI8R
    case "Shift_JIS":
        enc = japanese.ShiftJIS
    case "GB-18030":
        enc = simplifiedchinese.GB18030
    case "EUC-JP":
        enc = japanese.EUCJP
    case "EUC-KR":
        enc = korean.EUCKR
    case "Big5":
        enc = traditionalchinese.Big5
    case "ISO-2022-JP":
        enc = japanese.ISO2022JP
    default:
        enc = nil
    }
    return
}

internal/archive/zip/zip.go (new file, +174)
@@ -0,0 +1,174 @@
package zip

import (
    "github.com/alist-org/alist/v3/internal/archive/tool"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/yeka/zip"
    "io"
    "os"
    stdpath "path"
    "strings"
)

type Zip struct {
}

func (_ *Zip) AcceptedExtensions() []string {
    return []string{".zip"}
}

func (_ *Zip) GetMeta(ss *stream.SeekableStream, args model.ArchiveArgs) (model.ArchiveMeta, error) {
    reader, err := stream.NewReadAtSeeker(ss, 0)
    if err != nil {
        return nil, err
    }
    zipReader, err := zip.NewReader(reader, ss.GetSize())
    if err != nil {
        return nil, err
    }
    encrypted := false
    for _, file := range zipReader.File {
        if file.IsEncrypted() {
            encrypted = true
            break
        }
    }
    return &model.ArchiveMetaInfo{
        Comment:   zipReader.Comment,
        Encrypted: encrypted,
    }, nil
}

func (_ *Zip) List(ss *stream.SeekableStream, args model.ArchiveInnerArgs) ([]model.Obj, error) {
    reader, err := stream.NewReadAtSeeker(ss, 0)
    if err != nil {
        return nil, err
    }
    zipReader, err := zip.NewReader(reader, ss.GetSize())
    if err != nil {
        return nil, err
    }
    if args.InnerPath == "/" {
        ret := make([]model.Obj, 0)
        passVerified := false
        for _, file := range zipReader.File {
            if !passVerified && file.IsEncrypted() {
                file.SetPassword(args.Password)
                rc, e := file.Open()
                if e != nil {
                    return nil, filterPassword(e)
                }
                _ = rc.Close()
                passVerified = true
            }
            name := decodeName(file.Name)
            if strings.Contains(strings.TrimSuffix(name, "/"), "/") {
                continue
            }
            ret = append(ret, toModelObj(file.FileInfo()))
        }
        return ret, nil
    } else {
        innerPath := strings.TrimPrefix(args.InnerPath, "/") + "/"
        ret := make([]model.Obj, 0)
        exist := false
        for _, file := range zipReader.File {
            name := decodeName(file.Name)
            if name == innerPath {
                exist = true
            }
            dir := stdpath.Dir(strings.TrimSuffix(name, "/")) + "/"
            if dir != innerPath {
                continue
            }
            ret = append(ret, toModelObj(file.FileInfo()))
        }
        if !exist {
            return nil, errs.ObjectNotFound
        }
        return ret, nil
    }
}

func (_ *Zip) Extract(ss *stream.SeekableStream, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
    reader, err := stream.NewReadAtSeeker(ss, 0)
    if err != nil {
        return nil, 0, err
    }
    zipReader, err := zip.NewReader(reader, ss.GetSize())
    if err != nil {
        return nil, 0, err
    }
    innerPath := strings.TrimPrefix(args.InnerPath, "/")
    for _, file := range zipReader.File {
        if decodeName(file.Name) == innerPath {
            if file.IsEncrypted() {
                file.SetPassword(args.Password)
            }
            r, e := file.Open()
            if e != nil {
                return nil, 0, e
            }
            return r, file.FileInfo().Size(), nil
        }
    }
    return nil, 0, errs.ObjectNotFound
}

func (_ *Zip) Decompress(ss *stream.SeekableStream, outputPath string, args model.ArchiveInnerArgs, up model.UpdateProgress) error {
    reader, err := stream.NewReadAtSeeker(ss, 0)
    if err != nil {
        return err
    }
    zipReader, err := zip.NewReader(reader, ss.GetSize())
    if err != nil {
        return err
    }
    if args.InnerPath == "/" {
        for i, file := range zipReader.File {
            name := decodeName(file.Name)
            err = decompress(file, name, outputPath, args.Password)
            if err != nil {
                return err
            }
            up(float64(i+1) * 100.0 / float64(len(zipReader.File)))
        }
    } else {
        innerPath := strings.TrimPrefix(args.InnerPath, "/")
        innerBase := stdpath.Base(innerPath)
        createdBaseDir := false
        for _, file := range zipReader.File {
            name := decodeName(file.Name)
            if name == innerPath {
                err = _decompress(file, outputPath, args.Password, up)
                if err != nil {
                    return err
                }
                break
            } else if strings.HasPrefix(name, innerPath+"/") {
                targetPath := stdpath.Join(outputPath, innerBase)
                if !createdBaseDir {
                    err = os.Mkdir(targetPath, 0700)
                    if err != nil {
                        return err
                    }
                    createdBaseDir = true
                }
                restPath := strings.TrimPrefix(name, innerPath+"/")
                err = decompress(file, restPath, targetPath, args.Password)
                if err != nil {
                    return err
                }
            }
        }
    }
    return nil
}

var _ tool.Tool = (*Zip)(nil)

func init() {
    tool.RegisterTool(&Zip{})
}

@@ -26,13 +26,14 @@ func initUser() {
 	if errors.Is(err, gorm.ErrRecordNotFound) {
 		salt := random.String(16)
 		admin = &model.User{
-			Username:   "admin",
-			Salt:       salt,
-			PwdHash:    model.TwoHashPwd(adminPassword, salt),
-			Role:       model.ADMIN,
-			BasePath:   "/",
-			Authn:      "[]",
-			Permission: 0xFF, // 0(can see hidden) - 7(can remove)
+			Username:   "admin",
+			Salt:       salt,
+			PwdHash:    model.TwoHashPwd(adminPassword, salt),
+			Role:       model.ADMIN,
+			BasePath:   "/",
+			Authn:      "[]",
+			// 0(can see hidden) - 7(can remove) & 12(can read archives) - 13(can decompress archives)
+			Permission: 0x30FF,
 		}
 		if err := op.CreateUser(admin); err != nil {
 			panic(err)

@@ -5,18 +5,20 @@ import (
 	"github.com/alist-org/alist/v3/pkg/utils"
 )
 
-// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage)
-// This patch is written to help users upgrading from older version better adapt to PR AlistGo/alist#7705.
+// GrantAdminPermissions gives admin Permission 0(can see hidden) - 9(webdav manage) and
+// 12(can read archives) - 13(can decompress archives)
+// This patch is written to help users upgrading from older version better adapt to PR AlistGo/alist#7705 and
+// PR AlistGo/alist#7817.
 func GrantAdminPermissions() {
 	admin, err := op.GetAdmin()
 	if err != nil {
 		utils.Log.Errorf("Cannot grant permissions to admin: %v", err)
 	}
-	if (admin.Permission & 0x3FF) == 0 {
-		admin.Permission |= 0x3FF
-	}
-	err = op.UpdateUser(admin)
-	if err != nil {
-		utils.Log.Errorf("Cannot grant permissions to admin: %v", err)
+	if (admin.Permission & 0x33FF) == 0 {
+		admin.Permission |= 0x33FF
+		err = op.UpdateUser(admin)
+		if err != nil {
+			utils.Log.Errorf("Cannot grant permissions to admin: %v", err)
+		}
 	}
 }
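
For reference, both masks follow the bit layout documented in internal/model/user.go later in this diff: 0x30FF is bits 0-7 (the pre-existing basic permissions) plus bits 12-13 (the new archive permissions), and 0x33FF additionally covers bits 8-9 (webdav); bits 10-11 (ftp/sftp) stay unset in both. A quick sanity check of the arithmetic:

package main

import "fmt"

// Permission masks spelled out bit by bit (layout from internal/model/user.go).
const (
    basic    = 0x00FF // bits 0-7: see hidden ... remove
    webdav   = 0x0300 // bits 8-9: webdav read / manage
    archives = 0x3000 // bits 12-13: read archives / decompress archives
)

func main() {
    fmt.Printf("%#x\n", basic|archives)        // 0x30ff: new admin default in initUser
    fmt.Printf("%#x\n", basic|webdav|archives) // 0x33ff: upgrade mask in GrantAdminPermissions
}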

@@ -16,4 +16,6 @@ func InitTaskManager() {
 	if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted
 		CleanTempDir()
 	}
+	fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(conf.Conf.Tasks.Decompress.Workers), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry))
+	fs.ArchiveContentUploadTaskManager.Manager = tache.NewManager[*fs.ArchiveContentUploadTask](tache.WithWorks(conf.Conf.Tasks.DecompressUpload.Workers), tache.WithMaxRetry(conf.Conf.Tasks.DecompressUpload.MaxRetry)) //decompress upload will not support persist
 }

@@ -53,10 +53,12 @@ type TaskConfig struct {
 }
 
 type TasksConfig struct {
-	Download TaskConfig `json:"download" envPrefix:"DOWNLOAD_"`
-	Transfer TaskConfig `json:"transfer" envPrefix:"TRANSFER_"`
-	Upload   TaskConfig `json:"upload" envPrefix:"UPLOAD_"`
-	Copy     TaskConfig `json:"copy" envPrefix:"COPY_"`
+	Download         TaskConfig `json:"download" envPrefix:"DOWNLOAD_"`
+	Transfer         TaskConfig `json:"transfer" envPrefix:"TRANSFER_"`
+	Upload           TaskConfig `json:"upload" envPrefix:"UPLOAD_"`
+	Copy             TaskConfig `json:"copy" envPrefix:"COPY_"`
+	Decompress       TaskConfig `json:"decompress" envPrefix:"DECOMPRESS_"`
+	DecompressUpload TaskConfig `json:"decompress_upload" envPrefix:"DECOMPRESS_UPLOAD_"`
 }
 
 type Cors struct {

@@ -169,6 +171,15 @@ func DefaultConfig() *Config {
 			MaxRetry: 2,
 			// TaskPersistant: true,
 		},
+		Decompress: TaskConfig{
+			Workers:  5,
+			MaxRetry: 2,
+			// TaskPersistant: true,
+		},
+		DecompressUpload: TaskConfig{
+			Workers:  5,
+			MaxRetry: 2,
+		},
 	},
 	Cors: Cors{
 		AllowOrigins: []string{"*"},

@@ -123,7 +123,43 @@ type PutURLResult interface {
 	PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error)
 }
 
-type UpdateProgress func(percentage float64)
+type ArchiveReader interface {
+	// GetArchiveMeta gets the meta-info of an archive.
+	// Returns errs.WrongArchivePassword if the meta-info is itself encrypted but the provided password is wrong or empty.
+	// Returns errs.NotImplement to fall back to the internal archive tools, e.g. in the following cases:
+	// 1. the driver does not support the format of the archive, but an internal tool may
+	// 2. handling archives is a VIP feature, but the driver does not have VIP access
+	GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error)
+	// ListArchive lists the children of model.ArchiveInnerArgs.InnerPath in the archive.
+	// Returns errs.NotImplement to fall back to the internal archive tools.
+	// Returns errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree.
+	ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error)
+	// Extract gets a url/filepath/reader of a file in the archive.
+	// Returns errs.NotImplement to fall back to the internal archive tools.
+	Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error)
+}
+
+type ArchiveGetter interface {
+	// ArchiveGet gets a file by its inner path.
+	// Returns errs.NotImplement to fall back to the internal archive tools.
+	// Returns errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree.
+	ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error)
+}
+
+type ArchiveDecompress interface {
+	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error
+}
+
+type ArchiveDecompressResult interface {
+	// ArchiveDecompress decompresses an archive.
+	// When args.PutIntoNewDir is true, the new sub-folder should be named the same as the archive but without
+	// the extension, and only the newly created folder should be returned;
+	// when it is false, each decompressed obj from the root path of the archive should be returned.
+	// Returns errs.NotImplement to fall back to the internal archive tools.
+	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
+}
+
+type UpdateProgress model.UpdateProgress
 
 type Progress struct {
 	Total int64

@@ -19,6 +19,10 @@ var (
 	StorageNotFound  = errors.New("storage not found")
 	StreamIncomplete = errors.New("upload/download stream incomplete, possible network issue")
 	StreamPeekFail   = errors.New("StreamPeekFail")
+
+	UnknownArchiveFormat      = errors.New("unknown archive format")
+	WrongArchivePassword      = errors.New("wrong archive password")
+	DriverExtractNotSupported = errors.New("driver extraction not supported")
 )
 
 // NewErr wrap constant error with an extra message

internal/fs/archive.go (new file, +395)
@@ -0,0 +1,395 @@
package fs

import (
    "context"
    stderrors "errors"
    "fmt"
    "github.com/alist-org/alist/v3/internal/archive/tool"
    "github.com/alist-org/alist/v3/internal/conf"
    "github.com/alist-org/alist/v3/internal/driver"
    "github.com/alist-org/alist/v3/internal/errs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/op"
    "github.com/alist-org/alist/v3/internal/stream"
    "github.com/alist-org/alist/v3/internal/task"
    "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
    "github.com/xhofe/tache"
    "io"
    "math/rand"
    "mime"
    "net/http"
    "os"
    stdpath "path"
    "path/filepath"
    "strconv"
    "strings"
    "time"
)

type ArchiveDownloadTask struct {
    task.TaskExtension
    model.ArchiveDecompressArgs
    status       string
    SrcObjPath   string
    DstDirPath   string
    srcStorage   driver.Driver
    dstStorage   driver.Driver
    SrcStorageMp string
    DstStorageMp string
    Tool         tool.Tool
}

func (t *ArchiveDownloadTask) GetName() string {
    return fmt.Sprintf("decompress [%s](%s)[%s] to [%s](%s) with password <%s>", t.SrcStorageMp, t.SrcObjPath,
        t.InnerPath, t.DstStorageMp, t.DstDirPath, t.Password)
}

func (t *ArchiveDownloadTask) GetStatus() string {
    return t.status
}

func (t *ArchiveDownloadTask) Run() error {
    t.ClearEndTime()
    t.SetStartTime(time.Now())
    defer func() { t.SetEndTime(time.Now()) }()
    uploadTask, err := t.RunWithoutPushUploadTask()
    if err != nil {
        return err
    }
    ArchiveContentUploadTaskManager.Add(uploadTask)
    return nil
}

func (t *ArchiveDownloadTask) RunWithoutPushUploadTask() (*ArchiveContentUploadTask, error) {
    var err error
    if t.srcStorage == nil {
        t.srcStorage, err = op.GetStorageByMountPath(t.SrcStorageMp)
    }
    l, srcObj, err := op.Link(t.Ctx(), t.srcStorage, t.SrcObjPath, model.LinkArgs{
        Header: http.Header{},
    })
    if err != nil {
        return nil, err
    }
    fs := stream.FileStream{
        Obj: srcObj,
        Ctx: t.Ctx(),
    }
    ss, err := stream.NewSeekableStream(fs, l)
    if err != nil {
        return nil, err
    }
    defer func() {
        if err := ss.Close(); err != nil {
            log.Errorf("failed to close file streamer, %v", err)
        }
    }()
    var decompressUp model.UpdateProgress
    if t.CacheFull {
        t.SetTotalBytes(srcObj.GetSize())
        t.status = "getting src object"
        _, err = ss.CacheFullInTempFileAndUpdateProgress(t.SetProgress)
        if err != nil {
            return nil, err
        }
        decompressUp = func(_ float64) {}
    } else {
        decompressUp = t.SetProgress
    }
    t.status = "walking and decompressing"
    dir, err := os.MkdirTemp(conf.Conf.TempDir, "dir-*")
    if err != nil {
        return nil, err
    }
    err = t.Tool.Decompress(ss, dir, t.ArchiveInnerArgs, decompressUp)
    if err != nil {
        return nil, err
    }
    baseName := strings.TrimSuffix(srcObj.GetName(), stdpath.Ext(srcObj.GetName()))
    uploadTask := &ArchiveContentUploadTask{
        TaskExtension: task.TaskExtension{
            Creator: t.GetCreator(),
        },
        ObjName:      baseName,
        InPlace:      !t.PutIntoNewDir,
        FilePath:     dir,
        DstDirPath:   t.DstDirPath,
        dstStorage:   t.dstStorage,
        DstStorageMp: t.DstStorageMp,
    }
    return uploadTask, nil
}

var ArchiveDownloadTaskManager *tache.Manager[*ArchiveDownloadTask]

type ArchiveContentUploadTask struct {
    task.TaskExtension
    status       string
    ObjName      string
    InPlace      bool
    FilePath     string
    DstDirPath   string
    dstStorage   driver.Driver
    DstStorageMp string
    finalized    bool
}

func (t *ArchiveContentUploadTask) GetName() string {
    return fmt.Sprintf("upload %s to [%s](%s)", t.ObjName, t.DstStorageMp, t.DstDirPath)
}

func (t *ArchiveContentUploadTask) GetStatus() string {
    return t.status
}

func (t *ArchiveContentUploadTask) Run() error {
    t.ClearEndTime()
    t.SetStartTime(time.Now())
    defer func() { t.SetEndTime(time.Now()) }()
    return t.RunWithNextTaskCallback(func(nextTsk *ArchiveContentUploadTask) error {
        ArchiveContentUploadTaskManager.Add(nextTsk)
        return nil
    })
}

func (t *ArchiveContentUploadTask) RunWithNextTaskCallback(f func(nextTsk *ArchiveContentUploadTask) error) error {
    var err error
    if t.dstStorage == nil {
        t.dstStorage, err = op.GetStorageByMountPath(t.DstStorageMp)
    }
    info, err := os.Stat(t.FilePath)
    if err != nil {
        return err
    }
    if info.IsDir() {
        t.status = "src object is dir, listing objs"
        nextDstPath := t.DstDirPath
        if !t.InPlace {
            nextDstPath = stdpath.Join(nextDstPath, t.ObjName)
            err = op.MakeDir(t.Ctx(), t.dstStorage, nextDstPath)
            if err != nil {
                return err
            }
        }
        entries, err := os.ReadDir(t.FilePath)
        if err != nil {
            return err
        }
        var es error
        for _, entry := range entries {
            var nextFilePath string
            if entry.IsDir() {
                nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "dir-")
            } else {
                nextFilePath, err = moveToTempPath(stdpath.Join(t.FilePath, entry.Name()), "file-")
            }
            if err != nil {
                es = stderrors.Join(es, err)
                continue
            }
            err = f(&ArchiveContentUploadTask{
                TaskExtension: task.TaskExtension{
                    Creator: t.GetCreator(),
                },
                ObjName:      entry.Name(),
                InPlace:      false,
                FilePath:     nextFilePath,
                DstDirPath:   nextDstPath,
                dstStorage:   t.dstStorage,
                DstStorageMp: t.DstStorageMp,
            })
            if err != nil {
                es = stderrors.Join(es, err)
            }
        }
        if es != nil {
            return es
        }
    } else {
        t.SetTotalBytes(info.Size())
        file, err := os.Open(t.FilePath)
        if err != nil {
            return err
        }
        fs := &stream.FileStream{
            Obj: &model.Object{
                Name:     t.ObjName,
                Size:     info.Size(),
                Modified: time.Now(),
            },
            Mimetype:     mime.TypeByExtension(filepath.Ext(t.ObjName)),
            WebPutAsTask: true,
            Reader:       file,
        }
        fs.Closers.Add(file)
        t.status = "uploading"
        err = op.Put(t.Ctx(), t.dstStorage, t.DstDirPath, fs, t.SetProgress, true)
        if err != nil {
            return err
        }
    }
    t.deleteSrcFile()
    return nil
}

func (t *ArchiveContentUploadTask) Cancel() {
    t.TaskExtension.Cancel()
    t.deleteSrcFile()
}

func (t *ArchiveContentUploadTask) deleteSrcFile() {
    if !t.finalized {
        _ = os.RemoveAll(t.FilePath)
        t.finalized = true
    }
}

func moveToTempPath(path, prefix string) (string, error) {
    newPath, err := genTempFileName(prefix)
    if err != nil {
        return "", err
    }
    err = os.Rename(path, newPath)
    if err != nil {
        return "", err
    }
    return newPath, nil
}

func genTempFileName(prefix string) (string, error) {
    retry := 0
    for retry < 10000 {
        newPath := stdpath.Join(conf.Conf.TempDir, prefix+strconv.FormatUint(uint64(rand.Uint32()), 10))
        if _, err := os.Stat(newPath); err != nil {
            if os.IsNotExist(err) {
                return newPath, nil
            } else {
                return "", err
            }
        }
        retry++
    }
    return "", errors.New("failed to generate temp-file name: too many retries")
}

type archiveContentUploadTaskManagerType struct {
    *tache.Manager[*ArchiveContentUploadTask]
}

func (m *archiveContentUploadTaskManagerType) Remove(id string) {
    if t, ok := m.GetByID(id); ok {
        t.deleteSrcFile()
        m.Manager.Remove(id)
    }
}

func (m *archiveContentUploadTaskManagerType) RemoveAll() {
    tasks := m.GetAll()
    for _, t := range tasks {
        m.Remove(t.GetID())
    }
}

func (m *archiveContentUploadTaskManagerType) RemoveByState(state ...tache.State) {
    tasks := m.GetByState(state...)
    for _, t := range tasks {
        m.Remove(t.GetID())
    }
}

func (m *archiveContentUploadTaskManagerType) RemoveByCondition(condition func(task *ArchiveContentUploadTask) bool) {
    tasks := m.GetByCondition(condition)
    for _, t := range tasks {
        m.Remove(t.GetID())
    }
}

var ArchiveContentUploadTaskManager = &archiveContentUploadTaskManagerType{
    Manager: nil,
}

func archiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
    storage, actualPath, err := op.GetStorageAndActualPath(path)
    if err != nil {
        return nil, errors.WithMessage(err, "failed get storage")
    }
    return op.GetArchiveMeta(ctx, storage, actualPath, args)
}

func archiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
    storage, actualPath, err := op.GetStorageAndActualPath(path)
    if err != nil {
        return nil, errors.WithMessage(err, "failed get storage")
    }
    return op.ListArchive(ctx, storage, actualPath, args)
}

func archiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) {
    srcStorage, srcObjActualPath, err := op.GetStorageAndActualPath(srcObjPath)
    if err != nil {
        return nil, errors.WithMessage(err, "failed get src storage")
    }
    dstStorage, dstDirActualPath, err := op.GetStorageAndActualPath(dstDirPath)
    if err != nil {
        return nil, errors.WithMessage(err, "failed get dst storage")
    }
    if srcStorage.GetStorage() == dstStorage.GetStorage() {
        err = op.ArchiveDecompress(ctx, srcStorage, srcObjActualPath, dstDirActualPath, args, lazyCache...)
        if !errors.Is(err, errs.NotImplement) {
            return nil, err
        }
    }
    ext := stdpath.Ext(srcObjActualPath)
    t, err := tool.GetArchiveTool(ext)
    if err != nil {
        return nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
    }
    taskCreator, _ := ctx.Value("user").(*model.User)
    tsk := &ArchiveDownloadTask{
        TaskExtension: task.TaskExtension{
            Creator: taskCreator,
        },
        ArchiveDecompressArgs: args,
        srcStorage:            srcStorage,
        dstStorage:            dstStorage,
        SrcObjPath:            srcObjActualPath,
        DstDirPath:            dstDirActualPath,
        SrcStorageMp:          srcStorage.GetStorage().MountPath,
        DstStorageMp:          dstStorage.GetStorage().MountPath,
        Tool:                  t,
    }
    if ctx.Value(conf.NoTaskKey) != nil {
        uploadTask, err := tsk.RunWithoutPushUploadTask()
        if err != nil {
            return nil, errors.WithMessagef(err, "failed download [%s]", srcObjPath)
        }
        defer uploadTask.deleteSrcFile()
        var callback func(t *ArchiveContentUploadTask) error
        callback = func(t *ArchiveContentUploadTask) error {
            e := t.RunWithNextTaskCallback(callback)
            t.deleteSrcFile()
            return e
        }
        return nil, uploadTask.RunWithNextTaskCallback(callback)
    } else {
        ArchiveDownloadTaskManager.Add(tsk)
        return tsk, nil
    }
}

func archiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
    storage, actualPath, err := op.GetStorageAndActualPath(path)
    if err != nil {
        return nil, nil, errors.WithMessage(err, "failed get storage")
    }
    return op.DriverExtract(ctx, storage, actualPath, args)
}

func archiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
    storage, actualPath, err := op.GetStorageAndActualPath(path)
    if err != nil {
        return nil, 0, errors.WithMessage(err, "failed get storage")
    }
    return op.InternalExtract(ctx, storage, actualPath, args)
}

@@ -7,6 +7,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/op"
 	"github.com/alist-org/alist/v3/internal/task"
 	log "github.com/sirupsen/logrus"
+	"io"
 )
 
 // the param named path of functions in this package is a mount path

@@ -109,6 +110,46 @@ func PutAsTask(ctx context.Context, dstDirPath string, file model.FileStreamer)
 	return t, err
 }
 
+func ArchiveMeta(ctx context.Context, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
+	meta, err := archiveMeta(ctx, path, args)
+	if err != nil {
+		log.Errorf("failed get archive meta %s: %+v", path, err)
+	}
+	return meta, err
+}
+
+func ArchiveList(ctx context.Context, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
+	objs, err := archiveList(ctx, path, args)
+	if err != nil {
+		log.Errorf("failed list archive [%s]%s: %+v", path, args.InnerPath, err)
+	}
+	return objs, err
+}
+
+func ArchiveDecompress(ctx context.Context, srcObjPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) (task.TaskExtensionInfo, error) {
+	t, err := archiveDecompress(ctx, srcObjPath, dstDirPath, args, lazyCache...)
+	if err != nil {
+		log.Errorf("failed decompress [%s]%s: %+v", srcObjPath, args.InnerPath, err)
+	}
+	return t, err
+}
+
+func ArchiveDriverExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
+	l, obj, err := archiveDriverExtract(ctx, path, args)
+	if err != nil {
+		log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err)
+	}
+	return l, obj, err
+}
+
+func ArchiveInternalExtract(ctx context.Context, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
+	l, obj, err := archiveInternalExtract(ctx, path, args)
+	if err != nil {
+		log.Errorf("failed extract [%s]%s: %+v", path, args.InnerPath, err)
+	}
+	return l, obj, err
+}
+
 type GetStoragesArgs struct {
 }
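
Taken together with archiveDecompress in internal/fs/archive.go above, a caller would queue a decompression roughly as follows. This is a sketch, not code from the commit: the paths, inner path and password are placeholders, and the task creator is read from the "user" context value as shown above.

package example

import (
    "context"

    "github.com/alist-org/alist/v3/internal/fs"
    "github.com/alist-org/alist/v3/internal/model"
    "github.com/alist-org/alist/v3/internal/task"
)

func queueDecompress(ctx context.Context) (task.TaskExtensionInfo, error) {
    // Returns the queued ArchiveDownloadTask, unless conf.NoTaskKey is set
    // in ctx, in which case the whole pipeline runs synchronously.
    return fs.ArchiveDecompress(ctx, "/storage/a.zip", "/storage/out",
        model.ArchiveDecompressArgs{
            ArchiveInnerArgs: model.ArchiveInnerArgs{
                ArchiveArgs: model.ArchiveArgs{Password: "secret"},
                InnerPath:   "/", // decompress the whole archive
            },
            CacheFull:     true, // fully cache the archive to a temp file first
            PutIntoNewDir: true, // put results into a folder named after the archive
        })
}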

internal/model/archive.go (new file, +49)
@@ -0,0 +1,49 @@
package model

type ObjTree interface {
    Obj
    GetChildren() []ObjTree
}

type ObjectTree struct {
    Object
    Children []ObjTree
}

func (t *ObjectTree) GetChildren() []ObjTree {
    return t.Children
}

type ArchiveMeta interface {
    GetComment() string
    // IsEncrypted reports whether accessing the content of the archive requires a password.
    // GetArchiveMeta should return errs.WrongArchivePassword if the meta-info is itself encrypted
    // and the provided password is empty.
    IsEncrypted() bool
    // GetTree directly returns the full folder structure.
    // Returns nil if the folder structure should be acquired by calling driver.ArchiveReader.ListArchive.
    GetTree() []ObjTree
}

type ArchiveMetaInfo struct {
    Comment   string
    Encrypted bool
    Tree      []ObjTree
}

func (m *ArchiveMetaInfo) GetComment() string {
    return m.Comment
}

func (m *ArchiveMetaInfo) IsEncrypted() bool {
    return m.Encrypted
}

func (m *ArchiveMetaInfo) GetTree() []ObjTree {
    return m.Tree
}

type ArchiveMetaProvider struct {
    ArchiveMeta
    DriverProviding bool
}
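
A driver or tool that already knows the complete listing can populate Tree, so that op.ListArchive can answer inner-path queries from the cached meta (via getChildrenFromArchiveMeta in internal/op/archive.go below) instead of re-opening the archive. A small sketch with made-up entries:

package example

import "github.com/alist-org/alist/v3/internal/model"

// exampleMeta returns an ArchiveMeta carrying the full folder structure;
// a non-nil GetTree() lets listings be resolved without touching the
// archive again. Names and sizes here are illustrative only.
func exampleMeta() model.ArchiveMeta {
    return &model.ArchiveMetaInfo{
        Comment:   "",
        Encrypted: false,
        Tree: []model.ObjTree{
            &model.ObjectTree{
                Object: model.Object{Name: "docs", IsFolder: true},
                Children: []model.ObjTree{
                    &model.ObjectTree{Object: model.Object{Name: "readme.txt", Size: 42}},
                },
            },
        },
    }
}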

@@ -48,6 +48,33 @@ type FsOtherArgs struct {
 	Method string      `json:"method" form:"method"`
 	Data   interface{} `json:"data" form:"data"`
 }
 
+type ArchiveArgs struct {
+	Password string
+	LinkArgs
+}
+
+type ArchiveInnerArgs struct {
+	ArchiveArgs
+	InnerPath string
+}
+
+type ArchiveMetaArgs struct {
+	ArchiveArgs
+	Refresh bool
+}
+
+type ArchiveListArgs struct {
+	ArchiveInnerArgs
+	Refresh bool
+}
+
+type ArchiveDecompressArgs struct {
+	ArchiveInnerArgs
+	CacheFull     bool
+	PutIntoNewDir bool
+}
+
 type RangeReadCloserIF interface {
 	RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
 	utils.ClosersIF

@@ -48,8 +48,11 @@ type FileStreamer interface {
 	RangeRead(http_range.Range) (io.Reader, error)
 	//for a non-seekable Stream, if Read is called, this function won't work
 	CacheFullInTempFile() (File, error)
+	CacheFullInTempFileAndUpdateProgress(up UpdateProgress) (File, error)
 }
 
+type UpdateProgress func(percentage float64)
+
 type URL interface {
 	URL() string
 }

@@ -44,6 +44,8 @@ type User struct {
 	// 9: webdav write
 	// 10: ftp/sftp login and read
 	// 11: ftp/sftp write
+	// 12: can read archives
+	// 13: can decompress archives
 	Permission int32  `json:"permission"`
 	OtpSecret  string `json:"-"`
 	SsoID      string `json:"sso_id"` // unique by sso platform

@@ -127,6 +129,14 @@ func (u *User) CanFTPManage() bool {
 	return (u.Permission>>11)&1 == 1
 }
 
+func (u *User) CanReadArchives() bool {
+	return (u.Permission>>12)&1 == 1
+}
+
+func (u *User) CanDecompress() bool {
+	return (u.Permission>>13)&1 == 1
+}
+
 func (u *User) JoinPath(reqPath string) (string, error) {
 	return utils.JoinBasePath(u.BasePath, reqPath)
 }

internal/op/archive.go (new file, +424; the diff view below is truncated)
@@ -0,0 +1,424 @@
|
||||
package op
|
||||
|
||||
import (
|
||||
"context"
|
||||
stderrors "errors"
|
||||
"github.com/alist-org/alist/v3/internal/archive/tool"
|
||||
"github.com/alist-org/alist/v3/internal/stream"
|
||||
"io"
|
||||
stdpath "path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/Xhofe/go-cache"
|
||||
"github.com/alist-org/alist/v3/internal/driver"
|
||||
"github.com/alist-org/alist/v3/internal/errs"
|
||||
"github.com/alist-org/alist/v3/internal/model"
|
||||
"github.com/alist-org/alist/v3/pkg/singleflight"
|
||||
"github.com/alist-org/alist/v3/pkg/utils"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var archiveMetaCache = cache.NewMemCache(cache.WithShards[*model.ArchiveMetaProvider](64))
|
||||
var archiveMetaG singleflight.Group[*model.ArchiveMetaProvider]
|
||||
|
||||
func GetArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (*model.ArchiveMetaProvider, error) {
|
||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
||||
return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
key := Key(storage, path)
|
||||
if !args.Refresh {
|
||||
if meta, ok := archiveMetaCache.Get(key); ok {
|
||||
log.Debugf("use cache when get %s archive meta", path)
|
||||
return meta, nil
|
||||
}
|
||||
}
|
||||
fn := func() (*model.ArchiveMetaProvider, error) {
|
||||
_, m, err := getArchiveMeta(ctx, storage, path, args)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get %s archive met: %+v", path, err)
|
||||
}
|
||||
if !storage.Config().NoCache {
|
||||
archiveMetaCache.Set(key, m, cache.WithEx[*model.ArchiveMetaProvider](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
if storage.Config().OnlyLocal {
|
||||
meta, err := fn()
|
||||
return meta, err
|
||||
}
|
||||
meta, err, _ := archiveMetaG.Do(key, fn)
|
||||
return meta, err
|
||||
}
|
||||
|
||||
func getArchiveToolAndStream(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (model.Obj, tool.Tool, *stream.SeekableStream, error) {
|
||||
l, obj, err := Link(ctx, storage, path, args)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] link", path)
|
||||
}
|
||||
ext := stdpath.Ext(obj.GetName())
|
||||
t, err := tool.GetArchiveTool(ext)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] archive tool", ext)
|
||||
}
|
||||
ss, err := stream.NewSeekableStream(stream.FileStream{Ctx: ctx, Obj: obj}, l)
|
||||
if err != nil {
|
||||
return nil, nil, nil, errors.WithMessagef(err, "failed get [%s] stream", path)
|
||||
}
|
||||
return obj, t, ss, nil
|
||||
}
|
||||
|
||||
func getArchiveMeta(ctx context.Context, storage driver.Driver, path string, args model.ArchiveMetaArgs) (model.Obj, *model.ArchiveMetaProvider, error) {
|
||||
storageAr, ok := storage.(driver.ArchiveReader)
|
||||
if ok {
|
||||
obj, err := GetUnwrap(ctx, storage, path)
|
||||
if err != nil {
|
||||
return nil, nil, errors.WithMessage(err, "failed to get file")
|
||||
}
|
||||
if obj.IsDir() {
|
||||
return nil, nil, errors.WithStack(errs.NotFile)
|
||||
}
|
||||
meta, err := storageAr.GetArchiveMeta(ctx, obj, args.ArchiveArgs)
|
||||
if !errors.Is(err, errs.NotImplement) {
|
||||
return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: true}, err
|
||||
}
|
||||
}
|
||||
obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := ss.Close(); err != nil {
|
||||
log.Errorf("failed to close file streamer, %v", err)
|
||||
}
|
||||
}()
|
||||
meta, err := t.GetMeta(ss, args.ArchiveArgs)
|
||||
return obj, &model.ArchiveMetaProvider{ArchiveMeta: meta, DriverProviding: false}, err
|
||||
}
|
||||
|
||||
var archiveListCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
|
||||
var archiveListG singleflight.Group[[]model.Obj]
|
||||
|
||||
func ListArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) ([]model.Obj, error) {
|
||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
||||
return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
||||
}
|
||||
path = utils.FixAndCleanPath(path)
|
||||
metaKey := Key(storage, path)
|
||||
key := stdpath.Join(metaKey, args.InnerPath)
|
||||
if !args.Refresh {
|
||||
if files, ok := archiveListCache.Get(key); ok {
|
||||
log.Debugf("use cache when list archive [%s]%s", path, args.InnerPath)
|
||||
return files, nil
|
||||
}
|
||||
if meta, ok := archiveMetaCache.Get(metaKey); ok {
|
||||
log.Debugf("use meta cache when list archive [%s]%s", path, args.InnerPath)
|
||||
return getChildrenFromArchiveMeta(meta, args.InnerPath)
|
||||
}
|
||||
}
|
||||
objs, err, _ := archiveListG.Do(key, func() ([]model.Obj, error) {
|
||||
obj, files, err := listArchive(ctx, storage, path, args)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to list archive [%s]%s: %+v", path, args.InnerPath, err)
|
||||
}
|
||||
// set path
|
||||
for _, f := range files {
|
||||
if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && obj.GetPath() != "" {
|
||||
s.SetPath(stdpath.Join(obj.GetPath(), args.InnerPath, f.GetName()))
|
||||
}
|
||||
}
|
||||
// warp obj name
|
||||
model.WrapObjsName(files)
|
||||
// sort objs
|
||||
if storage.Config().LocalSort {
|
||||
model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
|
||||
}
|
||||
model.ExtractFolder(files, storage.GetStorage().ExtractFolder)
|
||||
if !storage.Config().NoCache {
|
||||
if len(files) > 0 {
|
||||
log.Debugf("set cache: %s => %+v", key, files)
|
||||
archiveListCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
||||
} else {
|
||||
log.Debugf("del cache: %s", key)
|
||||
archiveListCache.Del(key)
|
||||
}
|
||||
}
|
||||
return files, nil
|
||||
})
|
||||
return objs, err
|
||||
}
|
||||
|
||||
func _listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) {
	storageAr, ok := storage.(driver.ArchiveReader)
	if ok {
		obj, err := GetUnwrap(ctx, storage, path)
		if err != nil {
			return nil, nil, errors.WithMessage(err, "failed to get file")
		}
		if obj.IsDir() {
			return nil, nil, errors.WithStack(errs.NotFile)
		}
		files, err := storageAr.ListArchive(ctx, obj, args.ArchiveInnerArgs)
		if !errors.Is(err, errs.NotImplement) {
			return obj, files, err
		}
	}
	obj, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, nil, err
	}
	defer func() {
		if err := ss.Close(); err != nil {
			log.Errorf("failed to close file streamer, %v", err)
		}
	}()
	files, err := t.List(ss, args.ArchiveInnerArgs)
	return obj, files, err
}

func listArchive(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, []model.Obj, error) {
	obj, files, err := _listArchive(ctx, storage, path, args)
	if errors.Is(err, errs.NotSupport) {
		var meta model.ArchiveMeta
		meta, err = GetArchiveMeta(ctx, storage, path, model.ArchiveMetaArgs{
			ArchiveArgs: args.ArchiveArgs,
			Refresh:     args.Refresh,
		})
		if err != nil {
			return nil, nil, err
		}
		files, err = getChildrenFromArchiveMeta(meta, args.InnerPath)
		if err != nil {
			return nil, nil, err
		}
	}
	if err == nil && obj == nil {
		obj, err = GetUnwrap(ctx, storage, path)
	}
	if err != nil {
		return nil, nil, err
	}
	return obj, files, err
}

func getChildrenFromArchiveMeta(meta model.ArchiveMeta, innerPath string) ([]model.Obj, error) {
	obj := meta.GetTree()
	if obj == nil {
		return nil, errors.WithStack(errs.NotImplement)
	}
	dirs := splitPath(innerPath)
	for _, dir := range dirs {
		var next model.ObjTree
		for _, c := range obj {
			if c.GetName() == dir {
				next = c
				break
			}
		}
		if next == nil {
			return nil, errors.WithStack(errs.ObjectNotFound)
		}
		if !next.IsDir() || next.GetChildren() == nil {
			return nil, errors.WithStack(errs.NotFolder)
		}
		obj = next.GetChildren()
	}
	return utils.SliceConvert(obj, func(src model.ObjTree) (model.Obj, error) {
		return src, nil
	})
}

func splitPath(path string) []string {
	var parts []string
	for {
		dir, file := stdpath.Split(path)
		if file == "" {
			break
		}
		parts = append([]string{file}, parts...)
		path = strings.TrimSuffix(dir, "/")
	}
	return parts
}

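splitPath peels components off the inner path from the right using stdpath.Split. A standalone check of the same logic:

package main

import (
	"fmt"
	stdpath "path"
	"strings"
)

// splitPath mirrors the helper above: it peels the last element off the
// path until nothing is left, collecting components in order.
func splitPath(path string) []string {
	var parts []string
	for {
		dir, file := stdpath.Split(path)
		if file == "" {
			break
		}
		parts = append([]string{file}, parts...)
		path = strings.TrimSuffix(dir, "/")
	}
	return parts
}

func main() {
	fmt.Println(splitPath("a/b/c.txt")) // [a b c.txt]
	fmt.Println(splitPath(""))          // []
}
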
func ArchiveGet(ctx context.Context, storage driver.Driver, path string, args model.ArchiveListArgs) (model.Obj, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	path = utils.FixAndCleanPath(path)
	af, err := GetUnwrap(ctx, storage, path)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed to get file")
	}
	if af.IsDir() {
		return nil, nil, errors.WithStack(errs.NotFile)
	}
	if g, ok := storage.(driver.ArchiveGetter); ok {
		obj, err := g.ArchiveGet(ctx, af, args.ArchiveInnerArgs)
		if err == nil {
			return af, model.WrapObjName(obj), nil
		}
	}

	if utils.PathEqual(args.InnerPath, "/") {
		return af, &model.ObjWrapName{
			Name: RootName,
			Obj: &model.Object{
				Name:     af.GetName(),
				Path:     af.GetPath(),
				ID:       af.GetID(),
				Size:     af.GetSize(),
				Modified: af.ModTime(),
				IsFolder: true,
			},
		}, nil
	}

	innerDir, name := stdpath.Split(args.InnerPath)
	args.InnerPath = strings.TrimSuffix(innerDir, "/")
	files, err := ListArchive(ctx, storage, path, args)
	if err != nil {
		return nil, nil, errors.WithMessage(err, "failed to get parent list")
	}
	for _, f := range files {
		if f.GetName() == name {
			return af, f, nil
		}
	}
	return nil, nil, errors.WithStack(errs.ObjectNotFound)
}

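For a non-root inner path, ArchiveGet splits off the base name and resolves it by listing the parent directory inside the archive. The split step in isolation:

package main

import (
	"fmt"
	stdpath "path"
	"strings"
)

func main() {
	// Resolve "/docs/readme.txt" by listing "/docs" and matching "readme.txt".
	innerPath := "/docs/readme.txt"
	innerDir, name := stdpath.Split(innerPath)
	parent := strings.TrimSuffix(innerDir, "/")
	fmt.Println(parent, name) // /docs readme.txt
}
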
type extractLink struct {
	Link *model.Link
	Obj  model.Obj
}

var extractCache = cache.NewMemCache(cache.WithShards[*extractLink](16))
var extractG singleflight.Group[*extractLink]

func DriverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*model.Link, model.Obj, error) {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	key := stdpath.Join(Key(storage, path), args.InnerPath)
	if link, ok := extractCache.Get(key); ok {
		return link.Link, link.Obj, nil
	} else if link, ok := extractCache.Get(key + ":" + args.IP); ok {
		return link.Link, link.Obj, nil
	}
	fn := func() (*extractLink, error) {
		link, err := driverExtract(ctx, storage, path, args)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to extract archive")
		}
		if link.Link.Expiration != nil {
			if link.Link.IPCacheKey {
				key = key + ":" + args.IP
			}
			extractCache.Set(key, link, cache.WithEx[*extractLink](*link.Link.Expiration))
		}
		return link, nil
	}
	if storage.Config().OnlyLocal {
		link, err := fn()
		if err != nil {
			return nil, nil, err
		}
		return link.Link, link.Obj, nil
	}
	link, err, _ := extractG.Do(key, fn)
	if err != nil {
		return nil, nil, err
	}
	return link.Link, link.Obj, err
}

func driverExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (*extractLink, error) {
	storageAr, ok := storage.(driver.ArchiveReader)
	if !ok {
		return nil, errs.DriverExtractNotSupported
	}
	archiveFile, extracted, err := ArchiveGet(ctx, storage, path, model.ArchiveListArgs{
		ArchiveInnerArgs: args,
		Refresh:          false,
	})
	if err != nil {
		return nil, errors.WithMessage(err, "failed to get file")
	}
	if extracted.IsDir() {
		return nil, errors.WithStack(errs.NotFile)
	}
	link, err := storageAr.Extract(ctx, archiveFile, args)
	return &extractLink{Link: link, Obj: extracted}, err
}

type streamWithParent struct {
	rc     io.ReadCloser
	parent *stream.SeekableStream
}

func (s *streamWithParent) Read(p []byte) (int, error) {
	return s.rc.Read(p)
}

func (s *streamWithParent) Close() error {
	err1 := s.rc.Close()
	err2 := s.parent.Close()
	return stderrors.Join(err1, err2)
}

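streamWithParent ties the extracted entry's reader to the lifetime of the underlying archive stream, surfacing both Close errors via errors.Join (Go 1.20+). A minimal sketch of the pattern with stand-in closers:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// closerPair closes an inner reader and its parent stream together and
// reports both failures, mirroring streamWithParent above.
type closerPair struct {
	io.Reader
	inner, parent io.Closer
}

func (c *closerPair) Close() error {
	return errors.Join(c.inner.Close(), c.parent.Close())
}

type noisyCloser struct{ name string }

func (n noisyCloser) Close() error { return fmt.Errorf("%s: close failed", n.name) }

func main() {
	p := &closerPair{
		Reader: strings.NewReader("data"),
		inner:  noisyCloser{"entry"},
		parent: noisyCloser{"archive"},
	}
	fmt.Println(p.Close()) // prints both: "entry: close failed" and "archive: close failed"
}
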
func InternalExtract(ctx context.Context, storage driver.Driver, path string, args model.ArchiveInnerArgs) (io.ReadCloser, int64, error) {
	_, t, ss, err := getArchiveToolAndStream(ctx, storage, path, args.LinkArgs)
	if err != nil {
		return nil, 0, err
	}
	rc, size, err := t.Extract(ss, args)
	if err != nil {
		if e := ss.Close(); e != nil {
			log.Errorf("failed to close file streamer, %v", e)
		}
		return nil, 0, err
	}
	return &streamWithParent{rc: rc, parent: ss}, size, nil
}

func ArchiveDecompress(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, args model.ArchiveDecompressArgs, lazyCache ...bool) error {
	if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
		return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
	}
	srcPath = utils.FixAndCleanPath(srcPath)
	dstDirPath = utils.FixAndCleanPath(dstDirPath)
	srcObj, err := GetUnwrap(ctx, storage, srcPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get src object")
	}
	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
	if err != nil {
		return errors.WithMessage(err, "failed to get dst dir")
	}

	switch s := storage.(type) {
	case driver.ArchiveDecompressResult:
		var newObjs []model.Obj
		newObjs, err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
		if err == nil {
			if len(newObjs) > 0 {
				for _, newObj := range newObjs {
					addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
				}
			} else if !utils.IsBool(lazyCache...) {
				ClearCache(storage, dstDirPath)
			}
		}
	case driver.ArchiveDecompress:
		err = s.ArchiveDecompress(ctx, srcObj, dstDir, args)
		if err == nil && !utils.IsBool(lazyCache...) {
			ClearCache(storage, dstDirPath)
		}
	default:
		return errs.NotImplement
	}
	return errors.WithStack(err)
}

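ArchiveDecompress dispatches on which optional capability interface the driver implements, preferring the variant that reports the newly created objects so the directory cache can be patched instead of cleared. A standalone sketch of this interface-upgrade pattern, with simplified stand-in interfaces (not the real driver ones):

package main

import (
	"errors"
	"fmt"
)

// Two optional capabilities a driver may implement, loosely mirroring
// driver.ArchiveDecompressResult and driver.ArchiveDecompress above.
type decompressor interface{ Decompress(src, dst string) error }
type decompressorWithResult interface {
	Decompress(src, dst string) ([]string, error)
}

func decompress(drv any, src, dst string) error {
	switch s := drv.(type) {
	case decompressorWithResult:
		newObjs, err := s.Decompress(src, dst)
		if err == nil {
			fmt.Println("patch cache with:", newObjs)
		}
		return err
	case decompressor:
		return s.Decompress(src, dst) // caller clears the cache instead
	default:
		return errors.New("not implemented")
	}
}

type basicDriver struct{}

func (basicDriver) Decompress(src, dst string) error { return nil }

func main() {
	fmt.Println(decompress(basicDriver{}, "/a.zip", "/out")) // <nil>
	fmt.Println(decompress(struct{}{}, "/a.zip", "/out"))    // not implemented
}

Case order matters here: a type switch takes the first matching case, so the more capable interface must be listed first.
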
@ -6,6 +6,7 @@ import (
	"errors"
	"fmt"
	"io"
	"math"
	"os"

	"github.com/alist-org/alist/v3/internal/errs"
@ -60,6 +61,8 @@ func (f *FileStream) Close() error {
		err2 = os.RemoveAll(f.tmpFile.Name())
		if err2 != nil {
			err2 = errs.NewErr(err2, "failed to remove tmpFile [%s]", f.tmpFile.Name())
		} else {
			f.tmpFile = nil
		}
	}

@ -92,6 +95,26 @@ func (f *FileStream) CacheFullInTempFile() (model.File, error) {
	return f.tmpFile, nil
}

func (f *FileStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) {
	if f.tmpFile != nil {
		return f.tmpFile, nil
	}
	if file, ok := f.Reader.(model.File); ok {
		return file, nil
	}
	tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{
		Reader:         f,
		UpdateProgress: up,
	}, f.GetSize())
	if err != nil {
		return nil, err
	}
	f.Add(tmpF)
	f.tmpFile = tmpF
	f.Reader = tmpF
	return f.tmpFile, nil
}

const InMemoryBufMaxSize = 10 // Megabytes
const InMemoryBufMaxSizeBytes = InMemoryBufMaxSize * 1024 * 1024

@ -247,7 +270,202 @@ func (ss *SeekableStream) CacheFullInTempFile() (model.File, error) {
	return ss.tmpFile, nil
}

func (ss *SeekableStream) CacheFullInTempFileAndUpdateProgress(up model.UpdateProgress) (model.File, error) {
	if ss.tmpFile != nil {
		return ss.tmpFile, nil
	}
	if ss.mFile != nil {
		return ss.mFile, nil
	}
	tmpF, err := utils.CreateTempFile(&ReaderUpdatingProgress{
		Reader:         ss,
		UpdateProgress: up,
	}, ss.GetSize())
	if err != nil {
		return nil, err
	}
	ss.Add(tmpF)
	ss.tmpFile = tmpF
	ss.Reader = tmpF
	return ss.tmpFile, nil
}

func (f *FileStream) SetTmpFile(r *os.File) {
	f.Reader = r
	f.tmpFile = r
}

type ReaderWithSize interface {
	io.Reader
	GetSize() int64
}

type SimpleReaderWithSize struct {
	io.Reader
	Size int64
}

func (r *SimpleReaderWithSize) GetSize() int64 {
	return r.Size
}

type ReaderUpdatingProgress struct {
	Reader ReaderWithSize
	model.UpdateProgress
	offset int
}

func (r *ReaderUpdatingProgress) Read(p []byte) (n int, err error) {
	n, err = r.Reader.Read(p)
	r.offset += n
	r.UpdateProgress(math.Min(100.0, float64(r.offset)/float64(r.Reader.GetSize())*100.0))
	return n, err
}

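ReaderUpdatingProgress reports progress as a percentage of the declared size while bytes stream through it. A self-contained sketch of the same wrapper:

package main

import (
	"fmt"
	"io"
	"math"
	"strings"
)

// progressReader mirrors ReaderUpdatingProgress above: it counts bytes as
// they pass through and reports a 0-100 percentage to a callback.
type progressReader struct {
	r      io.Reader
	size   int64
	offset int64
	update func(percent float64)
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.offset += int64(n)
	p.update(math.Min(100, float64(p.offset)/float64(p.size)*100))
	return n, err
}

func main() {
	pr := &progressReader{
		r:      strings.NewReader("0123456789"),
		size:   10,
		update: func(pct float64) { fmt.Printf("%.0f%% ", pct) },
	}
	buf := make([]byte, 4)
	var err error
	for err == nil {
		_, err = pr.Read(buf)
	}
	fmt.Println() // 40% 80% 100% 100% (the final EOF read reports 100% again)
}
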
type SStreamReadAtSeeker interface {
	model.File
	GetRawStream() *SeekableStream
}

type readerCur struct {
	reader io.Reader
	cur    int64
}

type RangeReadReadAtSeeker struct {
	ss        *SeekableStream
	masterOff int64
	readers   []*readerCur
}

type FileReadAtSeeker struct {
	ss *SeekableStream
}

func NewReadAtSeeker(ss *SeekableStream, offset int64, forceRange ...bool) (SStreamReadAtSeeker, error) {
	if ss.mFile != nil {
		_, err := ss.mFile.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
		return &FileReadAtSeeker{ss: ss}, nil
	}
	var r io.Reader
	var err error
	if offset != 0 || utils.IsBool(forceRange...) {
		if offset < 0 || offset > ss.GetSize() {
			return nil, errors.New("offset out of range")
		}
		r, err = ss.RangeRead(http_range.Range{Start: offset, Length: -1})
		if err != nil {
			return nil, err
		}
		if rc, ok := r.(io.Closer); ok {
			ss.Closers.Add(rc)
		}
	} else {
		r = ss
	}
	return &RangeReadReadAtSeeker{
		ss:        ss,
		masterOff: offset,
		readers:   []*readerCur{{reader: r, cur: offset}},
	}, nil
}

func (r *RangeReadReadAtSeeker) GetRawStream() *SeekableStream {
	return r.ss
}

func (r *RangeReadReadAtSeeker) getReaderAtOffset(off int64) (*readerCur, error) {
	for _, reader := range r.readers {
		if reader.cur == off {
			return reader, nil
		}
	}
	reader, err := r.ss.RangeRead(http_range.Range{Start: off, Length: -1})
	if err != nil {
		return nil, err
	}
	if c, ok := reader.(io.Closer); ok {
		r.ss.Closers.Add(c)
	}
	rc := &readerCur{reader: reader, cur: off}
	r.readers = append(r.readers, rc)
	return rc, nil
}

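getReaderAtOffset keeps one sequential reader per current position, so a ReadAt that continues exactly where a previous read stopped reuses the open ranged stream instead of issuing a fresh range request. A toy version of the caching idea (error handling elided for brevity):

package main

import (
	"fmt"
	"io"
	"strings"
)

// offsetReaders caches one sequential reader per current offset, the same
// idea as RangeReadReadAtSeeker.getReaderAtOffset.
type offsetReaders struct {
	open    func(off int64) io.Reader // e.g. an HTTP Range request
	readers map[int64]io.Reader
}

func (o *offsetReaders) ReadAt(p []byte, off int64) (int, error) {
	r, ok := o.readers[off]
	if !ok {
		r = o.open(off)
	}
	delete(o.readers, off)
	n, err := io.ReadFull(r, p)
	o.readers[off+int64(n)] = r // reader now sits at the new offset
	return n, err
}

func main() {
	data := "hello, ranged world"
	opens := 0
	o := &offsetReaders{
		open: func(off int64) io.Reader {
			opens++
			return strings.NewReader(data[off:])
		},
		readers: map[int64]io.Reader{},
	}
	buf := make([]byte, 5)
	o.ReadAt(buf, 0) // opens reader #1 at offset 0
	o.ReadAt(buf, 5) // reuses reader #1, now positioned at 5
	o.ReadAt(buf, 0) // offset 0 again: opens reader #2
	fmt.Println("opens:", opens) // opens: 2
}
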
func (r *RangeReadReadAtSeeker) ReadAt(p []byte, off int64) (int, error) {
	rc, err := r.getReaderAtOffset(off)
	if err != nil {
		return 0, err
	}
	num := 0
	for num < len(p) {
		n, err := rc.reader.Read(p[num:])
		rc.cur += int64(n)
		num += n
		if err != nil {
			return num, err
		}
	}
	return num, nil
}

func (r *RangeReadReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		if offset == 0 {
			return r.masterOff, nil
		}
		offset += r.masterOff
	case io.SeekEnd:
		offset += r.ss.GetSize()
	default:
		return 0, errs.NotSupport
	}
	if offset < 0 {
		return r.masterOff, errors.New("invalid seek: negative position")
	}
	if offset > r.ss.GetSize() {
		return r.masterOff, io.EOF
	}
	r.masterOff = offset
	return offset, nil
}

func (r *RangeReadReadAtSeeker) Read(p []byte) (n int, err error) {
	rc, err := r.getReaderAtOffset(r.masterOff)
	if err != nil {
		return 0, err
	}
	n, err = rc.reader.Read(p)
	rc.cur += int64(n)
	r.masterOff += int64(n)
	return n, err
}

func (r *RangeReadReadAtSeeker) Close() error {
	return r.ss.Close()
}

func (f *FileReadAtSeeker) GetRawStream() *SeekableStream {
	return f.ss
}

func (f *FileReadAtSeeker) Read(p []byte) (n int, err error) {
	return f.ss.mFile.Read(p)
}

func (f *FileReadAtSeeker) ReadAt(p []byte, off int64) (n int, err error) {
	return f.ss.mFile.ReadAt(p, off)
}

func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
	return f.ss.mFile.Seek(offset, whence)
}

func (f *FileReadAtSeeker) Close() error {
	return f.ss.Close()
}

20
internal/task/manager.go
Normal file
@ -0,0 +1,20 @@
package task

import "github.com/xhofe/tache"

type Manager[T tache.Task] interface {
	Add(task T)
	Cancel(id string)
	CancelAll()
	CancelByCondition(condition func(task T) bool)
	GetAll() []T
	GetByID(id string) (T, bool)
	GetByState(state ...tache.State) []T
	GetByCondition(condition func(task T) bool) []T
	Remove(id string)
	RemoveAll()
	RemoveByState(state ...tache.State)
	RemoveByCondition(condition func(task T) bool)
	Retry(id string)
	RetryAllFailed()
}
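The exact method set of tache.Task is not shown in this diff, so the sketch below substitutes a local Task interface to illustrate how a map-backed implementation of a few Manager methods might look:

package main

import "fmt"

// A toy stand-in for the Manager interface above, specialized to a local
// Task type instead of tache.Task.
type Task interface{ ID() string }

type memManager[T Task] struct{ tasks map[string]T }

func newMemManager[T Task]() *memManager[T] {
	return &memManager[T]{tasks: map[string]T{}}
}

func (m *memManager[T]) Add(t T)          { m.tasks[t.ID()] = t }
func (m *memManager[T]) Remove(id string) { delete(m.tasks, id) }

func (m *memManager[T]) GetByID(id string) (T, bool) {
	t, ok := m.tasks[id]
	return t, ok
}

func (m *memManager[T]) GetByCondition(cond func(T) bool) []T {
	var out []T
	for _, t := range m.tasks {
		if cond(t) {
			out = append(out, t)
		}
	}
	return out
}

type upload struct{ id string }

func (u upload) ID() string { return u.id }

func main() {
	m := newMemManager[upload]()
	m.Add(upload{id: "1"})
	_, ok := m.GetByID("1")
	fmt.Println(ok) // true
}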