feat: misc improvements to upload/copy/hash (#5045)

general: add createTime/updateTime support in webdav and some drivers
general: add hash support in some drivers
general: cross-storage rapid-upload support
general: enhance upload to avoid local temp file if possible
general: replace ReadSeekCloser with the File interface to speed up stream operations
feat(aliyun_open): same as above
feat(crypt): add hack for 139cloud

Close #4934 
Close #4819 

baidu_netdisk needs to improve the upload code to support rapid-upload
Authored by Sean on 2023-08-27 21:14:23 +08:00; committed by GitHub
parent 9b765ef696
commit a3748af772
77 changed files with 1731 additions and 615 deletions
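For reference, the per-driver shape of the createTime/hash support reads roughly like the sketch below. exampleFile and its fields are hypothetical; model.Obj, utils.HashInfo and utils.NewHashInfo are the interfaces and helpers this commit touches.

package example

import (
	"time"

	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/pkg/utils"
)

// exampleFile is a hypothetical driver file type; the methods below are the
// surface model.Obj now expects, including the new CreateTime and GetHash.
type exampleFile struct {
	name  string
	size  int64
	ctime time.Time
	mtime time.Time
	md5   string
}

var _ model.Obj = (*exampleFile)(nil)

func (f *exampleFile) GetID() string           { return "" }
func (f *exampleFile) GetPath() string         { return "" }
func (f *exampleFile) GetName() string         { return f.name }
func (f *exampleFile) GetSize() int64          { return f.size }
func (f *exampleFile) IsDir() bool             { return false }
func (f *exampleFile) ModTime() time.Time      { return f.mtime }
func (f *exampleFile) CreateTime() time.Time   { return f.ctime }
func (f *exampleFile) GetHash() utils.HashInfo { return utils.NewHashInfo(utils.MD5, f.md5) }

Drivers that cannot surface a hash return the zero utils.HashInfo{}, as several of the diffs below do.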

View File

@ -2,13 +2,13 @@ package _115
import (
"context"
"os"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
"strings"
)
type Pan115 struct {
@ -38,15 +38,15 @@ func (d *Pan115) List(ctx context.Context, dir model.Obj, args model.ListArgs) (
if err != nil && !errors.Is(err, driver115.ErrNotExist) {
return nil, err
}
return utils.SliceConvert(files, func(src driver115.File) (model.Obj, error) {
return src, nil
return utils.SliceConvert(files, func(src FileObj) (model.Obj, error) {
return &src, nil
})
}
func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
downloadInfo, err := d.client.
SetUserAgent(driver115.UA115Browser).
Download(file.(driver115.File).PickCode)
Download(file.(*FileObj).PickCode)
// restore the desktop user agent for upload
d.client.SetUserAgent(driver115.UA115Desktop)
if err != nil {
@ -83,15 +83,67 @@ func (d *Pan115) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
var (
fastInfo *driver115.UploadInitResp
dirID = dstDir.GetID()
)
if ok, err := d.client.UploadAvailable(); err != nil || !ok {
return err
}
if stream.GetSize() > d.client.UploadMetaInfo.SizeLimit {
return driver115.ErrUploadTooLarge
}
//if digest, err = d.client.GetDigestResult(stream); err != nil {
// return err
//}
const PreHashSize int64 = 128 * utils.KB
hashSize := PreHashSize
if stream.GetSize() < PreHashSize {
hashSize = stream.GetSize()
}
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: hashSize})
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
return d.client.UploadFastOrByMultipart(dstDir.GetID(), stream.GetName(), stream.GetSize(), tempFile)
preHash, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return err
}
preHash = strings.ToUpper(preHash)
fullHash := stream.GetHash().GetHash(utils.SHA1)
if len(fullHash) <= 0 {
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
fullHash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return err
}
}
fullHash = strings.ToUpper(fullHash)
// rapid-upload
// note that 115 adds a timeout to rapid-upload,
// and after the timeout a "sig invalid" error is thrown even when the hash is correct.
if fastInfo, err = d.rapidUpload(stream.GetSize(), stream.GetName(), dirID, preHash, fullHash, stream); err != nil {
return err
}
if matched, err := fastInfo.Ok(); err != nil {
return err
} else if matched {
return nil
}
// rapid upload failed; fall back to a regular upload
if stream.GetSize() <= utils.KB { // files of 1 KB or less are uploaded in plain mode
return d.client.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID)
}
// multipart upload
return d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID)
}
var _ driver.Driver = (*Pan115)(nil)
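The rewritten Put avoids a local temp file wherever it can: the 128 KB pre-hash is read through stream.RangeRead, and the stream is cached to disk (CacheFullInTempFile) only when no full SHA-1 is already attached to it. Condensed into a sketch (the helper name is hypothetical; RangeRead, HashReader and the constants are the ones used above):

// preHashSHA1 hashes only the first 128 KB of the stream (or the whole
// stream when it is smaller), as 115's pre_hash probe requires.
func preHashSHA1(s model.FileStreamer) (string, error) {
	length := int64(128 * utils.KB)
	if s.GetSize() < length {
		length = s.GetSize()
	}
	reader, err := s.RangeRead(http_range.Range{Start: 0, Length: length})
	if err != nil {
		return "", err
	}
	hash, err := utils.HashReader(utils.SHA1, reader)
	if err != nil {
		return "", err
	}
	return strings.ToUpper(hash), nil
}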

View File

@ -3,6 +3,20 @@ package _115
import (
"github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
var _ model.Obj = (*driver.File)(nil)
var _ model.Obj = (*FileObj)(nil)
type FileObj struct {
driver.File
}
func (f *FileObj) CreateTime() time.Time {
return f.File.CreateTime
}
func (f *FileObj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.Sha1)
}

View File

@ -1,10 +1,25 @@
package _115
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/orzogc/fake115uploader/cipher"
"io"
"net/url"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/SheltonZhu/115driver/pkg/driver"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/pkg/errors"
)
@ -41,8 +56,8 @@ func (d *Pan115) login() error {
return d.client.LoginCheck()
}
func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
res := make([]driver.File, 0)
func (d *Pan115) getFiles(fileId string) ([]FileObj, error) {
res := make([]FileObj, 0)
if d.PageSize <= 0 {
d.PageSize = driver.FileListLimit
}
@ -51,7 +66,357 @@ func (d *Pan115) getFiles(fileId string) ([]driver.File, error) {
return nil, err
}
for _, file := range *files {
res = append(res, file)
res = append(res, FileObj{file})
}
return res, nil
}
const (
appVer = "2.0.3.6"
)
func (d *Pan115) rapidUpload(fileSize int64, fileName, dirID, preID, fileID string, stream model.FileStreamer) (*driver115.UploadInitResp, error) {
var (
ecdhCipher *cipher.EcdhCipher
encrypted []byte
decrypted []byte
encodedToken string
err error
target = "U_1_" + dirID
bodyBytes []byte
result = driver115.UploadInitResp{}
fileSizeStr = strconv.FormatInt(fileSize, 10)
)
if ecdhCipher, err = cipher.NewEcdhCipher(); err != nil {
return nil, err
}
userID := strconv.FormatInt(d.client.UserID, 10)
form := url.Values{}
form.Set("appid", "0")
form.Set("appversion", appVer)
form.Set("userid", userID)
form.Set("filename", fileName)
form.Set("filesize", fileSizeStr)
form.Set("fileid", fileID)
form.Set("target", target)
form.Set("sig", d.client.GenerateSignature(fileID, target))
signKey, signVal := "", ""
for retry := true; retry; {
t := driver115.Now()
if encodedToken, err = ecdhCipher.EncodeToken(t.ToInt64()); err != nil {
return nil, err
}
params := map[string]string{
"k_ec": encodedToken,
}
form.Set("t", t.String())
form.Set("token", d.client.GenerateToken(fileID, preID, t.String(), fileSizeStr, signKey, signVal))
if signKey != "" && signVal != "" {
form.Set("sign_key", signKey)
form.Set("sign_val", signVal)
}
if encrypted, err = ecdhCipher.Encrypt([]byte(form.Encode())); err != nil {
return nil, err
}
req := d.client.NewRequest().
SetQueryParams(params).
SetBody(encrypted).
SetHeaderVerbatim("Content-Type", "application/x-www-form-urlencoded").
SetDoNotParseResponse(true)
resp, err := req.Post(driver115.ApiUploadInit)
if err != nil {
return nil, err
}
data := resp.RawBody()
defer data.Close()
if bodyBytes, err = io.ReadAll(data); err != nil {
return nil, err
}
if decrypted, err = ecdhCipher.Decrypt(bodyBytes); err != nil {
return nil, err
}
if err = driver115.CheckErr(json.Unmarshal(decrypted, &result), &result, resp); err != nil {
return nil, err
}
if result.Status == 7 {
// Update signKey & signVal
signKey = result.SignKey
signVal, err = UploadDigestRange(stream, result.SignCheck)
if err != nil {
return nil, err
}
} else {
retry = false
}
result.SHA1 = fileID
}
return &result, nil
}
func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result string, err error) {
var start, end int64
if _, err = fmt.Sscanf(rangeSpec, "%d-%d", &start, &end); err != nil {
return
}
length := end - start + 1
reader, err := stream.RangeRead(http_range.Range{Start: start, Length: length})
if err != nil {
return "", err
}
hashStr, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return "", err
}
result = strings.ToUpper(hashStr)
return
}
// UploadByMultipart uploads via multipart blocks
func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) error {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
imur oss.InitiateMultipartUploadResult
ossClient *oss.Client
bucket *oss.Bucket
ossToken *driver115.UploadOSSTokenResp
err error
)
tmpF, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
options := driver115.DefalutUploadMultipartOptions()
if len(opts) > 0 {
for _, f := range opts {
f(options)
}
}
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
}
if ossClient, err = oss.New(driver115.OSSEndpoint, ossToken.AccessKeyID, ossToken.AccessKeySecret); err != nil {
return err
}
if bucket, err = ossClient.Bucket(params.Bucket); err != nil {
return err
}
// the ossToken expires after an hour, so fetch a new one every 50 minutes
ticker := time.NewTicker(options.TokenRefreshTime)
defer ticker.Stop()
// set up the timeout
timeout := time.NewTimer(options.Timeout)
if chunks, err = SplitFile(fileSize); err != nil {
return err
}
if imur, err = bucket.InitiateMultipartUpload(params.Object,
oss.SetHeader(driver115.OssSecurityTokenHeaderName, ossToken.SecurityToken),
oss.UserAgentHeader(driver115.OSSUserAgent),
); err != nil {
return err
}
wg := sync.WaitGroup{}
wg.Add(len(chunks))
chunksCh := make(chan oss.FileChunk)
errCh := make(chan error)
UploadedPartsCh := make(chan oss.UploadPart)
quit := make(chan struct{})
// producer
go chunksProducer(chunksCh, chunks)
go func() {
wg.Wait()
quit <- struct{}{}
}()
// consumers
for i := 0; i < options.ThreadsNum; i++ {
go func(threadId int) {
defer func() {
if r := recover(); r != nil {
errCh <- fmt.Errorf("Recovered in %v", r)
}
}()
for chunk := range chunksCh {
var part oss.UploadPart // keep retrying on error, 3 attempts in total
for retry := 0; retry < 3; retry++ {
select {
case <-ticker.C:
if ossToken, err = d.client.GetOSSToken(); err != nil { // time's up: fetch a new ossToken
errCh <- errors.Wrap(err, "error refreshing the ossToken")
}
default:
}
buf := make([]byte, chunk.Size)
if _, err = tmpF.ReadAt(buf, chunk.Offset); err != nil && !errors.Is(err, io.EOF) {
continue
}
b := bytes.NewBuffer(buf)
if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
break
}
}
if err != nil {
errCh <- errors.Wrap(err, fmt.Sprintf("upload %s: error on part %d: %v", stream.GetName(), chunk.Number, err))
}
UploadedPartsCh <- part
}
}(i)
}
go func() {
for part := range UploadedPartsCh {
parts = append(parts, part)
wg.Done()
}
}()
LOOP:
for {
select {
case <-ticker.C:
// time's up: fetch a new ossToken
if ossToken, err = d.client.GetOSSToken(); err != nil {
return err
}
case <-quit:
break LOOP
case <-errCh:
return err
case <-timeout.C:
return fmt.Errorf("time out")
}
}
// the EOF error comes from xml Unmarshal (the response is actually JSON), so the upload did in fact succeed
if _, err = bucket.CompleteMultipartUpload(imur, parts, driver115.OssOption(params, ossToken)...); err != nil && !errors.Is(err, io.EOF) {
// when the filename contains & or <, parsing the XML response fails even though the upload succeeded
if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
return err
}
}
return d.checkUploadStatus(dirID, params.SHA1)
}
func chunksProducer(ch chan oss.FileChunk, chunks []oss.FileChunk) {
for _, chunk := range chunks {
ch <- chunk
}
}
func (d *Pan115) checkUploadStatus(dirID, sha1 string) error {
// verify that the upload succeeded
req := d.client.NewRequest().ForceContentType("application/json;charset=UTF-8")
opts := []driver115.GetFileOptions{
driver115.WithOrder(driver115.FileOrderByTime),
driver115.WithShowDirEnable(false),
driver115.WithAsc(false),
driver115.WithLimit(500),
}
fResp, err := driver115.GetFiles(req, dirID, opts...)
if err != nil {
return err
}
for _, fileInfo := range fResp.Files {
if fileInfo.Sha1 == sha1 {
return nil
}
}
return driver115.ErrUploadFailed
}
func SplitFile(fileSize int64) (chunks []oss.FileChunk, err error) {
for i := int64(1); i < 10; i++ {
if fileSize < i*utils.GB { // a file smaller than i GB is split into i*1000 parts
if chunks, err = SplitFileByPartNum(fileSize, int(i*1000)); err != nil {
return
}
break
}
}
if fileSize > 9*utils.GB { // a file larger than 9 GB is split into 10000 parts
if chunks, err = SplitFileByPartNum(fileSize, 10000); err != nil {
return
}
}
// a single part must not be smaller than 100 KB
if chunks[0].Size < 100*utils.KB {
if chunks, err = SplitFileByPartSize(fileSize, 100*utils.KB); err != nil {
return
}
}
return
}
// SplitFileByPartNum splits big file into parts by the num of parts.
// Split the file with specified parts count, returns the split result when error is nil.
func SplitFileByPartNum(fileSize int64, chunkNum int) ([]oss.FileChunk, error) {
if chunkNum <= 0 || chunkNum > 10000 {
return nil, errors.New("chunkNum invalid")
}
if int64(chunkNum) > fileSize {
return nil, errors.New("oss: chunkNum invalid")
}
var chunks []oss.FileChunk
var chunk = oss.FileChunk{}
var chunkN = (int64)(chunkNum)
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * (fileSize / chunkN)
if i == chunkN-1 {
chunk.Size = fileSize/chunkN + fileSize%chunkN
} else {
chunk.Size = fileSize / chunkN
}
chunks = append(chunks, chunk)
}
return chunks, nil
}
// SplitFileByPartSize splits big file into parts by the size of parts.
// Splits the file by the part size. Returns the FileChunk when error is nil.
func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, error) {
if chunkSize <= 0 {
return nil, errors.New("chunkSize invalid")
}
var chunkN = fileSize / chunkSize
if chunkN >= 10000 {
return nil, errors.New("Too many parts, please increase part size")
}
var chunks []oss.FileChunk
var chunk = oss.FileChunk{}
for i := int64(0); i < chunkN; i++ {
chunk.Number = int(i + 1)
chunk.Offset = i * chunkSize
chunk.Size = chunkSize
chunks = append(chunks, chunk)
}
if fileSize%chunkSize > 0 {
chunk.Number = len(chunks) + 1
chunk.Offset = int64(len(chunks)) * chunkSize
chunk.Size = fileSize % chunkSize
chunks = append(chunks, chunk)
}
return chunks, nil
}
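Taken together, SplitFile's rules scale the part count with file size while keeping every part at or above 100 KB. A worked example with a hypothetical 3.5 GB upload:

// 3.5 GB is below the i=4 GB threshold, so SplitFile yields 4*1000 parts
// of roughly 917 KB each, comfortably above the 100 KB floor.
chunks, err := SplitFile(int64(3.5 * float64(utils.GB)))
if err == nil {
	fmt.Printf("parts=%d firstPartSize=%dB\n", len(chunks), chunks[0].Size)
}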

View File

@ -6,11 +6,6 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"net/url"
"os"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -22,6 +17,9 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"io"
"net/http"
"net/url"
)
type Pan123 struct {
@ -184,13 +182,12 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
// const DEFAULT int64 = 10485760
h := md5.New()
// need to calculate md5 of the full content
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
if _, err = io.Copy(h, tempFile); err != nil {
return err

View File

@ -1,6 +1,7 @@
package _123
import (
"github.com/alist-org/alist/v3/pkg/utils"
"net/url"
"path"
"strconv"
@ -21,6 +22,14 @@ type File struct {
DownloadUrl string `json:"DownloadUrl"`
}
func (f File) CreateTime() time.Time {
return f.UpdateAt
}
func (f File) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f File) GetPath() string {
return ""
}

View File

@ -1,6 +1,7 @@
package _123Share
import (
"github.com/alist-org/alist/v3/pkg/utils"
"net/url"
"path"
"strconv"
@ -21,6 +22,10 @@ type File struct {
DownloadUrl string `json:"DownloadUrl"`
}
func (f File) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f File) GetPath() string {
return ""
}
@ -36,6 +41,9 @@ func (f File) GetName() string {
func (f File) ModTime() time.Time {
return f.UpdateAt
}
func (f File) CreateTime() time.Time {
return f.UpdateAt
}
func (f File) IsDir() bool {
return f.Type == 1

View File

@ -3,6 +3,7 @@ package _189pc
import (
"encoding/xml"
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"sort"
"strings"
"time"
@ -175,6 +176,14 @@ type Cloud189File struct {
// StarLabel int64 `json:"starLabel"`
}
func (c *Cloud189File) CreateTime() time.Time {
return time.Time(c.CreateDate)
}
func (c *Cloud189File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.MD5, c.Md5)
}
func (c *Cloud189File) GetSize() int64 { return c.Size }
func (c *Cloud189File) GetName() string { return c.Name }
func (c *Cloud189File) ModTime() time.Time { return time.Time(c.LastOpTime) }
@ -199,6 +208,14 @@ type Cloud189Folder struct {
// StarLabel int64 `json:"starLabel"`
}
func (c *Cloud189Folder) CreateTime() time.Time {
return time.Time(c.CreateDate)
}
func (c *Cloud189Folder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (c *Cloud189Folder) GetSize() int64 { return 0 }
func (c *Cloud189Folder) GetName() string { return c.Name }
func (c *Cloud189Folder) ModTime() time.Time { return time.Time(c.LastOpTime) }

View File

@ -13,7 +13,6 @@ import (
"net/http"
"net/http/cookiejar"
"net/url"
"os"
"regexp"
"sort"
"strconv"
@ -550,13 +549,12 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
// rapid upload
func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// the md5 of the full file is required, so io.Seek must be supported
tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
var sliceSize = partSize(file.GetSize())
@ -742,13 +740,12 @@ func (y *Cloud189PC) GetMultiUploadUrls(ctx context.Context, uploadFileId string
// legacy upload; the family cloud does not support overwriting
func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
// the md5 of the full file is required, so io.Seek must be supported
tempFile, err := utils.CreateTempFile(file.GetReadCloser(), file.GetSize())
tempFile, err := file.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
// calculate the md5

View File

@ -3,6 +3,7 @@ package alist_v3
import (
"context"
"fmt"
"io"
"net/http"
"path"
"strconv"
@ -176,7 +177,7 @@ func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
SetHeader("Password", d.MetaPassword).
SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
SetContentLength(true).
SetBody(stream.GetReadCloser())
SetBody(io.ReadCloser(stream))
})
return err
}

View File

@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
"math"
"math/big"
@ -67,7 +68,7 @@ func (d *AliDrive) Init(ctx context.Context) error {
return nil
}
// init deviceID
deviceID := utils.GetSHA256Encode([]byte(d.UserID))
deviceID := utils.HashData(utils.SHA256, []byte(d.UserID))
// init privateKey
privateKey, _ := NewPrivateKeyFromHex(deviceID)
state := State{
@ -163,14 +164,14 @@ func (d *AliDrive) Remove(ctx context.Context, obj model.Obj) error {
return err
}
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
file := model.FileStream{
Obj: stream,
ReadCloser: stream,
Mimetype: stream.GetMimetype(),
func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
file := stream.FileStream{
Obj: streamer,
Reader: streamer,
Mimetype: streamer.GetMimetype(),
}
const DEFAULT int64 = 10485760
var count = int(math.Ceil(float64(stream.GetSize()) / float64(DEFAULT)))
var count = int(math.Ceil(float64(streamer.GetSize()) / float64(DEFAULT)))
partInfoList := make([]base.Json, 0, count)
for i := 1; i <= count; i++ {
@ -187,25 +188,25 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
}
var localFile *os.File
if fileStream, ok := file.ReadCloser.(*model.FileStream); ok {
localFile, _ = fileStream.ReadCloser.(*os.File)
if fileStream, ok := file.Reader.(*stream.FileStream); ok {
localFile, _ = fileStream.Reader.(*os.File)
}
if d.RapidUpload {
buf := bytes.NewBuffer(make([]byte, 0, 1024))
io.CopyN(buf, file, 1024)
reqBody["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
reqBody["pre_hash"] = utils.HashData(utils.SHA1, buf.Bytes())
if localFile != nil {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
return err
}
} else {
// splice the consumed head back onto the stream
file.ReadCloser = struct {
file.Reader = struct {
io.Reader
io.Closer
}{
Reader: io.MultiReader(buf, file),
Closer: file,
Closer: &file,
}
}
} else {
@ -281,7 +282,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
if _, err = localFile.Seek(0, io.SeekStart); err != nil {
return err
}
file.ReadCloser = localFile
file.Reader = localFile
}
for i, partInfo := range resp.PartInfoList {

View File

@ -1,6 +1,7 @@
package aliyundrive_open
import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"
"github.com/alist-org/alist/v3/internal/model"
@ -46,6 +47,8 @@ func fileToObj(f File) *model.ObjThumb {
Size: f.Size,
Modified: f.UpdatedAt,
IsFolder: f.Type == "folder",
Ctime: f.CreatedAt,
HashInfo: utils.NewHashInfo(utils.SHA1, f.ContentHash),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbnail},
}

View File

@ -3,14 +3,12 @@ package aliyundrive_open
import (
"bytes"
"context"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"github.com/alist-org/alist/v3/pkg/http_range"
"io"
"math"
"net/http"
"os"
"strconv"
"strings"
"time"
@ -33,19 +31,19 @@ func makePartInfos(size int) []base.Json {
}
func calPartSize(fileSize int64) int64 {
var partSize int64 = 20 * 1024 * 1024
var partSize int64 = 20 * utils.MB
if fileSize > partSize {
if fileSize > 1*1024*1024*1024*1024 { // file Size over 1TB
partSize = 5 * 1024 * 1024 * 1024 // file part size 5GB
} else if fileSize > 768*1024*1024*1024 { // over 768GB
if fileSize > 1*utils.TB { // file Size over 1TB
partSize = 5 * utils.GB // file part size 5GB
} else if fileSize > 768*utils.GB { // over 768GB
partSize = 109951163 // ≈ 104.8576MB, split 1TB into 10,000 part
} else if fileSize > 512*1024*1024*1024 { // over 512GB
} else if fileSize > 512*utils.GB { // over 512GB
partSize = 82463373 // ≈ 78.6432MB
} else if fileSize > 384*1024*1024*1024 { // over 384GB
} else if fileSize > 384*utils.GB { // over 384GB
partSize = 54975582 // ≈ 52.4288MB
} else if fileSize > 256*1024*1024*1024 { // over 256GB
} else if fileSize > 256*utils.GB { // over 256GB
partSize = 41231687 // ≈ 39.3216MB
} else if fileSize > 128*1024*1024*1024 { // over 128GB
} else if fileSize > 128*utils.GB { // over 128GB
partSize = 27487791 // ≈ 26.2144MB
}
}
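These tiers keep the slice count under the 10,000-part ceiling noted above. A worked example with a hypothetical 300 GB file:

size := int64(300) * utils.GB  // falls into the ">256 GB" tier
partSize := calPartSize(size)  // 41231687 bytes ≈ 39.3 MB
count := int64(math.Ceil(float64(size) / float64(partSize)))
// count ≈ 7813 slices, safely below the 10,000 maximum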
@ -127,17 +125,22 @@ func getProofRange(input string, size int64) (*ProofRange, error) {
return pr, nil
}
func (d *AliyundriveOpen) calProofCode(file *os.File, fileSize int64) (string, error) {
proofRange, err := getProofRange(d.AccessToken, fileSize)
func (d *AliyundriveOpen) calProofCode(stream model.FileStreamer) (string, error) {
proofRange, err := getProofRange(d.AccessToken, stream.GetSize())
if err != nil {
return "", err
}
buf := make([]byte, proofRange.End-proofRange.Start)
_, err = file.ReadAt(buf, proofRange.Start)
length := proofRange.End - proofRange.Start
buf := bytes.NewBuffer(make([]byte, 0, length))
reader, err := stream.RangeRead(http_range.Range{Start: proofRange.Start, Length: length})
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(buf), nil
_, err = io.CopyN(buf, reader, length)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}
func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
@ -145,70 +148,68 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
// Part Size Unit: Bytes, Default: 20MB,
// Maximum number of slices 10,000, ≈195.3125GB
var partSize = calPartSize(stream.GetSize())
const dateFormat = "2006-01-02T15:04:05.88Z"
mtime := stream.ModTime()
mtimeStr := mtime.UTC().Format(dateFormat)
ctimeStr := stream.CreateTime().UTC().Format(dateFormat)
createData := base.Json{
"drive_id": d.DriveId,
"parent_file_id": dstDir.GetID(),
"name": stream.GetName(),
"type": "file",
"check_name_mode": "ignore",
"drive_id": d.DriveId,
"parent_file_id": dstDir.GetID(),
"name": stream.GetName(),
"type": "file",
"check_name_mode": "ignore",
"local_modified_at": mtimeStr,
"local_created_at": ctimeStr,
}
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
createData["part_info_list"] = makePartInfos(count)
// rapid upload
rapidUpload := stream.GetSize() > 100*1024 && d.RapidUpload
rapidUpload := stream.GetSize() > 100*utils.KB && d.RapidUpload
if rapidUpload {
log.Debugf("[aliyundrive_open] start cal pre_hash")
// read 1024 bytes to calculate pre hash
buf := bytes.NewBuffer(make([]byte, 0, 1024))
_, err := io.CopyN(buf, stream, 1024)
reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
if err != nil {
return nil, err
}
hash, err := utils.HashReader(utils.SHA1, reader)
if err != nil {
return nil, err
}
createData["size"] = stream.GetSize()
createData["pre_hash"] = utils.GetSHA1Encode(buf.Bytes())
// if seeking is supported, seek to the start
if localFile, ok := stream.(io.Seeker); ok {
if _, err := localFile.Seek(0, io.SeekStart); err != nil {
return nil, err
}
} else {
// Put spliced head back to stream
stream.SetReadCloser(struct {
io.Reader
io.Closer
}{
Reader: io.MultiReader(buf, stream.GetReadCloser()),
Closer: stream.GetReadCloser(),
})
}
createData["pre_hash"] = hash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp("/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
req.SetBody(createData).SetResult(&createResp)
})
var tmpF model.File
if err != nil {
if e.Code != "PreHashMatched" || !rapidUpload {
return nil, err
}
log.Debugf("[aliyundrive_open] pre_hash matched, start rapid upload")
// convert to local file
file, err := utils.CreateTempFile(stream, stream.GetSize())
if err != nil {
return nil, err
}
_ = stream.GetReadCloser().Close()
stream.SetReadCloser(file)
// calculate full hash
h := sha1.New()
_, err = io.Copy(h, file)
if err != nil {
return nil, err
hi := stream.GetHash()
hash := hi.GetHash(utils.SHA1)
if len(hash) <= 0 {
tmpF, err = stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
hash, err = utils.HashFile(utils.SHA1, tmpF)
if err != nil {
return nil, err
}
}
delete(createData, "pre_hash")
createData["proof_version"] = "v1"
createData["content_hash_name"] = "sha1"
createData["content_hash"] = hex.EncodeToString(h.Sum(nil))
createData["proof_code"], err = d.calProofCode(file, stream.GetSize())
createData["content_hash"] = hash
createData["proof_code"], err = d.calProofCode(stream)
if err != nil {
return nil, fmt.Errorf("cal proof code error: %s", err.Error())
}
@ -218,17 +219,15 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, err
}
// seek to start
if _, err = file.Seek(0, io.SeekStart); err != nil {
return nil, err
}
}
if !createResp.RapidUpload {
// 2. upload
// 2. normal upload
log.Debugf("[aliyundive_open] normal upload")
preTime := time.Now()
var offset, length int64 = 0, partSize
//var length
for i := 0; i < len(createResp.PartInfoList); i++ {
if utils.IsCanceled(ctx) {
return nil, ctx.Err()
@ -241,9 +240,16 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
}
preTime = time.Now()
}
rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
if remain := stream.GetSize() - offset; length > remain {
length = remain
}
//rd := utils.NewMultiReadable(io.LimitReader(stream, partSize))
rd, err := stream.RangeRead(http_range.Range{Start: offset, Length: length})
if err != nil {
return nil, err
}
err = retry.Do(func() error {
rd.Reset()
//rd.Reset()
return d.uploadPart(ctx, rd, createResp.PartInfoList[i])
},
retry.Attempts(3),
@ -252,6 +258,7 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
if err != nil {
return nil, err
}
offset += partSize
}
} else {
log.Debugf("[aliyundrive_open] rapid upload success, file id: %s", createResp.FileId)

View File

@ -9,7 +9,6 @@ import (
"io"
"math"
"net/url"
"os"
stdpath "path"
"strconv"
"time"
@ -31,7 +30,7 @@ type BaiduNetdisk struct {
uploadThread int
}
const DefaultSliceSize int64 = 4 * 1024 * 1024
const DefaultSliceSize int64 = 4 * utils.MB
func (d *BaiduNetdisk) Config() driver.Config {
return config
@ -81,7 +80,7 @@ func (d *BaiduNetdisk) Link(ctx context.Context, file model.Obj, args model.Link
func (d *BaiduNetdisk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error) {
var newDir File
_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir)
_, err := d.create(stdpath.Join(parentDir.GetPath(), dirName), 0, 1, "", "", &newDir, 0, 0)
if err != nil {
return nil, err
}
@ -148,14 +147,10 @@ func (d *BaiduNetdisk) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
streamSize := stream.GetSize()
count := int(math.Max(math.Ceil(float64(streamSize)/float64(DefaultSliceSize)), 1))
@ -194,15 +189,15 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
rawPath := stdpath.Join(dstDir.GetPath(), stream.GetName())
path := encodeURIComponent(rawPath)
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
// step.1 precreate
// try to resume previous upload progress
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s",
path, streamSize,
blockListStr,
contentMd5, sliceMd5)
data := fmt.Sprintf("path=%s&size=%d&isdir=0&autoinit=1&rtype=3&block_list=%s&content-md5=%s&slice-md5=%s&local_mtime=%d&local_ctime=%d",
path, streamSize, blockListStr, contentMd5, sliceMd5, mtime, ctime)
params := map[string]string{
"method": "precreate",
}
@ -263,7 +258,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
// step.3 create the file
var newFile File
_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile)
_, err = d.create(rawPath, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
if err != nil {
return nil, err
}

View File

@ -1,6 +1,7 @@
package baidu_netdisk
import (
"github.com/alist-org/alist/v3/pkg/utils"
"path"
"strconv"
"time"
@ -40,11 +41,11 @@ type File struct {
Isdir int `json:"isdir"`
// list resp
//ServerCtime int64 `json:"server_ctime"`
ServerCtime int64 `json:"server_ctime"`
ServerMtime int64 `json:"server_mtime"`
//ServerAtime int64 `json:"server_atime"`
//LocalCtime int64 `json:"local_ctime"`
//LocalMtime int64 `json:"local_mtime"`
LocalMtime int64 `json:"local_mtime"`
LocalCtime int64 `json:"local_ctime"`
//ServerAtime int64 `json:"server_atime"`
// only create and precreate resp
Ctime int64 `json:"ctime"`
@ -55,8 +56,11 @@ func fileToObj(f File) *model.ObjThumb {
if f.ServerFilename == "" {
f.ServerFilename = path.Base(f.Path)
}
if f.ServerMtime == 0 {
f.ServerMtime = int64(f.Mtime)
if f.LocalCtime == 0 {
f.LocalCtime = f.Ctime
}
if f.LocalMtime == 0 {
f.LocalMtime = f.Mtime
}
return &model.ObjThumb{
Object: model.Object{
@ -64,8 +68,10 @@ func fileToObj(f File) *model.ObjThumb {
Path: f.Path,
Name: f.ServerFilename,
Size: f.Size,
Modified: time.Unix(f.ServerMtime, 0),
Modified: time.Unix(f.LocalMtime, 0),
Ctime: time.Unix(f.LocalCtime, 0),
IsFolder: f.Isdir == 1,
HashInfo: utils.NewHashInfo(utils.MD5, f.Md5),
},
Thumbnail: model.Thumbnail{Thumbnail: f.Thumbs.Url3},
}

View File

@ -198,11 +198,17 @@ func (d *BaiduNetdisk) manage(opera string, filelist any) ([]byte, error) {
return d.post("/xpan/file", params, data, nil)
}
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any) ([]byte, error) {
func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, block_list string, resp any, mtime, ctime int64) ([]byte, error) {
params := map[string]string{
"method": "create",
}
data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
data := ""
if mtime == 0 || ctime == 0 {
data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3", encodeURIComponent(path), size, isdir)
} else {
data = fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3&local_mtime=%d&local_ctime=%d", encodeURIComponent(path), size, isdir, mtime, ctime)
}
if uploadid != "" {
data += fmt.Sprintf("&uploadid=%s&block_list=%s", uploadid, block_list)
}
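With both timestamps known, the create and precreate bodies now carry local_mtime and local_ctime, letting Baidu preserve the original file times. An illustrative body (values hypothetical, and assuming encodeURIComponent mirrors its JavaScript namesake):

data := fmt.Sprintf("path=%s&size=%d&isdir=%d&rtype=3&local_mtime=%d&local_ctime=%d",
	encodeURIComponent("/apps/alist/a.bin"), 1048576, 0, 1693142063, 1693139000)
// path=%2Fapps%2Falist%2Fa.bin&size=1048576&isdir=0&rtype=3&local_mtime=1693142063&local_ctime=1693139000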

View File

@ -8,7 +8,6 @@ import (
"fmt"
"io"
"math"
"os"
"regexp"
"strconv"
"strings"
@ -229,13 +228,12 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
}
// the md5 of the full file is required, so io.Seek must be supported
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
const DEFAULT int64 = 1 << 22

View File

@ -61,12 +61,12 @@ func moveFileToAlbumFile(file *File, album *Album, uk int64) *AlbumFile {
func renameAlbum(album *Album, newName string) *Album {
return &Album{
AlbumID: album.AlbumID,
Tid: album.Tid,
JoinTime: album.JoinTime,
CreateTime: album.CreateTime,
Title: newName,
Mtime: time.Now().Unix(),
AlbumID: album.AlbumID,
Tid: album.Tid,
JoinTime: album.JoinTime,
CreationTime: album.CreationTime,
Title: newName,
Mtime: time.Now().Unix(),
}
}

View File

@ -2,6 +2,7 @@ package baiduphoto
import (
"fmt"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
"github.com/alist-org/alist/v3/internal/model"
@ -73,6 +74,13 @@ func (c *File) Thumb() string {
}
return ""
}
func (c *File) CreateTime() time.Time {
return time.Unix(c.Ctime, 0)
}
func (c *File) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
/*相册部分*/
type (
@ -84,12 +92,12 @@ type (
}
Album struct {
AlbumID string `json:"album_id"`
Tid int64 `json:"tid"`
Title string `json:"title"`
JoinTime int64 `json:"join_time"`
CreateTime int64 `json:"create_time"`
Mtime int64 `json:"mtime"`
AlbumID string `json:"album_id"`
Tid int64 `json:"tid"`
Title string `json:"title"`
JoinTime int64 `json:"join_time"`
CreationTime int64 `json:"create_time"`
Mtime int64 `json:"mtime"`
parseTime *time.Time
}
@ -109,6 +117,14 @@ type (
}
)
func (a *Album) CreateTime() time.Time {
return time.Unix(a.CreationTime, 0)
}
func (a *Album) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (a *Album) GetSize() int64 { return 0 }
func (a *Album) GetName() string { return a.Title }
func (a *Album) ModTime() time.Time {

View File

@ -115,7 +115,7 @@ func (d *Cloudreve) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Cloudreve) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
if stream.GetReadCloser() == http.NoBody {
if io.ReadCloser(stream) == http.NoBody {
return d.create(ctx, dstDir, stream)
}
var r DirectoryResp

View File

@ -3,8 +3,8 @@ package crypt
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
"net/http"
stdpath "path"
"regexp"
"strings"
@ -13,7 +13,6 @@ import (
"github.com/alist-org/alist/v3/internal/errs"
"github.com/alist-org/alist/v3/internal/fs"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
@ -82,7 +81,6 @@ func (d *Crypt) Init(ctx context.Context) error {
}
d.cipher = c
//c, err := rcCrypt.newCipher(rcCrypt.NameEncryptionStandard, "", "", true, nil)
return nil
}
@ -128,6 +126,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
Size: 0,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
Ctime: obj.CreateTime(),
// discarding hash as it's encrypted
}
result = append(result, &objRes)
} else {
@ -147,6 +147,8 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
Size: size,
Modified: obj.ModTime(),
IsFolder: obj.IsDir(),
Ctime: obj.CreateTime(),
// discarding hash as it's encrypted
}
if !ok {
result = append(result, &objRes)
@ -232,70 +234,53 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
return nil, err
}
if remoteLink.RangeReadCloser.RangeReader == nil && remoteLink.ReadSeekCloser == nil && len(remoteLink.URL) == 0 {
if remoteLink.RangeReadCloser == nil && remoteLink.MFile == nil && len(remoteLink.URL) == 0 {
return nil, fmt.Errorf("the remote storage driver need to be enhanced to support encrytion")
}
remoteFileSize := remoteFile.GetSize()
remoteClosers := utils.NewClosers()
remoteClosers := utils.EmptyClosers()
rangeReaderFunc := func(ctx context.Context, underlyingOffset, underlyingLength int64) (io.ReadCloser, error) {
length := underlyingLength
if underlyingLength >= 0 && underlyingOffset+underlyingLength >= remoteFileSize {
length = -1
}
if remoteLink.RangeReadCloser.RangeReader != nil {
rrc := remoteLink.RangeReadCloser
if len(remoteLink.URL) > 0 {
rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
var converted, err = stream.GetRangeReadCloserFromLink(remoteFileSize, rangedRemoteLink)
if err != nil {
return nil, err
}
rrc = converted
}
if rrc != nil {
//remoteRangeReader, err :=
remoteReader, err := remoteLink.RangeReadCloser.RangeReader(http_range.Range{Start: underlyingOffset, Length: length})
remoteClosers.Add(remoteLink.RangeReadCloser.Closers)
remoteReader, err := rrc.RangeRead(ctx, http_range.Range{Start: underlyingOffset, Length: length})
remoteClosers.AddClosers(rrc.GetClosers())
if err != nil {
return nil, err
}
return remoteReader, nil
}
if remoteLink.ReadSeekCloser != nil {
_, err := remoteLink.ReadSeekCloser.Seek(underlyingOffset, io.SeekStart)
if remoteLink.MFile != nil {
_, err := remoteLink.MFile.Seek(underlyingOffset, io.SeekStart)
if err != nil {
return nil, err
}
//remoteClosers.Add(remoteLink.ReadSeekCloser)
//reuse the same ReadSeekCloser and close it last.
return io.NopCloser(remoteLink.ReadSeekCloser), nil
//remoteClosers.Add(remoteLink.MFile)
//reuse the same MFile and close it last.
remoteClosers.Add(remoteLink.MFile)
return io.NopCloser(remoteLink.MFile), nil
}
if len(remoteLink.URL) > 0 {
rangedRemoteLink := &model.Link{
URL: remoteLink.URL,
Header: remoteLink.Header,
}
response, err := RequestRangedHttp(args.HttpReq, rangedRemoteLink, underlyingOffset, length)
//remoteClosers.Add(response.Body)
if err != nil {
return nil, fmt.Errorf("remote storage http request failure,status: %d err:%s", response.StatusCode, err)
}
if underlyingOffset == 0 && length == -1 || response.StatusCode == http.StatusPartialContent {
return response.Body, nil
} else if response.StatusCode == http.StatusOK {
log.Warnf("remote http server not supporting range request, expect low perfromace!")
readCloser, err := net.GetRangedHttpReader(response.Body, underlyingOffset, length)
if err != nil {
return nil, err
}
return readCloser, nil
}
return response.Body, nil
}
//if remoteLink.Data != nil {
// log.Warnf("remote storage not supporting range request, expect low perfromace!")
// readCloser, err := net.GetRangedHttpReader(remoteLink.Data, underlyingOffset, length)
// remoteCloser = remoteLink.Data
// if err != nil {
// return nil, err
// }
// return readCloser, nil
//}
return nil, errs.NotSupport
}
resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
readSeeker, err := d.cipher.DecryptDataSeek(ctx, rangeReaderFunc, httpRange.Start, httpRange.Length)
if err != nil {
return nil, err
@ -306,7 +291,7 @@ func (d *Crypt) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: remoteClosers}
resultLink := &model.Link{
Header: remoteLink.Header,
RangeReadCloser: *resultRangeReadCloser,
RangeReadCloser: resultRangeReadCloser,
Expiration: remoteLink.Expiration,
}
@ -370,32 +355,32 @@ func (d *Crypt) Remove(ctx context.Context, obj model.Obj) error {
return op.Remove(ctx, d.remoteStorage, remoteActualPath)
}
func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileStreamer, up driver.UpdateProgress) error {
dstDirActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true)
if err != nil {
return fmt.Errorf("failed to convert path to remote path: %w", err)
}
in := stream.GetReadCloser()
// Encrypt the data into wrappedIn
wrappedIn, err := d.cipher.EncryptData(in)
wrappedIn, err := d.cipher.EncryptData(streamer)
if err != nil {
return fmt.Errorf("failed to EncryptData: %w", err)
}
streamOut := &model.FileStream{
// a seekable stream is not supported here, since rapid-upload cannot work on encrypted data
streamOut := &stream.FileStream{
Obj: &model.Object{
ID: stream.GetID(),
Path: stream.GetPath(),
Name: d.cipher.EncryptFileName(stream.GetName()),
Size: d.cipher.EncryptedSize(stream.GetSize()),
Modified: stream.ModTime(),
IsFolder: stream.IsDir(),
ID: streamer.GetID(),
Path: streamer.GetPath(),
Name: d.cipher.EncryptFileName(streamer.GetName()),
Size: d.cipher.EncryptedSize(streamer.GetSize()),
Modified: streamer.ModTime(),
IsFolder: streamer.IsDir(),
},
ReadCloser: io.NopCloser(wrappedIn),
Reader: wrappedIn,
Mimetype: "application/octet-stream",
WebPutAsTask: stream.NeedStore(),
Old: stream.GetOld(),
WebPutAsTask: streamer.NeedStore(),
Exist: streamer.GetExist(),
}
err = op.Put(ctx, d.remoteStorage, dstDirActualPath, streamOut, up, false)
if err != nil {

View File

@ -1,24 +1,13 @@
package crypt
import (
"net/http"
stdpath "path"
"path/filepath"
"strings"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/internal/net"
"github.com/alist-org/alist/v3/internal/op"
"github.com/alist-org/alist/v3/pkg/http_range"
)
func RequestRangedHttp(r *http.Request, link *model.Link, offset, length int64) (*http.Response, error) {
header := net.ProcessHeader(http.Header{}, link.Header)
header = http_range.ApplyRangeToHttpHeader(http_range.Range{Start: offset, Length: length}, header)
return net.RequestHttp("GET", header, link.URL)
}
// will give the best guessing based on the path
func guessPath(path string) (isFolder, secondTry bool) {
if strings.HasSuffix(path, "/") {

View File

@ -66,7 +66,7 @@ func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
r := NewFTPFileReader(d.conn, file.GetPath())
link := &model.Link{
ReadSeekCloser: r,
MFile: r,
}
return link, nil
}

View File

@ -30,13 +30,14 @@ func (d *FTP) login() error {
return nil
}
// An FTP file reader that implements io.ReadSeekCloser for seeking.
// An FTP file reader that implements model.File for seeking.
type FTPFileReader struct {
conn *ftp.ServerConn
resp *ftp.Response
offset int64
mu sync.Mutex
path string
conn *ftp.ServerConn
resp *ftp.Response
offset int64
readAtOffset int64
mu sync.Mutex
path string
}
func NewFTPFileReader(conn *ftp.ServerConn, path string) *FTPFileReader {
@ -50,15 +51,33 @@ func (r *FTPFileReader) Read(buf []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
n, err = r.ReadAt(buf, r.offset)
r.offset += int64(n)
return
}
func (r *FTPFileReader) ReadAt(buf []byte, off int64) (n int, err error) {
if off < 0 {
return -1, os.ErrInvalid
}
r.mu.Lock()
defer r.mu.Unlock()
if off != r.readAtOffset {
//have to restart the connection to correct the offset
_ = r.resp.Close()
r.resp = nil
}
if r.resp == nil {
r.resp, err = r.conn.RetrFrom(r.path, uint64(r.offset))
r.resp, err = r.conn.RetrFrom(r.path, uint64(off))
r.readAtOffset = off
if err != nil {
return 0, err
}
}
n, err = r.resp.Read(buf)
r.offset += int64(n)
r.readAtOffset += int64(n)
return
}
@ -92,12 +111,6 @@ func (r *FTPFileReader) Seek(offset int64, whence int) (int64, error) {
return oldOffset, nil
}
r.offset = newOffset
if r.resp != nil {
// close the existing ftp data connection, otherwise the next read will be blocked
_ = r.resp.Close() // we do not care about whether it returns an error
r.resp = nil
}
return newOffset, nil
}
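Read is now expressed through ReadAt, and the data connection is reopened only when a requested offset diverges from readAtOffset, which makes interleaved sequential and positional reads predictable. A usage sketch (path and offsets hypothetical):

r := NewFTPFileReader(conn, "/pub/file.bin")
buf := make([]byte, 8)
_, _ = r.ReadAt(buf, 1024) // opens the data connection at offset 1024
_, _ = r.ReadAt(buf, 1032) // offsets line up, so the same connection is reused
_, _ = r.Read(buf)         // the sequential offset is still 0, forcing a restart at 0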

View File

@ -112,7 +112,7 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
obj := stream.GetOld()
obj := stream.GetExist()
var (
e Error
url string
@ -158,7 +158,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
putUrl := res.Header().Get("location")
if stream.GetSize() < d.ChunkSize*1024*1024 {
_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream)
}, nil)
} else {
err = d.chunkUpload(ctx, stream, putUrl)

View File

@ -5,7 +5,7 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"io"
"github.com/alist-org/alist/v3/pkg/http_range"
"io/ioutil"
"net/http"
"os"
@ -216,25 +216,29 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
var defaultChunkSize = d.ChunkSize * 1024 * 1024
var finish int64 = 0
for finish < stream.GetSize() {
var offset int64 = 0
for offset < stream.GetSize() {
if utils.IsCanceled(ctx) {
return ctx.Err()
}
chunkSize := stream.GetSize() - finish
chunkSize := stream.GetSize() - offset
if chunkSize > defaultChunkSize {
chunkSize = defaultChunkSize
}
_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
reader, err := stream.RangeRead(http_range.Range{Start: offset, Length: chunkSize})
if err != nil {
return err
}
_, err = d.request(url, http.MethodPut, func(req *resty.Request) {
req.SetHeaders(map[string]string{
"Content-Length": strconv.FormatInt(chunkSize, 10),
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, stream.GetSize()),
}).SetBody(reader).SetContext(ctx)
}, nil)
if err != nil {
return err
}
finish += chunkSize
offset += chunkSize
}
return nil
}
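Each iteration now pulls its chunk through stream.RangeRead and labels it with a Content-Range header. For a hypothetical 10 MB file with 4 MB chunks the loop produces:

size, chunk := int64(10*1024*1024), int64(4*1024*1024)
for offset := int64(0); offset < size; offset += chunk {
	c := chunk
	if rem := size - offset; c > rem {
		c = rem // the final chunk shrinks to the remainder
	}
	fmt.Printf("Content-Range: bytes %d-%d/%d\n", offset, offset+c-1, size)
}
// Content-Range: bytes 0-4194303/10485760
// Content-Range: bytes 4194304-8388607/10485760
// Content-Range: bytes 8388608-10485759/10485760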

View File

@ -124,7 +124,7 @@ func (d *GooglePhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
}
resp, err := d.request(postUrl, http.MethodPost, func(req *resty.Request) {
req.SetBody(stream.GetReadCloser()).SetContext(ctx)
req.SetBody(stream).SetContext(ctx)
}, nil, postHeaders)
if err != nil {

View File

@ -3,6 +3,8 @@ package lanzou
import (
"errors"
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"time"
)
@ -18,6 +20,9 @@ type RespInfo[T any] struct {
Info T `json:"info"`
}
var _ model.Obj = (*FileOrFolder)(nil)
var _ model.Obj = (*FileOrFolderByShareUrl)(nil)
type FileOrFolder struct {
Name string `json:"name"`
//Onof string `json:"onof"` // whether an extraction code is set
@ -49,6 +54,14 @@ type FileOrFolder struct {
shareInfo *FileShare `json:"-"`
}
func (f *FileOrFolder) CreateTime() time.Time {
return f.ModTime()
}
func (f *FileOrFolder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f *FileOrFolder) GetID() string {
if f.IsDir() {
return f.FolID
@ -130,6 +143,14 @@ type FileOrFolderByShareUrl struct {
repairFlag bool `json:"-"`
}
func (f *FileOrFolderByShareUrl) CreateTime() time.Time {
return f.ModTime()
}
func (f *FileOrFolderByShareUrl) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f *FileOrFolderByShareUrl) GetID() string { return f.ID }
func (f *FileOrFolderByShareUrl) GetName() string { return f.NameAll }
func (f *FileOrFolderByShareUrl) GetPath() string { return "" }

View File

@ -5,15 +5,6 @@ import (
"context"
"errors"
"fmt"
"io"
"io/fs"
"net/http"
"os"
stdpath "path"
"path/filepath"
"strconv"
"strings"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/driver"
"github.com/alist-org/alist/v3/internal/errs"
@ -21,7 +12,15 @@ import (
"github.com/alist-org/alist/v3/internal/sign"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/alist-org/alist/v3/server/common"
"github.com/djherbis/times"
_ "golang.org/x/image/webp"
"io/fs"
"net/http"
"os"
stdpath "path"
"path/filepath"
"strconv"
"strings"
)
type Local struct {
@ -102,6 +101,14 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
if !isFolder {
size = f.Size()
}
ctime := f.ModTime()
t, err := times.Stat(stdpath.Join(fullPath, f.Name()))
if err == nil {
if t.HasBirthTime() {
ctime = t.BirthTime()
}
}
file := model.ObjThumb{
Object: model.Object{
Path: filepath.Join(fullPath, f.Name()),
@ -109,6 +116,7 @@ func (d *Local) FileInfoToObj(f fs.FileInfo, reqPath string, fullPath string) mo
Modified: f.ModTime(),
Size: size,
IsFolder: isFolder,
Ctime: ctime,
},
Thumbnail: model.Thumbnail{
Thumbnail: thumb,
@ -171,9 +179,9 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ReadSeekCloser = open
link.MFile = open
} else {
link.ReadSeekCloser = utils.ReadSeekerNopCloser(bytes.NewReader(buf.Bytes()))
link.MFile = model.NewNopMFile(bytes.NewReader(buf.Bytes()))
//link.Header.Set("Content-Length", strconv.Itoa(buf.Len()))
}
} else {
@ -181,15 +189,7 @@ func (d *Local) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
if err != nil {
return nil, err
}
link.ReadSeekCloser = struct {
io.Reader
io.Seeker
io.Closer
}{
Reader: open,
Seeker: open,
Closer: open,
}
link.MFile = open
}
return &link, nil
}
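The creation-time probe above leans on github.com/djherbis/times and silently falls back to the modification time when the filesystem records no birth time (the common case on Linux). Pulled out as a standalone helper (name hypothetical):

// creationTime returns the file's birth time where the OS exposes one,
// and the supplied modification time otherwise.
func creationTime(path string, mtime time.Time) time.Time {
	if t, err := times.Stat(path); err == nil && t.HasBirthTime() {
		return t.BirthTime()
	}
	return mtime
}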

View File

@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"
@ -181,13 +180,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
if err != nil {
return err
}
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
uploader := s3manager.NewUploader(s)
input := &s3manager.UploadInput{

View File

@ -42,7 +42,7 @@ func (d *Mega) Drop(ctx context.Context) error {
func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
if node, ok := dir.(*MegaNode); ok {
nodes, err := d.c.FS.GetChildren(node.Node)
nodes, err := d.c.FS.GetChildren(node.n)
if err != nil {
return nil, err
}
@ -56,7 +56,7 @@ func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]
return res, nil
}
log.Errorf("can't convert: %+v", dir)
return nil, fmt.Errorf("unable to convert dir to mega node")
return nil, fmt.Errorf("unable to convert dir to mega n")
}
func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
@ -68,21 +68,21 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
if node, ok := file.(*MegaNode); ok {
//down, err := d.c.NewDownload(node.Node)
//down, err := d.c.NewDownload(n.Node)
//if err != nil {
// return nil, fmt.Errorf("open download file failed: %w", err)
//}
size := file.GetSize()
var finalClosers utils.Closers
resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
length := httpRange.Length
if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
length = -1
}
var down *mega.Download
err := utils.Retry(3, time.Second, func() (err error) {
down, err = d.c.NewDownload(node.Node)
down, err = d.c.NewDownload(node.n)
return err
})
if err != nil {
@ -97,37 +97,37 @@ func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
return readers.NewLimitedReadCloser(oo, length), nil
}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: &finalClosers}
resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
resultLink := &model.Link{
RangeReadCloser: *resultRangeReadCloser,
RangeReadCloser: resultRangeReadCloser,
}
return resultLink, nil
}
return nil, fmt.Errorf("unable to convert dir to mega node")
return nil, fmt.Errorf("unable to convert dir to mega n")
}
func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
if parentNode, ok := parentDir.(*MegaNode); ok {
_, err := d.c.CreateDir(dirName, parentNode.Node)
_, err := d.c.CreateDir(dirName, parentNode.n)
return err
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}
func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
if srcNode, ok := srcObj.(*MegaNode); ok {
if dstNode, ok := dstDir.(*MegaNode); ok {
return d.c.Move(srcNode.Node, dstNode.Node)
return d.c.Move(srcNode.n, dstNode.n)
}
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}
func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
if srcNode, ok := srcObj.(*MegaNode); ok {
return d.c.Rename(srcNode.Node, newName)
return d.c.Rename(srcNode.n, newName)
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}
func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@ -136,14 +136,14 @@ func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
if node, ok := obj.(*MegaNode); ok {
return d.c.Delete(node.Node, false)
return d.c.Delete(node.n, false)
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}
func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
if dstNode, ok := dstDir.(*MegaNode); ok {
u, err := d.c.NewUpload(dstNode.Node, stream.GetName(), stream.GetSize())
u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize())
if err != nil {
return err
}
@ -175,7 +175,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
_, err = u.Finish()
return err
}
return fmt.Errorf("unable to convert dir to mega node")
return fmt.Errorf("unable to convert dir to mega n")
}
//func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
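The RangeReader callback is now context-aware and model.Link carries a *model.RangeReadCloser. A minimal sketch of the contract, with an in-memory payload standing in for the remote object (field types as used in this diff):

payload := []byte("0123456789")
rangeReader := func(ctx context.Context, r http_range.Range) (io.ReadCloser, error) {
	end := int64(len(payload))
	if r.Length >= 0 && r.Start+r.Length < end {
		end = r.Start + r.Length // a negative Length means "read to the end"
	}
	return io.NopCloser(bytes.NewReader(payload[r.Start:end])), nil
}
link := &model.Link{
	RangeReadCloser: &model.RangeReadCloser{RangeReader: rangeReader, Closers: utils.EmptyClosers()},
}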

View File

@ -1,6 +1,7 @@
package mega
import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"
"github.com/alist-org/alist/v3/internal/model"
@ -8,29 +9,36 @@ import (
)
type MegaNode struct {
*mega.Node
n *mega.Node
}
//func (m *MegaNode) GetSize() int64 {
// //TODO implement me
// panic("implement me")
//}
//
//func (m *MegaNode) GetName() string {
// //TODO implement me
// panic("implement me")
//}
func (m *MegaNode) GetSize() int64 {
return m.n.GetSize()
}
func (m *MegaNode) GetName() string {
return m.n.GetName()
}
func (m *MegaNode) CreateTime() time.Time {
return m.n.GetTimeStamp()
}
func (m *MegaNode) GetHash() utils.HashInfo {
// Mega uses md5, but the original file's hash can't be obtained because the file is stored encrypted in the cloud
return utils.HashInfo{}
}
func (m *MegaNode) ModTime() time.Time {
return m.GetTimeStamp()
return m.n.GetTimeStamp()
}
func (m *MegaNode) IsDir() bool {
return m.GetType() == mega.FOLDER || m.GetType() == mega.ROOT
return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT
}
func (m *MegaNode) GetID() string {
return m.GetHash()
return m.n.GetHash()
}
func (m *MegaNode) GetPath() string {

View File

@ -6,7 +6,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strconv"
"time"
@ -231,13 +230,12 @@ func (d *MoPan) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
file, err := utils.CreateTempFile(stream, stream.GetSize())
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
_ = os.Remove(file.Name())
}()
// step.1

View File

@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
@ -124,13 +123,12 @@ func (d *PikPak) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
// cal gcid
sha1Str, err := getGcid(tempFile, stream.GetSize())

View File

@ -7,7 +7,6 @@ import (
"encoding/hex"
"io"
"net/http"
"os"
"time"
"github.com/alist-org/alist/v3/drivers/base"
@ -75,7 +74,7 @@ func (d *QuarkOrUC) Link(ctx context.Context, file model.Obj, args model.LinkArg
"User-Agent": []string{ua},
},
Concurrency: 2,
PartSize: 10 * 1024 * 1024,
PartSize: 10 * utils.MB,
}, nil
}
@ -136,13 +135,12 @@ func (d *QuarkOrUC) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
m := md5.New()
_, err = io.Copy(m, tempFile)

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/internal/stream"
"io"
"net/url"
stdpath "path"
@ -96,13 +97,13 @@ func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*mo
func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
return d.Put(ctx, &model.Object{
Path: stdpath.Join(parentDir.GetPath(), dirName),
}, &model.FileStream{
}, &stream.FileStream{
Obj: &model.Object{
Name: getPlaceholderName(d.Placeholder),
Modified: time.Now(),
},
ReadCloser: io.NopCloser(bytes.NewReader([]byte{})),
Mimetype: "application/octet-stream",
Reader: io.NopCloser(bytes.NewReader([]byte{})),
Mimetype: "application/octet-stream",
}, func(int) {})
}

View File

@ -56,7 +56,7 @@ func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*
return nil, err
}
link := &model.Link{
ReadSeekCloser: remoteFile,
MFile: remoteFile,
}
return link, nil
}

View File

@ -61,6 +61,7 @@ func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]m
Modified: f.ModTime(),
Size: f.Size(),
IsFolder: f.IsDir(),
Ctime: f.(*smb2.FileStat).CreationTime,
},
}
files = append(files, &file)
@ -79,7 +80,7 @@ func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*m
return nil, err
}
link := &model.Link{
ReadSeekCloser: remoteFile,
MFile: remoteFile,
}
d.updateLastConnTime()
return link, nil

View File

@ -11,7 +11,6 @@ import (
log "github.com/sirupsen/logrus"
"io"
"math"
"os"
stdpath "path"
"strconv"
"strings"
@ -116,13 +115,12 @@ func (d *Terabox) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
var Default int64 = 4 * 1024 * 1024
defaultByteData := make([]byte, Default)

View File

@ -5,7 +5,6 @@ import (
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/alist-org/alist/v3/drivers/base"
@ -333,13 +332,12 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
}
func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
tempFile, err := utils.CreateTempFile(stream.GetReadCloser(), stream.GetSize())
tempFile, err := stream.CacheFullInTempFile()
if err != nil {
return err
}
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
gcid, err := getGcid(tempFile, stream.GetSize())

View File

@ -2,6 +2,8 @@ package thunder
import (
"fmt"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/utils"
"strconv"
"time"
)
@ -84,6 +86,8 @@ type Link struct {
Type string `json:"type"`
}
var _ model.Obj = (*Files)(nil)
type Files struct {
Kind string `json:"kind"`
ID string `json:"id"`
@ -146,6 +150,14 @@ type Files struct {
//Collection interface{} `json:"collection"`
}
func (c *Files) CreateTime() time.Time {
return c.CreatedTime
}
func (c *Files) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (c *Files) GetSize() int64 { size, _ := strconv.ParseInt(c.Size, 10, 64); return size }
func (c *Files) GetName() string { return c.Name }
func (c *Files) ModTime() time.Time { return c.ModifiedTime }

View File

@ -52,18 +52,29 @@ func (d *Virtual) List(ctx context.Context, dir model.Obj, args model.ListArgs)
return res, nil
}
type nopReadSeekCloser struct {
type DummyMFile struct {
io.Reader
}
func (nopReadSeekCloser) Seek(offset int64, whence int) (int64, error) {
func (f DummyMFile) Read(p []byte) (n int, err error) {
return f.Reader.Read(p)
}
func (f DummyMFile) ReadAt(p []byte, off int64) (n int, err error) {
return f.Reader.Read(p)
}
func (f DummyMFile) Close() error {
return nil
}
func (DummyMFile) Seek(offset int64, whence int) (int64, error) {
return offset, nil
}
func (nopReadSeekCloser) Close() error { return nil }
func (d *Virtual) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
return &model.Link{
ReadSeekCloser: nopReadSeekCloser{io.LimitReader(random.Rand, file.GetSize())},
MFile: DummyMFile{Reader: random.Rand},
}, nil
}
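DummyMFile spells out the surface the new MFile field expects. Inferred from the methods above (an approximation, not the canonical model definition):

// approximate shape of the handle model.Link.MFile now carries
type mFileLike interface {
	io.Reader
	io.ReaderAt
	io.Seeker
	io.Closer
}

var _ mFileLike = DummyMFile{}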

View File

@ -6,7 +6,6 @@ import (
"io"
"math"
"net/http"
"os"
"strconv"
"time"
@ -310,13 +309,12 @@ func (d *WeiYun) Remove(ctx context.Context, obj model.Obj) error {
func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
if folder, ok := dstDir.(*Folder); ok {
file, err := utils.CreateTempFile(stream, stream.GetSize())
file, err := stream.CacheFullInTempFile()
if err != nil {
return nil, err
}
defer func() {
_ = file.Close()
_ = os.Remove(file.Name())
}()
// step 1.

View File

@ -1,6 +1,7 @@
package weiyun
import (
"github.com/alist-org/alist/v3/pkg/utils"
"time"
weiyunsdkgo "github.com/foxxorcat/weiyun-sdk-go"
@ -21,12 +22,27 @@ func (f *File) GetPath() string { return "" }
func (f *File) GetPKey() string {
return f.PFolder.DirKey
}
func (f *File) CreateTime() time.Time {
return time.Time(f.FileCtime)
}
func (f *File) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.FileSha)
}
type Folder struct {
PFolder *Folder
weiyunsdkgo.Folder
}
func (f *Folder) CreateTime() time.Time {
return time.Time(f.DirCtime)
}
func (f *Folder) GetHash() utils.HashInfo {
return utils.HashInfo{}
}
func (f *Folder) GetID() string { return f.DirKey }
func (f *Folder) GetSize() int64 { return 0 }
func (f *Folder) GetName() string { return f.DirName }