fix(driver): implement canceling and updating progress for putting for some drivers (#7847)
* fix(driver): additionally implement canceling and updating progress for putting for some drivers
* refactor: add driver archive api into template
* fix(123): use built-in MD5 to avoid caching full
* .
* fix build failed
parent b9f397d29f
commit 779c293f04
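Every driver touched below funnels its upload body through a small set of shared wrappers from github.com/alist-org/alist/v3/internal/stream. Their definitions are not part of this diff, so the following is only a minimal sketch of what they presumably look like, inferred from the construction sites in the hunks below; the type and field names come from the diff, while the method bodies are assumptions:

// Sketch only: inferred from how this commit uses these types,
// not the actual internal/stream source.
package stream

import (
	"context"
	"io"
	"math"
)

// UpdateProgress mirrors driver.UpdateProgress: a callback taking a percentage in [0, 100].
type UpdateProgress func(percentage float64)

// SimpleReaderWithSize attaches a known total size to a plain io.Reader.
type SimpleReaderWithSize struct {
	io.Reader
	Size int64
}

func (r *SimpleReaderWithSize) GetSize() int64 { return r.Size }

// ReaderUpdatingProgress invokes the callback as the wrapped reader is consumed.
type ReaderUpdatingProgress struct {
	Reader interface { // assumption: the wrapped value exposes Read plus a total size
		io.Reader
		GetSize() int64
	}
	UpdateProgress UpdateProgress
	read           int64
}

func (r *ReaderUpdatingProgress) Read(p []byte) (int, error) {
	n, err := r.Reader.Read(p)
	r.read += int64(n)
	r.UpdateProgress(math.Min(100.0, float64(r.read)*100.0/float64(r.Reader.GetSize())))
	return n, err
}

// ReaderWithCtx makes a blocking copy cancelable: each Read first checks the context.
type ReaderWithCtx struct {
	Reader io.Reader
	Ctx    context.Context
}

func (r *ReaderWithCtx) Read(p []byte) (int, error) {
	if err := r.Ctx.Err(); err != nil {
		return 0, err
	}
	return r.Reader.Read(p)
}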
@@ -215,12 +215,12 @@ func (d *Pan115) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	var uploadResult *UploadResult
 	// instant upload failed; upload the content normally
 	if stream.GetSize() <= 10*utils.MB { // files smaller than 10 MB switch to the plain upload mode
-		if uploadResult, err = d.UploadByOSS(&fastInfo.UploadOSSParams, stream, dirID); err != nil {
+		if uploadResult, err = d.UploadByOSS(ctx, &fastInfo.UploadOSSParams, stream, dirID, up); err != nil {
 			return nil, err
 		}
 	} else {
 		// multipart upload
-		if uploadResult, err = d.UploadByMultipart(&fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID); err != nil {
+		if uploadResult, err = d.UploadByMultipart(ctx, &fastInfo.UploadOSSParams, stream.GetSize(), stream, dirID, up); err != nil {
 			return nil, err
 		}
 	}
@@ -2,17 +2,21 @@ package _115

 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"crypto/tls"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/alist-org/alist/v3/internal/conf"

@@ -271,7 +275,7 @@ func UploadDigestRange(stream model.FileStreamer, rangeSpec string) (result stri
 }

 // UploadByOSS use aliyun sdk to upload
-func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dirID string) (*UploadResult, error) {
+func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSParams, s model.FileStreamer, dirID string, up driver.UpdateProgress) (*UploadResult, error) {
 	ossToken, err := c.client.GetOSSToken()
 	if err != nil {
 		return nil, err
@@ -286,6 +290,13 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
 	}

 	var bodyBytes []byte
+	r := &stream.ReaderWithCtx{
+		Reader: &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		},
+		Ctx: ctx,
+	}
 	if err = bucket.PutObject(params.Object, r, append(
 		driver115.OssOption(params, ossToken),
 		oss.CallbackResult(&bodyBytes),
@@ -301,7 +312,8 @@ func (c *Pan115) UploadByOSS(params *driver115.UploadOSSParams, r io.Reader, dir
 }

 // UploadByMultipart upload by multipart blocks
-func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize int64, stream model.FileStreamer, dirID string, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
+func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
+	dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
 	var (
 		chunks []oss.FileChunk
 		parts  []oss.UploadPart

@@ -313,7 +325,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 		err error
 	)

-	tmpF, err := stream.CacheFullInTempFile()
+	tmpF, err := s.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}
@@ -372,6 +384,7 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 		quit <- struct{}{}
 	}()

+	completedNum := atomic.Int32{}
 	// consumers
 	for i := 0; i < options.ThreadsNum; i++ {
 		go func(threadId int) {
@@ -384,6 +397,8 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 			var part oss.UploadPart // on error, keep retrying: three attempts in total
 			for retry := 0; retry < 3; retry++ {
 				select {
+				case <-ctx.Done():
+					break
 				case <-ticker.C:
 					if ossToken, err = d.client.GetOSSToken(); err != nil { // re-fetch the ossToken when it expires
 						errCh <- errors.Wrap(err, "刷新token时出现错误")

@@ -396,12 +411,18 @@ func (d *Pan115) UploadByMultipart(params *driver115.UploadOSSParams, fileSize i
 					continue
 				}

-				if part, err = bucket.UploadPart(imur, bytes.NewBuffer(buf), chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
+				if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
+					Reader: bytes.NewBuffer(buf),
+					Ctx:    ctx,
+				}, chunk.Size, chunk.Number, driver115.OssOption(params, ossToken)...); err == nil {
 					break
 				}
 			}
 			if err != nil {
-				errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+				errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
+			} else {
+				num := completedNum.Add(1)
+				up(float64(num) * 100.0 / float64(len(chunks)))
 			}
 			UploadedPartsCh <- part
 		}
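Both multipart paths in this commit (115 above, PikPak further down) report progress the same way: the upload goroutines share an atomic counter of completed chunks and convert it to a percentage of the total. A self-contained illustration of the pattern (names are illustrative, not from the codebase):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	const totalChunks = 8
	up := func(p float64) { fmt.Printf("progress: %.1f%%\n", p) }

	var completed atomic.Int32
	var wg sync.WaitGroup
	for i := 0; i < totalChunks; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// ... upload one chunk ...
			n := completed.Add(1) // atomic: several workers finish concurrently
			up(float64(n) * 100.0 / float64(totalChunks))
		}()
	}
	wg.Wait()
}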
@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"
@@ -185,32 +186,35 @@ func (d *Pan123) Remove(ctx context.Context, obj model.Obj) error {
 	}
 }

-func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	// const DEFAULT int64 = 10485760
-	h := md5.New()
-	// need to calculate md5 of the full content
-	tempFile, err := stream.CacheFullInTempFile()
-	if err != nil {
-		return err
+func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+	etag := file.GetHash().GetHash(utils.MD5)
+	if len(etag) < utils.MD5.Width {
+		// const DEFAULT int64 = 10485760
+		h := md5.New()
+		// need to calculate md5 of the full content
+		tempFile, err := file.CacheFullInTempFile()
+		if err != nil {
+			return err
+		}
+		defer func() {
+			_ = tempFile.Close()
+		}()
+		if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
+			return err
+		}
+		_, err = tempFile.Seek(0, io.SeekStart)
+		if err != nil {
+			return err
+		}
+		etag = hex.EncodeToString(h.Sum(nil))
 	}
-	defer func() {
-		_ = tempFile.Close()
-	}()
-	if _, err = utils.CopyWithBuffer(h, tempFile); err != nil {
-		return err
-	}
-	_, err = tempFile.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	etag := hex.EncodeToString(h.Sum(nil))
 	data := base.Json{
 		"driveId": 0,
 		"duplicate": 2, // 2 -> overwrite, 1 -> rename, 0 -> default
 		"etag": etag,
-		"fileName": stream.GetName(),
+		"fileName": file.GetName(),
 		"parentFileId": dstDir.GetID(),
-		"size": stream.GetSize(),
+		"size": file.GetSize(),
 		"type": 0,
 	}
 	var resp UploadResp
@@ -225,7 +229,7 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 		return nil
 	}
 	if resp.Data.AccessKeyId == "" || resp.Data.SecretAccessKey == "" || resp.Data.SessionToken == "" {
-		err = d.newUpload(ctx, &resp, stream, tempFile, up)
+		err = d.newUpload(ctx, &resp, file, up)
 		return err
 	} else {
 		cfg := &aws.Config{

@@ -239,15 +243,21 @@ func (d *Pan123) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 			return err
 		}
 		uploader := s3manager.NewUploader(s)
-		if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-			uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+		if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+			uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
 		}
 		input := &s3manager.UploadInput{
 			Bucket: &resp.Data.Bucket,
 			Key:    &resp.Data.Key,
-			Body:   tempFile,
+			Body: &stream.ReaderUpdatingProgress{
+				Reader:         file,
+				UpdateProgress: up,
+			},
 		}
 		_, err = uploader.UploadWithContext(ctx, input)
+		if err != nil {
+			return err
+		}
 	}
 	_, err = d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
@@ -69,7 +69,7 @@ func (d *Pan123) completeS3(ctx context.Context, upReq *UploadResp, file model.F
 	return err
 }

-func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, reader io.Reader, up driver.UpdateProgress) error {
+func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.FileStreamer, up driver.UpdateProgress) error {
 	chunkSize := int64(1024 * 1024 * 16)
 	// fetch s3 pre signed urls
 	chunkCount := int(math.Ceil(float64(file.GetSize()) / float64(chunkSize)))

@@ -103,7 +103,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 		if j == chunkCount {
 			curSize = file.GetSize() - (int64(chunkCount)-1)*chunkSize
 		}
-		err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(reader, chunkSize), curSize, false, getS3UploadUrl)
+		err = d.uploadS3Chunk(ctx, upReq, s3PreSignedUrls, j, end, io.LimitReader(file, chunkSize), curSize, false, getS3UploadUrl)
 		if err != nil {
 			return err
 		}
@@ -3,6 +3,7 @@ package alist_v3
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"path"

@@ -181,25 +182,28 @@ func (d *AListV3) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }

-func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", stream)
+func (d *AListV3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
+	req, err := http.NewRequestWithContext(ctx, http.MethodPut, d.Address+"/api/fs/put", &stream.ReaderUpdatingProgress{
+		Reader:         s,
+		UpdateProgress: up,
+	})
 	if err != nil {
 		return err
 	}
 	req.Header.Set("Authorization", d.Token)
-	req.Header.Set("File-Path", path.Join(dstDir.GetPath(), stream.GetName()))
+	req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
 	req.Header.Set("Password", d.MetaPassword)
-	if md5 := stream.GetHash().GetHash(utils.MD5); len(md5) > 0 {
+	if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
 		req.Header.Set("X-File-Md5", md5)
 	}
-	if sha1 := stream.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
+	if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
 		req.Header.Set("X-File-Sha1", sha1)
 	}
-	if sha256 := stream.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
+	if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
 		req.Header.Set("X-File-Sha256", sha256)
 	}

-	req.ContentLength = stream.GetSize()
+	req.ContentLength = s.GetSize()
 	// client := base.NewHttpClient()
 	// client.Timeout = time.Hour * 6
 	res, err := base.HttpClient.Do(req)
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"mime/multipart"
 	"net/http"

@@ -215,7 +216,7 @@ func (d *ChaoXing) Remove(ctx context.Context, obj model.Obj) error {
 	return nil
 }

-func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
 	var resp UploadDataRsp
 	_, err := d.request("https://noteyd.chaoxing.com/pc/files/getUploadConfig", http.MethodGet, func(req *resty.Request) {
 	}, &resp)

@@ -227,11 +228,11 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	}
 	body := &bytes.Buffer{}
 	writer := multipart.NewWriter(body)
-	filePart, err := writer.CreateFormFile("file", stream.GetName())
+	filePart, err := writer.CreateFormFile("file", file.GetName())
 	if err != nil {
 		return err
 	}
-	_, err = utils.CopyWithBuffer(filePart, stream)
+	_, err = utils.CopyWithBuffer(filePart, file)
 	if err != nil {
 		return err
 	}

@@ -248,7 +249,14 @@ func (d *ChaoXing) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 	if err != nil {
 		return err
 	}
-	req, err := http.NewRequest("POST", "https://pan-yz.chaoxing.com/upload", body)
+	r := &stream.ReaderUpdatingProgress{
+		Reader: &stream.SimpleReaderWithSize{
+			Reader: body,
+			Size:   int64(body.Len()),
+		},
+		UpdateProgress: up,
+	}
+	req, err := http.NewRequestWithContext(ctx, "POST", "https://pan-yz.chaoxing.com/upload", r)
 	if err != nil {
 		return err
 	}
@@ -2,6 +2,7 @@ package ftp

 import (
 	"context"
+	"github.com/alist-org/alist/v3/internal/stream"
 	stdpath "path"

 	"github.com/alist-org/alist/v3/internal/driver"

@@ -114,13 +115,18 @@ func (d *FTP) Remove(ctx context.Context, obj model.Obj) error {
 	}
 }

-func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *FTP) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	if err := d.login(); err != nil {
 		return err
 	}
-	// TODO: support cancel
-	path := stdpath.Join(dstDir.GetPath(), stream.GetName())
-	return d.conn.Stor(encode(path, d.Encoding), stream)
+	path := stdpath.Join(dstDir.GetPath(), s.GetName())
+	return d.conn.Stor(encode(path, d.Encoding), &stream.ReaderWithCtx{
+		Reader: &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		},
+		Ctx: ctx,
+	})
 }

 var _ driver.Driver = (*FTP)(nil)
@@ -16,6 +16,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	log "github.com/sirupsen/logrus"

@@ -649,15 +650,15 @@ func (d *Github) createGitKeep(path, message string) error {
 	return nil
 }

-func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) (string, error) {
+func (d *Github) putBlob(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) (string, error) {
 	beforeContent := "{\"encoding\":\"base64\",\"content\":\""
 	afterContent := "\"}"
-	length := int64(len(beforeContent)) + calculateBase64Length(stream.GetSize()) + int64(len(afterContent))
+	length := int64(len(beforeContent)) + calculateBase64Length(s.GetSize()) + int64(len(afterContent))
 	beforeContentReader := strings.NewReader(beforeContent)
 	contentReader, contentWriter := io.Pipe()
 	go func() {
 		encoder := base64.NewEncoder(base64.StdEncoding, contentWriter)
-		if _, err := utils.CopyWithBuffer(encoder, stream); err != nil {
+		if _, err := utils.CopyWithBuffer(encoder, s); err != nil {
 			_ = contentWriter.CloseWithError(err)
 			return
 		}

@@ -667,10 +668,12 @@ func (d *Github) putBlob(ctx context.Context, stream model.FileStreamer, up driv
 	afterContentReader := strings.NewReader(afterContent)
 	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
 		fmt.Sprintf("https://api.github.com/repos/%s/%s/git/blobs", d.Owner, d.Repo),
-		&ReaderWithProgress{
-			Reader:   io.MultiReader(beforeContentReader, contentReader, afterContentReader),
-			Length:   length,
-			Progress: up,
+		&stream.ReaderUpdatingProgress{
+			Reader: &stream.SimpleReaderWithSize{
+				Reader: io.MultiReader(beforeContentReader, contentReader, afterContentReader),
+				Size:   length,
+			},
+			UpdateProgress: up,
 		})
 	if err != nil {
 		return "", err
@@ -7,26 +7,10 @@ import (
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
-	"io"
-	"math"
 	"strings"
 	"text/template"
 )

-type ReaderWithProgress struct {
-	Reader   io.Reader
-	Length   int64
-	Progress func(percentage float64)
-	offset   int64
-}
-
-func (r *ReaderWithProgress) Read(p []byte) (int, error) {
-	n, err := r.Reader.Read(p)
-	r.offset += int64(n)
-	r.Progress(math.Min(100.0, float64(r.offset)/float64(r.Length)*100.0))
-	return n, err
-}
-
 type MessageTemplateVars struct {
 	UserName string
 	ObjName  string
@@ -6,6 +6,7 @@ import (
 	"encoding/base64"
 	"encoding/hex"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"net/url"

@@ -266,10 +267,10 @@ func (d *ILanZou) Remove(ctx context.Context, obj model.Obj) error {

 const DefaultPartSize = 1024 * 1024 * 8

-func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	h := md5.New()
 	// need to calculate md5 of the full content
-	tempFile, err := stream.CacheFullInTempFile()
+	tempFile, err := s.CacheFullInTempFile()
 	if err != nil {
 		return nil, err
 	}

@@ -288,8 +289,8 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	res, err := d.proved("/7n/getUpToken", http.MethodPost, func(req *resty.Request) {
 		req.SetBody(base.Json{
 			"fileId": "",
-			"fileName": stream.GetName(),
-			"fileSize": stream.GetSize()/1024 + 1,
+			"fileName": s.GetName(),
+			"fileSize": s.GetSize()/1024 + 1,
 			"folderId": dstDir.GetID(),
 			"md5": etag,
 			"type": 1,

@@ -301,13 +302,20 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	upToken := utils.Json.Get(res, "upToken").ToString()
 	now := time.Now()
 	key := fmt.Sprintf("disk/%d/%d/%d/%s/%016d", now.Year(), now.Month(), now.Day(), d.account, now.UnixMilli())
+	reader := &stream.ReaderUpdatingProgress{
+		Reader: &stream.SimpleReaderWithSize{
+			Reader: tempFile,
+			Size:   s.GetSize(),
+		},
+		UpdateProgress: up,
+	}
 	var token string
-	if stream.GetSize() <= DefaultPartSize {
-		res, err := d.upClient.R().SetMultipartFormData(map[string]string{
+	if s.GetSize() <= DefaultPartSize {
+		res, err := d.upClient.R().SetContext(ctx).SetMultipartFormData(map[string]string{
 			"token": upToken,
 			"key":   key,
-			"fname": stream.GetName(),
-		}).SetMultipartField("file", stream.GetName(), stream.GetMimetype(), tempFile).
+			"fname": s.GetName(),
+		}).SetMultipartField("file", s.GetName(), s.GetMimetype(), reader).
 			Post("https://upload.qiniup.com/")
 		if err != nil {
 			return nil, err

@@ -321,10 +329,10 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		}
 		uploadId := utils.Json.Get(res.Body(), "uploadId").ToString()
 		parts := make([]Part, 0)
-		partNum := (stream.GetSize() + DefaultPartSize - 1) / DefaultPartSize
+		partNum := (s.GetSize() + DefaultPartSize - 1) / DefaultPartSize
 		for i := 1; i <= int(partNum); i++ {
 			u := fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s/%d", d.conf.bucket, keyBase64, uploadId, i)
-			res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(tempFile, DefaultPartSize)).Put(u)
+			res, err = d.upClient.R().SetContext(ctx).SetHeader("Authorization", "UpToken "+upToken).SetBody(io.LimitReader(reader, DefaultPartSize)).Put(u)
 			if err != nil {
 				return nil, err
 			}

@@ -335,7 +343,7 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 			})
 		}
 		res, err = d.upClient.R().SetHeader("Authorization", "UpToken "+upToken).SetBody(base.Json{
-			"fnmae": stream.GetName(),
+			"fnmae": s.GetName(),
 			"parts": parts,
 		}).Post(fmt.Sprintf("https://upload.qiniup.com/buckets/%s/objects/%s/uploads/%s", d.conf.bucket, keyBase64, uploadId))
 		if err != nil {

@@ -373,9 +381,9 @@ func (d *ILanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 		ID: strconv.FormatInt(file.FileId, 10),
 		//Path: ,
 		Name: file.FileName,
-		Size: stream.GetSize(),
-		Modified: stream.ModTime(),
-		Ctime: stream.CreateTime(),
+		Size: s.GetSize(),
+		Modified: s.ModTime(),
+		Ctime: s.CreateTime(),
 		IsFolder: false,
 		HashInfo: utils.NewHashInfo(utils.MD5, etag),
 	}, nil
@@ -3,6 +3,7 @@ package ipfs
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"net/url"
 	stdpath "path"
 	"path/filepath"

@@ -108,9 +109,15 @@ func (d *IPFS) Remove(ctx context.Context, obj model.Obj) error {
 	return d.sh.FilesRm(ctx, obj.GetPath(), true)
 }

-func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *IPFS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	// TODO upload file, optional
-	_, err := d.sh.Add(stream, ToFiles(stdpath.Join(dstDir.GetPath(), stream.GetName())))
+	_, err := d.sh.Add(&stream.ReaderWithCtx{
+		Reader: &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		},
+		Ctx: ctx,
+	}, ToFiles(stdpath.Join(dstDir.GetPath(), s.GetName())))
 	return err
 }

@@ -3,6 +3,7 @@ package kodbox
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/go-resty/resty/v2"
 	"net/http"

@@ -225,14 +226,19 @@ func (d *KodBox) Remove(ctx context.Context, obj model.Obj) error {
 	return nil
 }

-func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	var resp *CommonResp
 	_, err := d.request(http.MethodPost, "/?explorer/upload/fileUpload", func(req *resty.Request) {
-		req.SetFileReader("file", stream.GetName(), stream).
+		r := &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		}
+		req.SetFileReader("file", s.GetName(), r).
 			SetResult(&resp).
 			SetFormData(map[string]string{
 				"path": dstDir.GetPath(),
-			})
+			}).
+			SetContext(ctx)
 	})
 	if err != nil {
 		return nil, err

@@ -244,8 +250,8 @@ func (d *KodBox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	return &model.ObjThumb{
 		Object: model.Object{
 			Path: resp.Info.(string),
-			Name: stream.GetName(),
-			Size: stream.GetSize(),
+			Name: s.GetName(),
+			Size: s.GetSize(),
 			IsFolder: false,
 			Modified: time.Now(),
 			Ctime: time.Now(),
@@ -2,6 +2,7 @@ package lanzou

 import (
 	"context"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"

 	"github.com/alist-org/alist/v3/drivers/base"

@@ -208,7 +209,7 @@ func (d *LanZou) Remove(ctx context.Context, obj model.Obj) error {
 	return errs.NotSupport
 }

-func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
 	if d.IsCookie() || d.IsAccount() {
 		var resp RespText[[]FileOrFolder]
 		_, err := d._post(d.BaseUrl+"/html5up.php", func(req *resty.Request) {

@@ -217,9 +218,12 @@ func (d *LanZou) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 				"vie": "2",
 				"ve":  "2",
 				"id": "WU_FILE_0",
-				"name": stream.GetName(),
+				"name": s.GetName(),
 				"folder_id_bb_n": dstDir.GetID(),
-			}).SetFileReader("upload_file", stream.GetName(), stream).SetContext(ctx)
+			}).SetFileReader("upload_file", s.GetName(), &stream.ReaderUpdatingProgress{
+				Reader:         s,
+				UpdateProgress: up,
+			}).SetContext(ctx)
 		}, &resp, true)
 		if err != nil {
 			return nil, err
@@ -5,6 +5,7 @@ import (
 	"crypto/md5"
 	"encoding/hex"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"io"
 	"net/http"
 	"strconv"

@@ -161,7 +162,7 @@ func (d *MediaTrack) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }

-func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
 	src := "assets/" + uuid.New().String()
 	var resp UploadResp
 	_, err := d.request("https://jayce.api.mediatrack.cn/v3/storage/tokens/asset", http.MethodGet, func(req *resty.Request) {

@@ -180,7 +181,7 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	if err != nil {
 		return err
 	}
-	tempFile, err := stream.CacheFullInTempFile()
+	tempFile, err := file.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}

@@ -188,13 +189,19 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 		_ = tempFile.Close()
 	}()
 	uploader := s3manager.NewUploader(s)
-	if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-		uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+	if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+		uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
 	}
 	input := &s3manager.UploadInput{
 		Bucket: &resp.Data.Bucket,
 		Key:    &resp.Data.Object,
-		Body:   tempFile,
+		Body: &stream.ReaderUpdatingProgress{
+			Reader: &stream.SimpleReaderWithSize{
+				Reader: tempFile,
+				Size:   file.GetSize(),
+			},
+			UpdateProgress: up,
+		},
 	}
 	_, err = uploader.UploadWithContext(ctx, input)
 	if err != nil {

@@ -213,12 +220,12 @@ func (d *MediaTrack) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 	hash := hex.EncodeToString(h.Sum(nil))
 	data := base.Json{
 		"category": 0,
-		"description": stream.GetName(),
+		"description": file.GetName(),
 		"hash": hash,
-		"mime": stream.GetMimetype(),
-		"size": stream.GetSize(),
+		"mime": file.GetMimetype(),
+		"size": file.GetSize(),
 		"src": src,
-		"title": stream.GetName(),
+		"title": file.GetName(),
 		"type": 0,
 	}
 	_, err = d.request(url, http.MethodPost, func(req *resty.Request) {
@@ -88,7 +88,7 @@ func (d *NeteaseMusic) Remove(ctx context.Context, obj model.Obj) error {
 }

 func (d *NeteaseMusic) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
-	return d.putSongStream(stream)
+	return d.putSongStream(ctx, stream, up)
 }

 func (d *NeteaseMusic) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -2,6 +2,7 @@ package netease_music

 import (
 	"context"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"io"
 	"net/http"
 	"strconv"

@@ -71,6 +72,8 @@ func (lrc *LyricObj) getLyricLink() *model.Link {
 type ReqOption struct {
 	crypto  string
 	stream  model.FileStreamer
+	up      driver.UpdateProgress
+	ctx     context.Context
 	data    map[string]string
 	headers map[string]string
 	cookies []*http.Cookie

@@ -113,3 +116,16 @@ func (ch *Characteristic) merge(data map[string]string) map[string]interface{} {
 	}
 	return body
 }
+
+type InlineReadCloser struct {
+	io.Reader
+	io.Closer
+}
+
+func (rc *InlineReadCloser) Read(p []byte) (int, error) {
+	return rc.Reader.Read(p)
+}
+
+func (rc *InlineReadCloser) Close() error {
+	return rc.Closer.Close()
+}
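The InlineReadCloser added above exists because wrapping opt.stream in a progress reader hides its Close method; the helper splices an arbitrary Reader back together with the original Closer via interface embedding. A minimal, runnable usage sketch under that assumption:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// Same shape as the type added in this commit: embedding promotes
// Read from the Reader field and Close from the Closer field.
type InlineReadCloser struct {
	io.Reader
	io.Closer
}

func main() {
	f, err := os.Open(os.DevNull) // stands in for the original stream that must be closed
	if err != nil {
		panic(err)
	}
	rc := &InlineReadCloser{
		Reader: strings.NewReader("payload"), // e.g. a progress-updating wrapper
		Closer: f,
	}
	b, _ := io.ReadAll(rc)
	fmt.Println(string(b))
	_ = rc.Close() // still closes the underlying stream
}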
@@ -1,8 +1,10 @@
 package netease_music

 import (
+	"context"
 	"crypto/md5"
 	"encoding/hex"
+	"github.com/alist-org/alist/v3/internal/driver"
 	"io"
 	"net/http"
 	"strconv"

@@ -47,9 +49,12 @@ func (u *uploader) init(stream model.FileStreamer) error {
 	}

 	h := md5.New()
-	utils.CopyWithBuffer(h, stream)
+	_, err := utils.CopyWithBuffer(h, stream)
+	if err != nil {
+		return err
+	}
 	u.md5 = hex.EncodeToString(h.Sum(nil))
-	_, err := u.file.Seek(0, io.SeekStart)
+	_, err = u.file.Seek(0, io.SeekStart)
 	if err != nil {
 		return err
 	}

@@ -167,7 +172,7 @@ func (u *uploader) publishInfo(resourceId string) error {
 	return nil
 }

-func (u *uploader) upload(stream model.FileStreamer) error {
+func (u *uploader) upload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
 	bucket := "jd-musicrep-privatecloud-audio-public"
 	token, err := u.allocToken(bucket)
 	if err != nil {

@@ -192,6 +197,8 @@ func (u *uploader) upload(stream model.FileStreamer) error {
 		http.MethodPost,
 		ReqOption{
 			stream: stream,
+			up:     up,
+			ctx:    ctx,
 			headers: map[string]string{
 				"x-nos-token": token.token,
 				"Content-Type": "audio/mpeg",
@@ -1,7 +1,9 @@
 package netease_music

 import (
-	"io"
+	"context"
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"path"
 	"regexp"

@@ -58,20 +60,38 @@ func (d *NeteaseMusic) request(url, method string, opt ReqOption) ([]byte, error
 		url = "https://music.163.com/api/linux/forward"
 	}

+	if opt.ctx != nil {
+		req.SetContext(opt.ctx)
+	}
 	if method == http.MethodPost {
 		if opt.stream != nil {
+			if opt.up == nil {
+				opt.up = func(_ float64) {}
+			}
 			req.SetContentLength(true)
-			req.SetBody(io.ReadCloser(opt.stream))
+			req.SetBody(&InlineReadCloser{
+				Reader: &stream.ReaderUpdatingProgress{
+					Reader:         opt.stream,
+					UpdateProgress: opt.up,
+				},
+				Closer: opt.stream,
+			})
 		} else {
 			req.SetFormData(data)
 		}
 		res, err := req.Post(url)
-		return res.Body(), err
+		if err != nil {
+			return nil, err
+		}
+		return res.Body(), nil
 	}

 	if method == http.MethodGet {
 		res, err := req.Get(url)
-		return res.Body(), err
+		if err != nil {
+			return nil, err
+		}
+		return res.Body(), nil
 	}

 	return nil, errs.NotImplement
@@ -206,7 +226,7 @@ func (d *NeteaseMusic) removeSongObj(file model.Obj) error {
 	return err
 }

-func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error {
+func (d *NeteaseMusic) putSongStream(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress) error {
 	tmp, err := stream.CacheFullInTempFile()
 	if err != nil {
 		return err

@@ -231,7 +251,7 @@ func (d *NeteaseMusic) putSongStream(stream model.FileStreamer) error {
 	}

 	if u.meta.needUpload {
-		err = u.upload(stream)
+		err = u.upload(ctx, stream, up)
 		if err != nil {
 			return err
 		}
@@ -255,10 +255,10 @@ func (d *PikPak) Put(ctx context.Context, dstDir model.Obj, stream model.FileStr
 	}

 	if stream.GetSize() <= 10*utils.MB { // files smaller than 10 MB switch to the plain upload mode
-		return d.UploadByOSS(&params, stream, up)
+		return d.UploadByOSS(ctx, &params, stream, up)
 	}
 	// multipart upload
-	return d.UploadByMultipart(&params, stream.GetSize(), stream, up)
+	return d.UploadByMultipart(ctx, &params, stream.GetSize(), stream, up)
 }

 // offline download of files
@@ -2,6 +2,7 @@ package pikpak

 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"crypto/sha1"
 	"encoding/hex"

@@ -9,6 +10,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/model"
 	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/aliyun/aliyun-oss-go-sdk/oss"
 	jsoniter "github.com/json-iterator/go"

@@ -19,6 +21,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/alist-org/alist/v3/drivers/base"
@@ -417,7 +420,7 @@ func (d *PikPak) refreshCaptchaToken(action string, metas map[string]string) err
 	return nil
 }

-func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *PikPak) UploadByOSS(ctx context.Context, params *S3Params, s model.FileStreamer, up driver.UpdateProgress) error {
 	ossClient, err := oss.New(params.Endpoint, params.AccessKeyID, params.AccessKeySecret)
 	if err != nil {
 		return err

@@ -427,14 +430,20 @@ func (d *PikPak) UploadByOSS(params *S3Params, stream model.FileStreamer, up dri
 		return err
 	}

-	err = bucket.PutObject(params.Key, stream, OssOption(params)...)
+	err = bucket.PutObject(params.Key, &stream.ReaderWithCtx{
+		Reader: &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		},
+		Ctx: ctx,
+	}, OssOption(params)...)
 	if err != nil {
 		return err
 	}
 	return nil
 }

-func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *PikPak) UploadByMultipart(ctx context.Context, params *S3Params, fileSize int64, s model.FileStreamer, up driver.UpdateProgress) error {
 	var (
 		chunks []oss.FileChunk
 		parts  []oss.UploadPart

@@ -444,7 +453,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 		err error
 	)

-	tmpF, err := stream.CacheFullInTempFile()
+	tmpF, err := s.CacheFullInTempFile()
 	if err != nil {
 		return err
 	}
@@ -488,6 +497,7 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 		quit <- struct{}{}
 	}()

+	completedNum := atomic.Int32{}
 	// consumers
 	for i := 0; i < ThreadsNum; i++ {
 		go func(threadId int) {
@@ -500,6 +510,8 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 			var part oss.UploadPart // on error, keep retrying: three attempts in total
 			for retry := 0; retry < 3; retry++ {
 				select {
+				case <-ctx.Done():
+					break
 				case <-ticker.C:
 					errCh <- errors.Wrap(err, "ossToken 过期")
 				default:
@@ -511,12 +523,18 @@ func (d *PikPak) UploadByMultipart(params *S3Params, fileSize int64, stream mode
 				}

 				b := bytes.NewBuffer(buf)
-				if part, err = bucket.UploadPart(imur, b, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
+				if part, err = bucket.UploadPart(imur, &stream.ReaderWithCtx{
+					Reader: b,
+					Ctx:    ctx,
+				}, chunk.Size, chunk.Number, OssOption(params)...); err == nil {
 					break
 				}
 			}
 			if err != nil {
-				errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", stream.GetName(), chunk.Number, err))
+				errCh <- errors.Wrap(err, fmt.Sprintf("上传 %s 的第%d个分片时出现错误:%v", s.GetName(), chunk.Number, err))
+			} else {
+				num := completedNum.Add(1)
+				up(float64(num) * 100.0 / float64(len(chunks)))
 			}
 			UploadedPartsCh <- part
 		}
@@ -547,7 +565,7 @@ LOOP:
 	// the EOF error comes from xml Unmarshal: the response is actually JSON, so the upload in fact succeeded
 	if _, err = bucket.CompleteMultipartUpload(imur, parts, OssOption(params)...); err != nil && !errors.Is(err, io.EOF) {
 		// when the file name contains either of the characters & or <, parsing the XML response fails even though the upload actually succeeded
-		if filename := filepath.Base(stream.GetName()); !strings.ContainsAny(filename, "&<") {
+		if filename := filepath.Base(s.GetName()); !strings.ContainsAny(filename, "&<") {
 			return err
 		}
 	}
@@ -3,6 +3,7 @@ package quqi
 import (
 	"bytes"
 	"context"
+	"errors"
 	"io"
 	"strconv"
 	"strings"

@@ -11,6 +12,7 @@ import (
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
 	"github.com/alist-org/alist/v3/internal/model"
+	istream "github.com/alist-org/alist/v3/internal/stream"
 	"github.com/alist-org/alist/v3/pkg/utils"
 	"github.com/alist-org/alist/v3/pkg/utils/random"
 	"github.com/aws/aws-sdk-go/aws"

@@ -385,9 +387,16 @@ func (d *Quqi) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 	}
 	uploader := s3manager.NewUploader(s)
 	buf := make([]byte, 1024*1024*2)
+	fup := &istream.ReaderUpdatingProgress{
+		Reader: &istream.SimpleReaderWithSize{
+			Reader: f,
+			Size:   int64(len(buf)),
+		},
+		UpdateProgress: up,
+	}
 	for partNumber := int64(1); ; partNumber++ {
-		n, err := io.ReadFull(f, buf)
-		if err != nil && err != io.ErrUnexpectedEOF {
+		n, err := io.ReadFull(fup, buf)
+		if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
 			if err == io.EOF {
 				break
 			}
@@ -163,18 +163,21 @@ func (d *S3) Remove(ctx context.Context, obj model.Obj) error {
 	return d.removeFile(obj.GetPath())
 }

-func (d *S3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *S3) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	uploader := s3manager.NewUploader(d.Session)
-	if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-		uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+	if s.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+		uploader.PartSize = s.GetSize() / (s3manager.MaxUploadParts - 1)
 	}
-	key := getKey(stdpath.Join(dstDir.GetPath(), stream.GetName()), false)
-	contentType := stream.GetMimetype()
+	key := getKey(stdpath.Join(dstDir.GetPath(), s.GetName()), false)
+	contentType := s.GetMimetype()
 	log.Debugln("key:", key)
 	input := &s3manager.UploadInput{
 		Bucket: &d.Bucket,
 		Key:    &key,
-		Body:   stream,
+		Body: &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		},
 		ContentType: &contentType,
 	}
 	_, err := uploader.UploadWithContext(ctx, input)
@@ -3,6 +3,7 @@ package seafile
 import (
 	"context"
 	"fmt"
+	"github.com/alist-org/alist/v3/internal/stream"
 	"net/http"
 	"strings"
 	"time"

@@ -197,7 +198,7 @@ func (d *Seafile) Remove(ctx context.Context, obj model.Obj) error {
 	return err
 }

-func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
 	repo, path, err := d.getRepoAndPath(dstDir.GetPath())
 	if err != nil {
 		return err

@@ -214,11 +215,16 @@ func (d *Seafile) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 	u := string(res)
 	u = u[1 : len(u)-1] // remove quotes
 	_, err = d.request(http.MethodPost, u, func(req *resty.Request) {
-		req.SetFileReader("file", stream.GetName(), stream).
+		r := &stream.ReaderUpdatingProgress{
+			Reader:         s,
+			UpdateProgress: up,
+		}
+		req.SetFileReader("file", s.GetName(), r).
 			SetFormData(map[string]string{
 				"parent_dir": path,
 				"replace":    "1",
-			})
+			}).
+			SetContext(ctx)
 	})
 	return err
 }
@@ -66,11 +66,33 @@ func (d *Template) Remove(ctx context.Context, obj model.Obj) error {
     return errs.NotImplement
 }
 
-func (d *Template) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
+func (d *Template) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
     // TODO upload file, optional
     return nil, errs.NotImplement
 }
 
+func (d *Template) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) {
+    // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional
+    return nil, errs.NotImplement
+}
+
+func (d *Template) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
+    // TODO list args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
+    return nil, errs.NotImplement
+}
+
+func (d *Template) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
+    // TODO return link of file args.InnerPath in the archive obj, return errs.NotImplement to use an internal archive tool, optional
+    return nil, errs.NotImplement
+}
+
+func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) {
+    // TODO extract args.InnerPath path in the archive srcObj to the dstDir location, optional
+    // a folder with the same name as the archive file needs to be created to store the extracted results if args.PutIntoNewDir
+    // return errs.NotImplement to use an internal archive tool
+    return nil, errs.NotImplement
+}
+
 //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
 //    return nil, errs.NotSupport
 //}
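The stubs above encode the new archive contract: a driver that returns errs.NotImplement for any of these methods hands the request over to alist's internal archive tool. A hypothetical driver that lists archive entries natively but delegates extraction might look like this (MyDriver, d.api.ListArchive, and toObjs are illustrative names, not real APIs):

    // Hypothetical driver: native listing, extraction delegated.
    func (d *MyDriver) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) {
        entries, err := d.api.ListArchive(ctx, obj.GetID(), args.InnerPath) // assumed remote API
        if err != nil {
            return nil, err
        }
        return toObjs(entries), nil // assumed conversion to []model.Obj
    }

    func (d *MyDriver) Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error) {
        return nil, errs.NotImplement // fall back to the internal archive tool
    }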
@@ -3,6 +3,7 @@ package thunder
 import (
     "context"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/stream"
     "net/http"
     "strconv"
     "strings"
@@ -332,16 +333,16 @@ func (xc *XunLeiCommon) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }
 
-func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
-    hi := stream.GetHash()
+    hi := file.GetHash()
     gcid := hi.GetHash(hash_extend.GCID)
     if len(gcid) < hash_extend.GCID.Width {
-        tFile, err := stream.CacheFullInTempFile()
+        tFile, err := file.CacheFullInTempFile()
         if err != nil {
             return err
         }
 
-        gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+        gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
         if err != nil {
             return err
         }
@@ -353,8 +354,8 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
         r.SetBody(&base.Json{
             "kind":        FILE,
             "parent_id":   dstDir.GetID(),
-            "name":        stream.GetName(),
-            "size":        stream.GetSize(),
+            "name":        file.GetName(),
+            "size":        file.GetSize(),
             "hash":        gcid,
             "upload_type": UPLOAD_TYPE_RESUMABLE,
         })
@@ -375,14 +376,17 @@ func (xc *XunLeiCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
         return err
     }
     uploader := s3manager.NewUploader(s)
-    if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-        uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+    if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+        uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
     }
     _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
         Bucket:  aws.String(param.Bucket),
         Key:     aws.String(param.Key),
         Expires: aws.Time(param.Expiration),
-        Body:    stream,
+        Body: &stream.ReaderUpdatingProgress{
+            Reader:         file,
+            UpdateProgress: up,
+        },
     })
     return err
 }
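The PartSize guard above exists because aws-sdk-go caps a multipart upload at s3manager.MaxUploadParts (10,000) parts of s3manager.DefaultUploadPartSize (5 MiB) each, roughly 48.8 GiB in total. For anything larger the part size must grow; dividing by MaxUploadParts - 1 leaves headroom for a final remainder part. Rough arithmetic (not part of the commit):

    size := int64(100) << 30                          // a 100 GiB file
    partSize := size / (s3manager.MaxUploadParts - 1) // ≈ 10.2 MiB per part
    fullParts := size / partSize                      // 9999 full parts
    // plus at most one remainder part, staying within the 10,000-part cap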
@@ -8,6 +8,7 @@ import (
     "github.com/alist-org/alist/v3/internal/errs"
     "github.com/alist-org/alist/v3/internal/model"
     "github.com/alist-org/alist/v3/internal/op"
+    "github.com/alist-org/alist/v3/internal/stream"
     "github.com/alist-org/alist/v3/pkg/utils"
     hash_extend "github.com/alist-org/alist/v3/pkg/utils/hash"
     "github.com/aws/aws-sdk-go/aws"
@@ -363,16 +364,16 @@ func (xc *XunLeiXCommon) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }
 
-func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
-    hi := stream.GetHash()
+    hi := file.GetHash()
     gcid := hi.GetHash(hash_extend.GCID)
     if len(gcid) < hash_extend.GCID.Width {
-        tFile, err := stream.CacheFullInTempFile()
+        tFile, err := file.CacheFullInTempFile()
         if err != nil {
             return err
         }
 
-        gcid, err = utils.HashFile(hash_extend.GCID, tFile, stream.GetSize())
+        gcid, err = utils.HashFile(hash_extend.GCID, tFile, file.GetSize())
         if err != nil {
             return err
         }
@@ -384,8 +385,8 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
         r.SetBody(&base.Json{
             "kind":        FILE,
             "parent_id":   dstDir.GetID(),
-            "name":        stream.GetName(),
-            "size":        stream.GetSize(),
+            "name":        file.GetName(),
+            "size":        file.GetSize(),
             "hash":        gcid,
             "upload_type": UPLOAD_TYPE_RESUMABLE,
         })
@@ -406,14 +407,17 @@ func (xc *XunLeiXCommon) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
         return err
     }
     uploader := s3manager.NewUploader(s)
-    if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
-        uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1)
+    if file.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize {
+        uploader.PartSize = file.GetSize() / (s3manager.MaxUploadParts - 1)
     }
     _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{
         Bucket:  aws.String(param.Bucket),
         Key:     aws.String(param.Key),
         Expires: aws.Time(param.Expiration),
-        Body:    stream,
+        Body: &stream.ReaderUpdatingProgress{
+            Reader:         file,
+            UpdateProgress: up,
+        },
     })
     return err
 }
@@ -4,6 +4,7 @@ import (
     "context"
    "encoding/json"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/stream"
     "io"
     "net/http"
     "net/url"
@@ -114,23 +115,18 @@ func (d *Trainbit) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }
 
-func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
     endpoint, _ := url.Parse("https://tb28.trainbit.com/api/upload/send_raw/")
     query := &url.Values{}
     query.Add("q", strings.Split(dstDir.GetID(), "_")[1])
     query.Add("guid", guid)
-    query.Add("name", url.QueryEscape(local2provider(stream.GetName(), false)+"."))
+    query.Add("name", url.QueryEscape(local2provider(s.GetName(), false)+"."))
     endpoint.RawQuery = query.Encode()
-    var total int64
-    total = 0
-    progressReader := &ProgressReader{
-        stream,
-        func(byteNum int) {
-            total += int64(byteNum)
-            up(float64(total) / float64(stream.GetSize()) * 100)
-        },
-    }
+    progressReader := &stream.ReaderUpdatingProgress{
+        Reader:         s,
+        UpdateProgress: up,
+    }
-    req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader)
+    req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint.String(), progressReader)
     if err != nil {
         return err
     }
@@ -13,17 +13,6 @@ import (
     "github.com/alist-org/alist/v3/internal/model"
 )
 
-type ProgressReader struct {
-    io.Reader
-    reporter func(byteNum int)
-}
-
-func (progressReader *ProgressReader) Read(data []byte) (int, error) {
-    byteNum, err := progressReader.Reader.Read(data)
-    progressReader.reporter(byteNum)
-    return byteNum, err
-}
-
 func get(url string, apiKey string, AUSHELLPORTAL string) (*http.Response, error) {
     req, err := http.NewRequest(http.MethodGet, url, nil)
     if err != nil {
@@ -3,6 +3,7 @@ package uss
 import (
     "context"
     "fmt"
+    "github.com/alist-org/alist/v3/internal/stream"
     "net/url"
     "path"
     "strings"
@@ -122,11 +123,16 @@ func (d *USS) Remove(ctx context.Context, obj model.Obj) error {
     })
 }
 
-func (d *USS) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *USS) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
-    // TODO not support cancel??
     return d.client.Put(&upyun.PutObjectConfig{
-        Path:   getKey(path.Join(dstDir.GetPath(), stream.GetName()), false),
-        Reader: stream,
+        Path: getKey(path.Join(dstDir.GetPath(), s.GetName()), false),
+        Reader: &stream.ReaderWithCtx{
+            Reader: &stream.ReaderUpdatingProgress{
+                Reader:         s,
+                UpdateProgress: up,
+            },
+            Ctx: ctx,
+        },
     })
 }
@@ -2,6 +2,7 @@ package webdav
 
 import (
     "context"
+    "github.com/alist-org/alist/v3/internal/stream"
     "net/http"
     "os"
     "path"
@@ -93,13 +94,18 @@ func (d *WebDav) Remove(ctx context.Context, obj model.Obj) error {
     return d.client.RemoveAll(getPath(obj))
 }
 
-func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
     callback := func(r *http.Request) {
-        r.Header.Set("Content-Type", stream.GetMimetype())
-        r.ContentLength = stream.GetSize()
+        r.Header.Set("Content-Type", s.GetMimetype())
+        r.ContentLength = s.GetSize()
     }
-    // TODO: support cancel
-    err := d.client.WriteStream(path.Join(dstDir.GetPath(), stream.GetName()), stream, 0644, callback)
+    err := d.client.WriteStream(path.Join(dstDir.GetPath(), s.GetName()), &stream.ReaderWithCtx{
+        Reader: &stream.ReaderUpdatingProgress{
+            Reader:         s,
+            UpdateProgress: up,
+        },
+        Ctx: ctx,
+    }, 0644, callback)
     return err
 }
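USS and WebDAV nest the same two wrappers by hand — progress tracking inside, context checking outside. A driver-side helper (illustrative only, not part of this commit) would collapse that boilerplate:

    // Illustrative helper, not in this commit.
    func wrapUpload(ctx context.Context, s model.FileStreamer, up driver.UpdateProgress) io.Reader {
        return &stream.ReaderWithCtx{
            Reader: &stream.ReaderUpdatingProgress{
                Reader:         s,
                UpdateProgress: up,
            },
            Ctx: ctx,
        }
    }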
@@ -7,6 +7,7 @@ import (
     "math"
     "net/http"
     "strconv"
+    "sync/atomic"
     "time"
 
     "github.com/alist-org/alist/v3/drivers/base"
@@ -311,77 +312,82 @@ func (d *WeiYun) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
     // NOTE:
     // 秒传需要sha1最后一个状态,但sha1无法逆运算需要读完整个文件(或许可以??)
     // 服务器支持上传进度恢复,不需要额外实现
-    if folder, ok := dstDir.(*Folder); ok {
-        file, err := stream.CacheFullInTempFile()
-        if err != nil {
-            return nil, err
-        }
+    var folder *Folder
+    var ok bool
+    if folder, ok = dstDir.(*Folder); !ok {
+        return nil, errs.NotSupport
+    }
+    file, err := stream.CacheFullInTempFile()
+    if err != nil {
+        return nil, err
+    }
 
-        // step 1.
-        preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
-            PdirKey: folder.GetPKey(),
-            DirKey:  folder.DirKey,
-
-            FileName: stream.GetName(),
-            FileSize: stream.GetSize(),
-            File:     file,
-
-            ChannelCount:    4,
-            FileExistOption: 1,
-        })
-        if err != nil {
-            return nil, err
-        }
+    // step 1.
+    preData, err := d.client.PreUpload(ctx, weiyunsdkgo.UpdloadFileParam{
+        PdirKey: folder.GetPKey(),
+        DirKey:  folder.DirKey,
+
+        FileName: stream.GetName(),
+        FileSize: stream.GetSize(),
+        File:     file,
+
+        ChannelCount:    4,
+        FileExistOption: 1,
+    })
+    if err != nil {
+        return nil, err
+    }
 
-        // not fast upload
-        if !preData.FileExist {
-            // step.2 增加上传通道
-            if len(preData.ChannelList) < d.uploadThread {
-                newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
-                if err != nil {
-                    return nil, err
-                }
-                preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
-            }
-            // step.3 上传
-            threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
-                retry.Attempts(3),
-                retry.Delay(time.Second),
-                retry.DelayType(retry.BackOffDelay))
-
-            for _, channel := range preData.ChannelList {
-                if utils.IsCanceled(upCtx) {
-                    break
-                }
-
-                var channel = channel
-                threadG.Go(func(ctx context.Context) error {
-                    for {
-                        channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
-                        upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
-                            io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
-                        if err != nil {
-                            return err
-                        }
-                        // 上传完成
-                        if upData.UploadState != 1 {
-                            return nil
-                        }
-                        channel = upData.Channel
-                    }
-                })
-            }
-            if err = threadG.Wait(); err != nil {
-                return nil, err
-            }
-        }
-
-        return &File{
-            PFolder: folder,
-            File:    preData.File,
-        }, nil
-    }
-    return nil, errs.NotSupport
+    // not fast upload
+    if !preData.FileExist {
+        // step.2 增加上传通道
+        if len(preData.ChannelList) < d.uploadThread {
+            newCh, err := d.client.AddUploadChannel(len(preData.ChannelList), d.uploadThread, preData.UploadAuthData)
+            if err != nil {
+                return nil, err
+            }
+            preData.ChannelList = append(preData.ChannelList, newCh.AddChannels...)
+        }
+        // step.3 上传
+        threadG, upCtx := errgroup.NewGroupWithContext(ctx, len(preData.ChannelList),
+            retry.Attempts(3),
+            retry.Delay(time.Second),
+            retry.DelayType(retry.BackOffDelay))
+
+        total := atomic.Int64{}
+        for _, channel := range preData.ChannelList {
+            if utils.IsCanceled(upCtx) {
+                break
+            }
+
+            var channel = channel
+            threadG.Go(func(ctx context.Context) error {
+                for {
+                    channel.Len = int(math.Min(float64(stream.GetSize()-channel.Offset), float64(channel.Len)))
+                    upData, err := d.client.UploadFile(upCtx, channel, preData.UploadAuthData,
+                        io.NewSectionReader(file, channel.Offset, int64(channel.Len)))
+                    if err != nil {
+                        return err
+                    }
+                    cur := total.Add(int64(channel.Len))
+                    up(float64(cur) * 100.0 / float64(stream.GetSize()))
+                    // 上传完成
+                    if upData.UploadState != 1 {
+                        return nil
+                    }
+                    channel = upData.Channel
+                }
+            })
+        }
+        if err = threadG.Wait(); err != nil {
+            return nil, err
+        }
+    }
+
+    return &File{
+        PFolder: folder,
+        File:    preData.File,
+    }, nil
 }
 
 // func (d *WeiYun) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
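Two things happen in the WeiYun hunk. First, the control flow is flattened: a destination that is not a *Folder now fails fast with errs.NotSupport instead of the whole body nesting inside an if. (The NOTE comments say, roughly: fast upload would need the final sha1 state, which cannot be derived without reading the whole file, and the server already supports resuming uploads, so nothing extra is implemented for that.) Second, progress is aggregated across the concurrent upload channels with an atomic counter, so no mutex is needed. The counter pattern in isolation:

    var total atomic.Int64 // shared by all upload goroutines

    // inside each goroutine, after a chunk of chunkLen bytes is sent:
    cur := total.Add(int64(chunkLen))        // atomic add returns the new running total
    up(float64(cur) * 100.0 / float64(size)) // assumes up tolerates concurrent calls, as the commit does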
@@ -161,6 +161,7 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
         OnProgress: func(current, total int64) {
             up(100 * float64(current) / float64(total))
         },
+        Ctx: ctx,
     })
     return err
 }
@@ -2,6 +2,7 @@ package yandex_disk
 
 import (
     "context"
+    "github.com/alist-org/alist/v3/internal/stream"
     "net/http"
     "path"
     "strconv"
@@ -106,25 +107,30 @@ func (d *YandexDisk) Remove(ctx context.Context, obj model.Obj) error {
     return err
 }
 
-func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+func (d *YandexDisk) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
     var resp UploadResp
     _, err := d.request("/upload", http.MethodGet, func(req *resty.Request) {
         req.SetQueryParams(map[string]string{
-            "path":      path.Join(dstDir.GetPath(), stream.GetName()),
+            "path":      path.Join(dstDir.GetPath(), s.GetName()),
             "overwrite": "true",
         })
     }, &resp)
     if err != nil {
         return err
     }
-    req, err := http.NewRequest(resp.Method, resp.Href, stream)
+    req, err := http.NewRequestWithContext(ctx, resp.Method, resp.Href, &stream.ReaderUpdatingProgress{
+        Reader:         s,
+        UpdateProgress: up,
+    })
     if err != nil {
         return err
     }
-    req = req.WithContext(ctx)
-    req.Header.Set("Content-Length", strconv.FormatInt(stream.GetSize(), 10))
+    req.Header.Set("Content-Length", strconv.FormatInt(s.GetSize(), 10))
     req.Header.Set("Content-Type", "application/octet-stream")
     res, err := base.HttpClient.Do(req)
+    if err != nil {
+        return err
+    }
     _ = res.Body.Close()
     return err
 }
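Besides switching to http.NewRequestWithContext (which replaces the later req.WithContext call) and wiring in progress, this hunk fixes a latent crash: http.Client.Do returns a nil *http.Response when it errors, so the old unconditional res.Body.Close() could panic on a network failure. The added err check returns before the body is touched.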
@@ -77,7 +77,7 @@ type Remove interface {
 }
 
 type Put interface {
-    Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error
+    Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
 }
 
 type PutURL interface {
@@ -113,7 +113,7 @@ type CopyResult interface {
 }
 
 type PutResult interface {
-    Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error)
+    Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
 }
 
 type PutURLResult interface {
@@ -159,7 +159,7 @@ type ArchiveDecompressResult interface {
     ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
 }
 
-type UpdateProgress model.UpdateProgress
+type UpdateProgress = model.UpdateProgress
 
 type Progress struct {
     Total int64
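Turning UpdateProgress into a type alias is presumably what lets the shared wrappers accept a driver's callback directly. With the old defined type, driver.UpdateProgress and model.UpdateProgress were distinct types despite the identical underlying signature, so passing one where the other was expected needed an explicit conversion; the alias makes them the same type:

    var mu model.UpdateProgress = func(p float64) {}

    // old defined type: explicit conversion required
    var du driver.UpdateProgress = driver.UpdateProgress(mu)

    // new alias: the types are identical, so this assigns directly
    var du2 driver.UpdateProgress = mu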
@ -562,3 +562,17 @@ func (f *FileReadAtSeeker) Seek(offset int64, whence int) (int64, error) {
|
|||||||
func (f *FileReadAtSeeker) Close() error {
|
func (f *FileReadAtSeeker) Close() error {
|
||||||
return f.ss.Close()
|
return f.ss.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type ReaderWithCtx struct {
|
||||||
|
io.Reader
|
||||||
|
Ctx context.Context
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *ReaderWithCtx) Read(p []byte) (n int, err error) {
|
||||||
|
select {
|
||||||
|
case <-r.Ctx.Done():
|
||||||
|
return 0, r.Ctx.Err()
|
||||||
|
default:
|
||||||
|
return r.Reader.Read(p)
|
||||||
|
}
|
||||||
|
}
|
||||||
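ReaderWithCtx checks the context once per Read call, so cancelation takes effect between chunks rather than interrupting a read that is already blocked. A quick usage sketch (imports of context and strings assumed):

    ctx, cancel := context.WithCancel(context.Background())
    r := &ReaderWithCtx{Reader: strings.NewReader("payload"), Ctx: ctx}

    buf := make([]byte, 4)
    n, _ := r.Read(buf)   // n == 4: normal read while ctx is live
    cancel()
    _, err := r.Read(buf) // err == context.Canceled; the wrapped reader is not touched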