refactor: change type of percentage to float64
parent 7db3975b18
commit ea9a3432ab
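The core of the refactor is the UpdateProgress callback type in internal/driver and the matching SetProgress/GetProgress methods on Task: callers no longer truncate to int before reporting, they convert both operands and pass a float64 percentage. Below is a minimal standalone sketch of the new callback shape and a typical chunk-loop caller; the chunkCount loop is illustrative only and not taken from any single driver in this diff.

package main

import "fmt"

// UpdateProgress mirrors the new callback type from internal/driver:
// the reported percentage is a float64 instead of an int.
type UpdateProgress func(percentage float64)

func main() {
	up := UpdateProgress(func(p float64) {
		fmt.Printf("progress: %.2f%%\n", p)
	})

	// Converting both operands before dividing keeps fractional progress,
	// instead of the coarse steps (or zero) that integer division produced.
	chunkCount := 7
	for j := 1; j <= chunkCount; j++ {
		up(float64(j) * 100 / float64(chunkCount))
	}
}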
@@ -107,7 +107,7 @@ func (d *Pan123) newUpload(ctx context.Context, upReq *UploadResp, file model.Fi
 if err != nil {
 return err
 }
-up(j * 100 / chunkCount)
+up(float64(j) * 100 / float64(chunkCount))
 }
 }
 // complete s3 upload
@@ -380,7 +380,7 @@ func (d *Cloud189) newUpload(ctx context.Context, dstDir model.Obj, file model.F
 if err != nil {
 return err
 }
-up(int(i * 100 / count))
+up(float64(i) * 100 / float64(count))
 }
 fileMd5 := hex.EncodeToString(md5Sum.Sum(nil))
 sliceMd5 := fileMd5
@@ -513,7 +513,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo
 if err != nil {
 return err
 }
-up(int(threadG.Success()) * 100 / count)
+up(float64(threadG.Success()) * 100 / float64(count))
 return nil
 })
 }
@@ -676,7 +676,7 @@ func (y *Cloud189PC) FastUpload(ctx context.Context, dstDir model.Obj, file mode
 return err
 }
 
-up(int(threadG.Success()) * 100 / len(uploadUrls))
+up(float64(threadG.Success()) * 100 / float64(len(uploadUrls)))
 uploadProgress.UploadParts[i] = ""
 return nil
 })
@@ -812,7 +812,7 @@ func (y *Cloud189PC) OldUpload(ctx context.Context, dstDir model.Obj, file model
 if _, err := tempFile.Seek(status.GetSize(), io.SeekStart); err != nil {
 return nil, err
 }
-up(int(status.GetSize()/file.GetSize()) * 100)
+up(float64(status.GetSize()) / float64(file.GetSize()) * 100)
 }
 
 return y.OldUploadCommit(ctx, status.FileCommitUrl, status.UploadFileId)
@@ -7,7 +7,6 @@ import (
 "encoding/base64"
 "encoding/hex"
 "fmt"
-"github.com/alist-org/alist/v3/internal/stream"
 "io"
 "math"
 "math/big"
@@ -15,6 +14,8 @@ import (
 "os"
 "time"
 
+"github.com/alist-org/alist/v3/internal/stream"
+
 "github.com/alist-org/alist/v3/drivers/base"
 "github.com/alist-org/alist/v3/internal/conf"
 "github.com/alist-org/alist/v3/internal/driver"
@@ -304,7 +305,7 @@ func (d *AliDrive) Put(ctx context.Context, dstDir model.Obj, streamer model.Fil
 }
 res.Body.Close()
 if count > 0 {
-up(i * 100 / count)
+up(float64(i) * 100 / float64(count))
 }
 }
 var resp2 base.Json
@@ -274,7 +274,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
 if err != nil {
 return err
 }
-up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
+up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
 precreateResp.BlockList[i] = -1
 return nil
 })
@@ -329,7 +329,7 @@ func (d *BaiduPhoto) Put(ctx context.Context, dstDir model.Obj, stream model.Fil
 if err != nil {
 return err
 }
-up(int(threadG.Success()) * 100 / len(precreateResp.BlockList))
+up(float64(threadG.Success()) * 100 / float64(len(precreateResp.BlockList)))
 precreateResp.BlockList[i] = -1
 return nil
 })
@@ -203,7 +203,7 @@ func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 _ = res.Body.Close()
 
 if count > 0 {
-up((i + 1) * 100 / count)
+up(float64(i+1) * 100 / float64(count))
 }
 
 offset += byteSize
@@ -4,11 +4,12 @@ import (
 "context"
 "errors"
 "fmt"
-"github.com/alist-org/alist/v3/pkg/http_range"
-"github.com/rclone/rclone/lib/readers"
 "io"
 "time"
 
+"github.com/alist-org/alist/v3/pkg/http_range"
+"github.com/rclone/rclone/lib/readers"
+
 "github.com/alist-org/alist/v3/internal/driver"
 "github.com/alist-org/alist/v3/internal/errs"
 "github.com/alist-org/alist/v3/internal/model"
@@ -169,7 +170,7 @@ func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea
 if err != nil {
 return err
 }
-up(id * 100 / u.Chunks())
+up(float64(id) * 100 / float64(u.Chunks()))
 }
 
 _, err = u.Finish()
@@ -308,7 +308,7 @@ func (d *MoPan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 if resp.StatusCode != http.StatusOK {
 return fmt.Errorf("upload err,code=%d", resp.StatusCode)
 }
-up(100 * int(threadG.Success()) / len(parts))
+up(100 * float64(threadG.Success()) / float64(len(parts)))
 initUpdload.PartInfos[i] = ""
 return nil
 })
@@ -203,7 +203,7 @@ func (d *Onedrive) upBig(ctx context.Context, dstDir model.Obj, stream model.Fil
 return errors.New(string(data))
 }
 res.Body.Close()
-up(int(finish * 100 / stream.GetSize()))
+up(float64(finish) * 100 / float64(stream.GetSize()))
 }
 return nil
 }
@@ -194,7 +194,7 @@ func (d *OnedriveAPP) upBig(ctx context.Context, dstDir model.Obj, stream model.
 return errors.New(string(data))
 }
 res.Body.Close()
-up(int(finish * 100 / stream.GetSize()))
+up(float64(finish) * 100 / float64(stream.GetSize()))
 }
 return nil
 }
@@ -209,7 +209,7 @@ func (d *QuarkOrUC) Put(ctx context.Context, dstDir model.Obj, stream model.File
 }
 md5s = append(md5s, m)
 partNumber++
-up(int(100 * (total - left) / total))
+up(100 * float64(total-left) / float64(total))
 }
 err = d.upCommit(pre, md5s)
 if err != nil {
@@ -4,13 +4,14 @@ import (
 "bytes"
 "context"
 "fmt"
-"github.com/alist-org/alist/v3/internal/stream"
 "io"
 "net/url"
 stdpath "path"
 "strings"
 "time"
 
+"github.com/alist-org/alist/v3/internal/stream"
+
 "github.com/alist-org/alist/v3/internal/driver"
 "github.com/alist-org/alist/v3/internal/model"
 "github.com/aws/aws-sdk-go/aws/session"
@@ -104,7 +105,7 @@ func (d *S3) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) e
 },
 Reader: io.NopCloser(bytes.NewReader([]byte{})),
 Mimetype: "application/octet-stream",
-}, func(int) {})
+}, func(float64) {})
 }
 
 func (d *S3) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -189,7 +189,7 @@ func (d *Teambition) chunkUpload(ctx context.Context, file model.FileStreamer, t
 if err != nil {
 return nil, err
 }
-up(i * 100 / newChunk.Chunks)
+up(float64(i) * 100 / float64(newChunk.Chunks))
 }
 _, err = base.RestyClient.R().SetHeader("Authorization", token).Post(
 fmt.Sprintf("https://%s.teambition.net/upload/chunk/%s",
@@ -213,7 +213,7 @@ func (d *Terabox) Put(ctx context.Context, dstDir model.Obj, stream model.FileSt
 }
 log.Debugln(res.String())
 if len(precreateResp.BlockList) > 0 {
-up(i * 100 / len(precreateResp.BlockList))
+up(float64(i) * 100 / float64(len(precreateResp.BlockList)))
 }
 }
 _, err = d.create(rawPath, stream.GetSize(), 0, precreateResp.Uploadid, block_list_str)
@@ -5,7 +5,6 @@ import (
 "encoding/json"
 "fmt"
 "io"
-"math"
 "net/http"
 "net/url"
 "strings"
@@ -128,7 +127,7 @@ func (d *Trainbit) Put(ctx context.Context, dstDir model.Obj, stream model.FileS
 stream,
 func(byteNum int) {
 total += int64(byteNum)
-up(int(math.Round(float64(total) / float64(stream.GetSize()) * 100)))
+up(float64(total) / float64(stream.GetSize()) * 100)
 },
 }
 req, err := http.NewRequest(http.MethodPost, endpoint.String(), progressReader)
@@ -159,7 +159,7 @@ func (d *Wopan) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre
 ContentType: stream.GetMimetype(),
 }, dstDir.GetID(), d.FamilyID, wopan.Upload2COption{
 OnProgress: func(current, total int64) {
-up(int(100 * current / total))
+up(100 * float64(current) / float64(total))
 },
 })
 return err
@@ -2,7 +2,6 @@ package aria2
 
 import (
 "fmt"
-"github.com/alist-org/alist/v3/internal/stream"
 "os"
 "path"
 "path/filepath"
@@ -11,6 +10,8 @@ import (
 "sync/atomic"
 "time"
 
+"github.com/alist-org/alist/v3/internal/stream"
+
 "github.com/alist-org/alist/v3/internal/model"
 "github.com/alist-org/alist/v3/internal/op"
 "github.com/alist-org/alist/v3/pkg/task"
@@ -100,7 +101,7 @@ func (m *Monitor) Update() (bool, error) {
 downloaded = 0
 }
 progress := float64(downloaded) / float64(total) * 100
-m.tsk.SetProgress(int(progress))
+m.tsk.SetProgress(progress)
 switch info.Status {
 case "complete":
 err := m.Complete()
@@ -109,7 +109,7 @@ type PutResult interface {
 Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error)
 }
 
-type UpdateProgress func(percentage int)
+type UpdateProgress func(percentage float64)
 
 type Progress struct {
 Total int64
@@ -120,7 +120,7 @@ type Progress struct {
 func (p *Progress) Write(b []byte) (n int, err error) {
 n = len(b)
 p.Done += int64(n)
-p.up(int(float64(p.Done) / float64(p.Total) * 100))
+p.up(float64(p.Done) / float64(p.Total) * 100)
 return
 }
 
@@ -3,6 +3,7 @@ package offline_download
 import (
 "io"
 "os"
+"time"
 
 "github.com/alist-org/alist/v3/internal/model"
 )
@@ -15,7 +16,7 @@ type AddUriArgs struct {
 }
 
 type Status struct {
-Progress int
+Progress float64
 NewTID string
 Completed bool
 Status string
@@ -39,9 +40,10 @@ type Tool interface {
 
 type File struct {
 io.ReadCloser
-Name string
-Size int64
-Path string
+Name     string
+Size     int64
+Path     string
+Modified time.Time
 }
 
 func (f *File) GetReadCloser() (io.ReadCloser, error) {
@@ -11,6 +11,7 @@ import (
 
 "github.com/alist-org/alist/v3/internal/model"
 "github.com/alist-org/alist/v3/internal/op"
+"github.com/alist-org/alist/v3/internal/stream"
 "github.com/alist-org/alist/v3/pkg/task"
 "github.com/alist-org/alist/v3/pkg/utils"
 "github.com/pkg/errors"
@@ -134,22 +135,24 @@ func (m *Monitor) Complete() error {
 if err != nil {
 return errors.Wrapf(err, "failed to open file %s", file.Path)
 }
-stream := &model.FileStream{
+s := &stream.FileStream{
+Ctx: nil,
 Obj: &model.Object{
 Name: path.Base(file.Path),
 Size: file.Size,
-Modified: time.Now(),
+Modified: file.Modified,
 IsFolder: false,
 },
-ReadCloser: rc,
-Mimetype: mimetype,
+Reader: rc,
+Mimetype: mimetype,
+Closers: utils.NewClosers(rc),
 }
 relDir, err := filepath.Rel(m.tempDir, filepath.Dir(file.Path))
 if err != nil {
 log.Errorf("find relation directory error: %v", err)
 }
 newDistDir := filepath.Join(dstDirActualPath, relDir)
-return op.Put(tsk.Ctx, storage, newDistDir, stream, tsk.SetProgress)
+return op.Put(tsk.Ctx, storage, newDistDir, s, tsk.SetProgress)
 },
 }))
 }
@@ -13,9 +13,10 @@ func GetFiles(dir string) ([]*File, error) {
 }
 if !info.IsDir() {
 files = append(files, &File{
-Name: info.Name(),
-Size: info.Size(),
-Path: path,
+Name:     info.Name(),
+Size:     info.Size(),
+Path:     path,
+Modified: info.ModTime(),
 })
 }
 return nil
@@ -534,7 +534,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 }
 // if up is nil, set a default to prevent panic
 if up == nil {
-up = func(p int) {}
+up = func(p float64) {}
 }
 
 switch s := storage.(type) {
@@ -2,13 +2,14 @@ package qbittorrent
 
 import (
 "fmt"
-"github.com/alist-org/alist/v3/internal/stream"
 "os"
 "path/filepath"
 "sync"
 "sync/atomic"
 "time"
 
+"github.com/alist-org/alist/v3/internal/stream"
+
 "github.com/alist-org/alist/v3/internal/model"
 "github.com/alist-org/alist/v3/internal/op"
 "github.com/alist-org/alist/v3/pkg/task"
@@ -85,7 +86,7 @@ func (m *Monitor) update() (bool, error) {
 }
 
 progress := float64(info.Completed) / float64(info.Size) * 100
-m.tsk.SetProgress(int(progress))
+m.tsk.SetProgress(progress)
 switch info.State {
 case UPLOADING, PAUSEDUP, QUEUEDUP, STALLEDUP, FORCEDUP, CHECKINGUP:
 err = m.complete()
@@ -26,7 +26,7 @@ type Task[K comparable] struct {
 Name string
 state string // pending, running, finished, canceling, canceled, errored
 status string
-progress int
+progress float64
 
 Error error
 
@@ -41,11 +41,11 @@ func (t *Task[K]) SetStatus(status string) {
 t.status = status
 }
 
-func (t *Task[K]) SetProgress(percentage int) {
+func (t *Task[K]) SetProgress(percentage float64) {
 t.progress = percentage
 }
 
-func (t Task[K]) GetProgress() int {
+func (t Task[K]) GetProgress() float64 {
 return t.progress
 }
 
@@ -5,10 +5,11 @@ import (
 "context"
 "errors"
 "fmt"
-"golang.org/x/exp/constraints"
 "io"
 "time"
 
+"golang.org/x/exp/constraints"
+
 log "github.com/sirupsen/logrus"
 )
 
@@ -21,7 +22,7 @@ func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
 // CopyWithCtx slightly modified function signature:
 // - context has been added in order to propagate cancellation
 // - I do not return the number of bytes written, has it is not useful in my use case
-func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, progress func(percentage int)) error {
+func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, progress func(percentage float64)) error {
 // Copy will call the Reader and Writer interface multiple time, in order
 // to copy by chunk (avoiding loading the whole file in memory).
 // I insert the ability to cancel before read time as it is the earliest
@@ -40,7 +41,7 @@ func CopyWithCtx(ctx context.Context, out io.Writer, in io.Reader, size int64, p
 n, err := in.Read(p)
 if s > 0 && (err == nil || err == io.EOF) {
 finish += int64(n)
-progress(int(finish / s))
+progress(float64(finish) / float64(s))
 }
 return n, err
 }
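As a usage note for the CopyWithCtx change above: callers now hand in a func(float64) progress callback. A hedged sketch of a caller follows, assuming CopyWithCtx lives in the pkg/utils package imported elsewhere in this diff; the file names are placeholders.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/alist-org/alist/v3/pkg/utils"
)

func main() {
	src, err := os.Open("in.bin") // placeholder input file
	if err != nil {
		panic(err)
	}
	defer src.Close()

	dst, err := os.Create("out.bin") // placeholder output file
	if err != nil {
		panic(err)
	}
	defer dst.Close()

	info, err := src.Stat()
	if err != nil {
		panic(err)
	}

	// The callback now receives a float64; per the hunk above the value is
	// finish divided by size (a 0..1 ratio), so scale by 100 for a percentage.
	err = utils.CopyWithCtx(context.Background(), dst, src, info.Size(), func(p float64) {
		fmt.Printf("copy progress: %.2f\n", p)
	})
	if err != nil {
		fmt.Println("copy failed:", err)
	}
}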
@@ -12,12 +12,12 @@ import (
 )
 
 type TaskInfo struct {
-ID       string `json:"id"`
-Name     string `json:"name"`
-State    string `json:"state"`
-Status   string `json:"status"`
-Progress int    `json:"progress"`
-Error    string `json:"error"`
+ID       string  `json:"id"`
+Name     string  `json:"name"`
+State    string  `json:"state"`
+Status   string  `json:"status"`
+Progress float64 `json:"progress"`
+Error    string  `json:"error"`
 }
 
 type K2Str[K comparable] func(k K) string