feat: Crypt driver, improve http/webdav handling (#4884)

This PR contains several enhancements, fixes, and new features:
- [x] Crypt: a transparent encryption driver. Anyone can easily and safely store encrypted data on a remote storage provider. Think of your data as locked inside a safe: the storage provider only ever sees the safe, never your data.
  - [x] Optional compatibility with [Rclone Crypt](https://rclone.org/crypt/), which gives you more ways to work with the encrypted data.
  - [x] Directory and filename encryption
  - [x] Server-side encryption mode (the server encrypts and decrypts all data; all data flows through the server)
- [x] Obfuscate sensitive information internally
- [x] Introduced a server-side, memory-cached, multi-threaded downloader.
  - [x] Driver: **Quark** now uses this feature, so loading is faster in any single-threaded scenario, e.g. a media player streaming directly from the link (see the first sketch after this list).
- [x] General improvements to HTTP/WebDAV stream processing, header handling, and response handling
  - [x] Driver: **Mega** now supports ranged HTTP requests (see the diff below)
  - [x] Driver: **Quark** fixed a bug where the HTTP request to the Quark server was not closed after the client had already closed its connection to alist (see the second sketch after this list)
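A minimal sketch of the idea behind the memory-cached multi-threaded downloader, under the assumption of a plain HTTP source that supports `Range` requests; the chunk size, package and function names are illustrative, not the actual alist implementation:

```go
package downloader

import (
	"fmt"
	"io"
	"net/http"
)

const chunkSize int64 = 4 << 20 // 4 MiB per ranged request (illustrative value)

type chunk struct {
	data []byte
	err  error
}

// downloadRange fetches bytes [start, end] of url with an HTTP Range header.
func downloadRange(url string, start, end int64) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

// ConcurrentDownload splits the file into chunks, downloads up to `threads`
// chunks in parallel, keeps finished chunks in memory, and writes them to w
// strictly in order, so a single sequential reader (e.g. a media player)
// still benefits from the parallel fetching.
func ConcurrentDownload(w io.Writer, url string, size int64, threads int) error {
	if threads < 1 {
		threads = 1
	}
	n := (size + chunkSize - 1) / chunkSize
	results := make([]chan chunk, n)
	for i := range results {
		results[i] = make(chan chunk, 1) // buffered so workers never block on send
	}
	sem := make(chan struct{}, threads) // limits the number of in-flight requests
	for i := int64(0); i < n; i++ {
		go func(i int64) {
			sem <- struct{}{}
			defer func() { <-sem }()
			start := i * chunkSize
			end := start + chunkSize - 1
			if end >= size {
				end = size - 1
			}
			data, err := downloadRange(url, start, end)
			results[i] <- chunk{data: data, err: err}
		}(i)
	}
	// Hand the cached chunks to the caller in their original order.
	for i := int64(0); i < n; i++ {
		c := <-results[i]
		if c.err != nil {
			return c.err
		}
		if _, err := w.Write(c.data); err != nil {
			return err
		}
	}
	return nil
}
```

The real implementation also needs cancellation, retries, and a bound on cached memory; the sketch only shows the ordering and concurrency pattern.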
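For the Quark fix, the underlying pattern is to tie the upstream request to the client's request context, so that when the client drops its connection to alist the proxied request to the storage provider is cancelled as well. A minimal sketch of that pattern using only the standard library; the URL and handler name are hypothetical and this is not the actual alist code:

```go
package proxy

import (
	"io"
	"net/http"
)

// proxyDownload streams a remote file to the client. Because the upstream
// request is created with the client's context, closing the client connection
// cancels the upstream HTTP request too, so no connection to the remote
// server is left hanging.
func proxyDownload(w http.ResponseWriter, r *http.Request) {
	upstreamURL := "https://example-storage.invalid/file" // hypothetical upstream

	// r.Context() is cancelled as soon as the client goes away.
	req, err := http.NewRequestWithContext(r.Context(), http.MethodGet, upstreamURL, nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// Forward the Range header so a partial request stays partial upstream.
	if rng := r.Header.Get("Range"); rng != "" {
		req.Header.Set("Range", rng)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()

	// Pass through the headers a range-aware client cares about.
	for _, h := range []string{"Content-Type", "Content-Length", "Content-Range", "Accept-Ranges"} {
		if v := resp.Header.Get(h); v != "" {
			w.Header().Set(h, v)
		}
	}
	w.WriteHeader(resp.StatusCode)

	// io.Copy stops once the client disconnects; the deferred Close and the
	// cancelled context then release the upstream connection.
	_, _ = io.Copy(w, resp.Body)
}
```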

## Crypt, a transparent encrypt/decrypt driver (Rclone Crypt compatible)

e.g.
Crypt mount path  -> /vault
Crypt remote path -> /ali/encrypted
Aliyun mount path -> /ali

When a user uploads a.jpg to /vault, the data is encrypted and saved to /ali/encrypted/xxxxx. When the user later accesses a.jpg, it is decrypted automatically, so the file can be used as usual.
Since it is Rclone Crypt compatible, users can download /ali/encrypted/xxxxx and decrypt it with the rclone crypt tool, or mount the encrypted folder with rclone and expose the decrypted view on Linux...
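Because the on-disk format is Rclone Crypt compatible, an rclone crypt remote pointed at a copy of the encrypted files can decrypt them. A hedged sketch of such an `rclone.conf` entry; the remote name and path are placeholders, the filename/directory options must match how the Crypt driver was configured, and the passwords must be the obscured values produced by `rclone obscure`:

```ini
[decrypted-view]
type = crypt
# local path (or another rclone remote) holding the files copied from /ali/encrypted
remote = /path/to/downloaded/encrypted/files
filename_encryption = standard
directory_name_encryption = true
password = <obscured password>
password2 = <obscured salt>
```

`rclone mount decrypted-view: /mnt/plain` would then expose the decrypted files on Linux.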

NB: Some breaking changes were made to follow global standards, e.g. HTTP headers are now processed properly.

close #4679 
close #4827 

Co-authored-by: Sean He <866155+seanhe26@users.noreply.github.com>
Co-authored-by: Andy Hsu <i@nn.ci>
Commit 3c21a9a520 (parent 1dc1dd1f07), authored by Sean, committed by GitHub on 2023-08-02 14:40:36 +08:00.
38 changed files with 2861 additions and 335 deletions.


@@ -4,7 +4,11 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/alist-org/alist/v3/drivers/base"
+	"github.com/alist-org/alist/v3/pkg/http_range"
+	"github.com/rclone/rclone/lib/readers"
 	"io"
+	"time"
 
 	"github.com/alist-org/alist/v3/internal/driver"
 	"github.com/alist-org/alist/v3/internal/errs"
@@ -64,51 +68,41 @@ func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
 func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
 	if node, ok := file.(*MegaNode); ok {
-		//link, err := d.c.Link(node.Node, true)
+		//down, err := d.c.NewDownload(node.Node)
 		//if err != nil {
-		//	return nil, err
+		//	return nil, fmt.Errorf("open download file failed: %w", err)
 		//}
-		//return &model.Link{URL: link}, nil
-		down, err := d.c.NewDownload(node.Node)
-		if err != nil {
-			return nil, err
-		}
-		//u := down.GetResourceUrl()
-		//u = strings.Replace(u, "http", "https", 1)
-		//return &model.Link{URL: u}, nil
-		r, w := io.Pipe()
-		go func() {
-			defer func() {
-				_ = recover()
-			}()
-			log.Debugf("chunk size: %d", down.Chunks())
-			var (
-				chunk []byte
-				err   error
-			)
-			for id := 0; id < down.Chunks(); id++ {
-				chunk, err = down.DownloadChunk(id)
-				if err != nil {
-					log.Errorf("mega down: %+v", err)
-					break
-				}
-				log.Debugf("id: %d,len: %d", id, len(chunk))
-				//_, _, err = down.ChunkLocation(id)
-				//if err != nil {
-				//	log.Errorf("mega down: %+v", err)
-				//	return
-				//}
-				//_, err = c.Write(chunk)
-				if _, err = w.Write(chunk); err != nil {
-					break
-				}
+		size := file.GetSize()
+		var finalClosers base.Closers
+		resultRangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
+			length := httpRange.Length
+			if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
+				length = -1
 			}
-			err = w.CloseWithError(err)
+			var down *mega.Download
+			err := utils.Retry(3, time.Second, func() (err error) {
+				down, err = d.c.NewDownload(node.Node)
+				return err
+			})
 			if err != nil {
-				log.Errorf("mega down: %+v", err)
+				return nil, fmt.Errorf("open download file failed: %w", err)
 			}
-		}()
-		return &model.Link{Data: r}, nil
+			oo := &openObject{
+				ctx:  ctx,
+				d:    down,
+				skip: httpRange.Start,
+			}
+			finalClosers.Add(oo)
+			return readers.NewLimitedReadCloser(oo, length), nil
+		}
+		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closer: &finalClosers}
+		resultLink := &model.Link{
+			RangeReadCloser: *resultRangeReadCloser,
+		}
+		return resultLink, nil
 	}
 	return nil, fmt.Errorf("unable to convert dir to mega node")
 }