feat: Crypt driver, improve http/webdav handling (#4884)

This PR includes several enhancements, fixes, and new features:
- [x] Crypt: a transparent encryption driver. Anyone can easily and safely store encrypted data on a remote storage provider.  Think of your data as being kept in a safe: the storage provider can see the safe, but not your data.
  - [x] Optional: compatible with [Rclone Crypt](https://rclone.org/crypt/), which gives you more ways to manipulate the encrypted data.
  - [x] directory and filename encryption
  - [x] server-side encryption mode (the server encrypts and decrypts all data, so all data flows through the server)
- [x] obfuscate sensitive information internally
- [x] introduced a server-side, memory-cached multi-threaded downloader.
  - [x] Driver: **Quark** enables this feature, so single-threaded consumers load faster, e.g. a media player streaming directly from the link (see the driver sketch right after this list).
- [x] general improvements to HTTP/WebDAV stream processing, header handling, and response handling
  - [x] Driver: **Mega** now supports ranged HTTP requests (`Range` header)
  - [x] Driver: **Quark** fixed a bug where the HTTP request to the Quark server was not closed after the client had already closed its connection to alist
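
Below is a minimal sketch of how a driver such as Quark can opt in to the new downloader: it returns a `model.Link` with `Concurrency` and `PartSize` set, and the proxy code (see the diff below) then fetches the file in parallel ranged parts. The `Quark` struct and `getDownloadURL` helper here are illustrative placeholders, not the driver's actual code.

```go
package quark // illustrative; the real driver package may differ

import (
	"context"

	"github.com/alist-org/alist/v3/internal/model"
)

// Quark stands in for the real driver struct; getDownloadURL is a
// hypothetical helper for whatever API call yields the raw file URL.
type Quark struct{}

func (d *Quark) getDownloadURL(ctx context.Context, file model.Obj) (string, error) {
	return "https://example.com/raw-file-url", nil // placeholder
}

// Link opts in to the downloader: non-zero Concurrency/PartSize on the
// returned link makes the proxy fetch the file in parallel ranged parts
// and cache them in memory instead of doing a plain passthrough.
func (d *Quark) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
	rawURL, err := d.getDownloadURL(ctx, file)
	if err != nil {
		return nil, err
	}
	return &model.Link{
		URL:         rawURL,
		Concurrency: 3,                // fetch up to 3 parts in parallel
		PartSize:    10 * 1024 * 1024, // 10 MiB per ranged request
	}, nil
}
```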

## Crypt, a transparent encrypt/decrypt driver (Rclone Crypt compatible)

e.g.  
Crypt mount path ->  /vault 
Crypt remote path -> /ali/encrypted
Aliyun mount path -> /ali

When the user uploads a.jpg to /vault, the data is encrypted and saved to /ali/encrypted/xxxxx. When the user later accesses a.jpg, it is automatically decrypted, and the user can do anything with it.
Since it is Rclone Crypt compatible, users can download /ali/encrypted/xxxxx and decrypt it with the rclone crypt tool, or configure rclone to mount the encrypted folder and browse the decrypted files directly, e.g. on Linux.
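
For example, to decrypt a downloaded copy of the encrypted folder with rclone, a config along these lines should work. This is only a sketch: the remote path is a placeholder, the passwords are the obscured values (`rclone obscure`) of the password and salt configured in the alist Crypt driver, and the filename/directory encryption settings must match what the driver uses.

```
[alist-crypt]
type = crypt
remote = /path/to/downloaded/encrypted
filename_encryption = standard
directory_name_encryption = true
password = <obscured password>
password2 = <obscured salt>
```

After that, `rclone mount alist-crypt: /mnt/decrypted` (or `rclone copy alist-crypt: ./plain`) exposes the decrypted files.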

NB: Some breaking changes were made to follow common standards, e.g. HTTP headers are now processed properly.
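
As a quick check of the improved Range/header handling, a ranged request against a proxied download link should now return 206 Partial Content with a matching Content-Range header. A minimal client-side sketch, assuming an alist instance on the default port and a placeholder file path:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Ask for the first 1 KiB of a proxied file (URL is a placeholder).
	req, err := http.NewRequest(http.MethodGet, "http://localhost:5244/d/vault/a.jpg", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-1023")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Expect "206 bytes 0-1023/<total size>" when ranges are honored.
	fmt.Println(resp.StatusCode, resp.Header.Get("Content-Range"))
}
```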

close #4679 
close #4827 

Co-authored-by: Sean He <866155+seanhe26@users.noreply.github.com>
Co-authored-by: Andy Hsu <i@nn.ci>
Committed by Sean (via GitHub) on 2023-08-02 14:40:36 +08:00
commit 3c21a9a520, parent 1dc1dd1f07
38 changed files with 2861 additions and 335 deletions


@@ -1,21 +1,17 @@
package common

import (
	"context"
	"fmt"
	"github.com/alist-org/alist/v3/drivers/base"
	"github.com/alist-org/alist/v3/internal/model"
	"github.com/alist-org/alist/v3/internal/net"
	"github.com/alist-org/alist/v3/pkg/http_range"
	"github.com/pkg/errors"
	"io"
	"net/http"
	"net/url"
	"sync"

	log "github.com/sirupsen/logrus"
)

func HttpClient() *http.Client {
@@ -36,108 +32,53 @@ var once sync.Once
var httpClient *http.Client

func Proxy(w http.ResponseWriter, r *http.Request, link *model.Link, file model.Obj) error {
	if link.ReadSeekCloser != nil {
		// the driver handed us a seekable stream: let net/http serve it,
		// which gives correct Range and HEAD handling
		filename := file.GetName()
		w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename)))
		http.ServeContent(w, r, file.GetName(), file.ModTime(), link.ReadSeekCloser)
		defer link.ReadSeekCloser.Close()
		return nil
	} else if link.RangeReadCloser.RangeReader != nil {
		// the driver can read arbitrary ranges on demand
		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), link.RangeReadCloser.RangeReader)
		defer func() {
			if link.RangeReadCloser.Closer != nil {
				link.RangeReadCloser.Closer.Close()
			}
		}()
		return nil
	} else if link.Concurrency != 0 || link.PartSize != 0 {
		// the driver opted in to the memory-cached multi-thread downloader
		size := file.GetSize()
		//var finalClosers model.Closers
		header := net.ProcessHeader(&r.Header, &link.Header)
		rangeReader := func(httpRange http_range.Range) (io.ReadCloser, error) {
			down := net.NewDownloader(func(d *net.Downloader) {
				d.Concurrency = link.Concurrency
				d.PartSize = link.PartSize
			})
			req := &net.HttpRequestParams{
				URL:       link.URL,
				Range:     httpRange,
				Size:      size,
				HeaderRef: header,
			}
			rc, err := down.Download(context.Background(), req)
			return *rc, err
		}
		net.ServeHTTP(w, r, file.GetName(), file.ModTime(), file.GetSize(), rangeReader)
		return nil
	} else {
		//transparent proxy
		header := net.ProcessHeader(&r.Header, &link.Header)
		res, err := net.RequestHttp(r.Method, header, link.URL)
		if err != nil {
			return err
		}
		defer res.Body.Close()

		for h, v := range res.Header {
			w.Header()[h] = v
		}
		w.WriteHeader(res.StatusCode)
		if res.StatusCode >= 400 {
			all, _ := io.ReadAll(res.Body)
			msg := string(all)
			log.Debugln(msg)
			return errors.New(msg)
		}
		if r.Method == http.MethodHead {
			return nil
		}