feat: Crypt driver, improve http/webdav handling (#4884)

This PR has several enhancements, fixes, and features:
- [x] Crypt: a transparent encryption driver. Anyone can easily and safely store encrypted data on a remote storage provider. Think of it as a safe: your data sits inside, and the storage provider only ever sees the safe, not your data.
  - [x] Optional: compatible with [Rclone Crypt](https://rclone.org/crypt/), giving you more ways to manipulate the encrypted data.
  - [x] directory and filename encryption
  - [x] server-side encryption mode (the server encrypts & decrypts all data; all data flows through the server)
- [x] obfuscate sensitive information internally
- [x] introduce a server-side memory-cached multi-thread downloader.
  - [x] Driver: **Quark** enables this feature, speeding up any single-thread scenario, e.g. a media player streaming directly from the link now loads faster.
- [x] general improvements to HTTP/WebDAV stream processing, header handling, and response handling
  - [x] Driver: **Mega** now supports ranged HTTP headers
  - [x] Driver: **Quark** fixed a bug where the HTTP request to the Quark server was not closed after the client had already closed its connection to alist

## Crypt, a transparent Encrypt/Decrypt driver (Rclone Crypt compatible)

e.g.  
Crypt mount path -> /vault  
Crypt remote path -> /ali/encrypted  
Aliyun mount path -> /ali  

When the user uploads a.jpg to /vault, the data is encrypted and saved to /ali/encrypted/xxxxx. When the user later accesses a.jpg, it is decrypted automatically, and the user can do anything with it.
Since it is Rclone Crypt compatible, users can download /ali/encrypted/xxxxx and decrypt it with the rclone crypt tool, or mount this folder with rclone and then mount the decrypted folder in Linux...

NB. Some breaking changes were made so that alist follows established standards, e.g. processing HTTP headers properly.

close #4679 
close #4827 

Co-authored-by: Sean He <866155+seanhe26@users.noreply.github.com>
Co-authored-by: Andy Hsu <i@nn.ci>
Sean, 2023-08-02 14:40:36 +08:00, committed by GitHub
commit 3c21a9a520 (parent 1dc1dd1f07)
38 changed files with 2861 additions and 335 deletions

internal/net/request.go Normal file
@@ -0,0 +1,587 @@
package net
import (
"context"
"fmt"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/aws/aws-sdk-go/aws/awsutil"
log "github.com/sirupsen/logrus"
"io"
"math"
"net/http"
"strconv"
"strings"
"sync"
"time"
)
// DefaultDownloadPartSize is the default range of bytes to get at a time when
// using Download().
const DefaultDownloadPartSize = 1024 * 1024 * 10
// DefaultDownloadConcurrency is the default number of goroutines to spin up
// when using Download().
const DefaultDownloadConcurrency = 2
// DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
const DefaultPartBodyMaxRetries = 3
type Downloader struct {
PartSize int
// PartBodyMaxRetries is the number of retry attempts to make for failed part downloads.
PartBodyMaxRetries int
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
//
// Concurrency of 1 will download the parts sequentially.
Concurrency int
//RequestParam HttpRequestParams
HttpClient HttpRequestFunc
}
type HttpRequestFunc func(params *HttpRequestParams) (*http.Response, error)
func NewDownloader(options ...func(*Downloader)) *Downloader {
d := &Downloader{
HttpClient: DefaultHttpRequestFunc,
PartSize: DefaultDownloadPartSize,
PartBodyMaxRetries: DefaultPartBodyMaxRetries,
Concurrency: DefaultDownloadConcurrency,
}
for _, option := range options {
option(d)
}
return d
}
// Download makes multi-thread HTTP requests to the remote URL; every chunk (except the last) has PartSize.
// Some data is cached in memory, then a Reader with the assembled data is returned.
// It supports ranges; it does not support an unknown FileSize, and it will fail if FileSize is incorrect.
// Memory usage is about Concurrency*PartSize; use this wisely.
func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readCloser *io.ReadCloser, err error) {
var finalP HttpRequestParams
awsutil.Copy(&finalP, p)
if finalP.Range.Length == -1 {
finalP.Range.Length = finalP.Size - finalP.Range.Start
}
impl := downloader{params: &finalP, cfg: d, ctx: ctx}
// Ensures we don't need nil checks later on
impl.partBodyMaxRetries = d.PartBodyMaxRetries
if impl.cfg.Concurrency == 0 {
impl.cfg.Concurrency = DefaultDownloadConcurrency
}
if impl.cfg.PartSize == 0 {
impl.cfg.PartSize = DefaultDownloadPartSize
}
return impl.download()
}
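// Illustrative sketch (not part of the original diff): driving the Downloader
// with a custom HttpRequestFunc that serves an in-memory "file", the same way
// the unit tests later in this PR do. Concurrency/PartSize values are
// arbitrary; "bytes" would need to be added to the import list.
func exampleDownload(ctx context.Context) (string, error) {
data := []byte("0123456789abcdef")
serve := func(params *HttpRequestParams) (*http.Response, error) {
end := params.Range.Start + params.Range.Length
header := http.Header{}
header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", params.Range.Start, end-1, len(data)))
return &http.Response{
Body: io.NopCloser(bytes.NewReader(data[params.Range.Start:end])),
Header: header,
ContentLength: params.Range.Length,
}, nil
}
d := NewDownloader(func(d *Downloader) {
d.Concurrency = 2 // two worker goroutines
d.PartSize = 4 // 4-byte chunks => four ranged requests
d.HttpClient = serve
})
rc, err := d.Download(ctx, &HttpRequestParams{
Range: http_range.Range{Start: 0, Length: int64(len(data))},
Size: int64(len(data)),
})
if err != nil {
return "", err
}
defer (*rc).Close()
out, err := io.ReadAll(*rc) // assembled in order: "0123456789abcdef"
return string(out), err
}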
// downloader is the implementation structure used internally by Downloader.
type downloader struct {
ctx context.Context
cfg Downloader
params *HttpRequestParams //http request params
chunkChannel chan chunk //chunk channel
m sync.Mutex
nextChunk int //next chunk id
chunks []chunk
bufs []*Buf
written int64 //total bytes of file downloaded from remote
err error
partBodyMaxRetries int
}
// download performs the implementation of the object download across ranged GETs.
func (d *downloader) download() (*io.ReadCloser, error) {
pos := d.params.Range.Start
maxPos := d.params.Range.Start + d.params.Range.Length
id := 0
for pos < maxPos {
finalSize := int64(d.cfg.PartSize)
//check boundary
if pos+finalSize > maxPos {
finalSize = maxPos - pos
}
c := chunk{start: pos, size: finalSize, id: id}
d.chunks = append(d.chunks, c)
pos += finalSize
id++
}
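// e.g. Range{Start: 0, Length: 22MiB} with PartSize = 10MiB yields three
// chunks: (0, 10MiB), (10MiB, 10MiB), (20MiB, 2MiB).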
if len(d.chunks) < d.cfg.Concurrency {
d.cfg.Concurrency = len(d.chunks)
}
if d.cfg.Concurrency == 1 {
resp, err := d.cfg.HttpClient(d.params)
if err != nil {
return nil, err
}
return &resp.Body, nil
}
// workers
d.chunkChannel = make(chan chunk, d.cfg.Concurrency)
for i := 0; i < d.cfg.Concurrency; i++ {
buf := NewBuf(d.cfg.PartSize, i)
d.bufs = append(d.bufs, buf)
go d.downloadPart()
}
// initial tasks
for i := 0; i < d.cfg.Concurrency; i++ {
d.sendChunkTask()
}
var rc io.ReadCloser = NewMultiReadCloser(d.chunks[0].buf, d.interrupt, d.finishBuf)
// return the assembled reader, plus any error recorded so far
return &rc, d.err
}
func (d *downloader) sendChunkTask() *chunk {
ch := &d.chunks[d.nextChunk]
ch.buf = d.getBuf(d.nextChunk)
ch.buf.Reset(int(ch.size))
d.chunkChannel <- *ch
d.nextChunk++
return ch
}
// interrupt is called when the final reader is closed: if the full range has
// not been written yet, record an error so the remaining workers stop.
func (d *downloader) interrupt() error {
if d.written != d.params.Range.Length {
log.Debugf("Downloader interrupt before finish")
if d.getErr() == nil {
d.setErr(fmt.Errorf("interrupted"))
}
}
defer func() {
close(d.chunkChannel)
for _, buf := range d.bufs {
buf.Close()
}
}()
return d.err
}
func (d *downloader) getBuf(id int) (b *Buf) {
return d.bufs[id%d.cfg.Concurrency]
}
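// finishBuf is called once the reader has drained buffer `id`: it reports
// whether that chunk was the last one and, if not, hands back the buffer that
// holds (or is scheduled to receive) the next chunk, queuing a new chunk task
// when none is in flight yet.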
func (d *downloader) finishBuf(id int) (isLast bool, buf *Buf) {
if id >= len(d.chunks)-1 {
return true, nil
}
if d.nextChunk > id+1 {
return false, d.getBuf(id + 1)
}
ch := d.sendChunkTask()
return false, ch.buf
}
// downloadPart is an individual goroutine worker reading from the ch channel
// and performing Http request on the data with a given byte range.
func (d *downloader) downloadPart() {
for {
c, ok := <-d.chunkChannel
log.Debugf("downloadPart tried to get chunk")
if !ok {
break
}
if d.getErr() != nil {
// Drain the channel if there is an error, to prevent deadlocking
// of download producer.
continue
}
if err := d.downloadChunk(&c); err != nil {
d.setErr(err)
}
}
}
// downloadChunk downloads the chunk
func (d *downloader) downloadChunk(ch *chunk) error {
log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.buf.buffer.id)
var n int64
var err error
params := d.getParamsFromChunk(ch)
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
if d.getErr() != nil {
return d.getErr()
}
n, err = d.tryDownloadChunk(params, ch)
if err == nil {
break
}
// Check if the returned error is an errReadingBody.
// If err is errReadingBody this indicates that an error
// occurred while copying the http response body.
// If this occurs we unwrap the err to set the underlying error
// and attempt any remaining retries.
if bodyErr, ok := err.(*errReadingBody); ok {
err = bodyErr.Unwrap()
} else {
return err
}
log.Debugf("object part body download interrupted %s, err, %v, retrying attempt %d",
params.URL, err, retry)
}
d.incrWritten(n)
log.Debugf("down_%d downloaded chunk", ch.id)
return err
}
func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
resp, err := d.cfg.HttpClient(params)
if err != nil {
return 0, err
}
// close the body on every return path, including early error returns
defer resp.Body.Close()
//only check file size on the first task
if ch.id == 0 {
err = d.checkTotalBytes(resp)
if err != nil {
return 0, err
}
}
n, err := io.Copy(ch.buf, resp.Body)
if err != nil {
return n, &errReadingBody{err: err}
}
if n != ch.size {
err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n)
return n, &errReadingBody{err: err}
}
return n, nil
}
func (d *downloader) getParamsFromChunk(ch *chunk) *HttpRequestParams {
var params HttpRequestParams
awsutil.Copy(&params, d.params)
// narrow the request to this chunk's byte range
params.Range = http_range.Range{Start: ch.start, Length: ch.size}
return &params
}
func (d *downloader) checkTotalBytes(resp *http.Response) error {
var err error
var totalBytes int64 = math.MinInt64
contentRange := resp.Header.Get("Content-Range")
if len(contentRange) == 0 {
// ContentRange is nil when the full file contents is provided, and
// is not chunked. Use ContentLength instead.
if resp.ContentLength > 0 {
totalBytes = resp.ContentLength
}
} else {
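// Content-Range looks like "bytes 0-9/100" (RFC 7233); the total size
// (or "*" when unknown) follows the "/".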
parts := strings.Split(contentRange, "/")
total := int64(-1)
// Checking for whether a numbered total exists
// If one does not exist, we will assume the total to be -1, undefined,
// and sequentially download each chunk until hitting a 416 error
totalStr := parts[len(parts)-1]
if totalStr != "*" {
total, err = strconv.ParseInt(totalStr, 10, 64)
if err != nil {
err = fmt.Errorf("failed extracting file size")
}
} else {
err = fmt.Errorf("file size unknown")
}
totalBytes = total
}
if totalBytes != d.params.Size && err == nil {
err = fmt.Errorf("expect file size=%d unmatch remote report size=%d, need refresh cache", d.params.Size, totalBytes)
}
if err != nil {
_ = d.interrupt()
d.setErr(err)
}
return err
}
func (d *downloader) incrWritten(n int64) {
d.m.Lock()
defer d.m.Unlock()
d.written += n
}
// getErr is a thread-safe getter for the error object
func (d *downloader) getErr() error {
d.m.Lock()
defer d.m.Unlock()
return d.err
}
// setErr is a thread-safe setter for the error object
func (d *downloader) setErr(e error) {
d.m.Lock()
defer d.m.Unlock()
d.err = e
}
// chunk represents a single chunk of data to download by a worker routine.
type chunk struct {
start int64
size int64
buf *Buf
id int
// Downloader takes a range (start, length); this chunk requests an equal or
// sub-range of it. To convert the writer into a reader eventually, writes
// must stay within this boundary.
}
func DefaultHttpRequestFunc(params *HttpRequestParams) (*http.Response, error) {
header := http_range.ApplyRangeToHttpHeader(params.Range, params.HeaderRef)
res, err := RequestHttp("GET", header, params.URL)
if err != nil {
return nil, err
}
return res, nil
}
type HttpRequestParams struct {
URL string
//only want data within this range
Range http_range.Range
HeaderRef *http.Header
//total file size
Size int64
}
type errReadingBody struct {
err error
}
func (e *errReadingBody) Error() string {
return fmt.Sprintf("failed to read part body: %v", e.err)
}
func (e *errReadingBody) Unwrap() error {
return e.err
}
type MultiReadCloser struct {
io.ReadCloser
cfg *cfg
closer closerFunc
finish finishBufFUnc
}
type cfg struct {
rPos int //current reader position, start from 0
curBuf *Buf
}
type closerFunc func() error
type finishBufFUnc func(id int) (isLast bool, buf *Buf)
// NewMultiReadCloser reuses a limited set of Bufs to save memory, feeding
// their data to Read() in order.
func NewMultiReadCloser(buf *Buf, c closerFunc, fb finishBufFUnc) *MultiReadCloser {
return &MultiReadCloser{closer: c, finish: fb, cfg: &cfg{curBuf: buf}}
}
func (mr MultiReadCloser) Read(p []byte) (n int, err error) {
if mr.cfg.curBuf == nil {
return 0, io.EOF
}
n, err = mr.cfg.curBuf.Read(p)
if err == io.EOF {
log.Debugf("read_%d finished current buffer", mr.cfg.rPos)
isLast, next := mr.finish(mr.cfg.rPos)
if isLast {
return n, io.EOF
}
mr.cfg.curBuf = next
mr.cfg.rPos++
return n, nil
}
return n, err
}
func (mr MultiReadCloser) Close() error {
return mr.closer()
}
type Buffer struct {
data []byte
wPos int //writer position
id int
rPos int //reader position
lock sync.Mutex
once bool //used together with notify & lock so the reader is notified only once
notify chan int //notifies the reader of new writes
}
func (buf *Buffer) Write(p []byte) (n int, err error) {
inSize := len(p)
if inSize == 0 {
return 0, nil
}
if inSize > len(buf.data)-buf.wPos {
return 0, fmt.Errorf("exceeding buffer max size,inSize=%d ,buf.data.len=%d , buf.wPos=%d",
inSize, len(buf.data), buf.wPos)
}
copy(buf.data[buf.wPos:], p)
buf.wPos += inSize
//notify the reader if it is waiting (once == true)
buf.lock.Lock()
if buf.once {
buf.notify <- inSize
}
buf.once = false
buf.lock.Unlock()
return inSize, nil
}
func (buf *Buffer) getPos() (n int) {
return buf.wPos
}
func (buf *Buffer) reset() {
buf.wPos = 0
buf.rPos = 0
}
// waitTillNewWrite blocks the reader until a new write happens; it returns an
// error if no further write can arrive or the read position is illegal.
func (buf *Buffer) waitTillNewWrite(pos int) error {
var err error
if pos >= len(buf.data) {
err = fmt.Errorf("there will not be any new write")
} else if pos > buf.wPos {
err = fmt.Errorf("illegal read position")
} else if pos == buf.wPos {
buf.lock.Lock()
buf.once = true
buf.lock.Unlock()
//wait for the next write
log.Debugf("waitTillNewWrite wait for notify")
writes := <-buf.notify
log.Debugf("waitTillNewWrite got new write from notify, last writes:%+v", writes)
return nil
}
//only remaining case: pos < buf.wPos, new data is already available
return err
}
type Buf struct {
buffer *Buffer // Buffer we read from
size int //expected size
}
// NewBuf returns a buffer that supports one concurrent reader and one
// concurrent writer: when the reader outruns the writer, data is fed to the
// reader as soon as it is written.
func NewBuf(maxSize int, id int) *Buf {
d := make([]byte, maxSize)
buffer := &Buffer{data: d, id: id, notify: make(chan int)}
buffer.reset()
return &Buf{buffer: buffer, size: maxSize}
}
func (br *Buf) Reset(size int) {
br.buffer.reset()
br.size = size
}
func (br *Buf) GetId() int {
return br.buffer.id
}
func (br *Buf) Read(p []byte) (n int, err error) {
if len(p) == 0 {
return 0, nil
}
if br.buffer.rPos == br.size {
return 0, io.EOF
}
//snapshot the buffer position, since the writer goroutine keeps increasing it
bufPos := br.buffer.getPos()
outSize := bufPos - br.buffer.rPos
if outSize == 0 {
err := br.waitTillNewWrite(br.buffer.rPos)
if err != nil {
if err != nil {
return 0, err
}
bufPos = br.buffer.getPos()
outSize = bufPos - br.buffer.rPos
}
if len(p) < outSize {
// p is not big enough
outSize = len(p)
}
copy(p, br.buffer.data[br.buffer.rPos:br.buffer.rPos+outSize])
br.buffer.rPos += outSize
if br.buffer.rPos == br.size {
err = io.EOF
}
return outSize, err
}
// waitTillNewWrite backs off before blocking: we just observed that no new
// data is available and the underlying wait is relatively expensive, so
// sleep 0.2s first.
func (br *Buf) waitTillNewWrite(pos int) error {
time.Sleep(200 * time.Millisecond)
return br.buffer.waitTillNewWrite(pos)
}
func (br *Buf) Write(p []byte) (n int, err error) {
return br.buffer.Write(p)
}
func (br *Buf) Close() {
close(br.buffer.notify)
}
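// Illustrative sketch (not part of the original diff): the single-writer /
// single-reader contract of Buf. Writes become readable immediately, and
// Read reports io.EOF once the expected size has been consumed. In the
// downloader the writer is a separate goroutine and Read blocks via notify
// until matching data arrives.
func exampleBuf() (string, error) {
buf := NewBuf(8, 0) // capacity & expected size: 8 bytes, buffer id 0
if _, err := buf.Write([]byte("12345678")); err != nil {
return "", err
}
out, err := io.ReadAll(buf) // returns "12345678"
return string(out), err
}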

@@ -0,0 +1,178 @@
package net
import (
"bytes"
"context"
"fmt"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"io"
"io/ioutil"
"net/http"
"sync"
"testing"
)
var buf22MB = make([]byte, 1024*1024*22)
func dummyHttpRequest(data []byte, p http_range.Range) io.ReadCloser {
//use an exclusive end so the final byte of the range is included
end := p.Start + p.Length
if end > int64(len(data)) {
end = int64(len(data))
}
bodyBytes := data[p.Start:end]
return io.NopCloser(bytes.NewReader(bodyBytes))
}
func TestDownloadOrder(t *testing.T) {
buff := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
downloader, invocations, ranges := newDownloadRangeClient(buff)
con, partSize := 3, 3
d := NewDownloader(func(d *Downloader) {
d.Concurrency = con
d.PartSize = partSize
d.HttpClient = downloader.HttpRequest
})
var start, length int64 = 2, 10
req := &HttpRequestParams{
Range: http_range.Range{Start: start, Length: length},
Size: int64(len(buff)),
}
readCloser, err := d.Download(context.Background(), req)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
resultBuf, err := io.ReadAll(*readCloser)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if exp, a := int(length), len(resultBuf); exp != a {
t.Errorf("expect buffer length=%d, got %d", exp, a)
}
chunkSize := int(length)/partSize + 1
if int(length)%partSize == 0 {
chunkSize--
}
if e, a := chunkSize, *invocations; e != a {
t.Errorf("expect %v API calls, got %v", e, a)
}
expectRngs := []string{"2-3", "5-3", "8-3", "11-1"}
for _, rng := range expectRngs {
if !slices.Contains(*ranges, rng) {
t.Errorf("expect range %v, but absent in return", rng)
}
}
if e, a := expectRngs, *ranges; len(e) != len(a) {
t.Errorf("expect %v ranges, got %v", e, a)
}
}
func init() {
Formatter := new(logrus.TextFormatter)
Formatter.TimestampFormat = "2006-01-02T15:04:05.999999999"
Formatter.FullTimestamp = true
Formatter.ForceColors = true
logrus.SetFormatter(Formatter)
logrus.SetLevel(logrus.DebugLevel)
logrus.Debugf("Download start")
}
func TestDownloadSingle(t *testing.T) {
buff := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
downloader, invocations, ranges := newDownloadRangeClient(buff)
con, partSize := 1, 3
d := NewDownloader(func(d *Downloader) {
d.Concurrency = con
d.PartSize = partSize
d.HttpClient = downloader.HttpRequest
})
var start, length int64 = 2, 10
req := &HttpRequestParams{
Range: http_range.Range{Start: start, Length: length},
Size: int64(len(buff)),
}
readCloser, err := d.Download(context.Background(), req)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
resultBuf, err := io.ReadAll(*readCloser)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if exp, a := int(length), len(resultBuf); exp != a {
t.Errorf("expect buffer length=%d, got %d", exp, a)
}
if e, a := 1, *invocations; e != a {
t.Errorf("expect %v API calls, got %v", e, a)
}
expectRngs := []string{"2-10"}
for _, rng := range expectRngs {
if !slices.Contains(*ranges, rng) {
t.Errorf("expect range %v, but absent in return", rng)
}
}
if e, a := expectRngs, *ranges; len(e) != len(a) {
t.Errorf("expect %v ranges, got %v", e, a)
}
}
type downloadCaptureClient struct {
mockedHttpRequest func(params *HttpRequestParams) (*http.Response, error)
GetObjectInvocations int
RetrievedRanges []string
lock sync.Mutex
}
func (c *downloadCaptureClient) HttpRequest(params *HttpRequestParams) (*http.Response, error) {
c.lock.Lock()
defer c.lock.Unlock()
c.GetObjectInvocations++
if &params.Range != nil {
c.RetrievedRanges = append(c.RetrievedRanges, fmt.Sprintf("%d-%d", params.Range.Start, params.Range.Length))
}
return c.mockedHttpRequest(params)
}
func newDownloadRangeClient(data []byte) (*downloadCaptureClient, *int, *[]string) {
capture := &downloadCaptureClient{}
capture.mockedHttpRequest = func(params *HttpRequestParams) (*http.Response, error) {
start, fin := params.Range.Start, params.Range.Start+params.Range.Length
if params.Range.Length == -1 || fin >= int64(len(data)) {
fin = int64(len(data))
}
bodyBytes := data[start:fin]
header := &http.Header{}
header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, fin-1, len(data)))
return &http.Response{
Body: io.NopCloser(bytes.NewReader(bodyBytes)),
Header: *header,
ContentLength: int64(len(bodyBytes)),
}, nil
}
return capture, &capture.GetObjectInvocations, &capture.RetrievedRanges
}

internal/net/serve.go Normal file
@@ -0,0 +1,252 @@
package net
import (
"fmt"
"github.com/alist-org/alist/v3/drivers/base"
"github.com/alist-org/alist/v3/internal/conf"
"github.com/alist-org/alist/v3/internal/model"
"github.com/alist-org/alist/v3/pkg/http_range"
"github.com/alist-org/alist/v3/pkg/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"io"
"mime"
"mime/multipart"
"net/http"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
)
// This file is inspired by the Go standard library's net/http.ServeContent.
// ServeHTTP replies to the request using the content in the
// provided RangeReadCloser. The main benefit of ServeHTTP over io.Copy
// is that it handles Range requests properly, sets the MIME type, and
// handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since,
// and If-Range requests.
//
// If the response's Content-Type header is not set, ServeHTTP
// first tries to deduce the type from name's file extension and,
// if that fails, falls back to reading the first block of the content
// and passing it to DetectContentType.
// The name is otherwise unused; in particular it can be empty and is
// never sent in the response.
//
// If modtime is not the zero time or Unix epoch, ServeHTTP
// includes it in a Last-Modified header in the response. If the
// request includes an If-Modified-Since header, ServeHTTP uses
// modtime to decide whether the content needs to be sent at all.
//
// The provided RangeReaderFunc must work: ServeHTTP passes it a range, and
// the caller must return a reader covering exactly that range.
//
// If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
// ServeHTTP uses it to handle requests using If-Match, If-None-Match, or If-Range.
func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time.Time, size int64, RangeReaderFunc model.RangeReaderFunc) {
setLastModified(w, modTime)
done, rangeReq := checkPreconditions(w, r, modTime)
if done {
return
}
if size < 0 {
// since too many functions need file size to work,
// will not implement the support of unknown file size here
http.Error(w, "negative content size not supported", http.StatusInternalServerError)
return
}
code := http.StatusOK
// If Content-Type isn't set, use the file's extension to find it, but
// if the Content-Type is unset explicitly, do not sniff the type.
contentTypes, haveType := w.Header()["Content-Type"]
var contentType string
if !haveType {
contentType = mime.TypeByExtension(filepath.Ext(name))
if contentType == "" {
// most modern applications can handle the default content type
contentType = "application/octet-stream"
}
w.Header().Set("Content-Type", contentType)
} else if len(contentTypes) > 0 {
contentType = contentTypes[0]
}
// handle Content-Range header.
sendSize := size
var sendContent io.ReadCloser
ranges, err := http_range.ParseRange(rangeReq, size)
switch err {
case nil:
case http_range.ErrNoOverlap:
if size == 0 {
// Some clients add a Range header to all requests to
// limit the size of the response. If the file is empty,
// ignore the range header and respond with a 200 rather
// than a 416.
ranges = nil
break
}
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
fallthrough
default:
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
if sumRangesSize(ranges) > size || size < 0 {
// The total number of bytes in all the ranges is larger than the size of the file
// or unknown file size, ignore the range request.
ranges = nil
}
switch {
case len(ranges) == 0:
reader, err := RangeReaderFunc(http_range.Range{Start: 0, Length: -1})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
sendContent = reader
case len(ranges) == 1:
// RFC 7233, Section 4.1:
// "If a single part is being transferred, the server
// generating the 206 response MUST generate a
// Content-Range header field, describing what range
// of the selected representation is enclosed, and a
// payload consisting of the range.
// ...
// A server MUST NOT generate a multipart response to
// a request for a single range, since a client that
// does not request multiple parts might not support
// multipart responses."
ra := ranges[0]
sendContent, err = RangeReaderFunc(ra)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
sendSize = ra.Length
code = http.StatusPartialContent
w.Header().Set("Content-Range", ra.ContentRange(size))
case len(ranges) > 1:
sendSize, err = rangesMIMESize(ranges, contentType, size)
if err != nil {
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
return
}
code = http.StatusPartialContent
pr, pw := io.Pipe()
mw := multipart.NewWriter(pw)
w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
sendContent = pr
defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
go func() {
for _, ra := range ranges {
part, err := mw.CreatePart(ra.MimeHeader(contentType, size))
if err != nil {
pw.CloseWithError(err)
return
}
reader, err := RangeReaderFunc(ra)
if err != nil {
pw.CloseWithError(err)
return
}
if _, err := io.CopyN(part, reader, ra.Length); err != nil {
pw.CloseWithError(err)
return
}
//defer reader.Close()
}
mw.Close()
pw.Close()
}()
}
w.Header().Set("Accept-Ranges", "bytes")
if w.Header().Get("Content-Encoding") == "" {
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
}
w.WriteHeader(code)
if r.Method != "HEAD" {
written, err := io.CopyN(w, sendContent, sendSize)
if err != nil {
log.Warnf("ServeHttp error. err: %s ", err)
if written != sendSize {
log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize)
}
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
//defer sendContent.Close()
}
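// Illustrative sketch (not part of the original diff): serving an in-memory
// byte slice with full Range support. It assumes model.RangeReaderFunc is
// compatible with func(http_range.Range) (io.ReadCloser, error), as its use
// above suggests; "bytes" would need to be added to the import list.
func exampleInMemoryHandler(data []byte, modTime time.Time) http.HandlerFunc {
rangeReader := func(ra http_range.Range) (io.ReadCloser, error) {
length := ra.Length
if length < 0 || ra.Start+length > int64(len(data)) {
length = int64(len(data)) - ra.Start // clamp open-ended ranges
}
return io.NopCloser(bytes.NewReader(data[ra.Start : ra.Start+length])), nil
}
return func(w http.ResponseWriter, r *http.Request) {
// each Range request is answered with a 206 and only the requested bytes
ServeHTTP(w, r, "demo.bin", modTime, int64(len(data)), rangeReader)
}
}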
func ProcessHeader(origin, override *http.Header) *http.Header {
result := http.Header{}
// client header
for h, val := range *origin {
if utils.SliceContains(conf.SlicesMap[conf.ProxyIgnoreHeaders], strings.ToLower(h)) {
continue
}
result[h] = val
}
// needed header
for h, val := range *override {
result[h] = val
}
return &result
}
// RequestHttp deals with the Header properly, then sends the request
func RequestHttp(httpMethod string, headerOverride *http.Header, URL string) (*http.Response, error) {
req, err := http.NewRequest(httpMethod, URL, nil)
if err != nil {
return nil, err
}
req.Header = *headerOverride
log.Debugln("request Header: ", req.Header)
log.Debugln("request URL: ", URL)
res, err := HttpClient().Do(req)
if err != nil {
return nil, err
}
log.Debugf("response status: %d", res.StatusCode)
log.Debugln("response Header: ", res.Header)
// TODO clean header with blocklist or passlist
res.Header.Del("set-cookie")
if res.StatusCode >= 400 {
all, _ := io.ReadAll(res.Body)
msg := string(all)
log.Debugln(msg)
return res, errors.New(msg)
}
return res, nil
}
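// Illustrative sketch (not part of the original diff): the intended pairing
// of ProcessHeader and RequestHttp — forward the client's headers upstream
// minus the configured ProxyIgnoreHeaders, while forcing our own Range header.
func exampleProxyFetch(clientHeader http.Header, url string) (*http.Response, error) {
override := http.Header{}
override.Set("Range", "bytes=0-1023") // e.g. probe only the first KiB
merged := ProcessHeader(&clientHeader, &override)
return RequestHttp("GET", merged, url)
}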
var once sync.Once
var httpClient *http.Client
func HttpClient() *http.Client {
once.Do(func() {
httpClient = base.NewHttpClient()
httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
req.Header.Del("Referer")
return nil
}
})
return httpClient
}

internal/net/util.go Normal file
@@ -0,0 +1,339 @@
package net
import (
"fmt"
"io"
"math"
"mime/multipart"
"net/http"
"net/textproto"
"strings"
"time"
"github.com/alist-org/alist/v3/pkg/http_range"
log "github.com/sirupsen/logrus"
)
// scanETag determines if a syntactically valid ETag is present at s. If so,
// the ETag and remaining text after consuming ETag is returned. Otherwise,
// it returns "", "".
func scanETag(s string) (etag string, remain string) {
s = textproto.TrimString(s)
start := 0
if strings.HasPrefix(s, "W/") {
start = 2
}
if len(s[start:]) < 2 || s[start] != '"' {
return "", ""
}
// ETag is either W/"text" or "text".
// See RFC 7232 2.3.
for i := start + 1; i < len(s); i++ {
c := s[i]
switch {
// Character values allowed in ETags.
case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80:
case c == '"':
return s[:i+1], s[i+1:]
default:
return "", ""
}
}
return "", ""
}
// etagStrongMatch reports whether a and b match using strong ETag comparison.
// Assumes a and b are valid ETags.
func etagStrongMatch(a, b string) bool {
return a == b && a != "" && a[0] == '"'
}
// etagWeakMatch reports whether a and b match using weak ETag comparison.
// Assumes a and b are valid ETags.
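// For example, W/"abc" weakly matches both W/"abc" and "abc", while strong
// comparison (etagStrongMatch) rejects the weak forms.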
func etagWeakMatch(a, b string) bool {
return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/")
}
// condResult is the result of an HTTP request precondition check.
// See https://tools.ietf.org/html/rfc7232 section 3.
type condResult int
const (
condNone condResult = iota
condTrue
condFalse
)
func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult {
im := r.Header.Get("If-Match")
if im == "" {
return condNone
}
for {
im = textproto.TrimString(im)
if len(im) == 0 {
break
}
if im[0] == ',' {
im = im[1:]
continue
}
if im[0] == '*' {
return condTrue
}
etag, remain := scanETag(im)
if etag == "" {
break
}
if etagStrongMatch(etag, w.Header().Get("Etag")) {
return condTrue
}
im = remain
}
return condFalse
}
func checkIfUnmodifiedSince(r *http.Request, modtime time.Time) condResult {
ius := r.Header.Get("If-Unmodified-Since")
if ius == "" || isZeroTime(modtime) {
return condNone
}
t, err := http.ParseTime(ius)
if err != nil {
return condNone
}
// The Last-Modified header truncates sub-second precision so
// the modtime needs to be truncated too.
modtime = modtime.Truncate(time.Second)
if ret := modtime.Compare(t); ret <= 0 {
return condTrue
}
return condFalse
}
func checkIfNoneMatch(w http.ResponseWriter, r *http.Request) condResult {
inm := r.Header.Get("If-None-Match")
if inm == "" {
return condNone
}
buf := inm
for {
buf = textproto.TrimString(buf)
if len(buf) == 0 {
break
}
if buf[0] == ',' {
buf = buf[1:]
continue
}
if buf[0] == '*' {
return condFalse
}
etag, remain := scanETag(buf)
if etag == "" {
break
}
if etagWeakMatch(etag, w.Header().Get("Etag")) {
return condFalse
}
buf = remain
}
return condTrue
}
func checkIfModifiedSince(r *http.Request, modtime time.Time) condResult {
if r.Method != "GET" && r.Method != "HEAD" {
return condNone
}
ims := r.Header.Get("If-Modified-Since")
if ims == "" || isZeroTime(modtime) {
return condNone
}
t, err := http.ParseTime(ims)
if err != nil {
return condNone
}
// The Last-Modified header truncates sub-second precision so
// the modtime needs to be truncated too.
modtime = modtime.Truncate(time.Second)
if ret := modtime.Compare(t); ret <= 0 {
return condFalse
}
return condTrue
}
func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) condResult {
if r.Method != "GET" && r.Method != "HEAD" {
return condNone
}
ir := r.Header.Get("If-Range")
if ir == "" {
return condNone
}
etag, _ := scanETag(ir)
if etag != "" {
if etagStrongMatch(etag, w.Header().Get("Etag")) {
return condTrue
}
return condFalse
}
// The If-Range value is typically the ETag value, but it may also be
// the modtime date. See golang.org/issue/8367.
if modtime.IsZero() {
return condFalse
}
t, err := http.ParseTime(ir)
if err != nil {
return condFalse
}
if t.Unix() == modtime.Unix() {
return condTrue
}
return condFalse
}
var unixEpochTime = time.Unix(0, 0)
// isZeroTime reports whether t is obviously unspecified (either zero or Unix()=0).
func isZeroTime(t time.Time) bool {
return t.IsZero() || t.Equal(unixEpochTime)
}
func setLastModified(w http.ResponseWriter, modtime time.Time) {
if !isZeroTime(modtime) {
w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat))
}
}
func writeNotModified(w http.ResponseWriter) {
// RFC 7232 section 4.1:
// a sender SHOULD NOT generate representation metadata other than the
// above listed fields unless said metadata exists for the purpose of
// guiding cache updates (e.g., Last-Modified might be useful if the
// response does not have an ETag field).
h := w.Header()
delete(h, "Content-Type")
delete(h, "Content-Length")
delete(h, "Content-Encoding")
if h.Get("Etag") != "" {
delete(h, "Last-Modified")
}
w.WriteHeader(http.StatusNotModified)
}
// checkPreconditions evaluates request preconditions and reports whether a precondition
// resulted in sending StatusNotModified or StatusPreconditionFailed.
func checkPreconditions(w http.ResponseWriter, r *http.Request, modtime time.Time) (done bool, rangeHeader string) {
// This function carefully follows RFC 7232 section 6.
ch := checkIfMatch(w, r)
if ch == condNone {
ch = checkIfUnmodifiedSince(r, modtime)
}
if ch == condFalse {
w.WriteHeader(http.StatusPreconditionFailed)
return true, ""
}
switch checkIfNoneMatch(w, r) {
case condFalse:
if r.Method == "GET" || r.Method == "HEAD" {
writeNotModified(w)
return true, ""
}
w.WriteHeader(http.StatusPreconditionFailed)
return true, ""
case condNone:
if checkIfModifiedSince(r, modtime) == condFalse {
writeNotModified(w)
return true, ""
}
}
rangeHeader = r.Header.Get("Range")
if rangeHeader != "" && checkIfRange(w, r, modtime) == condFalse {
rangeHeader = ""
}
return false, rangeHeader
}
func sumRangesSize(ranges []http_range.Range) (size int64) {
for _, ra := range ranges {
size += ra.Length
}
return
}
// countingWriter counts how many bytes have been written to it.
type countingWriter int64
func (w *countingWriter) Write(p []byte) (n int, err error) {
*w += countingWriter(len(p))
return len(p), nil
}
// rangesMIMESize returns the number of bytes it takes to encode the
// provided ranges as a multipart response.
func rangesMIMESize(ranges []http_range.Range, contentType string, contentSize int64) (encSize int64, err error) {
var w countingWriter
mw := multipart.NewWriter(&w)
for _, ra := range ranges {
_, err := mw.CreatePart(ra.MimeHeader(contentType, contentSize))
if err != nil {
return 0, err
}
encSize += ra.Length
}
err = mw.Close()
if err != nil {
return 0, err
}
encSize += int64(w)
return encSize, nil
}
// LimitedReadCloser wraps a io.ReadCloser and limits the number of bytes that can be read from it.
type LimitedReadCloser struct {
rc io.ReadCloser
remaining int
}
func (l *LimitedReadCloser) Read(buf []byte) (int, error) {
if l.remaining <= 0 {
return 0, io.EOF
}
if len(buf) > l.remaining {
buf = buf[0:l.remaining]
}
n, err := l.rc.Read(buf)
l.remaining -= n
return n, err
}
func (l *LimitedReadCloser) Close() error {
return l.rc.Close()
}
// GetRangedHttpReader is for HTTP servers that don't support the "Range"
// header: it takes the whole-body readCloser, discards the first `offset`
// bytes, then returns a ReadCloser limited to `length` bytes.
func GetRangedHttpReader(readCloser io.ReadCloser, offset, length int64) (io.ReadCloser, error) {
if length > math.MaxInt {
return nil, fmt.Errorf("does not support length larger than the int max")
}
if offset > 100*1024*1024 {
log.Warnf("offset is more than 100MB; if loading data from the internet, high latency and wasted bandwidth are expected")
}
if _, err := io.Copy(io.Discard, io.LimitReader(readCloser, offset)); err != nil {
return nil, err
}
// return an io.ReadCloser that is limited to `length` bytes.
return &LimitedReadCloser{readCloser, int(length)}, nil
}
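// Illustrative sketch (not part of the original diff): how a driver might use
// GetRangedHttpReader when the remote ignored the Range header and replied
// 200 with the full body instead of 206.
func exampleEmulatedRange(resp *http.Response, start, length int64) (io.ReadCloser, error) {
if resp.StatusCode == http.StatusPartialContent {
return resp.Body, nil // server honored the Range; nothing to emulate
}
return GetRangedHttpReader(resp.Body, start, length)
}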