feat(alias): add DownloadConcurrency and DownloadPartSize option (#7829)
* fix(net): goroutine logic bug (AlistGo/alist#7215)
* Fix goroutine logic bug
* Fix bug
---------
Co-authored-by: hpy hs <hshpy.pengyu@gmail.com>
* perf(net): sequential and dynamic concurrency
* fix(net): incorrect error return
* feat(alias): add `DownloadConcurrency` and `DownloadPartSize` option
* feat(net): add `ConcurrencyLimit`
* perf(net): create `chunk` on demand
* refactor
* refactor
* fix(net): `r.Closers.Add` has no effect
* refactor
---------
Co-authored-by: hpy hs <hshpy.pengyu@gmail.com>
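Below, a minimal usage sketch (not part of the commit) of how the new knobs compose; the `net` package alias, the variable names, and the exact option-closure style are assumptions based on the diff that follows:

	// Global cap on concurrent ranged GETs across all downloads; nil means unlimited.
	net.DefaultConcurrencyLimit = &net.ConcurrencyLimit{Limit: 16}

	// Per-download tuning, e.g. fed from the alias driver's
	// DownloadConcurrency / DownloadPartSize options.
	d := net.NewDownloader(func(d *net.Downloader) {
		d.Concurrency = 4          // worker goroutines for this download
		d.PartSize = 20 * utils.MB // bytes per ranged GET
	})
	rc, err := d.Download(ctx, &net.HttpRequestParams{URL: url, Range: rng, Size: size})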
@@ -5,7 +5,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"math"
 	"net/http"
 	"strconv"
 	"strings"
@@ -21,7 +20,7 @@ import (
 
 // DefaultDownloadPartSize is the default range of bytes to get at a time when
 // using Download().
-const DefaultDownloadPartSize = 1024 * 1024 * 10
+const DefaultDownloadPartSize = utils.MB * 10
 
 // DefaultDownloadConcurrency is the default number of goroutines to spin up
 // when using Download().
@@ -30,6 +29,8 @@ const DefaultDownloadConcurrency = 2
 // DefaultPartBodyMaxRetries is the default number of retries to make when a part fails to download.
 const DefaultPartBodyMaxRetries = 3
 
+var DefaultConcurrencyLimit *ConcurrencyLimit
+
 type Downloader struct {
 	PartSize int
 
@@ -44,15 +45,15 @@ type Downloader struct {
 
 	//RequestParam HttpRequestParams
 	HttpClient HttpRequestFunc
+
+	*ConcurrencyLimit
 }
 type HttpRequestFunc func(ctx context.Context, params *HttpRequestParams) (*http.Response, error)
 
 func NewDownloader(options ...func(*Downloader)) *Downloader {
-	d := &Downloader{
-		HttpClient: DefaultHttpRequestFunc,
-		PartSize:   DefaultDownloadPartSize,
+	d := &Downloader{ // options that may be left unset
 		PartBodyMaxRetries: DefaultPartBodyMaxRetries,
-		Concurrency:        DefaultDownloadConcurrency,
+		ConcurrencyLimit:   DefaultConcurrencyLimit,
 	}
 	for _, option := range options {
 		option(d)
@@ -74,16 +75,16 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
 	impl := downloader{params: &finalP, cfg: d, ctx: ctx}
 
-	// Ensures we don't need nil checks later on
-
-	impl.partBodyMaxRetries = d.PartBodyMaxRetries
-
+	// required options
 	if impl.cfg.Concurrency == 0 {
 		impl.cfg.Concurrency = DefaultDownloadConcurrency
 	}
-
 	if impl.cfg.PartSize == 0 {
 		impl.cfg.PartSize = DefaultDownloadPartSize
 	}
+	if impl.cfg.HttpClient == nil {
+		impl.cfg.HttpClient = DefaultHttpRequestFunc
+	}
 
 	return impl.download()
 }
@@ -91,7 +92,7 @@ func (d Downloader) Download(ctx context.Context, p *HttpRequestParams) (readClo
 // downloader is the implementation structure used internally by Downloader.
 type downloader struct {
 	ctx    context.Context
-	cancel context.CancelFunc
+	cancel context.CancelCauseFunc
 	cfg    Downloader
 
 	params *HttpRequestParams //http request params
@@ -101,38 +102,78 @@ type downloader struct {
 	m sync.Mutex
 
 	nextChunk int //next chunk id
-	chunks    []chunk
 	bufs      []*Buf
 	//totalBytes int64
-	written int64 //total bytes of file downloaded from remote
-	err     error
+	written int64 //total bytes of file downloaded from remote
+	err     error
 
-	partBodyMaxRetries int
+	concurrency int // remaining concurrency slots, decremented; stop spawning at 0
+	maxPart     int // total number of chunks
+	pos         int64
+	maxPos      int64
+	m2          sync.Mutex
+	readingID   int // id of the chunk currently being read
 }
 
+type ConcurrencyLimit struct {
+	_m    sync.Mutex
+	Limit int // must be greater than 0
+}
+
+var ErrExceedMaxConcurrency = fmt.Errorf("ExceedMaxConcurrency")
+
+func (l *ConcurrencyLimit) sub() error {
+	l._m.Lock()
+	defer l._m.Unlock()
+	if l.Limit-1 < 0 {
+		return ErrExceedMaxConcurrency
+	}
+	l.Limit--
+	// log.Debugf("ConcurrencyLimit.sub: %d", l.Limit)
+	return nil
+}
+func (l *ConcurrencyLimit) add() {
+	l._m.Lock()
+	defer l._m.Unlock()
+	l.Limit++
+	// log.Debugf("ConcurrencyLimit.add: %d", l.Limit)
+}
+
+// check whether the limit would be exceeded
+func (d *downloader) concurrencyCheck() error {
+	if d.cfg.ConcurrencyLimit != nil {
+		return d.cfg.ConcurrencyLimit.sub()
+	}
+	return nil
+}
+func (d *downloader) concurrencyFinish() {
+	if d.cfg.ConcurrencyLimit != nil {
+		d.cfg.ConcurrencyLimit.add()
+	}
+}
+
 // download performs the implementation of the object download across ranged GETs.
 func (d *downloader) download() (io.ReadCloser, error) {
-	d.ctx, d.cancel = context.WithCancel(d.ctx)
+	if err := d.concurrencyCheck(); err != nil {
+		return nil, err
+	}
+	d.ctx, d.cancel = context.WithCancelCause(d.ctx)
 
-	pos := d.params.Range.Start
-	maxPos := d.params.Range.Start + d.params.Range.Length
-	id := 0
-	for pos < maxPos {
-		finalSize := int64(d.cfg.PartSize)
-		//check boundary
-		if pos+finalSize > maxPos {
-			finalSize = maxPos - pos
-		}
-		c := chunk{start: pos, size: finalSize, id: id}
-		d.chunks = append(d.chunks, c)
-		pos += finalSize
-		id++
+	maxPart := int(d.params.Range.Length / int64(d.cfg.PartSize))
+	if d.params.Range.Length%int64(d.cfg.PartSize) > 0 {
+		maxPart++
 	}
-	if len(d.chunks) < d.cfg.Concurrency {
-		d.cfg.Concurrency = len(d.chunks)
+	if maxPart < d.cfg.Concurrency {
+		d.cfg.Concurrency = maxPart
 	}
+	log.Debugf("cfgConcurrency:%d", d.cfg.Concurrency)
 
 	if d.cfg.Concurrency == 1 {
+		if d.cfg.ConcurrencyLimit != nil {
+			go func() {
+				<-d.ctx.Done()
+				d.concurrencyFinish()
+			}()
+		}
 		resp, err := d.cfg.HttpClient(d.ctx, d.params)
 		if err != nil {
 			return nil, err
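The ConcurrencyLimit added above is a counted semaphore that fails fast instead of blocking. A worked example of the accounting (in-package, since sub/add are unexported), assuming Limit starts at 2:

	limit := &ConcurrencyLimit{Limit: 2}
	_ = limit.sub()    // nil; Limit is now 1
	_ = limit.sub()    // nil; Limit is now 0
	err := limit.sub() // ErrExceedMaxConcurrency: the caller gives up rather than queueing
	limit.add()        // releases a slot; Limit is back to 1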
@@ -143,61 +184,114 @@ func (d *downloader) download() (io.ReadCloser, error) {
 	// workers
 	d.chunkChannel = make(chan chunk, d.cfg.Concurrency)
 
-	for i := 0; i < d.cfg.Concurrency; i++ {
-		buf := NewBuf(d.ctx, d.cfg.PartSize, i)
-		d.bufs = append(d.bufs, buf)
-		go d.downloadPart()
-	}
-	// initial tasks
-	for i := 0; i < d.cfg.Concurrency; i++ {
-		d.sendChunkTask()
-	}
+	d.maxPart = maxPart
+	d.pos = d.params.Range.Start
+	d.maxPos = d.params.Range.Start + d.params.Range.Length
+	d.concurrency = d.cfg.Concurrency
+	d.sendChunkTask(true)
 
-	var rc io.ReadCloser = NewMultiReadCloser(d.chunks[0].buf, d.interrupt, d.finishBuf)
+	var rc io.ReadCloser = NewMultiReadCloser(d.bufs[0], d.interrupt, d.finishBuf)
 
 	// Return error
 	return rc, d.err
 }
-func (d *downloader) sendChunkTask() *chunk {
-	ch := &d.chunks[d.nextChunk]
-	ch.buf = d.getBuf(d.nextChunk)
-	ch.buf.Reset(int(ch.size))
-	d.chunkChannel <- *ch
-	d.nextChunk++
-	return ch
+
+func (d *downloader) sendChunkTask(newConcurrency bool) error {
+	d.m.Lock()
+	defer d.m.Unlock()
+	isNewBuf := d.concurrency > 0
+	if newConcurrency {
+		if d.concurrency <= 0 {
+			return nil
+		}
+		if d.nextChunk > 0 { // skip the check for the first chunk; it was already checked
+			if err := d.concurrencyCheck(); err != nil {
+				return err
+			}
+		}
+		d.concurrency--
+		go d.downloadPart()
+	}
+
+	var buf *Buf
+	if isNewBuf {
+		buf = NewBuf(d.ctx, d.cfg.PartSize)
+		d.bufs = append(d.bufs, buf)
+	} else {
+		buf = d.getBuf(d.nextChunk)
+	}
+
+	if d.pos < d.maxPos {
+		finalSize := int64(d.cfg.PartSize)
+		switch d.nextChunk {
+		case 0:
+			// does putting the smallest chunk first help video playback start sooner?
+			firstSize := d.params.Range.Length % finalSize
+			if firstSize > 0 {
+				minSize := finalSize / 2
+				if firstSize < minSize { // if the first chunk is too small, grow it to half a part
+					finalSize = minSize
+				} else {
+					finalSize = firstSize
+				}
+			}
+		case 1:
+			firstSize := d.params.Range.Length % finalSize
+			minSize := finalSize / 2
+			if firstSize > 0 && firstSize < minSize {
+				finalSize += firstSize - minSize
+			}
+		}
+		buf.Reset(int(finalSize))
+		ch := chunk{
+			start: d.pos,
+			size:  finalSize,
+			id:    d.nextChunk,
+			buf:   buf,
+		}
+		ch.newConcurrency = newConcurrency
+		d.pos += finalSize
+		d.nextChunk++
+		d.chunkChannel <- ch
+		return nil
+	}
+	return nil
 }
 
 // when the final reader Close, we interrupt
 func (d *downloader) interrupt() error {
-
-	d.cancel()
 	if d.written != d.params.Range.Length {
 		log.Debugf("Downloader interrupt before finish")
 		if d.getErr() == nil {
 			d.setErr(fmt.Errorf("interrupted"))
 		}
 	}
+	d.cancel(d.err)
+	defer func() {
+		close(d.chunkChannel)
+		for _, buf := range d.bufs {
+			buf.Close()
+		}
+		if d.concurrency > 0 {
+			d.concurrency = -d.concurrency
+		}
+		log.Debugf("maxConcurrency:%d", d.cfg.Concurrency+d.concurrency)
+	}()
 	return d.err
 }
 func (d *downloader) getBuf(id int) (b *Buf) {
-
-	return d.bufs[id%d.cfg.Concurrency]
+	return d.bufs[id%len(d.bufs)]
 }
-func (d *downloader) finishBuf(id int) (isLast bool, buf *Buf) {
-	if id >= len(d.chunks)-1 {
+func (d *downloader) finishBuf(id int) (isLast bool, nextBuf *Buf) {
+	id++
+	if id >= d.maxPart {
 		return true, nil
 	}
-	if d.nextChunk > id+1 {
-		return false, d.getBuf(id + 1)
-	}
-	ch := d.sendChunkTask()
-	return false, ch.buf
+
+	d.sendChunkTask(false)
+
+	d.readingID = id
+	return false, d.getBuf(id)
 }
 
 // downloadPart is an individual goroutine worker reading from the ch channel
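The case 0 / case 1 sizing above front-loads the remainder so the first chunk is never tiny. A worked example with PartSize = 10 MB:

	// Length = 23 MB: remainder = 23 % 10 = 3 MB, minSize = 5 MB
	// chunk 0: 3 < 5, so it grows to 5 MB
	// chunk 1: shrinks by the difference, 10 + (3 - 5) = 8 MB
	// chunk 2: 10 MB          => 5 + 8 + 10 = 23 MB, maxPart = 3
	//
	// Length = 25 MB: remainder = 5 MB >= minSize, so chunk 0 takes it as-is:
	// chunks are 5 MB, 10 MB, 10 MB; maxPart = 3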
@@ -212,58 +306,119 @@ func (d *downloader) downloadPart() {
 		if d.getErr() != nil {
 			// Drain the channel if there is an error, to prevent deadlocking
 			// of download producer.
-			continue
+			break
 		}
-		log.Debugf("downloadPart tried to get chunk")
 		if err := d.downloadChunk(&c); err != nil {
+			if err == errCancelConcurrency {
+				break
+			}
+			if err == context.Canceled {
+				if e := context.Cause(d.ctx); e != nil {
+					err = e
+				}
+			}
 			d.setErr(err)
+			d.cancel(err)
 		}
 	}
+	d.concurrencyFinish()
 }
 
 // downloadChunk downloads the chunk
 func (d *downloader) downloadChunk(ch *chunk) error {
-	log.Debugf("start new chunk %+v buffer_id =%d", ch, ch.id)
+	log.Debugf("start chunk_%d, %+v", ch.id, ch)
-	params := d.getParamsFromChunk(ch)
 	var n int64
 	var err error
+	params := d.getParamsFromChunk(ch)
-	for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
+	for retry := 0; retry <= d.cfg.PartBodyMaxRetries; retry++ {
 		if d.getErr() != nil {
-			return d.getErr()
+			return nil
 		}
 		n, err = d.tryDownloadChunk(params, ch)
+		if err == nil {
+			d.incrWritten(n)
+			log.Debugf("chunk_%d downloaded", ch.id)
+			break
+		}
-		// Check if the returned error is an errReadingBody.
-		// If err is errReadingBody this indicates that an error
-		// occurred while copying the http response body.
+		if d.getErr() != nil {
+			return nil
+		}
+		if utils.IsCanceled(d.ctx) {
+			return d.ctx.Err()
+		}
+		// Check if the returned error is an errNeedRetry.
 		// If this occurs we unwrap the err to set the underlying error
 		// and attempt any remaining retries.
-		if bodyErr, ok := err.(*errReadingBody); ok {
-			err = bodyErr.Unwrap()
+		if e, ok := err.(*errNeedRetry); ok {
+			err = e.Unwrap()
+			if n > 0 {
+				// test: sever the download connection alist made to the cloud drive mid-transfer
+				// verify: the file hash still matches after the download completes
+				d.incrWritten(n)
+				ch.start += n
+				ch.size -= n
+				params.Range.Start = ch.start
+				params.Range.Length = ch.size
+			}
+			log.Warnf("err chunk_%d, object part download error %s, retrying attempt %d. %v",
+				ch.id, params.URL, retry, err)
+		} else if err == errInfiniteRetry {
+			retry--
+			continue
 		} else {
-			return err
+			break
 		}
-
-		//ch.cur = 0
-
-		log.Debugf("object part body download interrupted %s, err, %v, retrying attempt %d",
-			params.URL, err, retry)
 	}
-
-	d.incrWritten(n)
-	log.Debugf("down_%d downloaded chunk", ch.id)
-	//ch.buf.buffer.wg1.Wait()
-	//log.Debugf("down_%d downloaded chunk,wg wait passed", ch.id)
 	return err
 }
 
-func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
+var errCancelConcurrency = fmt.Errorf("cancel concurrency")
+var errInfiniteRetry = fmt.Errorf("infinite retry")
+
+func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) {
 	resp, err := d.cfg.HttpClient(d.ctx, params)
 	if err != nil {
-		return 0, err
+		if resp == nil {
+			return 0, err
+		}
+		if ch.id == 0 { // first task: limited retries; once they are exhausted the request ends
+			switch resp.StatusCode {
+			default:
+				return 0, err
+			case http.StatusTooManyRequests:
+			case http.StatusBadGateway:
+			case http.StatusServiceUnavailable:
+			case http.StatusGatewayTimeout:
+			}
+			<-time.After(time.Millisecond * 200)
+			return 0, &errNeedRetry{err: fmt.Errorf("http request failure,status: %d", resp.StatusCode)}
+		}
+
+		// reaching here means the first chunk connected successfully,
+		// so treat errors on later chunks as overload
+		log.Debugf("err chunk_%d, try downloading:%v", ch.id, err)
+
+		d.m.Lock()
+		isCancelConcurrency := ch.newConcurrency
+		if d.concurrency > 0 { // cancel the remaining concurrent tasks
+			// used to compute the actual concurrency afterwards
+			d.concurrency = -d.concurrency
+			isCancelConcurrency = true
+		}
+		if isCancelConcurrency {
+			d.concurrency--
+			d.chunkChannel <- *ch
+			d.m.Unlock()
+			return 0, errCancelConcurrency
+		}
+		d.m.Unlock()
+		if ch.id != d.readingID { // the chunk currently being read retries with priority
+			d.m2.Lock()
+			defer d.m2.Unlock()
+			<-time.After(time.Millisecond * 200)
+		}
+		return 0, errInfiniteRetry
 	}
 	defer resp.Body.Close()
 	//only check file size on the first task
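One consequence of the errNeedRetry path above: a chunk that fails mid-copy resumes from the last written byte instead of restarting. A worked example, assuming a 10 MB chunk that died after 3 MB:

	// before: ch.start = S,        ch.size = 10 MB
	// after:  ch.start = S + 3 MB, ch.size = 7 MB
	// params.Range is updated to match, so the retry GET only asks for the missing 7 MB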
@@ -273,15 +428,15 @@ func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int
 			return 0, err
 		}
 	}
 
+	d.sendChunkTask(true)
 	n, err := utils.CopyWithBuffer(ch.buf, resp.Body)
-
 	if err != nil {
-		return n, &errReadingBody{err: err}
+		return n, &errNeedRetry{err: err}
 	}
 	if n != ch.size {
 		err = fmt.Errorf("chunk download size incorrect, expected=%d, got=%d", ch.size, n)
-		return n, &errReadingBody{err: err}
+		return n, &errNeedRetry{err: err}
 	}
 
 	return n, nil
@@ -297,7 +452,7 @@ func (d *downloader) getParamsFromChunk(ch *chunk) *HttpRequestParams {
 
 func (d *downloader) checkTotalBytes(resp *http.Response) error {
 	var err error
-	var totalBytes int64 = math.MinInt64
+	totalBytes := int64(-1)
 	contentRange := resp.Header.Get("Content-Range")
 	if len(contentRange) == 0 {
 		// ContentRange is nil when the full file contents is provided, and
@@ -329,8 +484,9 @@ func (d *downloader) checkTotalBytes(resp *http.Response) error {
 		err = fmt.Errorf("expect file size=%d unmatch remote report size=%d, need refresh cache", d.params.Size, totalBytes)
 	}
 	if err != nil {
-		_ = d.interrupt()
+		// _ = d.interrupt()
 		d.setErr(err)
+		d.cancel(err)
 	}
 	return err
 
@@ -369,9 +525,7 @@ type chunk struct {
 	buf *Buf
 	id  int
 
-	// Downloader takes range (start,length), but this chunk is requesting equal/sub range of it.
-	// To convert the writer to reader eventually, we need to write within the boundary
-	//boundary http_range.Range
+	newConcurrency bool
 }
 
 func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*http.Response, error) {
@@ -379,7 +533,7 @@ func DefaultHttpRequestFunc(ctx context.Context, params *HttpRequestParams) (*ht
 
 	res, err := RequestHttp(ctx, "GET", header, params.URL)
 	if err != nil {
-		return nil, err
+		return res, err
 	}
 	return res, nil
 }
@@ -392,15 +546,15 @@ type HttpRequestParams struct {
 	//total file size
 	Size int64
 }
-type errReadingBody struct {
+type errNeedRetry struct {
 	err error
 }
 
-func (e *errReadingBody) Error() string {
-	return fmt.Sprintf("failed to read part body: %v", e.err)
+func (e *errNeedRetry) Error() string {
+	return e.err.Error()
 }
 
-func (e *errReadingBody) Unwrap() error {
+func (e *errNeedRetry) Unwrap() error {
 	return e.err
 }
 
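Since errNeedRetry keeps an Unwrap method, callers can still match the root cause with the standard errors helpers. A minimal sketch:

	wrapped := &errNeedRetry{err: context.Canceled}
	errors.Is(wrapped, context.Canceled) // true: errors.Is follows Unwrap()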
@@ -438,9 +592,13 @@ func (mr MultiReadCloser) Read(p []byte) (n int, err error) {
 		}
 		mr.cfg.curBuf = next
 		mr.cfg.rPos++
-		//current.Close()
 		return n, nil
 	}
+	if err == context.Canceled {
+		if e := context.Cause(mr.cfg.curBuf.ctx); e != nil {
+			err = e
+		}
+	}
 	return n, err
 }
 func (mr MultiReadCloser) Close() error {
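The context.Cause lookups here and in downloadPart recover the real failure recorded by cancel(err) instead of the generic cancellation. A minimal sketch of the mechanism (Go 1.20+; io.ErrUnexpectedEOF is just a stand-in cause):

	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(io.ErrUnexpectedEOF)
	<-ctx.Done()
	_ = ctx.Err()          // context.Canceled: the generic signal
	_ = context.Cause(ctx) // io.ErrUnexpectedEOF: the recorded cause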
@@ -453,18 +611,16 @@ type Buf struct {
 	ctx context.Context
 	off int
 	rw  sync.Mutex
-	//notify chan struct{}
 }
 
 // NewBuf is a buffer that can have 1 read & 1 write at the same time.
 // when read is faster write, immediately feed data to read after written
-func NewBuf(ctx context.Context, maxSize int, id int) *Buf {
+func NewBuf(ctx context.Context, maxSize int) *Buf {
 	d := make([]byte, 0, maxSize)
 	return &Buf{
 		ctx:    ctx,
 		buffer: bytes.NewBuffer(d),
 		size:   maxSize,
-		//notify: make(chan struct{}),
 	}
 }
 func (br *Buf) Reset(size int) {
@@ -502,8 +658,6 @@ func (br *Buf) Read(p []byte) (n int, err error) {
 	select {
 	case <-br.ctx.Done():
 		return 0, br.ctx.Err()
-	//case <-br.notify:
-	//	return 0, nil
 	case <-time.After(time.Millisecond * 200):
 		return 0, nil
 	}
@@ -516,13 +670,9 @@ func (br *Buf) Write(p []byte) (n int, err error) {
 	br.rw.Lock()
 	defer br.rw.Unlock()
 	n, err = br.buffer.Write(p)
-	select {
-	//case br.notify <- struct{}{}:
-	default:
-	}
 	return
 }
 
 func (br *Buf) Close() {
 	//close(br.notify)
 	br.buffer.Reset()
 }
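With the notify channel gone, Buf is a single-writer/single-reader buffer whose Read polls every 200 ms; returning (0, nil) is permitted by the io.Reader contract (callers treat it as "nothing happened, try again"). A minimal in-package sketch, assuming a live ctx:

	buf := NewBuf(ctx, 8)
	buf.Reset(4)
	go func() { _, _ = buf.Write([]byte("data")) }()
	p := make([]byte, 4)
	n, err := buf.Read(p) // may first report (0, nil) while the writer catches up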