Compare commits

...

23 Commits

SHA1 Message Date
b1e662cd34 🐛 fix onedrive slow response 2021-11-28 17:01:27 +08:00
0f0e1104a4 WebDAV auth 2021-11-28 16:18:09 +08:00
3041da35ab 🐛 fix webdav proxy link auth 2021-11-28 16:05:41 +08:00
9eab54a7c8 webdav(view only) 2021-11-28 15:10:48 +08:00
0b8d3a0a2c 🐛 fix can't get file 2021-11-28 15:10:06 +08:00
f9945a14a8 🎇 webdav unfinished 2021-11-28 00:12:04 +08:00
c39752ceb4 🎇 optimization googledrive 2021-11-27 20:40:03 +08:00
53b383d2cf 🔨 refactor driver: onedrive 2021-11-27 20:20:14 +08:00
e76fc3e616 🔨 refactor driver: native 2021-11-27 20:07:32 +08:00
eb21b87020 🔨 refactor driver: google 2021-11-27 19:52:38 +08:00
f577d82242 🔨 refactor driver: ali 2021-11-27 19:40:36 +08:00
98691b2aa8 🔨 refactor driver(123 and 189) 2021-11-27 18:55:09 +08:00
4fe6ed6c3e 🐛 fix log 2021-11-26 20:23:33 +08:00
fe73ece57d change log 2021-11-26 20:20:32 +08:00
59b8f1084a 🐛 fixed checking the parent folder when checked the password of the current folder 2021-11-25 16:24:39 +08:00
2f669ac45c 📝 update doc 2021-11-24 18:58:17 +08:00
d03d91d518 🐛 fix google drive root folder label 2021-11-24 13:34:15 +08:00
fe981f67ec 🐛 fix windows check parent 2021-11-23 16:22:23 +08:00
8cfabfd0f5 resolved #170 native driver sort 2021-11-23 16:09:42 +08:00
163ee1159e change path interface 2021-11-23 15:58:03 +08:00
e31402e94f resolved #169 2021-11-23 15:46:10 +08:00
5500980d63 🐛 fix onedrive get files 2021-11-23 15:39:09 +08:00
b1695445e0 🐛 index add write header now 2021-11-22 18:10:09 +08:00
46 changed files with 8844 additions and 1668 deletions


@@ -39,7 +39,7 @@
### How to use
- https://www.nn.ci/archives/alist.html
- https://alist-doc.nn.ci/
### License


@@ -52,9 +52,9 @@ func main() {
}
r := gin.Default()
server.InitApiRouter(r)
log.Info("starting server")
err := r.Run(fmt.Sprintf("%s:%d", conf.Conf.Address, conf.Conf.Port))
base := fmt.Sprintf("%s:%d", conf.Conf.Address, conf.Conf.Port)
log.Infof("start server @ %s", base)
err := r.Run(base)
if err != nil {
log.Errorf("failed to start: %s", err.Error())
}


@@ -17,7 +17,7 @@ func InitAccounts() {
model.RegisterAccount(account)
driver, ok := drivers.GetDriver(account.Type)
if !ok {
log.Errorf("no [%s] driver", driver)
log.Errorf("no [%s] driver", account.Type)
} else {
err := driver.Save(&accounts[i], nil)
if err != nil {

bootstrap/drivers.go (new file)

@@ -0,0 +1,10 @@
package bootstrap
import (
_ "github.com/Xhofe/alist/drivers/123pan"
_ "github.com/Xhofe/alist/drivers/189cloud"
_ "github.com/Xhofe/alist/drivers/alidrive"
_ "github.com/Xhofe/alist/drivers/googledrive"
_ "github.com/Xhofe/alist/drivers/native"
_ "github.com/Xhofe/alist/drivers/onedrive"
)
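
The imports above are side-effect only: each driver package registers itself from its init() (see the RegisterDriver calls at the bottom of the new driver files further down). A minimal sketch of the registry these imports feed; RegisterDriver matches the drivers/base.go hunk below, while the body of GetDriver is not shown in this diff and is an assumption based on its use in bootstrap/accounts.go:

// drivers/base.go keeps a name -> implementation registry.
var driversMap = map[string]Driver{}

// Called from each driver package's init(), e.g. RegisterDriver("123Pan", &Pan123{}).
func RegisterDriver(name string, driver Driver) {
	log.Infof("register driver: [%s]", name)
	driversMap[name] = driver
}

// Assumed lookup behind drivers.GetDriver(account.Type) in bootstrap/accounts.go.
func GetDriver(name string) (Driver, bool) {
	driver, ok := driversMap[name]
	return driver, ok
}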


@@ -71,7 +71,7 @@ func InitModel() {
default:
log.Fatalf("not supported database type: %s", databaseConfig.Type)
}
log.Infof("auto migrate model")
log.Infof("auto migrate model...")
err := conf.DB.AutoMigrate(&model.SettingItem{}, &model.Account{}, &model.Meta{})
if err != nil {
log.Fatalf("failed to auto migrate")


@@ -141,6 +141,20 @@ func InitSettings() {
Description: "check down link password, your link will be 'https://alist.com/d/filename?pw=xxx'",
Group: model.PUBLIC,
},
{
Key: "WebDAV username",
Value: "alist",
Description: "WebDAV username",
Type: "string",
Group: model.PRIVATE,
},
{
Key: "WebDAV password",
Value: "alist",
Description: "WebDAV password",
Type: "string",
Group: model.PRIVATE,
},
}
for _, v := range settings {
_, err := model.GetSettingByKey(v.Key)
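
The matching DavUsername and DavPassword fields appear in the conf hunk below. The WebDAV handler itself is not part of this excerpt, so the following is only a sketch of the kind of basic-auth check these two settings could back; the middleware name and wiring are assumptions:

// Hypothetical gin middleware guarding the WebDAV routes with the new settings.
// Assumes conf.DavUsername/conf.DavPassword are filled from the setting items above at startup.
// Imports assumed: net/http, github.com/gin-gonic/gin, github.com/Xhofe/alist/conf.
func WebDAVAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		username, password, ok := c.Request.BasicAuth()
		if !ok || username != conf.DavUsername || password != conf.DavPassword {
			c.Header("WWW-Authenticate", `Basic realm="alist"`)
			c.AbortWithStatus(http.StatusUnauthorized)
			return
		}
		c.Next()
	}
}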


@@ -45,4 +45,7 @@ var (
//CustomizeScript string
//Favicon string
CheckDown bool
DavUsername string
DavPassword string
)


@@ -1,310 +0,0 @@
package drivers
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"path/filepath"
"strconv"
"time"
)
type Pan123 struct {
}
var pan123Client = resty.New()
func (p Pan123) Items() []Item {
return []Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "username",
Label: "username",
Type: "string",
Required: true,
Description: "account username/phone number",
},
{
Name: "password",
Label: "password",
Type: "string",
Required: true,
Description: "account password",
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: false,
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,fileId,updateAt,createAt",
Required: true,
},
{
Name: "order_direction",
Label: "order_direction",
Type: "select",
Values: "asc,desc",
Required: true,
},
}
}
type Pan123TokenResp struct {
Code int `json:"code"`
Data struct {
Token string `json:"token"`
} `json:"data"`
Message string `json:"message"`
}
func (p Pan123) Login(account *model.Account) error {
var resp Pan123TokenResp
_, err := pan123Client.R().
SetResult(&resp).
SetBody(Json{
"passport": account.Username,
"password": account.Password,
}).Post("https://www.123pan.com/api/user/sign_in")
if err != nil {
return err
}
if resp.Code != 200 {
err = fmt.Errorf(resp.Message)
account.Status = resp.Message
} else {
account.Status = "work"
account.AccessToken = resp.Data.Token
}
_ = model.SaveAccount(account)
return err
}
func (p Pan123) Save(account *model.Account, old *model.Account) error {
if account.RootFolder == "" {
account.RootFolder = "0"
}
err := p.Login(account)
return err
}
type Pan123File struct {
FileName string `json:"FileName"`
Size int64 `json:"Size"`
UpdateAt *time.Time `json:"UpdateAt"`
FileId int64 `json:"FileId"`
Type int `json:"Type"`
Etag string `json:"Etag"`
S3KeyFlag string `json:"S3KeyFlag"`
}
func (p Pan123) FormatFile(file *Pan123File) *model.File {
f := &model.File{
Name: file.FileName,
Size: file.Size,
Driver: "123Pan",
UpdatedAt: file.UpdateAt,
}
if file.Type == 1 {
f.Type = conf.FOLDER
} else {
f.Type = utils.GetFileType(filepath.Ext(file.FileName))
}
return f
}
type Pan123Files struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
InfoList []Pan123File `json:"InfoList"`
Next string `json:"Next"`
} `json:"data"`
}
func (p Pan123) GetFiles(parentId string, account *model.Account) ([]Pan123File, error) {
next := "0"
res := make([]Pan123File, 0)
for next != "-1" {
var resp Pan123Files
_, err := pan123Client.R().SetResult(&resp).
SetHeader("authorization", "Bearer "+account.AccessToken).
SetQueryParams(map[string]string{
"driveId": "0",
"limit": "100",
"next": next,
"orderBy": account.OrderBy,
"orderDirection": account.OrderDirection,
"parentFileId": parentId,
"trashed": "false",
}).Get("https://www.123pan.com/api/file/list")
if err != nil {
return nil, err
}
log.Debugf("%+v", resp)
if resp.Code != 0 {
if resp.Code == 401 {
err := p.Login(account)
if err != nil {
return nil, err
}
return p.GetFiles(parentId, account)
}
return nil, fmt.Errorf(resp.Message)
}
next = resp.Data.Next
res = append(res, resp.Data.InfoList...)
}
return res, nil
}
func (p Pan123) Path(path string, account *model.Account) (*model.File, []*model.File, error) {
path = utils.ParsePath(path)
log.Debugf("pan123 path: %s", path)
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
files, _ := cache.([]Pan123File)
if len(files) != 0 {
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, p.FormatFile(&file))
}
return nil, res, nil
}
}
// no cache or len(files) == 0
fileId := account.RootFolder
if path != "/" {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err = p.Path(dir, account)
if err != nil {
return nil, nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]Pan123File)
found := false
for _, file := range parentFiles {
if file.FileName == name {
found = true
if file.Type != 1 {
url, err := p.Link(path, account)
if err != nil {
return nil, nil, err
}
f := p.FormatFile(&file)
f.Url = url
return f, nil, nil
} else {
fileId = strconv.FormatInt(file.FileId, 10)
break
}
}
}
if !found {
return nil, nil, fmt.Errorf("path not found")
}
}
files, err := p.GetFiles(fileId, account)
if err != nil {
return nil, nil, err
}
log.Debugf("%+v", files)
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), files, nil)
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, p.FormatFile(&file))
}
return nil, res, nil
}
func (p Pan123) GetFile(path string, account *model.Account) (*Pan123File, error) {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err := p.Path(dir, account)
if err != nil {
return nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]Pan123File)
for _, file := range parentFiles {
if file.FileName == name {
if file.Type != 1 {
return &file, err
} else {
return nil, fmt.Errorf("not file")
}
}
}
return nil, fmt.Errorf("path not found")
}
type Pan123DownResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
DownloadUrl string `json:"DownloadUrl"`
} `json:"data"`
}
func (p Pan123) Link(path string, account *model.Account) (string, error) {
file, err := p.GetFile(utils.ParsePath(path), account)
if err != nil {
return "", err
}
var resp Pan123DownResp
_, err = pan123Client.R().SetResult(&resp).SetHeader("authorization", "Bearer "+account.AccessToken).
SetBody(Json{
"driveId": 0,
"etag": file.Etag,
"fileId": file.FileId,
"fileName": file.FileName,
"s3keyFlag": file.S3KeyFlag,
"size": file.Size,
"type": file.Type,
}).Post("https://www.123pan.com/api/file/download_info")
if err != nil {
return "", err
}
if resp.Code != 0 {
if resp.Code == 401 {
err := p.Login(account)
if err != nil {
return "", err
}
return p.Link(path, account)
}
return "", fmt.Errorf(resp.Message)
}
return resp.Data.DownloadUrl, nil
}
func (p Pan123) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Del("origin")
}
func (p Pan123) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
var _ Driver = (*Pan123)(nil)
func init() {
RegisterDriver("123Pan", &Pan123{})
pan123Client.SetRetryCount(3)
}

drivers/123pan/123pan.go (new file)

@@ -0,0 +1,151 @@
package _23pan
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"path/filepath"
"strconv"
"time"
)
var pan123Client = resty.New()
type Pan123TokenResp struct {
Code int `json:"code"`
Data struct {
Token string `json:"token"`
} `json:"data"`
Message string `json:"message"`
}
type Pan123File struct {
FileName string `json:"FileName"`
Size int64 `json:"Size"`
UpdateAt *time.Time `json:"UpdateAt"`
FileId int64 `json:"FileId"`
Type int `json:"Type"`
Etag string `json:"Etag"`
S3KeyFlag string `json:"S3KeyFlag"`
}
type Pan123Files struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
InfoList []Pan123File `json:"InfoList"`
Next string `json:"Next"`
} `json:"data"`
}
type Pan123DownResp struct {
Code int `json:"code"`
Message string `json:"message"`
Data struct {
DownloadUrl string `json:"DownloadUrl"`
} `json:"data"`
}
func (driver Pan123) Login(account *model.Account) error {
var resp Pan123TokenResp
_, err := pan123Client.R().
SetResult(&resp).
SetBody(drivers.Json{
"passport": account.Username,
"password": account.Password,
}).Post("https://www.123pan.com/api/user/sign_in")
if err != nil {
return err
}
if resp.Code != 200 {
err = fmt.Errorf(resp.Message)
account.Status = resp.Message
} else {
account.Status = "work"
account.AccessToken = resp.Data.Token
}
_ = model.SaveAccount(account)
return err
}
func (driver Pan123) FormatFile(file *Pan123File) *model.File {
f := &model.File{
Id: strconv.FormatInt(file.FileId, 10),
Name: file.FileName,
Size: file.Size,
Driver: driverName,
UpdatedAt: file.UpdateAt,
}
if file.Type == 1 {
f.Type = conf.FOLDER
} else {
f.Type = utils.GetFileType(filepath.Ext(file.FileName))
}
return f
}
func (driver Pan123) GetFiles(parentId string, account *model.Account) ([]Pan123File, error) {
next := "0"
res := make([]Pan123File, 0)
for next != "-1" {
var resp Pan123Files
_, err := pan123Client.R().SetResult(&resp).
SetHeader("authorization", "Bearer "+account.AccessToken).
SetQueryParams(map[string]string{
"driveId": "0",
"limit": "100",
"next": next,
"orderBy": account.OrderBy,
"orderDirection": account.OrderDirection,
"parentFileId": parentId,
"trashed": "false",
}).Get("https://www.123pan.com/api/file/list")
if err != nil {
return nil, err
}
log.Debugf("%+v", resp)
if resp.Code != 0 {
if resp.Code == 401 {
err := driver.Login(account)
if err != nil {
return nil, err
}
return driver.GetFiles(parentId, account)
}
return nil, fmt.Errorf(resp.Message)
}
next = resp.Data.Next
res = append(res, resp.Data.InfoList...)
}
return res, nil
}
func (driver Pan123) GetFile(path string, account *model.Account) (*Pan123File, error) {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]Pan123File)
for _, file := range parentFiles {
if file.FileName == name {
if file.Type != conf.FOLDER {
return &file, err
} else {
return nil, drivers.NotFile
}
}
}
return nil, drivers.PathNotFound
}
func init() {
drivers.RegisterDriver(driverName, &Pan123{})
pan123Client.SetRetryCount(3)
}

drivers/123pan/driver.go (new file)

@@ -0,0 +1,181 @@
package _23pan
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"path/filepath"
)
type Pan123 struct {}
var driverName = "123Pan"
func (driver Pan123) Items() []drivers.Item {
return []drivers.Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "username",
Label: "username",
Type: "string",
Required: true,
Description: "account username/phone number",
},
{
Name: "password",
Label: "password",
Type: "string",
Required: true,
Description: "account password",
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: false,
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,fileId,updateAt,createAt",
Required: true,
},
{
Name: "order_direction",
Label: "order_direction",
Type: "select",
Values: "asc,desc",
Required: true,
},
}
}
func (driver Pan123) Save(account *model.Account, old *model.Account) error {
if account.RootFolder == "" {
account.RootFolder = "0"
}
err := driver.Login(account)
return err
}
func (driver Pan123) File(path string, account *model.Account) (*model.File, error) {
path = utils.ParsePath(path)
if path == "/" {
return &model.File{
Id: account.RootFolder,
Name: account.Name,
Size: 0,
Type: conf.FOLDER,
Driver: driverName,
UpdatedAt: account.UpdatedAt,
}, nil
}
dir, name := filepath.Split(path)
files, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == name {
return &file, nil
}
}
return nil, drivers.PathNotFound
}
func (driver Pan123) Files(path string, account *model.Account) ([]model.File, error) {
path = utils.ParsePath(path)
var rawFiles []Pan123File
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
rawFiles, _ = cache.([]Pan123File)
} else {
file, err := driver.File(path, account)
if err != nil {
return nil, err
}
rawFiles, err = driver.GetFiles(file.Id, account)
if err != nil {
return nil, err
}
if len(rawFiles) > 0 {
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), rawFiles, nil)
}
}
files := make([]model.File, 0)
for _, file := range rawFiles {
files = append(files, *driver.FormatFile(&file))
}
return files, nil
}
func (driver Pan123) Link(path string, account *model.Account) (string, error) {
file, err := driver.GetFile(utils.ParsePath(path), account)
if err != nil {
return "", err
}
var resp Pan123DownResp
_, err = pan123Client.R().SetResult(&resp).SetHeader("authorization", "Bearer "+account.AccessToken).
SetBody(drivers.Json{
"driveId": 0,
"etag": file.Etag,
"fileId": file.FileId,
"fileName": file.FileName,
"s3keyFlag": file.S3KeyFlag,
"size": file.Size,
"type": file.Type,
}).Post("https://www.123pan.com/api/file/download_info")
if err != nil {
return "", err
}
if resp.Code != 0 {
if resp.Code == 401 {
err := driver.Login(account)
if err != nil {
return "", err
}
return driver.Link(path, account)
}
return "", fmt.Errorf(resp.Message)
}
return resp.Data.DownloadUrl, nil
}
func (driver Pan123) Path(path string, account *model.Account) (*model.File, []model.File, error) {
path = utils.ParsePath(path)
log.Debugf("pan123 path: %s", path)
file, err := driver.File(path, account)
if err != nil {
return nil, nil, err
}
if file.Type != conf.FOLDER {
file.Url, _ = driver.Link(path, account)
return file, nil, nil
}
files, err := driver.Files(path, account)
if err != nil {
return nil, nil, err
}
return nil, files, nil
}
func (driver Pan123) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Del("origin")
}
func (driver Pan123) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
var _ drivers.Driver = (*Pan123)(nil)
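
For orientation, this is roughly how a caller drives the refactored driver API; the helper below is an illustrative sketch, not code from this changeset:

// Hypothetical caller; assumes the drivers, model and logrus imports already used in this diff.
func listOrLink(path string, account *model.Account) {
	driver, ok := drivers.GetDriver(account.Type)
	if !ok {
		log.Errorf("no [%s] driver", account.Type)
		return
	}
	file, files, err := driver.Path(path, account)
	switch {
	case err != nil:
		log.Errorf("path failed: %s", err.Error())
	case file != nil:
		// single file: Path already filled file.Url via Link
		log.Infof("file %s -> %s", file.Name, file.Url)
	default:
		log.Infof("folder with %d entries", len(files))
	}
}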


@@ -1,4 +1,4 @@
package drivers
package _89cloud
import (
"crypto/rand"
@@ -9,9 +9,9 @@ import (
"encoding/pem"
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
mathRand "math/rand"
@@ -22,76 +22,12 @@ import (
"time"
)
type Cloud189 struct {
}
var client189Map map[string]*resty.Client
func (c Cloud189) Items() []Item {
return []Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "username",
Label: "username",
Type: "string",
Required: true,
Description: "account username/phone number",
},
{
Name: "password",
Label: "password",
Type: "string",
Required: true,
Description: "account password",
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: true,
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,size,lastOpTime,createdDate",
Required: true,
},
{
Name: "order_direction",
Label: "desc",
Type: "select",
Values: "true,false",
Required: true,
},
}
}
func (c Cloud189) Save(account *model.Account, old *model.Account) error {
if old != nil && old.Name != account.Name {
delete(client189Map, old.Name)
}
if err := c.Login(account); err != nil {
account.Status = err.Error()
_ = model.SaveAccount(account)
return err
}
account.Status = "work"
err := model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
func (c Cloud189) FormatFile(file *Cloud189File) *model.File {
func (driver Cloud189) FormatFile(file *Cloud189File) *model.File {
f := &model.File{
Id: strconv.FormatInt(file.Id, 10),
Name: file.Name,
Size: file.Size,
Driver: "189Cloud",
@@ -113,84 +49,26 @@ func (c Cloud189) FormatFile(file *Cloud189File) *model.File {
return f
}
func (c Cloud189) Path(path string, account *model.Account) (*model.File, []*model.File, error) {
path = utils.ParsePath(path)
log.Debugf("189 path: %s", path)
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
files, _ := cache.([]Cloud189File)
if len(files) != 0 {
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, c.FormatFile(&file))
}
return nil, res, nil
}
}
// no cache or len(files) == 0
fileId := account.RootFolder
if path != "/" {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err = c.Path(dir, account)
if err != nil {
return nil, nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]Cloud189File)
found := false
for _, file := range parentFiles {
if file.Name == name {
found = true
if file.Size != -1 {
url, err := c.Link(path, account)
if err != nil {
return nil, nil, err
}
file.Url = url
return c.FormatFile(&file), nil, nil
} else {
fileId = strconv.FormatInt(file.Id, 10)
break
}
}
}
if !found {
return nil, nil, fmt.Errorf("path not found")
}
}
files, err := c.GetFiles(fileId, account)
if err != nil {
return nil, nil, err
}
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), files, nil)
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, c.FormatFile(&file))
}
return nil, res, nil
}
func (c Cloud189) GetFile(path string, account *model.Account) (*Cloud189File, error) {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err := c.Path(dir, account)
if err != nil {
return nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]Cloud189File)
for _, file := range parentFiles {
if file.Name == name {
if file.Size != -1 {
return &file, err
} else {
return nil, fmt.Errorf("not file")
}
}
}
return nil, fmt.Errorf("path not found")
}
//func (c Cloud189) GetFile(path string, account *model.Account) (*Cloud189File, error) {
// dir, name := filepath.Split(path)
// dir = utils.ParsePath(dir)
// _, _, err := c.Path(dir, account)
// if err != nil {
// return nil, err
// }
// parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
// parentFiles, _ := parentFiles_.([]Cloud189File)
// for _, file := range parentFiles {
// if file.Name == name {
// if file.Size != -1 {
// return &file, err
// } else {
// return nil, NotFile
// }
// }
// }
// return nil, PathNotFound
//}
type Cloud189Down struct {
ResCode int `json:"res_code"`
@@ -198,63 +76,6 @@ type Cloud189Down struct {
FileDownloadUrl string `json:"fileDownloadUrl"`
}
func (c Cloud189) Link(path string, account *model.Account) (string, error) {
file, err := c.GetFile(utils.ParsePath(path), account)
if err != nil {
return "", err
}
client, ok := client189Map[account.Name]
if !ok {
return "", fmt.Errorf("can't find [%s] client", account.Name)
}
var e Cloud189Error
var resp Cloud189Down
_, err = client.R().SetResult(&resp).SetError(&e).
SetHeader("Accept", "application/json;charset=UTF-8").
SetQueryParams(map[string]string{
"noCache": random(),
"fileId": strconv.FormatInt(file.Id, 10),
}).Get("https://cloud.189.cn/api/open/file/getFileDownloadUrl.action")
if err != nil {
return "", err
}
if e.ErrorCode != "" {
if e.ErrorCode == "InvalidSessionKey" {
err = c.Login(account)
if err != nil {
return "", err
}
return c.Link(path, account)
}
}
if resp.ResCode != 0 {
return "", fmt.Errorf(resp.ResMessage)
}
res, err := noRedirectClient.R().Get(resp.FileDownloadUrl)
if err != nil {
return "", err
}
if res.StatusCode() == 302 {
return res.Header().Get("location"), nil
}
return resp.FileDownloadUrl, nil
}
func (c Cloud189) Proxy(ctx *gin.Context, account *model.Account) {
ctx.Request.Header.Del("Origin")
}
func (c Cloud189) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
var _ Driver = (*Cloud189)(nil)
func init() {
RegisterDriver("189Cloud", &Cloud189{})
client189Map = make(map[string]*resty.Client, 0)
}
type LoginResp struct {
Msg string `json:"msg"`
Result int `json:"result"`
@@ -262,7 +83,7 @@ type LoginResp struct {
}
// Login refer to PanIndex
func (c Cloud189) Login(account *model.Account) error {
func (driver Cloud189) Login(account *model.Account) error {
client, ok := client189Map[account.Name]
if !ok {
//cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
@@ -271,18 +92,25 @@ func (c Cloud189) Login(account *model.Account) error {
client.SetRetryCount(3)
}
url := "https://cloud.189.cn/api/portal/loginUrl.action?redirectURL=https%3A%2F%2Fcloud.189.cn%2Fmain.action"
res, err := client.R().Get(url)
if err != nil {
return err
}
b := res.String()
b := ""
lt := ""
ltText := regexp.MustCompile(`lt = "(.+?)"`)
ltTextArr := ltText.FindStringSubmatch(b)
if len(ltTextArr) > 0 {
lt = ltTextArr[1]
} else {
return fmt.Errorf("ltTextArr = 0")
for i := 0; i < 3; i++ {
res, err := client.R().Get(url)
if err != nil {
return err
}
b = res.String()
ltTextArr := ltText.FindStringSubmatch(b)
if len(ltTextArr) > 0 {
lt = ltTextArr[1]
break
} else {
<-time.After(time.Second)
}
}
if lt == "" {
return fmt.Errorf("get empty login page")
}
captchaToken := regexp.MustCompile(`captchaToken' value='(.+?)'`).FindStringSubmatch(b)[1]
returnUrl := regexp.MustCompile(`returnUrl = '(.+?)'`).FindStringSubmatch(b)[1]
@@ -298,7 +126,7 @@ func (c Cloud189) Login(account *model.Account) error {
passwordRsa := RsaEncode([]byte(account.Password), jRsakey)
url = "https://open.e.189.cn/api/logbox/oauth2/loginSubmit.do"
var loginResp LoginResp
res, err = client.R().
res, err := client.R().
SetHeaders(map[string]string{
"lt": lt,
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
@@ -372,7 +200,7 @@ type Cloud189Files struct {
} `json:"fileListAO"`
}
func (c Cloud189) GetFiles(fileId string, account *model.Account) ([]Cloud189File, error) {
func (driver Cloud189) GetFiles(fileId string, account *model.Account) ([]Cloud189File, error) {
client, ok := client189Map[account.Name]
if !ok {
return nil, fmt.Errorf("can't find [%s] client", account.Name)
@@ -399,11 +227,11 @@ func (c Cloud189) GetFiles(fileId string, account *model.Account) ([]Cloud189Fil
}
if e.ErrorCode != "" {
if e.ErrorCode == "InvalidSessionKey" {
err = c.Login(account)
err = driver.Login(account)
if err != nil {
return nil, err
}
return c.GetFiles(fileId, account)
return driver.GetFiles(fileId, account)
}
}
if resp.ResCode != 0 {
@@ -483,3 +311,8 @@ func b64tohex(a string) string {
}
return d
}
func init() {
drivers.RegisterDriver(driverName, &Cloud189{})
client189Map = make(map[string]*resty.Client, 0)
}

drivers/189cloud/driver.go (new file)

@@ -0,0 +1,203 @@
package _89cloud
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"path/filepath"
)
type Cloud189 struct {}
var driverName = "189Cloud"
func (driver Cloud189) Items() []drivers.Item {
return []drivers.Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "username",
Label: "username",
Type: "string",
Required: true,
Description: "account username/phone number",
},
{
Name: "password",
Label: "password",
Type: "string",
Required: true,
Description: "account password",
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: true,
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,size,lastOpTime,createdDate",
Required: true,
},
{
Name: "order_direction",
Label: "desc",
Type: "select",
Values: "true,false",
Required: true,
},
}
}
func (driver Cloud189) Save(account *model.Account, old *model.Account) error {
if old != nil && old.Name != account.Name {
delete(client189Map, old.Name)
}
if err := driver.Login(account); err != nil {
account.Status = err.Error()
_ = model.SaveAccount(account)
return err
}
account.Status = "work"
err := model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
func (driver Cloud189) File(path string, account *model.Account) (*model.File, error) {
path = utils.ParsePath(path)
if path == "/" {
return &model.File{
Id: account.RootFolder,
Name: account.Name,
Size: 0,
Type: conf.FOLDER,
Driver: driverName,
UpdatedAt: account.UpdatedAt,
}, nil
}
dir, name := filepath.Split(path)
files, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == name {
return &file, nil
}
}
return nil, drivers.PathNotFound
}
func (driver Cloud189) Files(path string, account *model.Account) ([]model.File, error) {
path = utils.ParsePath(path)
var rawFiles []Cloud189File
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
rawFiles, _ = cache.([]Cloud189File)
} else {
file, err := driver.File(path, account)
if err != nil {
return nil, err
}
rawFiles, err = driver.GetFiles(file.Id, account)
if err != nil {
return nil, err
}
if len(rawFiles) > 0 {
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), rawFiles, nil)
}
}
files := make([]model.File, 0)
for _, file := range rawFiles {
files = append(files, *driver.FormatFile(&file))
}
return files, nil
}
func (driver Cloud189) Link(path string, account *model.Account) (string, error) {
file, err := driver.File(utils.ParsePath(path), account)
if err != nil {
return "", err
}
if file.Type == conf.FOLDER {
return "", drivers.NotFile
}
client, ok := client189Map[account.Name]
if !ok {
return "", fmt.Errorf("can't find [%s] client", account.Name)
}
var e Cloud189Error
var resp Cloud189Down
_, err = client.R().SetResult(&resp).SetError(&e).
SetHeader("Accept", "application/json;charset=UTF-8").
SetQueryParams(map[string]string{
"noCache": random(),
"fileId": file.Id,
}).Get("https://cloud.189.cn/api/open/file/getFileDownloadUrl.action")
if err != nil {
return "", err
}
if e.ErrorCode != "" {
if e.ErrorCode == "InvalidSessionKey" {
err = driver.Login(account)
if err != nil {
return "", err
}
return driver.Link(path, account)
}
}
if resp.ResCode != 0 {
return "", fmt.Errorf(resp.ResMessage)
}
res, err := drivers.NoRedirectClient.R().Get(resp.FileDownloadUrl)
if err != nil {
return "", err
}
if res.StatusCode() == 302 {
return res.Header().Get("location"), nil
}
return resp.FileDownloadUrl, nil
}
func (driver Cloud189) Path(path string, account *model.Account) (*model.File, []model.File, error) {
path = utils.ParsePath(path)
log.Debugf("189 path: %s", path)
file, err := driver.File(path, account)
if err != nil {
return nil, nil, err
}
if file.Type != conf.FOLDER {
file.Url, _ = driver.Link(path, account)
return file, nil, nil
}
files, err := driver.Files(path, account)
if err != nil {
return nil, nil, err
}
return nil, files, nil
}
func (driver Cloud189) Proxy(ctx *gin.Context, account *model.Account) {
ctx.Request.Header.Del("Origin")
}
func (driver Cloud189) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
var _ drivers.Driver = (*Cloud189)(nil)


@@ -1,394 +0,0 @@
package drivers
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
"github.com/robfig/cron/v3"
log "github.com/sirupsen/logrus"
"path/filepath"
"time"
)
var aliClient = resty.New()
func init() {
RegisterDriver("AliDrive", &AliDrive{})
aliClient.
SetRetryCount(3).
SetHeader("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36").
SetHeader("content-type", "application/json").
SetHeader("origin", "https://aliyundrive.com")
}
type AliDrive struct{}
func (a AliDrive) Preview(path string, account *model.Account) (interface{}, error) {
file, err := a.GetFile(path, account)
if err != nil {
return nil, err
}
// office
var resp Json
var e AliRespError
var url string
req := Json{
"drive_id": account.DriveId,
"file_id": file.FileId,
}
switch file.Category {
case "doc":
{
url = "https://api.aliyundrive.com/v2/file/get_office_preview_url"
req["access_token"] = account.AccessToken
}
case "video":
{
url = "https://api.aliyundrive.com/v2/file/get_video_preview_play_info"
req["category"] = "live_transcoding"
}
default:
return nil, fmt.Errorf("don't support")
}
_, err = aliClient.R().SetResult(&resp).SetError(&e).
SetHeader("authorization", "Bearer\t"+account.AccessToken).
SetBody(req).Post(url)
if err != nil {
return nil, err
}
if e.Code != "" {
return nil, fmt.Errorf("%s", e.Message)
}
return resp, nil
}
func (a AliDrive) Items() []Item {
return []Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,size,updated_at,created_at",
Required: false,
},
{
Name: "order_direction",
Label: "order_direction",
Type: "select",
Values: "ASC,DESC",
Required: false,
},
{
Name: "refresh_token",
Label: "refresh token",
Type: "string",
Required: true,
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: false,
},
{
Name: "limit",
Label: "limit",
Type: "number",
Required: false,
Description: ">0 and <=200",
},
}
}
func (a AliDrive) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Del("Origin")
c.Request.Header.Set("Referer", "https://www.aliyundrive.com/")
}
type AliRespError struct {
Code string `json:"code"`
Message string `json:"message"`
}
type AliFiles struct {
Items []AliFile `json:"items"`
NextMarker string `json:"next_marker"`
}
type AliFile struct {
DriveId string `json:"drive_id"`
CreatedAt *time.Time `json:"created_at"`
FileExtension string `json:"file_extension"`
FileId string `json:"file_id"`
Type string `json:"type"`
Name string `json:"name"`
Category string `json:"category"`
ParentFileId string `json:"parent_file_id"`
UpdatedAt *time.Time `json:"updated_at"`
Size int64 `json:"size"`
Thumbnail string `json:"thumbnail"`
Url string `json:"url"`
}
func (a AliDrive) FormatFile(file *AliFile) *model.File {
f := &model.File{
Name: file.Name,
Size: file.Size,
UpdatedAt: file.UpdatedAt,
Thumbnail: file.Thumbnail,
Driver: "AliDrive",
Url: file.Url,
}
if file.Type == "folder" {
f.Type = conf.FOLDER
} else {
f.Type = utils.GetFileType(file.FileExtension)
}
if file.Category == "video" {
f.Type = conf.VIDEO
}
if file.Category == "image" {
f.Type = conf.IMAGE
}
return f
}
func (a AliDrive) GetFiles(fileId string, account *model.Account) ([]AliFile, error) {
marker := "first"
res := make([]AliFile, 0)
for marker != "" {
if marker == "first" {
marker = ""
}
var resp AliFiles
var e AliRespError
_, err := aliClient.R().
SetResult(&resp).
SetError(&e).
SetHeader("authorization", "Bearer\t"+account.AccessToken).
SetBody(Json{
"drive_id": account.DriveId,
"fields": "*",
"image_thumbnail_process": "image/resize,w_400/format,jpeg",
"image_url_process": "image/resize,w_1920/format,jpeg",
"limit": account.Limit,
"marker": marker,
"order_by": account.OrderBy,
"order_direction": account.OrderDirection,
"parent_file_id": fileId,
"video_thumbnail_process": "video/snapshot,t_0,f_jpg,ar_auto,w_300",
"url_expire_sec": 14400,
}).Post("https://api.aliyundrive.com/v2/file/list")
if err != nil {
return nil, err
}
if e.Code != "" {
if e.Code == "AccessTokenInvalid" {
err = a.RefreshToken(account)
if err != nil {
return nil, err
} else {
_ = model.SaveAccount(account)
return a.GetFiles(fileId, account)
}
}
return nil, fmt.Errorf("%s", e.Message)
}
marker = resp.NextMarker
res = append(res, resp.Items...)
}
return res, nil
}
func (a AliDrive) GetFile(path string, account *model.Account) (*AliFile, error) {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err := a.Path(dir, account)
if err != nil {
return nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]AliFile)
for _, file := range parentFiles {
if file.Name == name {
if file.Type == "file" {
return &file, err
} else {
return nil, fmt.Errorf("not file")
}
}
}
return nil, fmt.Errorf("path not found")
}
// path: /aaa/bbb
func (a AliDrive) Path(path string, account *model.Account) (*model.File, []*model.File, error) {
path = utils.ParsePath(path)
log.Debugf("ali path: %s", path)
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
files, _ := cache.([]AliFile)
if len(files) != 0 {
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, a.FormatFile(&file))
}
return nil, res, nil
}
}
// no cache or len(files) == 0
fileId := account.RootFolder
if path != "/" {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err = a.Path(dir, account)
if err != nil {
return nil, nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]AliFile)
found := false
for _, file := range parentFiles {
if file.Name == name {
found = true
if file.Type == "file" {
url, err := a.Link(path, account)
if err != nil {
return nil, nil, err
}
file.Url = url
return a.FormatFile(&file), nil, nil
} else {
fileId = file.FileId
break
}
}
}
if !found {
return nil, nil, fmt.Errorf("path not found")
}
}
files, err := a.GetFiles(fileId, account)
if err != nil {
return nil, nil, err
}
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), files, nil)
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, a.FormatFile(&file))
}
return nil, res, nil
}
func (a AliDrive) Link(path string, account *model.Account) (string, error) {
file, err := a.GetFile(utils.ParsePath(path), account)
if err != nil {
return "", err
}
var resp Json
var e AliRespError
_, err = aliClient.R().SetResult(&resp).
SetError(&e).
SetHeader("authorization", "Bearer\t"+account.AccessToken).
SetBody(Json{
"drive_id": account.DriveId,
"file_id": file.FileId,
"expire_sec": 14400,
}).Post("https://api.aliyundrive.com/v2/file/get_download_url")
if err != nil {
return "", err
}
if e.Code != "" {
if e.Code == "AccessTokenInvalid" {
err = a.RefreshToken(account)
if err != nil {
return "", err
} else {
_ = model.SaveAccount(account)
return a.Link(path, account)
}
}
return "", fmt.Errorf("%s", e.Message)
}
return resp["url"].(string), nil
}
func (a AliDrive) RefreshToken(account *model.Account) error {
url := "https://auth.aliyundrive.com/v2/account/token"
var resp TokenResp
var e AliRespError
_, err := aliClient.R().
//ForceContentType("application/json").
SetBody(Json{"refresh_token": account.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
Post(url)
if err != nil {
account.Status = err.Error()
return err
}
log.Debugf("%+v,%+v", resp, e)
if e.Code != "" {
account.Status = e.Message
return fmt.Errorf("failed to refresh token: %s", e.Message)
}else {
account.Status = "work"
}
account.RefreshToken, account.AccessToken = resp.RefreshToken, resp.AccessToken
return nil
}
func (a AliDrive) Save(account *model.Account, old *model.Account) error {
if old != nil {
conf.Cron.Remove(cron.EntryID(old.CronId))
}
if account.RootFolder == "" {
account.RootFolder = "root"
}
if account.Limit == 0 {
account.Limit = 200
}
err := a.RefreshToken(account)
if err != nil {
return err
}
var resp Json
_, _ = aliClient.R().SetResult(&resp).
SetBody("{}").
SetHeader("authorization", "Bearer\t"+account.AccessToken).
Post("https://api.aliyundrive.com/v2/user/get")
log.Debugf("user info: %+v", resp)
account.DriveId = resp["default_drive_id"].(string)
cronId, err := conf.Cron.AddFunc("@every 2h", func() {
name := account.Name
log.Debugf("ali account name: %s", name)
newAccount, ok := model.GetAccount(name)
log.Debugf("ali account: %+v", newAccount)
if !ok {
return
}
err = a.RefreshToken(&newAccount)
_ = model.SaveAccount(&newAccount)
})
if err != nil {
return err
}
account.CronId = int(cronId)
err = model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
var _ Driver = (*AliDrive)(nil)


@@ -0,0 +1,166 @@
package alidrive
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"path/filepath"
"time"
)
var aliClient = resty.New()
type AliRespError struct {
Code string `json:"code"`
Message string `json:"message"`
}
type AliFiles struct {
Items []AliFile `json:"items"`
NextMarker string `json:"next_marker"`
}
type AliFile struct {
DriveId string `json:"drive_id"`
CreatedAt *time.Time `json:"created_at"`
FileExtension string `json:"file_extension"`
FileId string `json:"file_id"`
Type string `json:"type"`
Name string `json:"name"`
Category string `json:"category"`
ParentFileId string `json:"parent_file_id"`
UpdatedAt *time.Time `json:"updated_at"`
Size int64 `json:"size"`
Thumbnail string `json:"thumbnail"`
Url string `json:"url"`
}
func (driver AliDrive) FormatFile(file *AliFile) *model.File {
f := &model.File{
Id: file.FileId,
Name: file.Name,
Size: file.Size,
UpdatedAt: file.UpdatedAt,
Thumbnail: file.Thumbnail,
Driver: driverName,
Url: file.Url,
}
if file.Type == "folder" {
f.Type = conf.FOLDER
} else {
f.Type = utils.GetFileType(file.FileExtension)
}
if file.Category == "video" {
f.Type = conf.VIDEO
}
if file.Category == "image" {
f.Type = conf.IMAGE
}
return f
}
func (driver AliDrive) GetFiles(fileId string, account *model.Account) ([]AliFile, error) {
marker := "first"
res := make([]AliFile, 0)
for marker != "" {
if marker == "first" {
marker = ""
}
var resp AliFiles
var e AliRespError
_, err := aliClient.R().
SetResult(&resp).
SetError(&e).
SetHeader("authorization", "Bearer\t"+account.AccessToken).
SetBody(drivers.Json{
"drive_id": account.DriveId,
"fields": "*",
"image_thumbnail_process": "image/resize,w_400/format,jpeg",
"image_url_process": "image/resize,w_1920/format,jpeg",
"limit": account.Limit,
"marker": marker,
"order_by": account.OrderBy,
"order_direction": account.OrderDirection,
"parent_file_id": fileId,
"video_thumbnail_process": "video/snapshot,t_0,f_jpg,ar_auto,w_300",
"url_expire_sec": 14400,
}).Post("https://api.aliyundrive.com/v2/file/list")
if err != nil {
return nil, err
}
if e.Code != "" {
if e.Code == "AccessTokenInvalid" {
err = driver.RefreshToken(account)
if err != nil {
return nil, err
} else {
_ = model.SaveAccount(account)
return driver.GetFiles(fileId, account)
}
}
return nil, fmt.Errorf("%s", e.Message)
}
marker = resp.NextMarker
res = append(res, resp.Items...)
}
return res, nil
}
func (driver AliDrive) GetFile(path string, account *model.Account) (*AliFile, error) {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]AliFile)
for _, file := range parentFiles {
if file.Name == name {
if file.Type == "file" {
return &file, err
} else {
return nil, fmt.Errorf("not file")
}
}
}
return nil, drivers.PathNotFound
}
func (driver AliDrive) RefreshToken(account *model.Account) error {
url := "https://auth.aliyundrive.com/v2/account/token"
var resp drivers.TokenResp
var e AliRespError
_, err := aliClient.R().
//ForceContentType("application/json").
SetBody(drivers.Json{"refresh_token": account.RefreshToken, "grant_type": "refresh_token"}).
SetResult(&resp).
SetError(&e).
Post(url)
if err != nil {
account.Status = err.Error()
return err
}
log.Debugf("%+v,%+v", resp, e)
if e.Code != "" {
account.Status = e.Message
return fmt.Errorf("failed to refresh token: %s", e.Message)
} else {
account.Status = "work"
}
account.RefreshToken, account.AccessToken = resp.RefreshToken, resp.AccessToken
return nil
}
func init() {
drivers.RegisterDriver(driverName, &AliDrive{})
aliClient.
SetRetryCount(3).
SetHeader("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36").
SetHeader("content-type", "application/json").
SetHeader("origin", "https://aliyundrive.com")
}

drivers/alidrive/driver.go (new file)

@@ -0,0 +1,253 @@
package alidrive
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/robfig/cron/v3"
log "github.com/sirupsen/logrus"
"path/filepath"
)
type AliDrive struct{}
var driverName = "AliDrive"
func (driver AliDrive) Items() []drivers.Item {
return []drivers.Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,size,updated_at,created_at",
Required: false,
},
{
Name: "order_direction",
Label: "order_direction",
Type: "select",
Values: "ASC,DESC",
Required: false,
},
{
Name: "refresh_token",
Label: "refresh token",
Type: "string",
Required: true,
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: false,
},
{
Name: "limit",
Label: "limit",
Type: "number",
Required: false,
Description: ">0 and <=200",
},
}
}
func (driver AliDrive) Save(account *model.Account, old *model.Account) error {
if old != nil {
conf.Cron.Remove(cron.EntryID(old.CronId))
}
if account.RootFolder == "" {
account.RootFolder = "root"
}
if account.Limit == 0 {
account.Limit = 200
}
err := driver.RefreshToken(account)
if err != nil {
return err
}
var resp drivers.Json
_, _ = aliClient.R().SetResult(&resp).
SetBody("{}").
SetHeader("authorization", "Bearer\t"+account.AccessToken).
Post("https://api.aliyundrive.com/v2/user/get")
log.Debugf("user info: %+v", resp)
account.DriveId = resp["default_drive_id"].(string)
cronId, err := conf.Cron.AddFunc("@every 2h", func() {
name := account.Name
log.Debugf("ali account name: %s", name)
newAccount, ok := model.GetAccount(name)
log.Debugf("ali account: %+v", newAccount)
if !ok {
return
}
err = driver.RefreshToken(&newAccount)
_ = model.SaveAccount(&newAccount)
})
if err != nil {
return err
}
account.CronId = int(cronId)
err = model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
func (driver AliDrive) File(path string, account *model.Account) (*model.File, error) {
path = utils.ParsePath(path)
if path == "/" {
return &model.File{
Id: account.RootFolder,
Name: account.Name,
Size: 0,
Type: conf.FOLDER,
Driver: driverName,
UpdatedAt: account.UpdatedAt,
}, nil
}
dir, name := filepath.Split(path)
files, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == name {
return &file, nil
}
}
return nil, drivers.PathNotFound
}
func (driver AliDrive) Files(path string, account *model.Account) ([]model.File, error) {
path = utils.ParsePath(path)
var rawFiles []AliFile
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
rawFiles, _ = cache.([]AliFile)
} else {
file, err := driver.File(path, account)
if err != nil {
return nil, err
}
rawFiles, err = driver.GetFiles(file.Id, account)
if err != nil {
return nil, err
}
if len(rawFiles) > 0 {
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), rawFiles, nil)
}
}
files := make([]model.File, 0)
for _, file := range rawFiles {
files = append(files, *driver.FormatFile(&file))
}
return files, nil
}
func (driver AliDrive) Link(path string, account *model.Account) (string, error) {
file, err := driver.File(path, account)
if err != nil {
return "", err
}
var resp drivers.Json
var e AliRespError
_, err = aliClient.R().SetResult(&resp).
SetError(&e).
SetHeader("authorization", "Bearer\t"+account.AccessToken).
SetBody(drivers.Json{
"drive_id": account.DriveId,
"file_id": file.Id,
"expire_sec": 14400,
}).Post("https://api.aliyundrive.com/v2/file/get_download_url")
if err != nil {
return "", err
}
if e.Code != "" {
if e.Code == "AccessTokenInvalid" {
err = driver.RefreshToken(account)
if err != nil {
return "", err
} else {
_ = model.SaveAccount(account)
return driver.Link(path, account)
}
}
return "", fmt.Errorf("%s", e.Message)
}
return resp["url"].(string), nil
}
func (driver AliDrive) Path(path string, account *model.Account) (*model.File, []model.File, error) {
path = utils.ParsePath(path)
log.Debugf("ali path: %s", path)
file, err := driver.File(path, account)
if err != nil {
return nil, nil, err
}
if file.Type != conf.FOLDER {
file.Url, _ = driver.Link(path, account)
return file, nil, nil
}
files, err := driver.Files(path, account)
if err != nil {
return nil, nil, err
}
return nil, files, nil
}
func (driver AliDrive) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Del("Origin")
c.Request.Header.Set("Referer", "https://www.aliyundrive.com/")
}
func (driver AliDrive) Preview(path string, account *model.Account) (interface{}, error) {
file, err := driver.GetFile(path, account)
if err != nil {
return nil, err
}
// office
var resp drivers.Json
var e AliRespError
var url string
req := drivers.Json{
"drive_id": account.DriveId,
"file_id": file.FileId,
}
switch file.Category {
case "doc":
{
url = "https://api.aliyundrive.com/v2/file/get_office_preview_url"
req["access_token"] = account.AccessToken
}
case "video":
{
url = "https://api.aliyundrive.com/v2/file/get_video_preview_play_info"
req["category"] = "live_transcoding"
}
default:
return nil, fmt.Errorf("don't support")
}
_, err = aliClient.R().SetResult(&resp).SetError(&e).
SetHeader("authorization", "Bearer\t"+account.AccessToken).
SetBody(req).Post(url)
if err != nil {
return nil, err
}
if e.Code != "" {
return nil, fmt.Errorf("%s", e.Message)
}
return resp, nil
}
var _ drivers.Driver = (*AliDrive)(nil)


@@ -4,14 +4,17 @@ import (
"github.com/Xhofe/alist/model"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"net/http"
)
type Driver interface {
Items() []Item
Save(account *model.Account, old *model.Account) error
Path(path string, account *model.Account) (*model.File, []*model.File, error)
File(path string, account *model.Account) (*model.File, error)
Files(path string, account *model.Account) ([]model.File, error)
Link(path string, account *model.Account) (string, error)
Path(path string, account *model.Account) (*model.File, []model.File, error)
Proxy(c *gin.Context, account *model.Account)
Preview(path string, account *model.Account) (interface{}, error)
// TODO
@@ -39,6 +42,7 @@ type TokenResp struct {
var driversMap = map[string]Driver{}
func RegisterDriver(name string, driver Driver) {
log.Infof("register driver: [%s]", name)
driversMap[name] = driver
}
@@ -57,10 +61,10 @@ func GetDrivers() map[string][]Item {
type Json map[string]interface{}
var noRedirectClient *resty.Client
var NoRedirectClient *resty.Client
func init() {
noRedirectClient = resty.New().SetRedirectPolicy(
NoRedirectClient = resty.New().SetRedirectPolicy(
resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}),
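
Read together with its context lines, the hunk above leaves the Driver interface looking like this (reconstructed; the hunk is cut off at the TODO comment, so any later additions are not shown):

type Driver interface {
	Items() []Item
	Save(account *model.Account, old *model.Account) error
	File(path string, account *model.Account) (*model.File, error)
	Files(path string, account *model.Account) ([]model.File, error)
	Link(path string, account *model.Account) (string, error)
	Path(path string, account *model.Account) (*model.File, []model.File, error)
	Proxy(c *gin.Context, account *model.Account)
	Preview(path string, account *model.Account) (interface{}, error)
	// TODO ...
}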

drivers/error.go (new file)

@@ -0,0 +1,8 @@
package drivers
import "fmt"
var (
PathNotFound = fmt.Errorf("path not found")
NotFile = fmt.Errorf("not file")
)
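
These sentinels replace the ad-hoc fmt.Errorf("path not found") and fmt.Errorf("not file") values in the old drivers, so callers can branch on them by identity. An illustrative check (not from this changeset):

// Hypothetical helper: a missing path can be turned into a 404 instead of a generic failure.
func isMissing(driver drivers.Driver, path string, account *model.Account) bool {
	_, err := driver.File(path, account)
	return err == drivers.PathNotFound
}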


@@ -1,290 +0,0 @@
package drivers
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"path/filepath"
"strconv"
"time"
)
type GoogleDrive struct {
}
var googleClient = resty.New()
func (g GoogleDrive) Items() []Item {
return []Item{
{
Name: "client_id",
Label: "client id",
Type: "string",
Required: true,
},
{
Name: "client_secret",
Label: "client secret",
Type: "string",
Required: true,
},
{
Name: "refresh_token",
Label: "refresh token",
Type: "string",
Required: true,
},
{
Name: "root_folder",
Label: "root folder path",
Type: "string",
Required: true,
},
}
}
type GoogleTokenError struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
}
func (g GoogleDrive) RefreshToken(account *model.Account) error {
url := "https://www.googleapis.com/oauth2/v4/token"
var resp TokenResp
var e GoogleTokenError
_, err := googleClient.R().SetResult(&resp).SetError(&e).
SetFormData(map[string]string{
"client_id": account.ClientId,
"client_secret": account.ClientSecret,
"refresh_token": account.RefreshToken,
"grant_type": "refresh_token",
}).Post(url)
if err != nil {
return err
}
if e.Error != "" {
return fmt.Errorf(e.Error)
}
account.AccessToken = resp.AccessToken
account.Status = "work"
return nil
}
func (g GoogleDrive) Save(account *model.Account, old *model.Account) error {
account.Proxy = true
err := g.RefreshToken(account)
if err != nil {
account.Status = err.Error()
_ = model.SaveAccount(account)
return err
}
account.Status = "work"
_ = model.SaveAccount(account)
return nil
}
type GoogleFile struct {
Id string `json:"id"`
Name string `json:"name"`
MimeType string `json:"mimeType"`
ModifiedTime *time.Time `json:"modifiedTime"`
Size string `json:"size"`
}
func (g GoogleDrive) IsDir(mimeType string) bool {
return mimeType == "application/vnd.google-apps.folder" || mimeType == "application/vnd.google-apps.shortcut"
}
func (g GoogleDrive) FormatFile(file *GoogleFile) *model.File {
f := &model.File{
Name: file.Name,
Driver: "GoogleDrive",
UpdatedAt: file.ModifiedTime,
Thumbnail: "",
Url: "",
}
if g.IsDir(file.MimeType) {
f.Type = conf.FOLDER
} else {
size, _ := strconv.ParseInt(file.Size, 10, 64)
f.Size = size
f.Type = utils.GetFileType(filepath.Ext(file.Name))
}
return f
}
type GoogleFiles struct {
NextPageToken string `json:"nextPageToken"`
Files []GoogleFile `json:"files"`
}
type GoogleError struct {
Error struct {
Errors []struct {
Domain string `json:"domain"`
Reason string `json:"reason"`
Message string `json:"message"`
LocationType string `json:"location_type"`
Location string `json:"location"`
}
Code int `json:"code"`
Message string `json:"message"`
} `json:"error"`
}
func (g GoogleDrive) GetFiles(id string, account *model.Account) ([]GoogleFile, error) {
pageToken := "first"
res := make([]GoogleFile, 0)
for pageToken != "" {
if pageToken == "first" {
pageToken = ""
}
var resp GoogleFiles
var e GoogleError
_, err := googleClient.R().SetResult(&resp).SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
SetQueryParams(map[string]string{
"orderBy": "folder,name,modifiedTime desc",
"fields": "files(id,name,mimeType,size,modifiedTime),nextPageToken",
"pageSize": "1000",
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
"includeItemsFromAllDrives": "true",
"supportsAllDrives": "true",
"pageToken": pageToken,
}).Get("https://www.googleapis.com/drive/v3/files")
if err != nil {
return nil, err
}
if e.Error.Code != 0 {
if e.Error.Code == 401 {
err = g.RefreshToken(account)
if err != nil {
_ = model.SaveAccount(account)
return nil, err
}
return g.GetFiles(id, account)
}
return nil, fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
pageToken = resp.NextPageToken
res = append(res, resp.Files...)
}
return res, nil
}
func (g GoogleDrive) GetFile(path string, account *model.Account) (*GoogleFile, error) {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err := g.Path(dir, account)
if err != nil {
return nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]GoogleFile)
for _, file := range parentFiles {
if file.Name == name {
if !g.IsDir(file.MimeType) {
return &file, err
} else {
return nil, fmt.Errorf("not file")
}
}
}
return nil, fmt.Errorf("path not found")
}
func (g GoogleDrive) Path(path string, account *model.Account) (*model.File, []*model.File, error) {
path = utils.ParsePath(path)
log.Debugf("google path: %s", path)
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
files, _ := cache.([]GoogleFile)
if len(files) != 0 {
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, g.FormatFile(&file))
}
return nil, res, nil
}
}
// no cache or len(files) == 0
fileId := account.RootFolder
if path != "/" {
dir, name := filepath.Split(path)
dir = utils.ParsePath(dir)
_, _, err = g.Path(dir, account)
if err != nil {
return nil, nil, err
}
parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
parentFiles, _ := parentFiles_.([]GoogleFile)
found := false
for _, file := range parentFiles {
if file.Name == name {
found = true
if !g.IsDir(file.MimeType) {
return g.FormatFile(&file), nil, nil
} else {
fileId = file.Id
break
}
}
}
if !found {
return nil, nil, fmt.Errorf("path not found")
}
}
files, err := g.GetFiles(fileId, account)
if err != nil {
return nil, nil, err
}
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), files, nil)
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, g.FormatFile(&file))
}
return nil, res, nil
}
func (g GoogleDrive) Link(path string, account *model.Account) (string, error) {
file, err := g.GetFile(utils.ParsePath(path), account)
if err != nil {
return "", err
}
link := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s?includeItemsFromAllDrives=true&supportsAllDrives=true", file.Id)
var e GoogleError
_, _ = googleClient.R().SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
Get(link)
if e.Error.Code != 0 {
if e.Error.Code == 401 {
err = g.RefreshToken(account)
if err != nil {
_ = model.SaveAccount(account)
return "", err
}
return g.Link(path, account)
}
return "", fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
return link + "&alt=media", nil
}
func (g GoogleDrive) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Add("Authorization", "Bearer "+account.AccessToken)
}
func (g GoogleDrive) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
var _ Driver = (*GoogleDrive)(nil)
func init() {
RegisterDriver("GoogleDrive", &GoogleDrive{})
googleClient.SetRetryCount(3)
}

View File

@ -0,0 +1,162 @@
package googledrive
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"path/filepath"
)
type GoogleDrive struct{}
var driverName = "GoogleDrive"
func (driver GoogleDrive) Items() []drivers.Item {
return []drivers.Item{
{
Name: "client_id",
Label: "client id",
Type: "string",
Required: true,
},
{
Name: "client_secret",
Label: "client secret",
Type: "string",
Required: true,
},
{
Name: "refresh_token",
Label: "refresh token",
Type: "string",
Required: true,
},
{
Name: "root_folder",
Label: "root folder file_id",
Type: "string",
Required: true,
},
}
}
func (driver GoogleDrive) Save(account *model.Account, old *model.Account) error {
account.Proxy = true
err := driver.RefreshToken(account)
if err != nil {
account.Status = err.Error()
_ = model.SaveAccount(account)
return err
}
account.Status = "work"
_ = model.SaveAccount(account)
return nil
}
func (driver GoogleDrive) File(path string, account *model.Account) (*model.File, error) {
path = utils.ParsePath(path)
if path == "/" {
return &model.File{
Id: account.RootFolder,
Name: account.Name,
Size: 0,
Type: conf.FOLDER,
Driver: driverName,
UpdatedAt: account.UpdatedAt,
}, nil
}
dir, name := filepath.Split(path)
files, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == name {
return &file, nil
}
}
return nil, drivers.PathNotFound
}
func (driver GoogleDrive) Files(path string, account *model.Account) ([]model.File, error) {
path = utils.ParsePath(path)
var rawFiles []GoogleFile
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
rawFiles, _ = cache.([]GoogleFile)
} else {
file, err := driver.File(path, account)
if err != nil {
return nil, err
}
rawFiles, err = driver.GetFiles(file.Id, account)
if err != nil {
return nil, err
}
if len(rawFiles) > 0 {
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), rawFiles, nil)
}
}
files := make([]model.File, 0)
for _, file := range rawFiles {
files = append(files, *driver.FormatFile(&file))
}
return files, nil
}
func (driver GoogleDrive) Link(path string, account *model.Account) (string, error) {
file, err := driver.File(path, account)
if err != nil {
return "", err
}
if file.Type == conf.FOLDER {
return "", drivers.NotFile
}
link := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s?includeItemsFromAllDrives=true&supportsAllDrives=true", file.Id)
var e GoogleError
_, _ = googleClient.R().SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
Get(link)
if e.Error.Code != 0 {
if e.Error.Code == 401 {
err = driver.RefreshToken(account)
if err != nil {
_ = model.SaveAccount(account)
return "", err
}
return driver.Link(path, account)
}
return "", fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
return link + "&alt=media", nil
}
func (driver GoogleDrive) Path(path string, account *model.Account) (*model.File, []model.File, error) {
path = utils.ParsePath(path)
log.Debugf("google path: %s", path)
file, err := driver.File(path, account)
if err != nil {
return nil, nil, err
}
if file.Type != conf.FOLDER {
//file.Url, _ = driver.Link(path, account)
return file, nil, nil
}
files, err := driver.Files(path, account)
if err != nil {
return nil, nil, err
}
return nil, files, nil
}
func (driver GoogleDrive) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Add("Authorization", "Bearer "+account.AccessToken)
}
func (driver GoogleDrive) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
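The refactored driver now exposes File/Files/Link/Path behind the shared drivers registry, so callers no longer talk to GoogleDrive directly. A minimal sketch of resolving a saved account and listing its root, assuming an account named "gd" of type "GoogleDrive" already exists and the config, database and cache have been initialized (the account name is illustrative):

package main

import (
	"fmt"

	"github.com/Xhofe/alist/drivers"
	_ "github.com/Xhofe/alist/drivers/googledrive" // registers the driver in init()
	"github.com/Xhofe/alist/model"
)

func main() {
	account, ok := model.GetAccount("gd") // "gd" is an illustrative account name
	if !ok {
		fmt.Println("account not found")
		return
	}
	driver, ok := drivers.GetDriver(account.Type)
	if !ok {
		fmt.Println("driver not registered")
		return
	}
	files, err := driver.Files("/", &account)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, f := range files {
		fmt.Println(f.Name, f.Size)
	}
}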

View File

@ -0,0 +1,160 @@
package googledrive
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/go-resty/resty/v2"
"path/filepath"
"strconv"
"time"
)
var googleClient = resty.New()
type GoogleTokenError struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
}
func (driver GoogleDrive) RefreshToken(account *model.Account) error {
url := "https://www.googleapis.com/oauth2/v4/token"
var resp drivers.TokenResp
var e GoogleTokenError
_, err := googleClient.R().SetResult(&resp).SetError(&e).
SetFormData(map[string]string{
"client_id": account.ClientId,
"client_secret": account.ClientSecret,
"refresh_token": account.RefreshToken,
"grant_type": "refresh_token",
}).Post(url)
if err != nil {
return err
}
if e.Error != "" {
return fmt.Errorf(e.Error)
}
account.AccessToken = resp.AccessToken
account.Status = "work"
return nil
}
type GoogleFile struct {
Id string `json:"id"`
Name string `json:"name"`
MimeType string `json:"mimeType"`
ModifiedTime *time.Time `json:"modifiedTime"`
Size string `json:"size"`
}
func (driver GoogleDrive) IsDir(mimeType string) bool {
return mimeType == "application/vnd.google-apps.folder" || mimeType == "application/vnd.google-apps.shortcut"
}
func (driver GoogleDrive) FormatFile(file *GoogleFile) *model.File {
f := &model.File{
Id: file.Id,
Name: file.Name,
Driver: driverName,
UpdatedAt: file.ModifiedTime,
Thumbnail: "",
Url: "",
}
if driver.IsDir(file.MimeType) {
f.Type = conf.FOLDER
} else {
size, _ := strconv.ParseInt(file.Size, 10, 64)
f.Size = size
f.Type = utils.GetFileType(filepath.Ext(file.Name))
}
return f
}
type GoogleFiles struct {
NextPageToken string `json:"nextPageToken"`
Files []GoogleFile `json:"files"`
}
type GoogleError struct {
Error struct {
Errors []struct {
Domain string `json:"domain"`
Reason string `json:"reason"`
Message string `json:"message"`
LocationType string `json:"location_type"`
Location string `json:"location"`
}
Code int `json:"code"`
Message string `json:"message"`
} `json:"error"`
}
func (driver GoogleDrive) GetFiles(id string, account *model.Account) ([]GoogleFile, error) {
pageToken := "first"
res := make([]GoogleFile, 0)
for pageToken != "" {
if pageToken == "first" {
pageToken = ""
}
var resp GoogleFiles
var e GoogleError
_, err := googleClient.R().SetResult(&resp).SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
SetQueryParams(map[string]string{
"orderBy": "folder,name,modifiedTime desc",
"fields": "files(id,name,mimeType,size,modifiedTime),nextPageToken",
"pageSize": "1000",
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
"includeItemsFromAllDrives": "true",
"supportsAllDrives": "true",
"pageToken": pageToken,
}).Get("https://www.googleapis.com/drive/v3/files")
if err != nil {
return nil, err
}
if e.Error.Code != 0 {
if e.Error.Code == 401 {
err = driver.RefreshToken(account)
if err != nil {
_ = model.SaveAccount(account)
return nil, err
}
return driver.GetFiles(id, account)
}
return nil, fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
pageToken = resp.NextPageToken
res = append(res, resp.Files...)
}
return res, nil
}
//func (driver GoogleDrive) GetFile(path string, account *model.Account) (*GoogleFile, error) {
// dir, name := filepath.Split(path)
// dir = utils.ParsePath(dir)
// _, _, err := driver.Path(dir, account)
// if err != nil {
// return nil, err
// }
// parentFiles_, _ := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, dir))
// parentFiles, _ := parentFiles_.([]GoogleFile)
// for _, file := range parentFiles {
// if file.Name == name {
// if !driver.IsDir(file.MimeType) {
// return &file, err
// } else {
// return nil, drivers.NotFile
// }
// }
// }
// return nil, drivers.PathNotFound
//}
var _ drivers.Driver = (*GoogleDrive)(nil)
func init() {
drivers.RegisterDriver(driverName, &GoogleDrive{})
googleClient.SetRetryCount(3)
}

View File

@ -1,119 +0,0 @@
package drivers
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
type Native struct {
}
func (n Native) Preview(path string, account *model.Account) (interface{}, error) {
return nil, fmt.Errorf("no need")
}
func (n Native) Items() []Item {
return []Item{
{
Name: "root_folder",
Label: "root folder path",
Type: "string",
Required: true,
},
}
}
func (n Native) Proxy(c *gin.Context, account *model.Account) {
// unnecessary
}
func (n Native) Save(account *model.Account, old *model.Account) error {
log.Debugf("save a account: [%s]", account.Name)
if !utils.Exists(account.RootFolder) {
account.Status = fmt.Sprintf("[%s] not exist", account.RootFolder)
_ = model.SaveAccount(account)
return fmt.Errorf("[%s] not exist", account.RootFolder)
}
account.Status = "work"
account.Proxy = true
err := model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
// TODO sort files
func (n Native) Path(path string, account *model.Account) (*model.File, []*model.File, error) {
fullPath := filepath.Join(account.RootFolder, path)
log.Debugf("%s-%s-%s", account.RootFolder, path, fullPath)
if !utils.Exists(fullPath) {
return nil, nil, fmt.Errorf("path not found")
}
if utils.IsDir(fullPath) {
result := make([]*model.File, 0)
files, err := ioutil.ReadDir(fullPath)
if err != nil {
return nil, nil, err
}
for _, f := range files {
if strings.HasPrefix(f.Name(), ".") {
continue
}
time := f.ModTime()
file := &model.File{
Name: f.Name(),
Size: f.Size(),
Type: 0,
UpdatedAt: &time,
Driver: "Native",
}
if f.IsDir() {
file.Type = conf.FOLDER
} else {
file.Type = utils.GetFileType(filepath.Ext(f.Name()))
}
result = append(result, file)
}
return nil, result, nil
}
f, err := os.Stat(fullPath)
if err != nil {
return nil, nil, err
}
time := f.ModTime()
file := &model.File{
Name: f.Name(),
Size: f.Size(),
Type: utils.GetFileType(filepath.Ext(f.Name())),
UpdatedAt: &time,
Driver: "Native",
}
return file, nil, nil
}
func (n Native) Link(path string, account *model.Account) (string, error) {
fullPath := filepath.Join(account.RootFolder, path)
s, err := os.Stat(fullPath)
if err != nil {
return "", err
}
if s.IsDir() {
return "", fmt.Errorf("can't down folder")
}
return fullPath, nil
}
var _ Driver = (*Native)(nil)
func init() {
RegisterDriver("Native", &Native{})
}

157
drivers/native/driver.go Normal file
View File

@ -0,0 +1,157 @@
package native
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
log "github.com/sirupsen/logrus"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
type Native struct{}
var driverName = "Native"
func (driver Native) Items() []drivers.Item {
return []drivers.Item{
{
Name: "root_folder",
Label: "root folder path",
Type: "string",
Required: true,
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,size,updated_at",
Required: false,
},
{
Name: "order_direction",
Label: "order_direction",
Type: "select",
Values: "ASC,DESC",
Required: false,
},
}
}
func (driver Native) Save(account *model.Account, old *model.Account) error {
log.Debugf("save a account: [%s]", account.Name)
if !utils.Exists(account.RootFolder) {
account.Status = fmt.Sprintf("[%s] not exist", account.RootFolder)
_ = model.SaveAccount(account)
return fmt.Errorf("[%s] not exist", account.RootFolder)
}
account.Status = "work"
account.Proxy = true
err := model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
func (driver Native) File(path string, account *model.Account) (*model.File, error) {
fullPath := filepath.Join(account.RootFolder, path)
if !utils.Exists(fullPath) {
return nil, drivers.PathNotFound
}
f, err := os.Stat(fullPath)
if err != nil {
return nil, err
}
time := f.ModTime()
file := &model.File{
Name: f.Name(),
Size: f.Size(),
UpdatedAt: &time,
Driver: driverName,
}
if f.IsDir() {
file.Type = conf.FOLDER
} else {
file.Type = utils.GetFileType(filepath.Ext(f.Name()))
}
return file, nil
}
func (driver Native) Files(path string, account *model.Account) ([]model.File, error) {
fullPath := filepath.Join(account.RootFolder, path)
if !utils.Exists(fullPath) {
return nil, drivers.PathNotFound
}
files := make([]model.File, 0)
rawFiles, err := ioutil.ReadDir(fullPath)
if err != nil {
return nil, err
}
for _, f := range rawFiles {
if strings.HasPrefix(f.Name(), ".") {
continue
}
time := f.ModTime()
file := model.File{
Name: f.Name(),
Size: f.Size(),
Type: 0,
UpdatedAt: &time,
Driver: driverName,
}
if f.IsDir() {
file.Type = conf.FOLDER
} else {
file.Type = utils.GetFileType(filepath.Ext(f.Name()))
}
files = append(files, file)
}
model.SortFiles(files, account)
return files, nil
}
func (driver Native) Link(path string, account *model.Account) (string, error) {
fullPath := filepath.Join(account.RootFolder, path)
s, err := os.Stat(fullPath)
if err != nil {
return "", err
}
if s.IsDir() {
return "", fmt.Errorf("can't down folder")
}
return fullPath, nil
}
func (driver Native) Path(path string, account *model.Account) (*model.File, []model.File, error) {
log.Debugf("native path: %s", path)
file, err := driver.File(path, account)
if err != nil {
return nil, nil, err
}
if file.Type != conf.FOLDER {
//file.Url, _ = driver.Link(path, account)
return file, nil, nil
}
files, err := driver.Files(path, account)
if err != nil {
return nil, nil, err
}
model.SortFiles(files, account)
return nil, files, nil
}
func (driver Native) Proxy(c *gin.Context, account *model.Account) {
// unnecessary
}
func (driver Native) Preview(path string, account *model.Account) (interface{}, error) {
return nil, fmt.Errorf("no need")
}
var _ drivers.Driver = (*Native)(nil)

9
drivers/native/native.go Normal file
View File

@ -0,0 +1,9 @@
package native
import (
"github.com/Xhofe/alist/drivers"
)
func init() {
drivers.RegisterDriver(driverName, &Native{})
}
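The Native driver is configured entirely through the account record it is saved with. A short sketch of adding one programmatically (the values are illustrative; normally this is done through the admin page, and the database must already be initialized):

package main

import (
	"github.com/Xhofe/alist/drivers"
	_ "github.com/Xhofe/alist/drivers/native" // registers the driver in init()
	"github.com/Xhofe/alist/model"
	log "github.com/sirupsen/logrus"
)

func main() {
	account := model.Account{
		Name:           "local",      // illustrative account name
		Type:           "Native",
		RootFolder:     "/srv/files", // must exist on disk, otherwise Save returns an error
		OrderBy:        "name",       // optional: name, size or updated_at
		OrderDirection: "ASC",        // optional: ASC or DESC
	}
	if driver, ok := drivers.GetDriver("Native"); ok {
		if err := driver.Save(&account, nil); err != nil {
			log.Errorf("failed to save account: %s", err.Error())
		}
	}
}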

View File

@ -1,319 +0,0 @@
package drivers
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/go-resty/resty/v2"
"github.com/robfig/cron/v3"
log "github.com/sirupsen/logrus"
"path/filepath"
"time"
)
type Onedrive struct{}
var oneClient = resty.New()
type OnedriveHost struct {
Oauth string
Api string
}
var onedriveHostMap = map[string]OnedriveHost{
"global": {
Oauth: "https://login.microsoftonline.com",
Api: "https://graph.microsoft.com",
},
"cn": {
Oauth: "https://login.chinacloudapi.cn",
Api: "https://microsoftgraph.chinacloudapi.cn",
},
"us": {
Oauth: "https://login.microsoftonline.us",
Api: "https://graph.microsoft.us",
},
"de": {
Oauth: "https://login.microsoftonline.de",
Api: "https://graph.microsoft.de",
},
}
func init() {
RegisterDriver("Onedrive", &Onedrive{})
oneClient.SetRetryCount(3)
}
func (o Onedrive) GetMetaUrl(account *model.Account, auth bool, path string) string {
path = filepath.Join(account.RootFolder, path)
log.Debugf(path)
host, _ := onedriveHostMap[account.Zone]
if auth {
return host.Oauth
}
switch account.OnedriveType {
case "onedrive":
{
if path == "/" || path == "\\" {
return fmt.Sprintf("%s/v1.0/me/drive/root", host.Api)
} else {
return fmt.Sprintf("%s/v1.0/me/drive/root:%s:", host.Api, path)
}
}
case "sharepoint":
{
if path == "/" {
return fmt.Sprintf("%s/v1.0/sites/%s/drive/root", host.Api, account.SiteId)
} else {
return fmt.Sprintf("%s/v1.0/sites/%s/drive/root:%s:", host.Api, account.SiteId, path)
}
}
default:
return ""
}
}
func (o Onedrive) Items() []Item {
return []Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "zone",
Label: "zone",
Type: "select",
Required: true,
Values: "global,cn,us,de",
Description: "",
},
{
Name: "onedrive_type",
Label: "onedrive type",
Type: "select",
Required: true,
Values: "onedrive,sharepoint",
},
{
Name: "client_id",
Label: "client id",
Type: "string",
Required: true,
},
{
Name: "client_secret",
Label: "client secret",
Type: "string",
Required: true,
},
{
Name: "redirect_uri",
Label: "redirect uri",
Type: "string",
Required: true,
},
{
Name: "refresh_token",
Label: "refresh token",
Type: "string",
Required: true,
},
{
Name: "site_id",
Label: "site id",
Type: "string",
Required: false,
},
{
Name: "root_folder",
Label: "root folder path",
Type: "string",
Required: false,
},
}
}
type OneTokenErr struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
}
func (o Onedrive) RefreshToken(account *model.Account) error {
url := o.GetMetaUrl(account, true, "") + "/common/oauth2/v2.0/token"
var resp TokenResp
var e OneTokenErr
_, err := oneClient.R().SetResult(&resp).SetError(&e).SetFormData(map[string]string{
"grant_type": "refresh_token",
"client_id": account.ClientId,
"client_secret": account.ClientSecret,
"redirect_uri": account.RedirectUri,
"refresh_token": account.RefreshToken,
}).Post(url)
if err != nil {
account.Status = err.Error()
return err
}
if e.Error != "" {
account.Status = e.ErrorDescription
return fmt.Errorf("%s", e.ErrorDescription)
} else {
account.Status = "work"
}
account.RefreshToken, account.AccessToken = resp.RefreshToken, resp.AccessToken
return nil
}
type OneFile struct {
Name string `json:"name"`
Size int64 `json:"size"`
LastModifiedDateTime *time.Time `json:"lastModifiedDateTime"`
Url string `json:"@microsoft.graph.downloadUrl"`
File struct {
MimeType string `json:"mimeType"`
} `json:"file"`
}
type OneFiles struct {
Value []OneFile `json:"value"`
}
type OneRespErr struct {
Error struct {
Code string `json:"code"`
Message string `json:"message"`
} `json:"error"`
}
func (o Onedrive) FormatFile(file *OneFile) *model.File {
f := &model.File{
Name: file.Name,
Size: file.Size,
UpdatedAt: file.LastModifiedDateTime,
Driver: "OneDrive",
Url: file.Url,
}
if file.File.MimeType == "" {
f.Type = conf.FOLDER
} else {
f.Type = utils.GetFileType(filepath.Ext(file.Name))
}
return f
}
func (o Onedrive) GetFiles(account *model.Account, path string) ([]OneFile, error) {
var files OneFiles
var e OneRespErr
_, err := oneClient.R().SetResult(&files).SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
Get(o.GetMetaUrl(account, false, path) + "/children")
if err != nil {
return nil, err
}
if e.Error.Code != "" {
return nil, fmt.Errorf("%s", e.Error.Message)
}
return files.Value, nil
}
func (o Onedrive) GetFile(account *model.Account, path string) (*OneFile, error) {
var file OneFile
var e OneRespErr
_, err := oneClient.R().SetResult(&file).SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
Get(o.GetMetaUrl(account, false, path))
if err != nil {
return nil, err
}
if e.Error.Code != "" {
return nil, fmt.Errorf("%s", e.Error.Message)
}
return &file, nil
}
func (o Onedrive) Path(path string, account *model.Account) (*model.File, []*model.File, error) {
path = utils.ParsePath(path)
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
files, _ := cache.([]*model.File)
return nil, files, nil
}
file, err := o.GetFile(account, path)
if err != nil {
return nil, nil, err
}
if file.File.MimeType != "" {
return o.FormatFile(file), nil, nil
} else {
files, err := o.GetFiles(account, path)
if err != nil {
return nil, nil, err
}
res := make([]*model.File, 0)
for _, file := range files {
res = append(res, o.FormatFile(&file))
}
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), res, nil)
return nil, res, nil
}
}
func (o Onedrive) Link(path string, account *model.Account) (string, error) {
file, err := o.GetFile(account, path)
if err != nil {
return "", err
}
if file.File.MimeType == "" {
return "", fmt.Errorf("can't down folder")
}
return file.Url, nil
}
func (o Onedrive) Save(account *model.Account, old *model.Account) error {
_, ok := onedriveHostMap[account.Zone]
if !ok {
return fmt.Errorf("no [%s] zone", account.Zone)
}
if old != nil {
conf.Cron.Remove(cron.EntryID(old.CronId))
}
account.RootFolder = utils.ParsePath(account.RootFolder)
err := o.RefreshToken(account)
if err != nil {
return err
}
cronId, err := conf.Cron.AddFunc("@every 1h", func() {
name := account.Name
log.Debugf("onedrive account name: %s", name)
newAccount, ok := model.GetAccount(name)
log.Debugf("onedrive account: %+v", newAccount)
if !ok {
return
}
err = o.RefreshToken(&newAccount)
_ = model.SaveAccount(&newAccount)
})
if err != nil {
return err
}
account.CronId = int(cronId)
err = model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
func (o Onedrive) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Del("Origin")
}
func (o Onedrive) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}
var _ Driver = (*Onedrive)(nil)

209
drivers/onedrive/driver.go Normal file
View File

@ -0,0 +1,209 @@
package onedrive
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/gin-gonic/gin"
"github.com/robfig/cron/v3"
log "github.com/sirupsen/logrus"
"path/filepath"
)
type Onedrive struct{}
var driverName = "Onedrive"
func (driver Onedrive) Items() []drivers.Item {
return []drivers.Item{
{
Name: "proxy",
Label: "proxy",
Type: "bool",
Required: true,
Description: "allow proxy",
},
{
Name: "zone",
Label: "zone",
Type: "select",
Required: true,
Values: "global,cn,us,de",
Description: "",
},
{
Name: "onedrive_type",
Label: "onedrive type",
Type: "select",
Required: true,
Values: "onedrive,sharepoint",
},
{
Name: "client_id",
Label: "client id",
Type: "string",
Required: true,
},
{
Name: "client_secret",
Label: "client secret",
Type: "string",
Required: true,
},
{
Name: "redirect_uri",
Label: "redirect uri",
Type: "string",
Required: true,
},
{
Name: "refresh_token",
Label: "refresh token",
Type: "string",
Required: true,
},
{
Name: "site_id",
Label: "site id",
Type: "string",
Required: false,
},
{
Name: "root_folder",
Label: "root folder path",
Type: "string",
Required: false,
},
{
Name: "order_by",
Label: "order_by",
Type: "select",
Values: "name,size,lastModifiedDateTime",
Required: false,
},
{
Name: "order_direction",
Label: "order_direction",
Type: "select",
Values: "asc,desc",
Required: false,
},
}
}
func (driver Onedrive) Save(account *model.Account, old *model.Account) error {
_, ok := onedriveHostMap[account.Zone]
if !ok {
return fmt.Errorf("no [%s] zone", account.Zone)
}
if old != nil {
conf.Cron.Remove(cron.EntryID(old.CronId))
}
account.RootFolder = utils.ParsePath(account.RootFolder)
err := driver.RefreshToken(account)
if err != nil {
return err
}
cronId, err := conf.Cron.AddFunc("@every 1h", func() {
name := account.Name
log.Debugf("onedrive account name: %s", name)
newAccount, ok := model.GetAccount(name)
log.Debugf("onedrive account: %+v", newAccount)
if !ok {
return
}
err = driver.RefreshToken(&newAccount)
_ = model.SaveAccount(&newAccount)
})
if err != nil {
return err
}
account.CronId = int(cronId)
err = model.SaveAccount(account)
if err != nil {
return err
}
return nil
}
func (driver Onedrive) File(path string, account *model.Account) (*model.File, error) {
path = utils.ParsePath(path)
if path == "/" {
return &model.File{
Id: account.RootFolder,
Name: account.Name,
Size: 0,
Type: conf.FOLDER,
Driver: driverName,
UpdatedAt: account.UpdatedAt,
}, nil
}
dir, name := filepath.Split(path)
files, err := driver.Files(dir, account)
if err != nil {
return nil, err
}
for _, file := range files {
if file.Name == name {
return &file, nil
}
}
return nil, drivers.PathNotFound
}
func (driver Onedrive) Files(path string, account *model.Account) ([]model.File, error) {
path = utils.ParsePath(path)
cache, err := conf.Cache.Get(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path))
if err == nil {
files, _ := cache.([]model.File)
return files, nil
}
rawFiles, err := driver.GetFiles(account, path)
if err != nil {
return nil, err
}
files := make([]model.File, 0)
for _, file := range rawFiles {
files = append(files, *driver.FormatFile(&file))
}
_ = conf.Cache.Set(conf.Ctx, fmt.Sprintf("%s%s", account.Name, path), files, nil)
return files, nil
}
func (driver Onedrive) Link(path string, account *model.Account) (string, error) {
file, err := driver.GetFile(account, path)
if err != nil {
return "", err
}
if file.File.MimeType == "" {
return "", fmt.Errorf("can't down folder")
}
return file.Url, nil
}
func (driver Onedrive) Path(path string, account *model.Account) (*model.File, []model.File, error) {
log.Debugf("onedrive path: %s", path)
file, err := driver.File(path, account)
if err != nil {
return nil, nil, err
}
if file.Type != conf.FOLDER {
//file.Url, _ = driver.Link(path, account)
return file, nil, nil
}
files, err := driver.Files(path, account)
if err != nil {
return nil, nil, err
}
return nil, files, nil
}
func (driver Onedrive) Proxy(c *gin.Context, account *model.Account) {
c.Request.Header.Del("Origin")
}
func (driver Onedrive) Preview(path string, account *model.Account) (interface{}, error) {
return nil, nil
}

View File

@ -0,0 +1,185 @@
package onedrive
import (
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
"path/filepath"
"time"
)
var oneClient = resty.New()
type Host struct {
Oauth string
Api string
}
var onedriveHostMap = map[string]Host{
"global": {
Oauth: "https://login.microsoftonline.com",
Api: "https://graph.microsoft.com",
},
"cn": {
Oauth: "https://login.chinacloudapi.cn",
Api: "https://microsoftgraph.chinacloudapi.cn",
},
"us": {
Oauth: "https://login.microsoftonline.us",
Api: "https://graph.microsoft.us",
},
"de": {
Oauth: "https://login.microsoftonline.de",
Api: "https://graph.microsoft.de",
},
}
func (driver Onedrive) GetMetaUrl(account *model.Account, auth bool, path string) string {
path = filepath.Join(account.RootFolder, path)
log.Debugf(path)
host, _ := onedriveHostMap[account.Zone]
if auth {
return host.Oauth
}
switch account.OnedriveType {
case "onedrive":
{
if path == "/" || path == "\\" {
return fmt.Sprintf("%s/v1.0/me/drive/root", host.Api)
} else {
return fmt.Sprintf("%s/v1.0/me/drive/root:%s:", host.Api, path)
}
}
case "sharepoint":
{
if path == "/" {
return fmt.Sprintf("%s/v1.0/sites/%s/drive/root", host.Api, account.SiteId)
} else {
return fmt.Sprintf("%s/v1.0/sites/%s/drive/root:%s:", host.Api, account.SiteId, path)
}
}
default:
return ""
}
}
type OneTokenErr struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
}
func (driver Onedrive) RefreshToken(account *model.Account) error {
url := driver.GetMetaUrl(account, true, "") + "/common/oauth2/v2.0/token"
var resp drivers.TokenResp
var e OneTokenErr
_, err := oneClient.R().SetResult(&resp).SetError(&e).SetFormData(map[string]string{
"grant_type": "refresh_token",
"client_id": account.ClientId,
"client_secret": account.ClientSecret,
"redirect_uri": account.RedirectUri,
"refresh_token": account.RefreshToken,
}).Post(url)
if err != nil {
account.Status = err.Error()
return err
}
if e.Error != "" {
account.Status = e.ErrorDescription
return fmt.Errorf("%s", e.ErrorDescription)
} else {
account.Status = "work"
}
account.RefreshToken, account.AccessToken = resp.RefreshToken, resp.AccessToken
return nil
}
type OneFile struct {
Name string `json:"name"`
Size int64 `json:"size"`
LastModifiedDateTime *time.Time `json:"lastModifiedDateTime"`
Url string `json:"@microsoft.graph.downloadUrl"`
File struct {
MimeType string `json:"mimeType"`
} `json:"file"`
}
type OneFiles struct {
Value []OneFile `json:"value"`
NextLink string `json:"@odata.nextLink"`
}
type OneRespErr struct {
Error struct {
Code string `json:"code"`
Message string `json:"message"`
} `json:"error"`
}
func (driver Onedrive) FormatFile(file *OneFile) *model.File {
f := &model.File{
Name: file.Name,
Size: file.Size,
UpdatedAt: file.LastModifiedDateTime,
Driver: driverName,
Url: file.Url,
}
if file.File.MimeType == "" {
f.Type = conf.FOLDER
} else {
f.Type = utils.GetFileType(filepath.Ext(file.Name))
}
return f
}
func (driver Onedrive) GetFiles(account *model.Account, path string) ([]OneFile, error) {
var res []OneFile
nextLink := driver.GetMetaUrl(account, false, path) + "/children"
if account.OrderBy != "" {
nextLink += fmt.Sprintf("?orderby=%s", account.OrderBy)
if account.OrderDirection != "" {
nextLink += fmt.Sprintf(" %s", account.OrderDirection)
}
}
for nextLink != "" {
var files OneFiles
var e OneRespErr
_, err := oneClient.R().SetResult(&files).SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
Get(nextLink)
if err != nil {
return nil, err
}
if e.Error.Code != "" {
return nil, fmt.Errorf("%s", e.Error.Message)
}
res = append(res, files.Value...)
nextLink = files.NextLink
}
return res, nil
}
func (driver Onedrive) GetFile(account *model.Account, path string) (*OneFile, error) {
var file OneFile
var e OneRespErr
_, err := oneClient.R().SetResult(&file).SetError(&e).
SetHeader("Authorization", "Bearer "+account.AccessToken).
Get(driver.GetMetaUrl(account, false, path))
if err != nil {
return nil, err
}
if e.Error.Code != "" {
return nil, fmt.Errorf("%s", e.Error.Message)
}
return &file, nil
}
var _ drivers.Driver = (*Onedrive)(nil)
func init() {
drivers.RegisterDriver(driverName, &Onedrive{})
oneClient.SetRetryCount(3)
}
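GetMetaUrl resolves the Graph host from the zone and builds a path-addressed URL rooted at root_folder. A small sketch of the URLs it produces (the account values are illustrative):

package main

import (
	"fmt"

	"github.com/Xhofe/alist/drivers/onedrive"
	"github.com/Xhofe/alist/model"
)

func main() {
	driver := onedrive.Onedrive{}
	account := &model.Account{
		Zone:         "global",   // one of global, cn, us, de
		OnedriveType: "onedrive", // or "sharepoint" together with SiteId
		RootFolder:   "/media",
	}
	// prints https://graph.microsoft.com/v1.0/me/drive/root:/media/movies:
	fmt.Println(driver.GetMetaUrl(account, false, "/movies"))
	// with auth == true only the zone's OAuth host is returned (token endpoint base)
	fmt.Println(driver.GetMetaUrl(account, true, ""))
}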

View File

@ -99,14 +99,14 @@ func GetAccountById(id uint) (*Account, error) {
return &account, nil
}
func GetAccountFiles() ([]*File, error) {
files := make([]*File, 0)
func GetAccountFiles() ([]File, error) {
files := make([]File, 0)
var accounts []Account
if err := conf.DB.Order("`index`").Find(&accounts).Error; err != nil {
return nil, err
}
for _, v := range accounts {
files = append(files, &File{
files = append(files, File{
Name: v.Name,
Size: 0,
Type: conf.FOLDER,

View File

@ -1,8 +1,14 @@
package model
import "time"
import (
"github.com/Xhofe/alist/conf"
"sort"
"strings"
"time"
)
type File struct {
Id string `json:"-"`
Name string `json:"name"`
Size int64 `json:"size"`
Type int `json:"type"`
@ -10,4 +16,51 @@ type File struct {
UpdatedAt *time.Time `json:"updated_at"`
Thumbnail string `json:"thumbnail"`
Url string `json:"url"`
}
func SortFiles(files []File, account *Account) {
if account.OrderBy == "" {
return
}
sort.Slice(files, func(i, j int) bool {
switch account.OrderBy {
case "name":
{
c := strings.Compare(files[i].Name, files[j].Name)
if account.OrderDirection == "DESC" {
return c >= 0
}
return c <= 0
}
case "size":
{
if account.OrderDirection == "DESC" {
return files[i].Size >= files[j].Size
}
return files[i].Size <= files[j].Size
}
case "updated_at":
if account.OrderDirection == "DESC" {
return files[i].UpdatedAt.After(*files[j].UpdatedAt)
}
return files[i].UpdatedAt.Before(*files[j].UpdatedAt)
}
return false
})
}
func (f File) GetSize() uint64 {
return uint64(f.Size)
}
func (f File) GetName() string {
return f.Name
}
func (f File) ModTime() time.Time {
return *f.UpdatedAt
}
func (f File) IsDir() bool {
return f.Type == conf.FOLDER
}
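SortFiles only reorders when order_by is set on the account; otherwise the driver's own order is kept. A tiny sketch of the expected behavior (the values are illustrative):

package main

import (
	"fmt"

	"github.com/Xhofe/alist/model"
)

func main() {
	files := []model.File{
		{Name: "b.txt", Size: 10},
		{Name: "a.txt", Size: 20},
	}
	account := &model.Account{OrderBy: "size", OrderDirection: "DESC"}
	model.SortFiles(files, account)
	fmt.Println(files[0].Name) // a.txt: the larger file comes first with DESC
}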

View File

@ -85,4 +85,13 @@ func LoadSettings() {
//conf.CustomizeStyle = customizeScript.Value
conf.IndexHtml = strings.Replace(conf.IndexHtml, "// customize-js", customizeScript.Value, 1)
}
davUsername, err := GetSettingByKey("WebDAV username")
if err == nil {
conf.DavUsername = davUsername.Value
}
davPassword, err := GetSettingByKey("WebDAV password")
if err == nil {
conf.DavPassword = davPassword.Value
}
}

View File

@ -49,7 +49,7 @@ func CheckParent(path string, password string) bool {
}
return true
} else {
if path == "/" {
if path == "/" || path == "\\" {
return true
}
return CheckParent(filepath.Dir(path), password)
@ -72,7 +72,7 @@ func CheckDownLink(path string, passwordMd5 string) bool {
if !conf.CheckParent {
return true
}
if path == "/" {
if path == "/" || path == "\\" {
return true
}
return CheckDownLink(filepath.Dir(path), passwordMd5)

View File

@ -31,8 +31,7 @@ func Path(c *gin.Context) {
return
}
// TODO hide or ignore?
}
if conf.CheckParent {
} else if conf.CheckParent {
if !CheckParent(filepath.Dir(req.Path), req.Password) {
ErrorResp(c, fmt.Errorf("wrong password"), 401)
return
@ -72,7 +71,7 @@ func Path(c *gin.Context) {
})
} else {
if meta != nil && meta.Hide != "" {
tmpFiles := make([]*model.File, 0)
tmpFiles := make([]model.File, 0)
hideFiles := strings.Split(meta.Hide, ",")
for _, item := range files {
if !utils.IsContain(hideFiles, item.Name) {

View File

@ -40,6 +40,7 @@ func InitApiRouter(r *gin.Engine) {
admin.DELETE("/meta", DeleteMeta)
}
Static(r)
WebDav(r)
}
func Cors(r *gin.Engine) {

View File

@ -32,5 +32,6 @@ func Static(r *gin.Engine) {
c.Header("Content-Type", "text/html")
_, _ = c.Writer.WriteString(conf.IndexHtml)
c.Writer.Flush()
c.Writer.WriteHeaderNow()
})
}

60
server/webdav.go Normal file
View File

@ -0,0 +1,60 @@
package server
import (
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/server/webdav"
"github.com/gin-gonic/gin"
"net/http"
)
var handler *webdav.Handler
func init() {
handler = &webdav.Handler{
Prefix: "/dav",
LockSystem: webdav.NewMemLS(),
}
}
func WebDav(r *gin.Engine) {
dav := r.Group("/dav")
dav.Use(WebDAVAuth)
dav.Any("/*path", ServeWebDAV)
dav.Any("", ServeWebDAV)
dav.Handle("PROPFIND", "/*path", ServeWebDAV)
dav.Handle("PROPFIND", "", ServeWebDAV)
dav.Handle("MKCOL", "/*path", ServeWebDAV)
dav.Handle("LOCK", "/*path", ServeWebDAV)
dav.Handle("UNLOCK", "/*path", ServeWebDAV)
dav.Handle("PROPPATCH", "/*path", ServeWebDAV)
dav.Handle("COPY", "/*path", ServeWebDAV)
dav.Handle("MOVE", "/*path", ServeWebDAV)
}
func ServeWebDAV(c *gin.Context) {
fs := webdav.FileSystem{}
handler.ServeHTTP(c.Writer, c.Request, &fs)
}
func WebDAVAuth(c *gin.Context) {
if c.Request.Method == "OPTIONS" {
c.Next()
return
}
username, password, ok := c.Request.BasicAuth()
if !ok {
c.Writer.Header()["WWW-Authenticate"] = []string{`Basic realm="alist"`}
c.Status(http.StatusUnauthorized)
c.Abort()
return
}
if conf.DavUsername != "" && conf.DavUsername != username {
c.Status(http.StatusUnauthorized)
c.Abort()
return
}
if conf.DavPassword != "" && conf.DavPassword != password {
c.Status(http.StatusUnauthorized)
c.Abort()
return
}
c.Next()
}
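With the /dav group mounted, any WebDAV client can browse the mounted accounts (view only for now) over HTTP Basic auth. A hedged client-side sketch using only net/http, assuming the server listens on localhost:5244 and the placeholder credentials match the configured WebDAV username/password settings:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Depth: 1 asks for the collection itself plus its immediate children.
	body := strings.NewReader(`<?xml version="1.0"?><propfind xmlns="DAV:"><allprop/></propfind>`)
	req, err := http.NewRequest("PROPFIND", "http://localhost:5244/dav/", body)
	if err != nil {
		fmt.Println(err)
		return
	}
	req.SetBasicAuth("alist", "alist") // placeholders: use the configured WebDAV settings
	req.Header.Set("Depth", "1")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	data, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(data))
}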

195
server/webdav/file.go Normal file
View File

@ -0,0 +1,195 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"context"
"fmt"
"github.com/Xhofe/alist/conf"
"github.com/Xhofe/alist/drivers"
"github.com/Xhofe/alist/model"
"github.com/Xhofe/alist/utils"
log "github.com/sirupsen/logrus"
"net/http"
"path"
"path/filepath"
"strings"
"time"
)
type FileSystem struct{}
func ParsePath(rawPath string) (*model.Account, string, drivers.Driver, error) {
var path, name string
switch model.AccountsCount() {
case 0:
return nil, "", nil, fmt.Errorf("no accounts,please add one first")
case 1:
path = rawPath
break
default:
paths := strings.Split(rawPath, "/")
path = "/" + strings.Join(paths[2:], "/")
name = paths[1]
}
account, ok := model.GetAccount(name)
if !ok {
return nil, "", nil, fmt.Errorf("no [%s] account", name)
}
driver, ok := drivers.GetDriver(account.Type)
if !ok {
return nil, "", nil, fmt.Errorf("no [%s] driver", account.Type)
}
return &account, path, driver, nil
}
func (fs *FileSystem) File(rawPath string) (*model.File, error) {
rawPath = utils.ParsePath(rawPath)
if model.AccountsCount() > 1 && rawPath == "/" {
now := time.Now()
return &model.File{
Name: "root",
Size: 0,
Type: conf.FOLDER,
Driver: "root",
UpdatedAt: &now,
}, nil
}
account, path_, driver, err := ParsePath(rawPath)
if err != nil {
return nil, err
}
return driver.File(path_, account)
}
func (fs *FileSystem) Files(rawPath string) ([]model.File, error) {
rawPath = utils.ParsePath(rawPath)
if model.AccountsCount() > 1 && rawPath == "/" {
files, err := model.GetAccountFiles()
if err != nil {
return nil, err
}
return files, nil
}
account, path_, driver, err := ParsePath(rawPath)
if err != nil {
return nil, err
}
return driver.Files(path_, account)
}
func GetPW(path string) string {
if !conf.CheckDown {
return ""
}
meta, err := model.GetMetaByPath(path)
if err == nil {
if meta.Password != "" {
return utils.Get16MD5Encode(meta.Password)
}
return ""
} else {
if !conf.CheckParent {
return ""
}
if path == "/" || path == "\\" {
return ""
}
return GetPW(filepath.Dir(path))
}
}
func (fs *FileSystem) Link(host, rawPath string) (string, error) {
rawPath = utils.ParsePath(rawPath)
if model.AccountsCount() > 1 && rawPath == "/" {
return "", fmt.Errorf("can't link the root path when there are multiple accounts")
}
account, path_, driver, err := ParsePath(rawPath)
if err != nil {
return "", err
}
if account.Type == "Native" || account.Type == "GoogleDrive" {
link := fmt.Sprintf("//%s/p%s", host, rawPath)
if conf.CheckDown {
pw := GetPW(filepath.Dir(rawPath))
link += "?pw" + pw
}
log.Debugf("proxy link: %s", link)
return link, nil
}
return driver.Link(path_, account)
}
func (fs *FileSystem) CreateDirectory(ctx context.Context, reqPath string) (interface{}, error) {
return nil, nil
}
// slashClean is equivalent to but slightly more efficient than
// path.Clean("/" + name).
func slashClean(name string) string {
if name == "" || name[0] != '/' {
name = "/" + name
}
return path.Clean(name)
}
// moveFiles moves files and/or directories from src to dst.
//
// See section 9.9.4 for when various HTTP status codes apply.
func moveFiles(ctx context.Context, fs *FileSystem, src FileInfo, dst string, overwrite bool) (status int, err error) {
return http.StatusNoContent, nil
}
// copyFiles copies files and/or directories from src to dst.
//
// See section 9.8.5 for when various HTTP status codes apply.
func copyFiles(ctx context.Context, fs *FileSystem, src FileInfo, dst string, overwrite bool, depth int, recursion int) (status int, err error) {
return http.StatusNoContent, nil
}
// walkFS traverses filesystem fs starting at name up to depth levels.
//
// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node,
// walkFS calls walkFn. If a visited file system node is a directory and
// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node.
func walkFS(
ctx context.Context,
fs *FileSystem,
depth int,
name string,
info FileInfo,
walkFn func(reqPath string, info FileInfo, err error) error) error {
// This implementation is based on Walk's code in the standard path/filepath package.
err := walkFn(name, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() || depth == 0 {
return nil
}
if depth == 1 {
depth = 0
}
files, err := fs.Files(name)
if err != nil {
return err
}
for _, fileInfo := range files {
filename := path.Join(name, fileInfo.Name)
err = walkFS(ctx, fs, depth, filename, &fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
return nil
}
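When more than one account is mounted, ParsePath treats the first segment of the WebDAV path as the account name and the rest as the in-account path. A short sketch (the account name is illustrative and accounts must already be configured):

package main

import (
	"fmt"

	"github.com/Xhofe/alist/server/webdav"
)

func main() {
	// "/gd/photos/2021" -> the account named "gd" and the in-account path "/photos/2021"
	account, path, driver, err := webdav.ParsePath("/gd/photos/2021")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(account.Name, path, driver != nil)
}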

173
server/webdav/if.go Normal file
View File

@ -0,0 +1,173 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The If header is covered by Section 10.4.
// http://www.webdav.org/specs/rfc4918.html#HEADER_If
import (
"strings"
)
// ifHeader is a disjunction (OR) of ifLists.
type ifHeader struct {
lists []ifList
}
// ifList is a conjunction (AND) of Conditions, and an optional resource tag.
type ifList struct {
resourceTag string
conditions []Condition
}
// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string
// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is
// returned by req.Header.Get("If") for a http.Request req.
func parseIfHeader(httpHeader string) (h ifHeader, ok bool) {
s := strings.TrimSpace(httpHeader)
switch tokenType, _, _ := lex(s); tokenType {
case '(':
return parseNoTagLists(s)
case angleTokenType:
return parseTaggedLists(s)
default:
return ifHeader{}, false
}
}
func parseNoTagLists(s string) (h ifHeader, ok bool) {
for {
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
}
}
func parseTaggedLists(s string) (h ifHeader, ok bool) {
resourceTag, n := "", 0
for first := true; ; first = false {
tokenType, tokenStr, remaining := lex(s)
switch tokenType {
case angleTokenType:
if !first && n == 0 {
return ifHeader{}, false
}
resourceTag, n = tokenStr, 0
s = remaining
case '(':
n++
l, remaining, ok := parseList(s)
if !ok {
return ifHeader{}, false
}
l.resourceTag = resourceTag
h.lists = append(h.lists, l)
if remaining == "" {
return h, true
}
s = remaining
default:
return ifHeader{}, false
}
}
}
func parseList(s string) (l ifList, remaining string, ok bool) {
tokenType, _, s := lex(s)
if tokenType != '(' {
return ifList{}, "", false
}
for {
tokenType, _, remaining = lex(s)
if tokenType == ')' {
if len(l.conditions) == 0 {
return ifList{}, "", false
}
return l, remaining, true
}
c, remaining, ok := parseCondition(s)
if !ok {
return ifList{}, "", false
}
l.conditions = append(l.conditions, c)
s = remaining
}
}
func parseCondition(s string) (c Condition, remaining string, ok bool) {
tokenType, tokenStr, s := lex(s)
if tokenType == notTokenType {
c.Not = true
tokenType, tokenStr, s = lex(s)
}
switch tokenType {
case strTokenType, angleTokenType:
c.Token = tokenStr
case squareTokenType:
c.ETag = tokenStr
default:
return Condition{}, "", false
}
return c, s, true
}
// Single-rune tokens like '(' or ')' have a token type equal to their rune.
// All other tokens have a negative token type.
const (
errTokenType = rune(-1)
eofTokenType = rune(-2)
strTokenType = rune(-3)
notTokenType = rune(-4)
angleTokenType = rune(-5)
squareTokenType = rune(-6)
)
func lex(s string) (tokenType rune, tokenStr string, remaining string) {
// The net/textproto Reader that parses the HTTP header will collapse
// Linear White Space that spans multiple "\r\n" lines to a single " ",
// so we don't need to look for '\r' or '\n'.
for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') {
s = s[1:]
}
if len(s) == 0 {
return eofTokenType, "", ""
}
i := 0
loop:
for ; i < len(s); i++ {
switch s[i] {
case '\t', ' ', '(', ')', '<', '>', '[', ']':
break loop
}
}
if i != 0 {
tokenStr, remaining = s[:i], s[i:]
if tokenStr == "Not" {
return notTokenType, "", remaining
}
return strTokenType, tokenStr, remaining
}
j := 0
switch s[0] {
case '<':
j, tokenType = strings.IndexByte(s, '>'), angleTokenType
case '[':
j, tokenType = strings.IndexByte(s, ']'), squareTokenType
default:
return rune(s[0]), "", s[1:]
}
if j < 0 {
return errTokenType, "", ""
}
return tokenType, s[1:j], s[j+1:]
}

View File

@ -0,0 +1,11 @@
This is a fork of the encoding/xml package at ca1d6c4, the last commit before
https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name
space behavior" made late in the lead-up to the Go 1.5 release.
The list of encoding/xml changes is at
https://go.googlesource.com/go/+log/master/src/encoding/xml
This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is
released.
See http://golang.org/issue/11841

File diff suppressed because it is too large

View File

@ -0,0 +1,692 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"bytes"
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
)
// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See package json for a textual representation more suitable
// to data structures.
// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
//
// Unmarshal maps an XML element to a struct using the following rules.
// In the rules, the tag of a field refers to the value associated with the
// key 'xml' in the struct field's tag (see the example above).
//
// * If the struct has a field of type []byte or string with tag
// ",innerxml", Unmarshal accumulates the raw XML nested inside the
// element in that field. The rest of the rules still apply.
//
// * If the struct has a field named XMLName of type xml.Name,
// Unmarshal records the element name in that field.
//
// * If the XMLName field has an associated tag of the form
// "name" or "namespace-URL name", the XML element must have
// the given name (and, optionally, name space) or else Unmarshal
// returns an error.
//
// * If the XML element has an attribute whose name matches a
// struct field name with an associated tag containing ",attr" or
// the explicit name in a struct field tag of the form "name,attr",
// Unmarshal records the attribute value in that field.
//
// * If the XML element contains character data, that data is
// accumulated in the first struct field that has tag ",chardata".
// The struct field may have type []byte or string.
// If there is no such field, the character data is discarded.
//
// * If the XML element contains comments, they are accumulated in
// the first struct field that has tag ",comment". The struct
// field may have type []byte or string. If there is no such
// field, the comments are discarded.
//
// * If the XML element contains a sub-element whose name matches
// the prefix of a tag formatted as "a" or "a>b>c", unmarshal
// will descend into the XML structure looking for elements with the
// given names, and will map the innermost elements to that struct
// field. A tag starting with ">" is equivalent to one starting
// with the field name followed by ">".
//
// * If the XML element contains a sub-element whose name matches
// a struct field's XMLName tag and the struct field has no
// explicit name tag as per the previous rule, unmarshal maps
// the sub-element to that struct field.
//
// * If the XML element contains a sub-element whose name matches a
// field without any mode flags (",attr", ",chardata", etc), Unmarshal
// maps the sub-element to that struct field.
//
// * If the XML element contains a sub-element that hasn't matched any
// of the above rules and the struct has a field with tag ",any",
// unmarshal maps the sub-element to that struct field.
//
// * An anonymous struct field is handled as if the fields of its
// value were part of the outer struct.
//
// * A struct field with tag "-" is never unmarshalled into.
//
// Unmarshal maps an XML element to a string or []byte by saving the
// concatenation of that element's character data in the string or
// []byte. The saved []byte is never nil.
//
// Unmarshal maps an attribute value to a string or []byte by saving
// the value in the string or slice.
//
// Unmarshal maps an XML element to a slice by extending the length of
// the slice and mapping the element to the newly created value.
//
// Unmarshal maps an XML element or attribute value to a bool by
// setting it to the boolean value represented by the string.
//
// Unmarshal maps an XML element or attribute value to an integer or
// floating-point field by setting the field to the result of
// interpreting the string value in decimal. There is no check for
// overflow.
//
// Unmarshal maps an XML element to an xml.Name by recording the
// element name.
//
// Unmarshal maps an XML element to a pointer by setting the pointer
// to a freshly allocated value and then mapping the element to that value.
//
func Unmarshal(data []byte, v interface{}) error {
return NewDecoder(bytes.NewReader(data)).Decode(v)
}
// Decode works like xml.Unmarshal, except it reads the decoder
// stream to find the start element.
func (d *Decoder) Decode(v interface{}) error {
return d.DecodeElement(v, nil)
}
// DecodeElement works like xml.Unmarshal except that it takes
// a pointer to the start XML element to decode into v.
// It is useful when a client reads some raw XML tokens itself
// but also wants to defer to Unmarshal for some elements.
func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error {
val := reflect.ValueOf(v)
if val.Kind() != reflect.Ptr {
return errors.New("non-pointer passed to Unmarshal")
}
return d.unmarshal(val.Elem(), start)
}
// An UnmarshalError represents an error in the unmarshalling process.
type UnmarshalError string
func (e UnmarshalError) Error() string { return string(e) }
// Unmarshaler is the interface implemented by objects that can unmarshal
// an XML element description of themselves.
//
// UnmarshalXML decodes a single XML element
// beginning with the given start element.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXML must consume exactly one XML element.
// One common implementation strategy is to unmarshal into
// a separate value with a layout matching the expected XML
// using d.DecodeElement, and then to copy the data from
// that value into the receiver.
// Another common strategy is to use d.Token to process the
// XML object one token at a time.
// UnmarshalXML may not use d.RawToken.
type Unmarshaler interface {
UnmarshalXML(d *Decoder, start StartElement) error
}
// UnmarshalerAttr is the interface implemented by objects that can unmarshal
// an XML attribute description of themselves.
//
// UnmarshalXMLAttr decodes a single XML attribute.
// If it returns an error, the outer call to Unmarshal stops and
// returns that error.
// UnmarshalXMLAttr is used only for struct fields with the
// "attr" option in the field tag.
type UnmarshalerAttr interface {
UnmarshalXMLAttr(attr Attr) error
}
// receiverType returns the receiver type to use in an expression like "%s.MethodName".
func receiverType(val interface{}) string {
t := reflect.TypeOf(val)
if t.Name() != "" {
return t.String()
}
return "(" + t.String() + ")"
}
// unmarshalInterface unmarshals a single XML element into val.
// start is the opening tag of the element.
func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error {
// Record that decoder must stop at end tag corresponding to start.
p.pushEOF()
p.unmarshalDepth++
err := val.UnmarshalXML(p, *start)
p.unmarshalDepth--
if err != nil {
p.popEOF()
return err
}
if !p.popEOF() {
return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local)
}
return nil
}
// unmarshalTextInterface unmarshals a single XML element into val.
// The chardata contained in the element (but not its children)
// is passed to the text unmarshaler.
func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error {
var buf []byte
depth := 1
for depth > 0 {
t, err := p.Token()
if err != nil {
return err
}
switch t := t.(type) {
case CharData:
if depth == 1 {
buf = append(buf, t...)
}
case StartElement:
depth++
case EndElement:
depth--
}
}
return val.UnmarshalText(buf)
}
// unmarshalAttr unmarshals a single XML attribute into val.
func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error {
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) {
return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr)
}
}
// Not an UnmarshalerAttr; try encoding.TextUnmarshaler.
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value))
}
}
copyValue(val, []byte(attr.Value))
return nil
}
var (
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Unmarshal a single XML element into val.
func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error {
// Find start element if we need it.
if start == nil {
for {
tok, err := p.Token()
if err != nil {
return err
}
if t, ok := tok.(StartElement); ok {
start = &t
break
}
}
}
// Load value from interface, but only if the result will be
// usefully addressable.
if val.Kind() == reflect.Interface && !val.IsNil() {
e := val.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() {
val = e
}
}
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
if val.CanInterface() && val.Type().Implements(unmarshalerType) {
// This is an unmarshaler with a non-pointer receiver,
// so it's likely to be incorrect, but we do what we're told.
return p.unmarshalInterface(val.Interface().(Unmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(unmarshalerType) {
return p.unmarshalInterface(pv.Interface().(Unmarshaler), start)
}
}
if val.CanInterface() && val.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start)
}
if val.CanAddr() {
pv := val.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start)
}
}
var (
data []byte
saveData reflect.Value
comment []byte
saveComment reflect.Value
saveXML reflect.Value
saveXMLIndex int
saveXMLData []byte
saveAny reflect.Value
sv reflect.Value
tinfo *typeInfo
err error
)
switch v := val; v.Kind() {
default:
return errors.New("unknown type " + v.Type().String())
case reflect.Interface:
// TODO: For now, simply ignore the field. In the near
// future we may choose to unmarshal the start
// element on it, if not nil.
return p.Skip()
case reflect.Slice:
typ := v.Type()
if typ.Elem().Kind() == reflect.Uint8 {
// []byte
saveData = v
break
}
// Slice of element values.
// Grow slice.
n := v.Len()
if n >= v.Cap() {
ncap := 2 * n
if ncap < 4 {
ncap = 4
}
new := reflect.MakeSlice(typ, n, ncap)
reflect.Copy(new, v)
v.Set(new)
}
v.SetLen(n + 1)
// Recur to read element into slice.
if err := p.unmarshal(v.Index(n), start); err != nil {
v.SetLen(n)
return err
}
return nil
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String:
saveData = v
case reflect.Struct:
typ := v.Type()
if typ == nameType {
v.Set(reflect.ValueOf(start.Name))
break
}
sv = v
tinfo, err = getTypeInfo(typ)
if err != nil {
return err
}
// Validate and assign element name.
if tinfo.xmlname != nil {
finfo := tinfo.xmlname
if finfo.name != "" && finfo.name != start.Name.Local {
return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">")
}
if finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have "
if start.Name.Space == "" {
e += "no name space"
} else {
e += start.Name.Space
}
return UnmarshalError(e)
}
fv := finfo.value(sv)
if _, ok := fv.Interface().(Name); ok {
fv.Set(reflect.ValueOf(start.Name))
}
}
// Assign attributes.
// Also, determine whether we need to save character data or comments.
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
switch finfo.flags & fMode {
case fAttr:
strv := finfo.value(sv)
// Look for attribute.
for _, a := range start.Attr {
if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) {
if err := p.unmarshalAttr(strv, a); err != nil {
return err
}
break
}
}
case fCharData:
if !saveData.IsValid() {
saveData = finfo.value(sv)
}
case fComment:
if !saveComment.IsValid() {
saveComment = finfo.value(sv)
}
case fAny, fAny | fElement:
if !saveAny.IsValid() {
saveAny = finfo.value(sv)
}
case fInnerXml:
if !saveXML.IsValid() {
saveXML = finfo.value(sv)
if p.saved == nil {
saveXMLIndex = 0
p.saved = new(bytes.Buffer)
} else {
saveXMLIndex = p.savedOffset()
}
}
}
}
}
// Find end element.
// Process sub-elements along the way.
Loop:
for {
var savedOffset int
if saveXML.IsValid() {
savedOffset = p.savedOffset()
}
tok, err := p.Token()
if err != nil {
return err
}
switch t := tok.(type) {
case StartElement:
consumed := false
if sv.IsValid() {
consumed, err = p.unmarshalPath(tinfo, sv, nil, &t)
if err != nil {
return err
}
if !consumed && saveAny.IsValid() {
consumed = true
if err := p.unmarshal(saveAny, &t); err != nil {
return err
}
}
}
if !consumed {
if err := p.Skip(); err != nil {
return err
}
}
case EndElement:
if saveXML.IsValid() {
saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset]
if saveXMLIndex == 0 {
p.saved = nil
}
}
break Loop
case CharData:
if saveData.IsValid() {
data = append(data, t...)
}
case Comment:
if saveComment.IsValid() {
comment = append(comment, t...)
}
}
}
if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) {
if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
if saveData.IsValid() && saveData.CanAddr() {
pv := saveData.Addr()
if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) {
if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil {
return err
}
saveData = reflect.Value{}
}
}
if err := copyValue(saveData, data); err != nil {
return err
}
switch t := saveComment; t.Kind() {
case reflect.String:
t.SetString(string(comment))
case reflect.Slice:
t.Set(reflect.ValueOf(comment))
}
switch t := saveXML; t.Kind() {
case reflect.String:
t.SetString(string(saveXMLData))
case reflect.Slice:
t.Set(reflect.ValueOf(saveXMLData))
}
return nil
}
func copyValue(dst reflect.Value, src []byte) (err error) {
dst0 := dst
if dst.Kind() == reflect.Ptr {
if dst.IsNil() {
dst.Set(reflect.New(dst.Type().Elem()))
}
dst = dst.Elem()
}
// Save accumulated data.
switch dst.Kind() {
case reflect.Invalid:
// Probably a comment.
default:
return errors.New("cannot unmarshal into " + dst0.Type().String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetInt(itmp)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits())
if err != nil {
return err
}
dst.SetUint(utmp)
case reflect.Float32, reflect.Float64:
ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits())
if err != nil {
return err
}
dst.SetFloat(ftmp)
case reflect.Bool:
value, err := strconv.ParseBool(strings.TrimSpace(string(src)))
if err != nil {
return err
}
dst.SetBool(value)
case reflect.String:
dst.SetString(string(src))
case reflect.Slice:
if len(src) == 0 {
// non-nil to flag presence
src = []byte{}
}
dst.SetBytes(src)
}
return nil
}
// unmarshalPath walks down an XML structure looking for wanted
// paths, and calls unmarshal on them.
// The consumed result tells whether XML elements have been consumed
// from the Decoder until start's matching end element, or if it's
// still untouched because start is uninteresting for sv's fields.
func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) {
recurse := false
Loop:
for i := range tinfo.fields {
finfo := &tinfo.fields[i]
if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space {
continue
}
for j := range parents {
if parents[j] != finfo.parents[j] {
continue Loop
}
}
if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local {
// It's a perfect match, unmarshal the field.
return true, p.unmarshal(finfo.value(sv), start)
}
if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local {
// It's a prefix for the field. Break and recurse
// since it's not ok for one field path to be itself
// the prefix for another field path.
recurse = true
// We can reuse the same slice as long as we
// don't try to append to it.
parents = finfo.parents[:len(parents)+1]
break
}
}
if !recurse {
// We have no business with this element.
return false, nil
}
// The element is not a perfect match for any field, but one
// or more fields have the path to this element as a parent
// prefix. Recurse and attempt to match these.
for {
var tok Token
tok, err = p.Token()
if err != nil {
return true, err
}
switch t := tok.(type) {
case StartElement:
consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t)
if err != nil {
return true, err
}
if !consumed2 {
if err := p.Skip(); err != nil {
return true, err
}
}
case EndElement:
return true, nil
}
}
}
// Skip reads tokens until it has consumed the end element
// matching the most recent start element already consumed.
// It recurs if it encounters a start element, so it can be used to
// skip nested structures.
// It returns nil if it finds an end element matching the start
// element; otherwise it returns an error describing the problem.
func (d *Decoder) Skip() error {
for {
tok, err := d.Token()
if err != nil {
return err
}
switch tok.(type) {
case StartElement:
if err := d.Skip(); err != nil {
return err
}
case EndElement:
return nil
}
}
}
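
The decoder above is what parses WebDAV request bodies such as LOCK's <lockinfo>. A minimal sketch using the standard library encoding/xml, whose struct-tag grammar this internal fork mirrors (the struct and XML below are illustrative, not part of the diff), shows the a>b parent paths resolved by unmarshalPath and the innerxml capture handled by the fInnerXml flag:

package main

import (
    "encoding/xml"
    "fmt"
)

// owner keeps the verbatim XML inside <owner>, much like LockDetails.OwnerXML.
type owner struct {
    InnerXML string `xml:",innerxml"`
}

// lockInfo mirrors a WebDAV LOCK body; "lockscope>exclusive" is a parent
// path resolved by the unmarshalPath walk shown above.
type lockInfo struct {
    XMLName   xml.Name  `xml:"lockinfo"`
    Exclusive *struct{} `xml:"lockscope>exclusive"`
    Write     *struct{} `xml:"locktype>write"`
    Owner     owner     `xml:"owner"`
}

func main() {
    const body = `<D:lockinfo xmlns:D="DAV:">` +
        `<D:lockscope><D:exclusive/></D:lockscope>` +
        `<D:locktype><D:write/></D:locktype>` +
        `<D:owner>alist</D:owner>` +
        `</D:lockinfo>`
    var li lockInfo
    if err := xml.Unmarshal([]byte(body), &li); err != nil {
        panic(err)
    }
    fmt.Println(li.Exclusive != nil, li.Write != nil, li.Owner.InnerXML) // true true alist
}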

View File

@ -0,0 +1,371 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xml
import (
"fmt"
"reflect"
"strings"
"sync"
)
// typeInfo holds details for the xml representation of a type.
type typeInfo struct {
xmlname *fieldInfo
fields []fieldInfo
}
// fieldInfo holds details for the xml representation of a single field.
type fieldInfo struct {
idx []int
name string
xmlns string
flags fieldFlags
parents []string
}
type fieldFlags int
const (
fElement fieldFlags = 1 << iota
fAttr
fCharData
fInnerXml
fComment
fAny
fOmitEmpty
fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny
)
var tinfoMap = make(map[reflect.Type]*typeInfo)
var tinfoLock sync.RWMutex
var nameType = reflect.TypeOf(Name{})
// getTypeInfo returns the typeInfo structure with details necessary
// for marshalling and unmarshalling typ.
func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
tinfoLock.RLock()
tinfo, ok := tinfoMap[typ]
tinfoLock.RUnlock()
if ok {
return tinfo, nil
}
tinfo = &typeInfo{}
if typ.Kind() == reflect.Struct && typ != nameType {
n := typ.NumField()
for i := 0; i < n; i++ {
f := typ.Field(i)
if f.PkgPath != "" || f.Tag.Get("xml") == "-" {
continue // Private field
}
// For embedded structs, embed its fields.
if f.Anonymous {
t := f.Type
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() == reflect.Struct {
inner, err := getTypeInfo(t)
if err != nil {
return nil, err
}
if tinfo.xmlname == nil {
tinfo.xmlname = inner.xmlname
}
for _, finfo := range inner.fields {
finfo.idx = append([]int{i}, finfo.idx...)
if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
return nil, err
}
}
continue
}
}
finfo, err := structFieldInfo(typ, &f)
if err != nil {
return nil, err
}
if f.Name == "XMLName" {
tinfo.xmlname = finfo
continue
}
// Add the field if it doesn't conflict with other fields.
if err := addFieldInfo(typ, tinfo, finfo); err != nil {
return nil, err
}
}
}
tinfoLock.Lock()
tinfoMap[typ] = tinfo
tinfoLock.Unlock()
return tinfo, nil
}
// structFieldInfo builds and returns a fieldInfo for f.
func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
finfo := &fieldInfo{idx: f.Index}
// Split the tag from the xml namespace if necessary.
tag := f.Tag.Get("xml")
if i := strings.Index(tag, " "); i >= 0 {
finfo.xmlns, tag = tag[:i], tag[i+1:]
}
// Parse flags.
tokens := strings.Split(tag, ",")
if len(tokens) == 1 {
finfo.flags = fElement
} else {
tag = tokens[0]
for _, flag := range tokens[1:] {
switch flag {
case "attr":
finfo.flags |= fAttr
case "chardata":
finfo.flags |= fCharData
case "innerxml":
finfo.flags |= fInnerXml
case "comment":
finfo.flags |= fComment
case "any":
finfo.flags |= fAny
case "omitempty":
finfo.flags |= fOmitEmpty
}
}
// Validate the flags used.
valid := true
switch mode := finfo.flags & fMode; mode {
case 0:
finfo.flags |= fElement
case fAttr, fCharData, fInnerXml, fComment, fAny:
if f.Name == "XMLName" || tag != "" && mode != fAttr {
valid = false
}
default:
// This will also catch multiple modes in a single field.
valid = false
}
if finfo.flags&fMode == fAny {
finfo.flags |= fElement
}
if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 {
valid = false
}
if !valid {
return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
}
// Use of xmlns without a name is not allowed.
if finfo.xmlns != "" && tag == "" {
return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q",
f.Name, typ, f.Tag.Get("xml"))
}
if f.Name == "XMLName" {
// The XMLName field records the XML element name. Don't
// process it as usual because its name should default to
// empty rather than to the field name.
finfo.name = tag
return finfo, nil
}
if tag == "" {
// If the name part of the tag is completely empty, get
// default from XMLName of underlying struct if feasible,
// or field name otherwise.
if xmlname := lookupXMLName(f.Type); xmlname != nil {
finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name
} else {
finfo.name = f.Name
}
return finfo, nil
}
if finfo.xmlns == "" && finfo.flags&fAttr == 0 {
// If it's an element with no namespace specified, get the default
// from the XMLName of enclosing struct if possible.
if xmlname := lookupXMLName(typ); xmlname != nil {
finfo.xmlns = xmlname.xmlns
}
}
// Prepare field name and parents.
parents := strings.Split(tag, ">")
if parents[0] == "" {
parents[0] = f.Name
}
if parents[len(parents)-1] == "" {
return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ)
}
finfo.name = parents[len(parents)-1]
if len(parents) > 1 {
if (finfo.flags & fElement) == 0 {
return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ","))
}
finfo.parents = parents[:len(parents)-1]
}
// If the field type has an XMLName field, the names must match
// so that the behavior of both marshalling and unmarshalling
// is straightforward and unambiguous.
if finfo.flags&fElement != 0 {
ftyp := f.Type
xmlname := lookupXMLName(ftyp)
if xmlname != nil && xmlname.name != finfo.name {
return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName",
finfo.name, typ, f.Name, xmlname.name, ftyp)
}
}
return finfo, nil
}
// lookupXMLName returns the fieldInfo for typ's XMLName field
// in case it exists and has a valid xml field tag, otherwise
// it returns nil.
func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) {
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
return nil
}
for i, n := 0, typ.NumField(); i < n; i++ {
f := typ.Field(i)
if f.Name != "XMLName" {
continue
}
finfo, err := structFieldInfo(typ, &f)
if finfo.name != "" && err == nil {
return finfo
}
// Also consider errors as a non-existent field tag
// and let getTypeInfo itself report the error.
break
}
return nil
}
func min(a, b int) int {
if a <= b {
return a
}
return b
}
// addFieldInfo adds finfo to tinfo.fields if there are no
// conflicts, or if conflicts arise from previous fields that were
// obtained from deeper embedded structures than finfo. In the latter
// case, the conflicting entries are dropped.
// A conflict occurs when the path (parent + name) to a field is
// itself a prefix of another path, or when two paths match exactly.
// It is okay for field paths to share a common, shorter prefix.
func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
var conflicts []int
Loop:
// First, figure all conflicts. Most working code will have none.
for i := range tinfo.fields {
oldf := &tinfo.fields[i]
if oldf.flags&fMode != newf.flags&fMode {
continue
}
if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns {
continue
}
minl := min(len(newf.parents), len(oldf.parents))
for p := 0; p < minl; p++ {
if oldf.parents[p] != newf.parents[p] {
continue Loop
}
}
if len(oldf.parents) > len(newf.parents) {
if oldf.parents[len(newf.parents)] == newf.name {
conflicts = append(conflicts, i)
}
} else if len(oldf.parents) < len(newf.parents) {
if newf.parents[len(oldf.parents)] == oldf.name {
conflicts = append(conflicts, i)
}
} else {
if newf.name == oldf.name {
conflicts = append(conflicts, i)
}
}
}
// Without conflicts, add the new field and return.
if conflicts == nil {
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// If any conflict is shallower, ignore the new field.
// This matches the Go field resolution on embedding.
for _, i := range conflicts {
if len(tinfo.fields[i].idx) < len(newf.idx) {
return nil
}
}
// Otherwise, if any of them is at the same depth level, it's an error.
for _, i := range conflicts {
oldf := &tinfo.fields[i]
if len(oldf.idx) == len(newf.idx) {
f1 := typ.FieldByIndex(oldf.idx)
f2 := typ.FieldByIndex(newf.idx)
return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")}
}
}
// Otherwise, the new field is shallower, and thus takes precedence,
// so drop the conflicting fields from tinfo and append the new one.
for c := len(conflicts) - 1; c >= 0; c-- {
i := conflicts[c]
copy(tinfo.fields[i:], tinfo.fields[i+1:])
tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
}
tinfo.fields = append(tinfo.fields, *newf)
return nil
}
// A TagPathError represents an error in the unmarshalling process
// caused by the use of field tags with conflicting paths.
type TagPathError struct {
Struct reflect.Type
Field1, Tag1 string
Field2, Tag2 string
}
func (e *TagPathError) Error() string {
return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2)
}
// value returns v's field value corresponding to finfo.
// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
// and dereferences pointers as necessary.
func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
for i, x := range finfo.idx {
if i > 0 {
t := v.Type()
if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
}
}
v = v.Field(x)
}
return v
}
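
structFieldInfo above defines the whole tag grammar in one place: an optional namespace before the first space, the element name before the first comma, and flags after it. A small sketch against the standard library encoding/xml, which uses the same grammar (the propstat struct below is made up for illustration):

package main

import (
    "encoding/xml"
    "fmt"
)

// propstat exercises three of the parsed pieces: an xmlns+name on XMLName,
// a plain element (fElement), and an element carrying the omitempty flag.
type propstat struct {
    XMLName     xml.Name `xml:"DAV: propstat"`
    Status      string   `xml:"status"`
    Description string   `xml:"responsedescription,omitempty"`
}

func main() {
    out, err := xml.Marshal(propstat{Status: "HTTP/1.1 200 OK"})
    if err != nil {
        panic(err)
    }
    // Description is empty, so omitempty drops it; prints something like:
    // <propstat xmlns="DAV:"><status>HTTP/1.1 200 OK</status></propstat>
    fmt.Println(string(out))
}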

File diff suppressed because it is too large

445
server/webdav/lock.go Normal file
View File

@ -0,0 +1,445 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"container/heap"
"errors"
"strconv"
"strings"
"sync"
"time"
)
var (
// ErrConfirmationFailed is returned by a LockSystem's Confirm method.
ErrConfirmationFailed = errors.New("webdav: confirmation failed")
// ErrForbidden is returned by a LockSystem's Unlock method.
ErrForbidden = errors.New("webdav: forbidden")
// ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods.
ErrLocked = errors.New("webdav: locked")
// ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods.
ErrNoSuchLock = errors.New("webdav: no such lock")
)
// Condition can match a WebDAV resource, based on a token or ETag.
// Exactly one of Token and ETag should be non-empty.
type Condition struct {
Not bool
Token string
ETag string
}
// LockSystem manages access to a collection of named resources. The elements
// in a lock name are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
type LockSystem interface {
// Confirm confirms that the caller can claim all of the locks specified by
// the given conditions, and that holding the union of all of those locks
// gives exclusive access to all of the named resources. Up to two resources
// can be named. Empty names are ignored.
//
// Exactly one of release and err will be non-nil. If release is non-nil,
// all of the requested locks are held until release is called. Calling
// release does not unlock the lock, in the WebDAV UNLOCK sense, but once
// Confirm has confirmed that a lock claim is valid, that lock cannot be
// Confirmed again until it has been released.
//
// If Confirm returns ErrConfirmationFailed then the Handler will continue
// to try any other set of locks presented (a WebDAV HTTP request can
// present more than one set of locks). If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error)
// Create creates a lock with the given depth, duration, owner and root
// (name). The depth will either be negative (meaning infinite) or zero.
//
// If Create returns ErrLocked then the Handler will write a "423 Locked"
// HTTP status. If it returns any other non-nil error, the Handler will
// write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
//
// The token returned identifies the created lock. It should be an absolute
// URI as defined by RFC 3986, Section 4.3. In particular, it should not
// contain whitespace.
Create(now time.Time, details LockDetails) (token string, err error)
// Refresh refreshes the lock with the given token.
//
// If Refresh returns ErrLocked then the Handler will write a "423 Locked"
// HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write
// a "412 Precondition Failed" HTTP Status. If it returns any other non-nil
// error, the Handler will write a "500 Internal Server Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for
// when to use each error.
Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error)
// Unlock unlocks the lock with the given token.
//
// If Unlock returns ErrForbidden then the Handler will write a "403
// Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler
// will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock
// then the Handler will write a "409 Conflict" HTTP Status. If it returns
// any other non-nil error, the Handler will write a "500 Internal Server
// Error" HTTP status.
//
// See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for
// when to use each error.
Unlock(now time.Time, token string) error
}
// LockDetails are a lock's metadata.
type LockDetails struct {
// Root is the root resource name being locked. For a zero-depth lock, the
// root is the only resource being locked.
Root string
// Duration is the lock timeout. A negative duration means infinite.
Duration time.Duration
// OwnerXML is the verbatim <owner> XML given in a LOCK HTTP request.
//
// TODO: does the "verbatim" nature play well with XML namespaces?
// Does the OwnerXML field need to have more structure? See
// https://codereview.appspot.com/175140043/#msg2
OwnerXML string
// ZeroDepth is whether the lock has zero depth. If it does not have zero
// depth, it has infinite depth.
ZeroDepth bool
}
// NewMemLS returns a new in-memory LockSystem.
func NewMemLS() LockSystem {
return &memLS{
byName: make(map[string]*memLSNode),
byToken: make(map[string]*memLSNode),
gen: uint64(time.Now().Unix()),
}
}
type memLS struct {
mu sync.Mutex
byName map[string]*memLSNode
byToken map[string]*memLSNode
gen uint64
// byExpiry only contains those nodes whose LockDetails have a finite
// Duration and are yet to expire.
byExpiry byExpiry
}
func (m *memLS) nextToken() string {
m.gen++
return strconv.FormatUint(m.gen, 10)
}
func (m *memLS) collectExpiredNodes(now time.Time) {
for len(m.byExpiry) > 0 {
if now.Before(m.byExpiry[0].expiry) {
break
}
m.remove(m.byExpiry[0])
}
}
func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
var n0, n1 *memLSNode
if name0 != "" {
if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil {
return nil, ErrConfirmationFailed
}
}
if name1 != "" {
if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil {
return nil, ErrConfirmationFailed
}
}
// Don't hold the same node twice.
if n1 == n0 {
n1 = nil
}
if n0 != nil {
m.hold(n0)
}
if n1 != nil {
m.hold(n1)
}
return func() {
m.mu.Lock()
defer m.mu.Unlock()
if n1 != nil {
m.unhold(n1)
}
if n0 != nil {
m.unhold(n0)
}
}, nil
}
// lookup returns the node n that locks the named resource, provided that n
// matches at least one of the given conditions and that lock isn't held by
// another party. Otherwise, it returns nil.
//
// n may be a parent of the named resource, if n is an infinite depth lock.
func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) {
// TODO: support Condition.Not and Condition.ETag.
for _, c := range conditions {
n = m.byToken[c.Token]
if n == nil || n.held {
continue
}
if name == n.details.Root {
return n
}
if n.details.ZeroDepth {
continue
}
if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
return n
}
}
return nil
}
func (m *memLS) hold(n *memLSNode) {
if n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = true
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func (m *memLS) unhold(n *memLSNode) {
if !n.held {
panic("webdav: memLS inconsistent held state")
}
n.held = false
if n.details.Duration >= 0 {
heap.Push(&m.byExpiry, n)
}
}
func (m *memLS) Create(now time.Time, details LockDetails) (string, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
details.Root = slashClean(details.Root)
if !m.canCreate(details.Root, details.ZeroDepth) {
return "", ErrLocked
}
n := m.create(details.Root)
n.token = m.nextToken()
m.byToken[n.token] = n
n.details = details
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.token, nil
}
func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return LockDetails{}, ErrNoSuchLock
}
if n.held {
return LockDetails{}, ErrLocked
}
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
n.details.Duration = duration
if n.details.Duration >= 0 {
n.expiry = now.Add(n.details.Duration)
heap.Push(&m.byExpiry, n)
}
return n.details, nil
}
func (m *memLS) Unlock(now time.Time, token string) error {
m.mu.Lock()
defer m.mu.Unlock()
m.collectExpiredNodes(now)
n := m.byToken[token]
if n == nil {
return ErrNoSuchLock
}
if n.held {
return ErrLocked
}
m.remove(n)
return nil
}
func (m *memLS) canCreate(name string, zeroDepth bool) bool {
return walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
return true
}
if first {
if n.token != "" {
// The target node is already locked.
return false
}
if !zeroDepth {
// The requested lock depth is infinite, and the fact that n exists
// (n != nil) means that a descendent of the target node is locked.
return false
}
} else if n.token != "" && !n.details.ZeroDepth {
// An ancestor of the target node is locked with infinite depth.
return false
}
return true
})
}
func (m *memLS) create(name string) (ret *memLSNode) {
walkToRoot(name, func(name0 string, first bool) bool {
n := m.byName[name0]
if n == nil {
n = &memLSNode{
details: LockDetails{
Root: name0,
},
byExpiryIndex: -1,
}
m.byName[name0] = n
}
n.refCount++
if first {
ret = n
}
return true
})
return ret
}
func (m *memLS) remove(n *memLSNode) {
delete(m.byToken, n.token)
n.token = ""
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
x := m.byName[name0]
x.refCount--
if x.refCount == 0 {
delete(m.byName, name0)
}
return true
})
if n.byExpiryIndex >= 0 {
heap.Remove(&m.byExpiry, n.byExpiryIndex)
}
}
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
for first := true; ; first = false {
if !f(name, first) {
return false
}
if name == "/" {
break
}
name = name[:strings.LastIndex(name, "/")]
if name == "" {
name = "/"
}
}
return true
}
type memLSNode struct {
// details are the lock metadata. Even if this node's name is not explicitly locked,
// details.Root will still equal the node's name.
details LockDetails
// token is the unique identifier for this node's lock. An empty token means that
// this node is not explicitly locked.
token string
// refCount is the number of self-or-descendent nodes that are explicitly locked.
refCount int
// expiry is when this node's lock expires.
expiry time.Time
// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
// if this node does not expire, or has expired.
byExpiryIndex int
// held is whether this node's lock is actively held by a Confirm call.
held bool
}
type byExpiry []*memLSNode
func (b *byExpiry) Len() int {
return len(*b)
}
func (b *byExpiry) Less(i, j int) bool {
return (*b)[i].expiry.Before((*b)[j].expiry)
}
func (b *byExpiry) Swap(i, j int) {
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
(*b)[i].byExpiryIndex = i
(*b)[j].byExpiryIndex = j
}
func (b *byExpiry) Push(x interface{}) {
n := x.(*memLSNode)
n.byExpiryIndex = len(*b)
*b = append(*b, n)
}
func (b *byExpiry) Pop() interface{} {
i := len(*b) - 1
n := (*b)[i]
(*b)[i] = nil
n.byExpiryIndex = -1
*b = (*b)[:i]
return n
}
const infiniteTimeout = -1
// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is
// empty, an infiniteTimeout is returned.
func parseTimeout(s string) (time.Duration, error) {
if s == "" {
return infiniteTimeout, nil
}
if i := strings.IndexByte(s, ','); i >= 0 {
s = s[:i]
}
s = strings.TrimSpace(s)
if s == "Infinite" {
return infiniteTimeout, nil
}
const pre = "Second-"
if !strings.HasPrefix(s, pre) {
return 0, errInvalidTimeout
}
s = s[len(pre):]
if s == "" || s[0] < '0' || '9' < s[0] {
return 0, errInvalidTimeout
}
n, err := strconv.ParseInt(s, 10, 64)
if err != nil || 1<<32-1 < n {
return 0, errInvalidTimeout
}
return time.Duration(n) * time.Second, nil
}
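
A minimal usage sketch for the in-memory LockSystem defined above; the import path comes from this repository, while the resource name and timeout are made up:

package main

import (
    "fmt"
    "time"

    "github.com/Xhofe/alist/server/webdav"
)

func main() {
    ls := webdav.NewMemLS()
    now := time.Now()

    // A finite Duration puts the lock on the byExpiry heap, so it is reaped
    // by collectExpiredNodes once it times out.
    token, err := ls.Create(now, webdav.LockDetails{
        Root:      "/dav/report.docx",
        Duration:  30 * time.Second,
        ZeroDepth: true,
    })
    if err != nil {
        panic(err)
    }

    // Confirm holds the lock until release is called; a second Confirm with
    // the same token fails with ErrConfirmationFailed while it is held.
    release, err := ls.Confirm(now, "/dav/report.docx", "", webdav.Condition{Token: token})
    if err != nil {
        panic(err)
    }
    release()

    fmt.Println(ls.Unlock(now, token)) // <nil>
}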

413
server/webdav/prop.go Normal file
View File

@ -0,0 +1,413 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
import (
"bytes"
"context"
"encoding/xml"
"errors"
"fmt"
"net/http"
"strconv"
"time"
)
type FileInfo interface {
GetSize() uint64
GetName() string
ModTime() time.Time
IsDir() bool
//GetPosition() string
}
// Proppatch describes a property update instruction as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH
type Proppatch struct {
// Remove specifies whether this patch removes properties. If it does not
// remove them, it sets them.
Remove bool
// Props contains the properties to be set or removed.
Props []Property
}
// Propstat describes an XML propstat element as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
type Propstat struct {
// Props contains the properties for which Status applies.
Props []Property
// Status defines the HTTP status code of the properties in Prop.
// Allowed values include, but are not limited to the WebDAV status
// code extensions for HTTP/1.1.
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
Status int
// XMLError contains the XML representation of the optional error element.
// XML content within this field must not rely on any predefined
// namespace declarations or prefixes. If empty, the XML error element
// is omitted.
XMLError string
// ResponseDescription contains the contents of the optional
// responsedescription field. If empty, the XML element is omitted.
ResponseDescription string
}
// makePropstats returns a slice containing those of x and y whose Props slice
// is non-empty. If both are empty, it returns a slice containing an otherwise
// zero Propstat whose HTTP status code is 200 OK.
func makePropstats(x, y Propstat) []Propstat {
pstats := make([]Propstat, 0, 2)
if len(x.Props) != 0 {
pstats = append(pstats, x)
}
if len(y.Props) != 0 {
pstats = append(pstats, y)
}
if len(pstats) == 0 {
pstats = append(pstats, Propstat{
Status: http.StatusOK,
})
}
return pstats
}
// DeadPropsHolder holds the dead properties of a resource.
//
// Dead properties are those properties that are explicitly defined. In
// comparison, live properties, such as DAV:getcontentlength, are implicitly
// defined by the underlying resource, and cannot be explicitly overridden or
// removed. See the Terminology section of
// http://www.webdav.org/specs/rfc4918.html#rfc.section.3
//
// There is a whitelist of the names of live properties. This package handles
// all live properties, and will only pass non-whitelisted names to the Patch
// method of DeadPropsHolder implementations.
type DeadPropsHolder interface {
// DeadProps returns a copy of the dead properties held.
DeadProps() (map[xml.Name]Property, error)
// Patch patches the dead properties held.
//
// Patching is atomic; either all or no patches succeed. It returns (nil,
// non-nil) if an internal server error occurred, otherwise the Propstats
// collectively contain one Property for each proposed patch Property. If
// all patches succeed, Patch returns a slice of length one and a Propstat
// element with a 200 OK HTTP status code. If none succeed, for reasons
// other than an internal server error, no Propstat has status 200 OK.
//
// For more details on when various HTTP status codes apply, see
// http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status
Patch([]Proppatch) ([]Propstat, error)
}
// liveProps contains all supported, protected DAV: properties.
var liveProps = map[xml.Name]struct {
// findFn implements the propfind function of this property. If nil,
// it indicates a hidden property.
findFn func(context.Context, *FileSystem, LockSystem, string, FileInfo) (string, error)
// dir is true if the property applies to directories.
dir bool
}{
{Space: "DAV:", Local: "resourcetype"}: {
findFn: findResourceType,
dir: true,
},
{Space: "DAV:", Local: "displayname"}: {
findFn: findDisplayName,
dir: true,
},
{Space: "DAV:", Local: "getcontentlength"}: {
findFn: findContentLength,
dir: false,
},
{Space: "DAV:", Local: "getlastmodified"}: {
findFn: findLastModified,
// http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified
// suggests that getlastmodified should only apply to GETable
// resources, and this package does not support GET on directories.
//
// Nonetheless, some WebDAV clients expect child directories to be
// sortable by getlastmodified date, so this value is true, not false.
// See golang.org/issue/15334.
dir: true,
},
{Space: "DAV:", Local: "creationdate"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontentlanguage"}: {
findFn: nil,
dir: false,
},
{Space: "DAV:", Local: "getcontenttype"}: {
findFn: findContentType,
dir: false,
},
{Space: "DAV:", Local: "getetag"}: {
findFn: findETag,
// findETag implements ETag as the concatenated hex values of a file's
// modification time and size. This is not a reliable synchronization
// mechanism for directories, so we do not advertise getetag for DAV
// collections.
dir: false,
},
// TODO: The lockdiscovery property requires LockSystem to list the
// active locks on a resource.
{Space: "DAV:", Local: "lockdiscovery"}: {},
{Space: "DAV:", Local: "supportedlock"}: {
findFn: findSupportedLock,
dir: true,
},
}
// TODO(nigeltao) merge props and allprop?
// Props returns the status of the properties named pnames for resource name.
//
// Each Propstat has a unique status and each property name will only be part
// of one Propstat element.
func props(ctx context.Context, fs *FileSystem, ls LockSystem, fi FileInfo, pnames []xml.Name) ([]Propstat, error) {
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
pstatOK := Propstat{Status: http.StatusOK}
pstatNotFound := Propstat{Status: http.StatusNotFound}
for _, pn := range pnames {
// If this file has dead properties, check if they contain pn.
if dp, ok := deadProps[pn]; ok {
pstatOK.Props = append(pstatOK.Props, dp)
continue
}
// Otherwise, it must either be a live property or we don't know it.
if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) {
innerXML, err := prop.findFn(ctx, fs, ls, fi.GetName(), fi)
if err != nil {
return nil, err
}
pstatOK.Props = append(pstatOK.Props, Property{
XMLName: pn,
InnerXML: []byte(innerXML),
})
} else {
pstatNotFound.Props = append(pstatNotFound.Props, Property{
XMLName: pn,
})
}
}
return makePropstats(pstatOK, pstatNotFound), nil
}
// Propnames returns the property names defined for resource name.
func propnames(ctx context.Context, fs *FileSystem, ls LockSystem, fi FileInfo) ([]xml.Name, error) {
isDir := fi.IsDir()
var deadProps map[xml.Name]Property
pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps))
for pn, prop := range liveProps {
if prop.findFn != nil && (prop.dir || !isDir) {
pnames = append(pnames, pn)
}
}
return pnames, nil
}
// Allprop returns the properties defined for resource name and the properties
// named in include.
//
// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined
// within the RFC plus dead properties. Other live properties should only be
// returned if they are named in 'include'.
//
// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
func allprop(ctx context.Context, fs *FileSystem, ls LockSystem, info FileInfo, include []xml.Name) ([]Propstat, error) {
pnames, err := propnames(ctx, fs, ls, info)
if err != nil {
return nil, err
}
// Add names from include if they are not already covered in pnames.
nameset := make(map[xml.Name]bool)
for _, pn := range pnames {
nameset[pn] = true
}
for _, pn := range include {
if !nameset[pn] {
pnames = append(pnames, pn)
}
}
return props(ctx, fs, ls, info, pnames)
}
// Patch patches the properties of resource name. The return values are
// constrained in the same manner as DeadPropsHolder.Patch.
func patch(ctx context.Context, fs *FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) {
conflict := false
loop:
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
conflict = true
break loop
}
}
}
if conflict {
pstatForbidden := Propstat{
Status: http.StatusForbidden,
XMLError: `<D:cannot-modify-protected-property xmlns:D="DAV:"/>`,
}
pstatFailedDep := Propstat{
Status: StatusFailedDependency,
}
for _, patch := range patches {
for _, p := range patch.Props {
if _, ok := liveProps[p.XMLName]; ok {
pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName})
} else {
pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName})
}
}
}
return makePropstats(pstatForbidden, pstatFailedDep), nil
}
// The backing file does not implement the optional DeadPropsHolder interface,
// so the patches are not persisted; they are acknowledged with 200 OK.
pstat := Propstat{Status: http.StatusOK}
for _, patch := range patches {
for _, p := range patch.Props {
pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
}
}
return []Propstat{pstat}, nil
}
func escapeXML(s string) string {
for i := 0; i < len(s); i++ {
// As an optimization, if s contains only ASCII letters, digits or a
// few special characters, the escaped value is s itself and we don't
// need to allocate a buffer and convert between string and []byte.
switch c := s[i]; {
case c == ' ' || c == '_' ||
('+' <= c && c <= '9') || // Digits as well as + , - . and /
('A' <= c && c <= 'Z') ||
('a' <= c && c <= 'z'):
continue
}
// Otherwise, go through the full escaping process.
var buf bytes.Buffer
xml.EscapeText(&buf, []byte(s))
return buf.String()
}
return s
}
func findResourceType(ctx context.Context, fs *FileSystem, ls LockSystem, name string, fi FileInfo) (string, error) {
if fi.IsDir() {
return `<D:collection xmlns:D="DAV:"/>`, nil
}
return "", nil
}
func findDisplayName(ctx context.Context, fs *FileSystem, ls LockSystem, name string, fi FileInfo) (string, error) {
if slashClean(name) == "/" {
// Hide the real name of a possibly prefixed root directory.
return "", nil
}
return escapeXML(fi.GetName()), nil
}
func findContentLength(ctx context.Context, fs *FileSystem, ls LockSystem, name string, fi FileInfo) (string, error) {
return strconv.FormatUint(fi.GetSize(), 10), nil
}
func findLastModified(ctx context.Context, fs *FileSystem, ls LockSystem, name string, fi FileInfo) (string, error) {
return fi.ModTime().UTC().Format(http.TimeFormat), nil
}
// ErrNotImplemented should be returned by optional interfaces if they
// want the original implementation to be used.
var ErrNotImplemented = errors.New("not implemented")
// ContentTyper is an optional interface for the os.FileInfo
// objects returned by the FileSystem.
//
// If this interface is defined then it will be used to read the
// content type from the object.
//
// If this interface is not defined the file will be opened and the
// content type will be guessed from the initial contents of the file.
type ContentTyper interface {
// ContentType returns the content type for the file.
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ContentType(ctx context.Context) (string, error)
}
func findContentType(ctx context.Context, fs *FileSystem, ls LockSystem, name string, fi FileInfo) (string, error) {
//if do, ok := fi.(ContentTyper); ok {
// ctype, err := do.ContentType(ctx)
// if err != ErrNotImplemented {
// return ctype, err
// }
//}
//f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0)
//if err != nil {
// return "", err
//}
//defer f.Close()
//// This implementation is based on serveContent's code in the standard net/http package.
//ctype := mime.TypeByExtension(filepath.Ext(name))
//if ctype != "" {
// return ctype, nil
//}
//// Read a chunk to decide between utf-8 text and binary.
//var buf [512]byte
//n, err := io.ReadFull(f, buf[:])
//if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// return "", err
//}
//ctype = http.DetectContentType(buf[:n])
//// Rewind file.
//_, err = f.Seek(0, os.SEEK_SET)
//return ctype, err
return "", nil
}
// ETager is an optional interface for the os.FileInfo objects
// returned by the FileSystem.
//
// If this interface is defined then it will be used to read the ETag
// for the object.
//
// If this interface is not defined an ETag will be computed using the
// ModTime() and the Size() methods of the os.FileInfo object.
type ETager interface {
// ETag returns an ETag for the file. This should be of the
// form "value" or W/"value"
//
// If this returns error ErrNotImplemented then the error will
// be ignored and the base implementation will be used
// instead.
ETag(ctx context.Context) (string, error)
}
func findETag(ctx context.Context, fs *FileSystem, ls LockSystem, reqPath string, fi FileInfo) (string, error) {
return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.GetSize()), nil
}
func findSupportedLock(ctx context.Context, fs *FileSystem, ls LockSystem, name string, fi FileInfo) (string, error) {
return `` +
`<D:lockentry xmlns:D="DAV:">` +
`<D:lockscope><D:exclusive/></D:lockscope>` +
`<D:locktype><D:write/></D:locktype>` +
`</D:lockentry>`, nil
}
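
The live-property helpers above only ever call the four methods of the FileInfo interface, so any driver's file record can be adapted in a few lines. A hypothetical adapter (memFileInfo is invented for illustration):

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/Xhofe/alist/server/webdav"
)

// memFileInfo is a made-up stand-in for a driver's file record.
type memFileInfo struct {
    name    string
    size    uint64
    modTime time.Time
    dir     bool
}

func (f memFileInfo) GetSize() uint64    { return f.size }
func (f memFileInfo) GetName() string    { return f.name }
func (f memFileInfo) ModTime() time.Time { return f.modTime }
func (f memFileInfo) IsDir() bool        { return f.dir }

func main() {
    var fi webdav.FileInfo = memFileInfo{name: "a.txt", size: 42, modTime: time.Now()}
    // getcontentlength and getlastmodified are built from exactly these calls.
    fmt.Println(fi.GetSize(), fi.ModTime().UTC().Format(http.TimeFormat))
}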

731
server/webdav/webdav.go Normal file
View File

@ -0,0 +1,731 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package webdav provides a WebDAV server implementation.
package webdav // import "golang.org/x/net/webdav"
import (
"context"
"errors"
"fmt"
"github.com/Xhofe/alist/utils"
log "github.com/sirupsen/logrus"
"net/http"
"net/url"
"path"
"strings"
"time"
)
type Handler struct {
// Prefix is the URL path prefix to strip from WebDAV resource paths.
Prefix string
// LockSystem is the lock management system.
LockSystem LockSystem
// Logger is an optional error logger. If non-nil, it will be called
// for all HTTP requests.
Logger func(*http.Request, error)
}
func (h *Handler) stripPrefix(p string) (string, int, error) {
if h.Prefix == "" {
return p, http.StatusOK, nil
}
prefix := h.Prefix
if r := strings.TrimPrefix(p, prefix); len(r) < len(p) {
if len(r) == 0 {
r = "/"
}
return utils.RemoveLastSlash(r), http.StatusOK, nil
}
return p, http.StatusNotFound, errPrefixMismatch
}
// isPathExist reports whether the given path exists in the file system.
func isPathExist(ctx context.Context, fs *FileSystem, path string) (bool, FileInfo) {
file, err := fs.File(path)
if err != nil {
return false, nil
}
return true, file
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, fs *FileSystem) {
status, err := http.StatusBadRequest, errUnsupportedMethod
if h.LockSystem == nil {
status, err = http.StatusInternalServerError, errNoLockSystem
} else {
switch r.Method {
case "OPTIONS":
status, err = h.handleOptions(w, r, fs)
case "GET", "HEAD", "POST":
status, err = h.handleGetHeadPost(w, r, fs)
case "DELETE":
status, err = h.handleDelete(w, r, fs)
case "PUT":
status, err = h.handlePut(w, r, fs)
case "MKCOL":
status, err = h.handleMkcol(w, r, fs)
case "COPY", "MOVE":
status, err = h.handleCopyMove(w, r, fs)
case "LOCK":
status, err = h.handleLock(w, r, fs)
case "UNLOCK":
status, err = h.handleUnlock(w, r, fs)
case "PROPFIND":
status, err = h.handlePropfind(w, r, fs)
case "PROPPATCH":
status, err = h.handleProppatch(w, r, fs)
}
}
if err != nil {
log.Error(err)
}
if status != 0 {
w.WriteHeader(status)
if status != http.StatusNoContent {
w.Write([]byte(StatusText(status)))
}
}
if h.Logger != nil {
h.Logger(r, err)
}
}
// OK
func (h *Handler) lock(now time.Time, root string, fs *FileSystem) (token string, status int, err error) {
token, err = h.LockSystem.Create(now, LockDetails{
Root: root,
Duration: infiniteTimeout,
ZeroDepth: true,
})
if err != nil {
if err == ErrLocked {
return "", StatusLocked, err
}
return "", http.StatusInternalServerError, err
}
return token, 0, nil
}
// OK
func (h *Handler) confirmLocks(r *http.Request, src, dst string, fs *FileSystem) (release func(), status int, err error) {
hdr := r.Header.Get("If")
if hdr == "" {
// An empty If header means that the client hasn't previously created locks.
// Even if this client doesn't care about locks, we still need to check that
// the resources aren't locked by another client, so we create temporary
// locks that would conflict with another client's locks. These temporary
// locks are unlocked at the end of the HTTP request.
now, srcToken, dstToken := time.Now(), "", ""
if src != "" {
srcToken, status, err = h.lock(now, src, fs)
if err != nil {
return nil, status, err
}
}
if dst != "" {
dstToken, status, err = h.lock(now, dst, fs)
if err != nil {
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
return nil, status, err
}
}
return func() {
if dstToken != "" {
h.LockSystem.Unlock(now, dstToken)
}
if srcToken != "" {
h.LockSystem.Unlock(now, srcToken)
}
}, 0, nil
}
ih, ok := parseIfHeader(hdr)
if !ok {
return nil, http.StatusBadRequest, errInvalidIfHeader
}
// ih is a disjunction (OR) of ifLists, so any ifList will do.
for _, l := range ih.lists {
lsrc := l.resourceTag
if lsrc == "" {
lsrc = src
} else {
u, err := url.Parse(lsrc)
if err != nil {
continue
}
//if u.Host != r.Host {
// continue
//}
lsrc, status, err = h.stripPrefix(u.Path)
if err != nil {
return nil, status, err
}
}
release, err = h.LockSystem.Confirm(
time.Now(),
lsrc,
dst,
l.conditions...,
)
if err == ErrConfirmationFailed {
continue
}
if err != nil {
return nil, http.StatusInternalServerError, err
}
return release, 0, nil
}
// Section 10.4.1 says that "If this header is evaluated and all state lists
// fail, then the request must fail with a 412 (Precondition Failed) status."
// We follow the spec even though the cond_put_corrupt_token test case from
// the litmus test warns on seeing a 412 instead of a 423 (Locked).
return nil, http.StatusPreconditionFailed, ErrLocked
}
// OK
func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := r.Context()
allow := "OPTIONS, LOCK, PUT, MKCOL"
if exist, fi := isPathExist(ctx, fs, reqPath); exist {
log.Debugf("fi: %+v", fi)
if fi.IsDir() {
allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND"
} else {
allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT"
}
}
w.Header().Set("Allow", allow)
// http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes
w.Header().Set("DAV", "1, 2")
// http://msdn.microsoft.com/en-au/library/cc250217.aspx
w.Header().Set("MS-Author-Via", "DAV")
return 0, nil
}
// OK
func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := r.Context()
exist, file := isPathExist(ctx, fs, reqPath)
if !exist {
return http.StatusNotFound, nil
}
etag, err := findETag(ctx, fs, h.LockSystem, reqPath, file)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
log.Debugf("url: %+v", r.URL)
host := r.Host
link, err := fs.Link(host, reqPath)
if err != nil {
return http.StatusInternalServerError, err
}
http.Redirect(w, r, link, 302)
return 0, nil
}
// OK
func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "", fs)
if err != nil {
return status, err
}
defer release()
//ctx := r.Context()
//// Try deleting it as a file first.
//if ok, file := fs.IsFileExist(reqPath); ok {
// if err := fs.Delete(ctx, []uint{}, []uint{file.ID}, false); err != nil {
// return http.StatusMethodNotAllowed, err
// }
// return http.StatusNoContent, nil
//}
//
//// Then try deleting it as a directory.
//if ok, folder := fs.IsPathExist(reqPath); ok {
// if err := fs.Delete(ctx, []uint{folder.ID}, []uint{}, false); err != nil {
// return http.StatusMethodNotAllowed, err
// }
// return http.StatusNoContent, nil
//}
return http.StatusNotFound, nil
}
// OK
func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "", fs)
if err != nil {
return status, err
}
defer release()
// TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz'
// comments in http.checkEtag.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// findETag dereferences its FileInfo argument, so passing nil would panic;
// only emit an ETag when the target path already resolves to a file.
if exist, fi := isPathExist(ctx, fs, reqPath); exist {
etag, err := findETag(ctx, fs, h.LockSystem, reqPath, fi)
if err != nil {
return http.StatusInternalServerError, err
}
w.Header().Set("ETag", etag)
}
return http.StatusCreated, nil
}
// OK
func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "", fs)
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
if r.ContentLength > 0 {
return http.StatusUnsupportedMediaType, nil
}
if strings.Contains(r.UserAgent(), "rclone") {
//if _, ok := ctx.Value(fsctx.IgnoreDirectoryConflictCtx).(bool); !ok {
// ctx = context.WithValue(ctx, fsctx.IgnoreDirectoryConflictCtx, true)
//}
}
if _, err := fs.CreateDirectory(ctx, reqPath); err != nil {
return http.StatusConflict, err
}
return http.StatusCreated, nil
}
// OK
func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
hdr := r.Header.Get("Destination")
if hdr == "" {
return http.StatusBadRequest, errInvalidDestination
}
u, err := url.Parse(hdr)
if err != nil {
return http.StatusBadRequest, errInvalidDestination
}
//if u.Host != "" && u.Host != r.Host {
// return http.StatusBadGateway, errInvalidDestination
//}
src, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
dst, status, err := h.stripPrefix(u.Path)
if err != nil {
return status, err
}
if dst == "" {
return http.StatusBadGateway, errInvalidDestination
}
if dst == src {
return http.StatusForbidden, errDestinationEqualsSource
}
ctx := r.Context()
isExist, target := isPathExist(ctx, fs, src)
if !isExist {
return http.StatusNotFound, nil
}
if r.Method == "COPY" {
// Section 7.5.1 says that a COPY only needs to lock the destination,
// not both destination and source. Strictly speaking, this is racy,
// even though a COPY doesn't modify the source, if a concurrent
// operation modifies the source. However, the litmus test explicitly
// checks that COPYing a locked-by-another source is OK.
release, status, err := h.confirmLocks(r, "", dst, fs)
if err != nil {
return status, err
}
defer release()
// Section 9.8.3 says that "The COPY method on a collection without a Depth
// header must act as if a Depth header with value "infinity" was included".
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.8.3 says that "A client may submit a Depth header on a
// COPY on a collection with a value of "0" or "infinity"."
return http.StatusBadRequest, errInvalidDepth
}
}
return copyFiles(ctx, fs, target, dst, r.Header.Get("Overwrite") != "F", depth, 0)
}
// On Windows, when Office saves a file in the drive's root directory, the lock token it sends sometimes only covers the source file.
// The check on the dst lock is therefore skipped here for now.
release, status, err := h.confirmLocks(r, src, "", fs)
if err != nil {
return status, err
}
defer release()
// Section 9.9.2 says that "The MOVE method on a collection must act as if
// a "Depth: infinity" header was used on it. A client must not submit a
// Depth header on a MOVE on a collection with any value but "infinity"."
if hdr := r.Header.Get("Depth"); hdr != "" {
if parseDepth(hdr) != infiniteDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
return moveFiles(ctx, fs, target, dst, r.Header.Get("Overwrite") == "T")
}
// OK
func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request, fs *FileSystem) (retStatus int, retErr error) {
duration, err := parseTimeout(r.Header.Get("Timeout"))
if err != nil {
return http.StatusBadRequest, err
}
li, status, err := readLockInfo(r.Body)
if err != nil {
return status, err
}
//ctx := r.Context()
token, ld, now, created := "", LockDetails{}, time.Now(), false
if li == (lockInfo{}) {
// An empty lockInfo means to refresh the lock.
ih, ok := parseIfHeader(r.Header.Get("If"))
if !ok {
return http.StatusBadRequest, errInvalidIfHeader
}
if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 {
token = ih.lists[0].conditions[0].Token
}
if token == "" {
return http.StatusBadRequest, errInvalidLockToken
}
ld, err = h.LockSystem.Refresh(now, token, duration)
if err != nil {
if err == ErrNoSuchLock {
return http.StatusPreconditionFailed, err
}
return http.StatusInternalServerError, err
}
} else {
// Section 9.10.3 says that "If no Depth header is submitted on a LOCK request,
// then the request MUST act as if a "Depth:infinity" had been submitted."
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth != 0 && depth != infiniteDepth {
// Section 9.10.3 says that "Values other than 0 or infinity must not be
// used with the Depth header on a LOCK method".
return http.StatusBadRequest, errInvalidDepth
}
}
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ld = LockDetails{
Root: reqPath,
Duration: duration,
OwnerXML: li.Owner.InnerXML,
ZeroDepth: depth == 0,
}
token, err = h.LockSystem.Create(now, ld)
if err != nil {
if err == ErrLocked {
return StatusLocked, err
}
return http.StatusInternalServerError, err
}
defer func() {
if retErr != nil {
h.LockSystem.Unlock(now, token)
}
}()
// Create the resource if it didn't previously exist.
//if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil {
// f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
// if err != nil {
// // TODO: detect missing intermediate dirs and return http.StatusConflict?
// return http.StatusInternalServerError, err
// }
// f.Close()
// created = true
//}
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We add angle brackets.
w.Header().Set("Lock-Token", "<"+token+">")
}
w.Header().Set("Content-Type", "application/xml; charset=utf-8")
if created {
// This is "w.WriteHeader(http.StatusCreated)" and not "return
// http.StatusCreated, nil" because we write our own (XML) response to w
// and Handler.ServeHTTP would otherwise write "Created".
w.WriteHeader(http.StatusCreated)
}
writeLockInfo(w, token, ld)
return 0, nil
}
// OK
func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
// http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the
// Lock-Token value is a Coded-URL. We strip its angle brackets.
t := r.Header.Get("Lock-Token")
if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' {
return http.StatusBadRequest, errInvalidLockToken
}
t = t[1 : len(t)-1]
switch err = h.LockSystem.Unlock(time.Now(), t); err {
case nil:
return http.StatusNoContent, err
case ErrForbidden:
return http.StatusForbidden, err
case ErrLocked:
return StatusLocked, err
case ErrNoSuchLock:
return http.StatusConflict, err
default:
return http.StatusInternalServerError, err
}
}
// OK
func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
ctx := r.Context()
ok, fi := isPathExist(ctx, fs, reqPath)
if !ok {
return http.StatusNotFound, err
}
depth := infiniteDepth
if hdr := r.Header.Get("Depth"); hdr != "" {
depth = parseDepth(hdr)
if depth == invalidDepth {
return http.StatusBadRequest, errInvalidDepth
}
}
pf, status, err := readPropfind(r.Body)
if err != nil {
return status, err
}
mw := multistatusWriter{w: w}
walkFn := func(reqPath string, info FileInfo, err error) error {
if err != nil {
return err
}
var pstats []Propstat
if pf.Propname != nil {
pnames, err := propnames(ctx, fs, h.LockSystem, info)
if err != nil {
return err
}
pstat := Propstat{Status: http.StatusOK}
for _, xmlname := range pnames {
pstat.Props = append(pstat.Props, Property{XMLName: xmlname})
}
pstats = append(pstats, pstat)
} else if pf.Allprop != nil {
pstats, err = allprop(ctx, fs, h.LockSystem, info, pf.Prop)
} else {
pstats, err = props(ctx, fs, h.LockSystem, info, pf.Prop)
}
if err != nil {
return err
}
href := path.Join(h.Prefix, reqPath)
if href != "/" && info.IsDir() {
href += "/"
}
return mw.write(makePropstatResponse(href, pstats))
}
walkErr := walkFS(ctx, fs, depth, reqPath, fi, walkFn)
closeErr := mw.close()
if walkErr != nil {
return http.StatusInternalServerError, walkErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request, fs *FileSystem) (status int, err error) {
reqPath, status, err := h.stripPrefix(r.URL.Path)
if err != nil {
return status, err
}
release, status, err := h.confirmLocks(r, reqPath, "", fs)
if err != nil {
return status, err
}
defer release()
ctx := r.Context()
if exist, _ := isPathExist(ctx, fs, reqPath); !exist {
return http.StatusNotFound, nil
}
patches, status, err := readProppatch(r.Body)
if err != nil {
return status, err
}
pstats, err := patch(ctx, fs, h.LockSystem, reqPath, patches)
if err != nil {
return http.StatusInternalServerError, err
}
mw := multistatusWriter{w: w}
writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats))
closeErr := mw.close()
if writeErr != nil {
return http.StatusInternalServerError, writeErr
}
if closeErr != nil {
return http.StatusInternalServerError, closeErr
}
return 0, nil
}
func makePropstatResponse(href string, pstats []Propstat) *response {
resp := response{
Href: []string{(&url.URL{Path: href}).EscapedPath()},
Propstat: make([]propstat, 0, len(pstats)),
}
for _, p := range pstats {
var xmlErr *xmlError
if p.XMLError != "" {
xmlErr = &xmlError{InnerXML: []byte(p.XMLError)}
}
resp.Propstat = append(resp.Propstat, propstat{
Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)),
Prop: p.Props,
ResponseDescription: p.ResponseDescription,
Error: xmlErr,
})
}
return &resp
}
const (
infiniteDepth = -1
invalidDepth = -2
)
// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and
// infiniteDepth. Parsing any other string returns invalidDepth.
//
// Different WebDAV methods have further constraints on valid depths:
// - PROPFIND has no further restrictions, as per section 9.1.
// - COPY accepts only "0" or "infinity", as per section 9.8.3.
// - MOVE accepts only "infinity", as per section 9.9.2.
// - LOCK accepts only "0" or "infinity", as per section 9.10.3.
// These constraints are enforced by the handleXxx methods.
func parseDepth(s string) int {
switch s {
case "0":
return 0
case "1":
return 1
case "infinity":
return infiniteDepth
}
return invalidDepth
}
// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11
const (
StatusMulti = 207
StatusUnprocessableEntity = 422
StatusLocked = 423
StatusFailedDependency = 424
StatusInsufficientStorage = 507
)
// StatusText returns a text description for the WebDAV-specific status codes,
// falling back to http.StatusText for standard codes.
func StatusText(code int) string {
switch code {
case StatusMulti:
return "Multi-Status"
case StatusUnprocessableEntity:
return "Unprocessable Entity"
case StatusLocked:
return "Locked"
case StatusFailedDependency:
return "Failed Dependency"
case StatusInsufficientStorage:
return "Insufficient Storage"
}
return http.StatusText(code)
}
var (
errDestinationEqualsSource = errors.New("webdav: destination equals source")
errDirectoryNotEmpty = errors.New("webdav: directory not empty")
errInvalidDepth = errors.New("webdav: invalid depth")
errInvalidDestination = errors.New("webdav: invalid destination")
errInvalidIfHeader = errors.New("webdav: invalid If header")
errInvalidLockInfo = errors.New("webdav: invalid lock info")
errInvalidLockToken = errors.New("webdav: invalid lock token")
errInvalidPropfind = errors.New("webdav: invalid propfind")
errInvalidProppatch = errors.New("webdav: invalid proppatch")
errInvalidResponse = errors.New("webdav: invalid response")
errInvalidTimeout = errors.New("webdav: invalid timeout")
errNoFileSystem = errors.New("webdav: no file system")
errNoLockSystem = errors.New("webdav: no lock system")
errNotADirectory = errors.New("webdav: not a directory")
errPrefixMismatch = errors.New("webdav: prefix mismatch")
errRecursionTooDeep = errors.New("webdav: recursion too deep")
errUnsupportedLockInfo = errors.New("webdav: unsupported lock info")
errUnsupportedMethod = errors.New("webdav: unsupported method")
)

519
server/webdav/xml.go Normal file
View File

@ -0,0 +1,519 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package webdav
// The XML encoding is covered by Section 14.
// http://www.webdav.org/specs/rfc4918.html#xml.element.definitions
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"net/http"
"time"
// As of https://go-review.googlesource.com/#/c/12772/ which was submitted
// in July 2015, this package uses an internal fork of the standard
// library's encoding/xml package, due to changes in the way namespaces
// were encoded. Such changes were introduced in the Go 1.5 cycle, but were
// rolled back in response to https://github.com/golang/go/issues/11841
//
// However, this package's exported API, specifically the Property and
// DeadPropsHolder types, need to refer to the standard library's version
// of the xml.Name type, as code that imports this package cannot refer to
// the internal version.
//
// This file therefore imports both the internal and external versions, as
// ixml and xml, and converts between them.
//
// In the long term, this package should use the standard library's version
// only, and the internal fork deleted, once
// https://github.com/golang/go/issues/13400 is resolved.
ixml "github.com/Xhofe/alist/server/webdav/internal/xml"
)
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo
type lockInfo struct {
XMLName ixml.Name `xml:"lockinfo"`
Exclusive *struct{} `xml:"lockscope>exclusive"`
Shared *struct{} `xml:"lockscope>shared"`
Write *struct{} `xml:"locktype>write"`
Owner owner `xml:"owner"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner
type owner struct {
InnerXML string `xml:",innerxml"`
}
func readLockInfo(r io.Reader) (li lockInfo, status int, err error) {
c := &countingReader{r: r}
if err = ixml.NewDecoder(c).Decode(&li); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to refresh the lock.
// http://www.webdav.org/specs/rfc4918.html#refreshing-locks
return lockInfo{}, 0, nil
}
err = errInvalidLockInfo
}
return lockInfo{}, http.StatusBadRequest, err
}
// We only support exclusive (non-shared) write locks. In practice, these are
// the only types of locks that seem to matter.
if li.Exclusive == nil || li.Shared != nil || li.Write == nil {
return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo
}
return li, 0, nil
}
type countingReader struct {
n int
r io.Reader
}
func (c *countingReader) Read(p []byte) (int, error) {
n, err := c.r.Read(p)
c.n += n
return n, err
}
func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) {
depth := "infinity"
if ld.ZeroDepth {
depth = "0"
}
timeout := ld.Duration / time.Second
return fmt.Fprintf(w, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"+
"<D:prop xmlns:D=\"DAV:\"><D:lockdiscovery><D:activelock>\n"+
" <D:locktype><D:write/></D:locktype>\n"+
" <D:lockscope><D:exclusive/></D:lockscope>\n"+
" <D:depth>%s</D:depth>\n"+
" <D:owner>%s</D:owner>\n"+
" <D:timeout>Second-%d</D:timeout>\n"+
" <D:locktoken><D:href>%s</D:href></D:locktoken>\n"+
" <D:lockroot><D:href>%s</D:href></D:lockroot>\n"+
"</D:activelock></D:lockdiscovery></D:prop>",
depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root),
)
}
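A minimal within-package sketch of the lockdiscovery XML this produces, assuming the LockDetails type declared alongside the package's lock system keeps the Root/OwnerXML/Duration/ZeroDepth fields used above:
package webdav
import (
	"bytes"
	"strings"
	"testing"
	"time"
)
func TestWriteLockInfoSketch(t *testing.T) {
	var b bytes.Buffer
	ld := LockDetails{
		Root:      "/dav/example.txt",
		OwnerXML:  "<D:href>client</D:href>",
		Duration:  30 * time.Second,
		ZeroDepth: true,
	}
	if _, err := writeLockInfo(&b, "urn:uuid:sample-token", ld); err != nil {
		t.Fatal(err)
	}
	out := b.String()
	// The active lock advertises depth 0 and a Second-30 timeout.
	if !strings.Contains(out, "<D:depth>0</D:depth>") || !strings.Contains(out, "Second-30") {
		t.Fatalf("unexpected lockdiscovery XML:\n%s", out)
	}
}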
func escape(s string) string {
for i := 0; i < len(s); i++ {
switch s[i] {
case '"', '&', '\'', '<', '>':
b := bytes.NewBuffer(nil)
ixml.EscapeText(b, []byte(s))
return b.String()
}
}
return s
}
// Next returns the next token, if any, in the XML stream of d.
// RFC 4918 requires comments, processing instructions
// and directives to be ignored.
// http://www.webdav.org/specs/rfc4918.html#property_values
// http://www.webdav.org/specs/rfc4918.html#xml-extensibility
func next(d *ixml.Decoder) (ixml.Token, error) {
for {
t, err := d.Token()
if err != nil {
return t, err
}
switch t.(type) {
case ixml.Comment, ixml.Directive, ixml.ProcInst:
continue
default:
return t, nil
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind)
type propfindProps []xml.Name
// UnmarshalXML appends the property names enclosed within start to pn.
//
// It returns an error if start does not contain any properties or if
// properties contain values. Character data between properties is ignored.
func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
for {
t, err := next(d)
if err != nil {
return err
}
switch t.(type) {
case ixml.EndElement:
if len(*pn) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
name := t.(ixml.StartElement).Name
t, err = next(d)
if err != nil {
return err
}
if _, ok := t.(ixml.EndElement); !ok {
return fmt.Errorf("unexpected token %T", t)
}
*pn = append(*pn, xml.Name(name))
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind
type propfind struct {
XMLName ixml.Name `xml:"DAV: propfind"`
Allprop *struct{} `xml:"DAV: allprop"`
Propname *struct{} `xml:"DAV: propname"`
Prop propfindProps `xml:"DAV: prop"`
Include propfindProps `xml:"DAV: include"`
}
func readPropfind(r io.Reader) (pf propfind, status int, err error) {
c := countingReader{r: r}
if err = ixml.NewDecoder(&c).Decode(&pf); err != nil {
if err == io.EOF {
if c.n == 0 {
// An empty body means to propfind allprop.
// http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND
return propfind{Allprop: new(struct{})}, 0, nil
}
err = errInvalidPropfind
}
return propfind{}, http.StatusBadRequest, err
}
if pf.Allprop == nil && pf.Include != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Prop != nil && pf.Propname != nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil {
return propfind{}, http.StatusBadRequest, errInvalidPropfind
}
return pf, 0, nil
}
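A within-package sketch (e.g. in a _test.go file) of the request bodies readPropfind accepts:
package webdav
import (
	"strings"
	"testing"
)
func TestReadPropfindSketch(t *testing.T) {
	body := `<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:">
  <D:prop><D:displayname/><D:getcontentlength/></D:prop>
</D:propfind>`
	pf, status, err := readPropfind(strings.NewReader(body))
	if err != nil {
		t.Fatalf("status %d: %v", status, err)
	}
	// pf.Prop now holds the two requested names in the DAV: namespace.
	if len(pf.Prop) != 2 || pf.Prop[0].Local != "displayname" {
		t.Fatalf("unexpected props: %+v", pf.Prop)
	}
	// An empty request body would instead set pf.Allprop, as documented above.
}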
// Property represents a single DAV resource property as defined in RFC 4918.
// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties
type Property struct {
// XMLName is the fully qualified name that identifies this property.
XMLName xml.Name
// Lang is an optional xml:lang attribute.
Lang string `xml:"xml:lang,attr,omitempty"`
// InnerXML contains the XML representation of the property value.
// See http://www.webdav.org/specs/rfc4918.html#property_values
//
// Property values of complex type or mixed-content must have fully
// expanded XML namespaces or be self-contained with according
// XML namespace declarations. They must not rely on any XML
// namespace declarations within the scope of the XML document,
// even including the DAV: namespace.
InnerXML []byte `xml:",innerxml"`
}
// ixmlProperty is the same as the Property type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlProperty struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error
// See multistatusWriter for the "D:" namespace prefix.
type xmlError struct {
XMLName ixml.Name `xml:"D:error"`
InnerXML []byte `xml:",innerxml"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat
// See multistatusWriter for the "D:" namespace prefix.
type propstat struct {
Prop []Property `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// ixmlPropstat is the same as the propstat type except it holds an ixml.Name
// instead of an xml.Name.
type ixmlPropstat struct {
Prop []ixmlProperty `xml:"D:prop>_ignored_"`
Status string `xml:"D:status"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace
// before encoding. See multistatusWriter.
func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error {
// Convert from a propstat to an ixmlPropstat.
ixmlPs := ixmlPropstat{
Prop: make([]ixmlProperty, len(ps.Prop)),
Status: ps.Status,
Error: ps.Error,
ResponseDescription: ps.ResponseDescription,
}
for k, prop := range ps.Prop {
ixmlPs.Prop[k] = ixmlProperty{
XMLName: ixml.Name(prop.XMLName),
Lang: prop.Lang,
InnerXML: prop.InnerXML,
}
}
for k, prop := range ixmlPs.Prop {
if prop.XMLName.Space == "DAV:" {
prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local}
ixmlPs.Prop[k] = prop
}
}
// Distinct type to avoid infinite recursion of MarshalXML.
type newpropstat ixmlPropstat
return e.EncodeElement(newpropstat(ixmlPs), start)
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response
// See multistatusWriter for the "D:" namespace prefix.
type response struct {
XMLName ixml.Name `xml:"D:response"`
Href []string `xml:"D:href"`
Propstat []propstat `xml:"D:propstat"`
Status string `xml:"D:status,omitempty"`
Error *xmlError `xml:"D:error"`
ResponseDescription string `xml:"D:responsedescription,omitempty"`
}
// multistatusWriter marshals one or more responses into an XML
// multistatus response.
// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus
// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as
// "DAV:" on this element, is prepended on the nested response, as well as on all
// its nested elements. All property names in the DAV: namespace are prefixed as
// well. This is because some versions of Mini-Redirector (on windows 7) ignore
// elements with a default namespace (no prefixed namespace). A less intrusive fix
// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177
type multistatusWriter struct {
// ResponseDescription contains the optional responsedescription
// of the multistatus XML element. Only the latest content before
// close will be emitted. Empty response descriptions are not
// written.
responseDescription string
w http.ResponseWriter
enc *ixml.Encoder
}
// write validates and emits a DAV response as part of a multistatus response
// element.
//
// It sets the HTTP status code of its underlying http.ResponseWriter to 207
// (Multi-Status) and populates the Content-Type header. If r is the
// first, valid response to be written, Write prepends the XML representation
// of r with a multistatus tag. Callers must call close after the last response
// has been written.
func (w *multistatusWriter) write(r *response) error {
switch len(r.Href) {
case 0:
return errInvalidResponse
case 1:
if len(r.Propstat) > 0 != (r.Status == "") {
return errInvalidResponse
}
default:
if len(r.Propstat) > 0 || r.Status == "" {
return errInvalidResponse
}
}
err := w.writeHeader()
if err != nil {
return err
}
return w.enc.Encode(r)
}
// writeHeader writes an XML multistatus start element on w's underlying
// http.ResponseWriter and returns the result of the write operation.
// After the first write attempt, writeHeader becomes a no-op.
func (w *multistatusWriter) writeHeader() error {
if w.enc != nil {
return nil
}
w.w.Header().Add("Content-Type", "text/xml; charset=utf-8")
w.w.WriteHeader(StatusMulti)
_, err := fmt.Fprintf(w.w, `<?xml version="1.0" encoding="UTF-8"?>`)
if err != nil {
return err
}
w.enc = ixml.NewEncoder(w.w)
return w.enc.EncodeToken(ixml.StartElement{
Name: ixml.Name{
Space: "DAV:",
Local: "multistatus",
},
Attr: []ixml.Attr{{
Name: ixml.Name{Space: "xmlns", Local: "D"},
Value: "DAV:",
}},
})
}
// close completes the marshalling of the multistatus response. It returns
// an error if the multistatus response could not be completed. If both the
// return value and field enc of w are nil, then no multistatus response has
// been written.
func (w *multistatusWriter) close() error {
if w.enc == nil {
return nil
}
var end []ixml.Token
if w.responseDescription != "" {
name := ixml.Name{Space: "DAV:", Local: "responsedescription"}
end = append(end,
ixml.StartElement{Name: name},
ixml.CharData(w.responseDescription),
ixml.EndElement{Name: name},
)
}
end = append(end, ixml.EndElement{
Name: ixml.Name{Space: "DAV:", Local: "multistatus"},
})
for _, t := range end {
err := w.enc.EncodeToken(t)
if err != nil {
return err
}
}
return w.enc.Flush()
}
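A within-package sketch of how the handlers above drive this writer, assuming the Propstat type declared elsewhere in the package keeps the Props/Status fields used by makePropstatResponse:
package webdav
import (
	"encoding/xml"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)
func TestMultistatusWriterSketch(t *testing.T) {
	rec := httptest.NewRecorder()
	mw := multistatusWriter{w: rec}
	pstats := []Propstat{{
		Status: http.StatusOK,
		Props: []Property{{
			XMLName:  xml.Name{Space: "DAV:", Local: "displayname"},
			InnerXML: []byte("example.txt"),
		}},
	}}
	if err := mw.write(makePropstatResponse("/dav/example.txt", pstats)); err != nil {
		t.Fatal(err)
	}
	if err := mw.close(); err != nil {
		t.Fatal(err)
	}
	// Expect a 207 Multi-Status with one D:response inside the multistatus element.
	body := rec.Body.String()
	if rec.Code != StatusMulti || !strings.Contains(body, "multistatus") || !strings.Contains(body, "D:response") {
		t.Fatalf("got %d: %s", rec.Code, body)
	}
}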
var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"}
func xmlLang(s ixml.StartElement, d string) string {
for _, attr := range s.Attr {
if attr.Name == xmlLangName {
return attr.Value
}
}
return d
}
type xmlValue []byte
func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
// The XML value of a property can be arbitrary, mixed-content XML.
// To make sure that the unmarshalled value contains all required
// namespaces, we encode all the property value XML tokens into a
// buffer. This forces the encoder to redeclare any used namespaces.
var b bytes.Buffer
e := ixml.NewEncoder(&b)
for {
t, err := next(d)
if err != nil {
return err
}
if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name {
break
}
if err = e.EncodeToken(t); err != nil {
return err
}
}
err := e.Flush()
if err != nil {
return err
}
*v = b.Bytes()
return nil
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch)
type proppatchProps []Property
// UnmarshalXML appends the property names and values enclosed within start
// to ps.
//
// An xml:lang attribute that is defined either on the DAV:prop or property
// name XML element is propagated to the property's Lang field.
//
// UnmarshalXML returns an error if start does not contain any properties or if
// property values contain syntactically incorrect XML.
func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error {
lang := xmlLang(start, "")
for {
t, err := next(d)
if err != nil {
return err
}
switch elem := t.(type) {
case ixml.EndElement:
if len(*ps) == 0 {
return fmt.Errorf("%s must not be empty", start.Name.Local)
}
return nil
case ixml.StartElement:
p := Property{
XMLName: xml.Name(t.(ixml.StartElement).Name),
Lang: xmlLang(t.(ixml.StartElement), lang),
}
err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem)
if err != nil {
return err
}
*ps = append(*ps, p)
}
}
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove
type setRemove struct {
XMLName ixml.Name
Lang string `xml:"xml:lang,attr,omitempty"`
Prop proppatchProps `xml:"DAV: prop"`
}
// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate
type propertyupdate struct {
XMLName ixml.Name `xml:"DAV: propertyupdate"`
Lang string `xml:"xml:lang,attr,omitempty"`
SetRemove []setRemove `xml:",any"`
}
func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) {
var pu propertyupdate
if err = ixml.NewDecoder(r).Decode(&pu); err != nil {
return nil, http.StatusBadRequest, err
}
for _, op := range pu.SetRemove {
remove := false
switch op.XMLName {
case ixml.Name{Space: "DAV:", Local: "set"}:
// No-op.
case ixml.Name{Space: "DAV:", Local: "remove"}:
for _, p := range op.Prop {
if len(p.InnerXML) > 0 {
return nil, http.StatusBadRequest, errInvalidProppatch
}
}
remove = true
default:
return nil, http.StatusBadRequest, errInvalidProppatch
}
patches = append(patches, Proppatch{Remove: remove, Props: op.Prop})
}
return patches, 0, nil
}
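A within-package sketch of the PROPPATCH bodies readProppatch accepts; the Z: namespace is illustrative only:
package webdav
import (
	"strings"
	"testing"
)
func TestReadProppatchSketch(t *testing.T) {
	body := `<?xml version="1.0" encoding="utf-8"?>
<D:propertyupdate xmlns:D="DAV:" xmlns:Z="urn:example:">
  <D:set><D:prop><Z:author>alist</Z:author></D:prop></D:set>
  <D:remove><D:prop><Z:obsolete/></D:prop></D:remove>
</D:propertyupdate>`
	patches, status, err := readProppatch(strings.NewReader(body))
	if err != nil {
		t.Fatalf("status %d: %v", status, err)
	}
	// One set patch followed by one remove patch, in document order.
	if len(patches) != 2 || patches[0].Remove || !patches[1].Remove {
		t.Fatalf("unexpected patches: %+v", patches)
	}
}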

View File

@ -89,3 +89,9 @@ func ParsePath(path string) string {
return path
}
// RemoveLastSlash trims a trailing slash from path, keeping the root "/" intact.
func RemoveLastSlash(path string) string {
if len(path) > 1 {
return strings.TrimSuffix(path, "/")
}
return path
}