From 95bad895b4ad56e99d18d502ec627a2be3731647 Mon Sep 17 00:00:00 2001 From: feiniks <36756310+feiniks@users.noreply.github.com> Date: Tue, 3 Sep 2024 18:19:37 +0800 Subject: [PATCH] Add upload link and download link API (#683) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add upload link and download link API * Use go 1.22 for ci * Go add upload link API * Add norm UTF8 path * Go add upload link API * Add seahub_settings.py * Add projectDir * Add download link API * Set and check etag * Add comment and set no-cache --------- Co-authored-by: 杨赫然 --- ci/serverctl.py | 5 + common/seaf-utils.c | 12 +- fileserver/fileop.go | 375 ++++++++++++++++++++++ fileserver/fileserver.go | 19 +- fileserver/merge.go | 3 +- lib/Makefile.am | 2 +- lib/seahub.vala | 10 + server/access-file.c | 573 +++++++++++++++++++++++++++++++++- server/http-tx-mgr.c | 132 ++++++-- server/http-tx-mgr.h | 3 + server/upload-file.c | 161 ++++++++++ server/zip-download-mgr.c | 99 ++++++ server/zip-download-mgr.h | 13 + tests/conf/seahub_settings.py | 2 + 14 files changed, 1377 insertions(+), 32 deletions(-) create mode 100644 lib/seahub.vala create mode 100644 tests/conf/seahub_settings.py diff --git a/ci/serverctl.py b/ci/serverctl.py index 09cf05e9..88e87f56 100755 --- a/ci/serverctl.py +++ b/ci/serverctl.py @@ -10,6 +10,7 @@ from collections import namedtuple from contextlib import contextmanager from os.path import abspath, basename, dirname, exists, join +import shutil import requests from tenacity import TryAgain, retry, stop_after_attempt, wait_fixed @@ -24,6 +25,7 @@ class ServerCtl(object): def __init__(self, topdir, projectdir, datadir, fileserver, db='sqlite3', seaf_server_bin='seaf-server', ccnet_server_bin='ccnet-server'): self.db = db + self.topdir = topdir self.datadir = datadir self.central_conf_dir = join(datadir, 'conf') self.seafile_conf_dir = join(datadir, 'seafile-data') @@ -53,6 +55,9 @@ def setup(self): os.mkdir (self.central_conf_dir, 0o755) os.mkdir (self.seafile_conf_dir, 0o755) os.mkdir (self.ccnet_conf_dir, 0o755) + src = join(self.projectdir, 'tests/conf/seahub_settings.py') + dst = join(self.central_conf_dir, 'seahub_settings.py') + shutil.copyfile(src, dst) self.init_ccnet() self.init_seafile() diff --git a/common/seaf-utils.c b/common/seaf-utils.c index ed1c99d3..6d6e79b7 100644 --- a/common/seaf-utils.c +++ b/common/seaf-utils.c @@ -418,25 +418,31 @@ load_seahub_private_key (SeafileSession *session, const char *conf_dir) char line[256]; char *site_root = NULL; while (fgets(line, sizeof(line), file)) { - GMatchInfo *match_info; + GMatchInfo *match_info = NULL; if (g_regex_match (secret_key_regex, line, 0, &match_info)) { char *sk = g_match_info_fetch (match_info, 1); session->seahub_pk = sk; } + g_match_info_free (match_info); + match_info = NULL; if (g_regex_match (site_root_regex, line, 0, &match_info)) { site_root = g_match_info_fetch (match_info, 1); } + g_match_info_free (match_info); } if (session->seahub_pk) { if (site_root) { - session->seahub_url = g_strdup_printf("http://127.0.0.1:8000%sapi/v2.1/internal/user-list/", site_root); + session->seahub_url = g_strdup_printf("http://127.0.0.1:8000%sapi/v2.1/internal", site_root); } else { - session->seahub_url = g_strdup("http://127.0.0.1:8000/api/v2.1/internal/user-list/"); + session->seahub_url = g_strdup("http://127.0.0.1:8000/api/v2.1/internal"); } session->seahub_conn_pool = connection_pool_new (); + } else { + seaf_warning ("No seahub private key is configured.\n"); } + g_free (site_root); 
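    /* seahub_url now holds the internal API base (".../api/v2.1/internal");
     * callers append the concrete endpoint themselves, e.g. "/user-list/" in
     * http_tx_manager_get_nickname() and "/share-link-info/" in
     * http_tx_manager_query_share_link_info(). */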
out: if (secret_key_regex) diff --git a/fileserver/fileop.go b/fileserver/fileop.go index 777a8e2b..b188cbfa 100644 --- a/fileserver/fileop.go +++ b/fileserver/fileop.go @@ -30,12 +30,14 @@ import ( "sort" "syscall" + jwt "github.com/golang-jwt/jwt/v5" "github.com/haiwen/seafile-server/fileserver/blockmgr" "github.com/haiwen/seafile-server/fileserver/commitmgr" "github.com/haiwen/seafile-server/fileserver/diff" "github.com/haiwen/seafile-server/fileserver/fsmgr" "github.com/haiwen/seafile-server/fileserver/option" "github.com/haiwen/seafile-server/fileserver/repomgr" + "github.com/haiwen/seafile-server/fileserver/utils" log "github.com/sirupsen/logrus" "golang.org/x/text/unicode/norm" ) @@ -3414,6 +3416,379 @@ func indexRawBlocks(repoID string, blockIDs []string, fileHeaders []*multipart.F return nil } +/* +func uploadLinkCB(rsp http.ResponseWriter, r *http.Request) *appError { + if seahubPK == "" { + err := fmt.Errorf("no seahub private key is configured") + return &appError{err, "", http.StatusNotFound} + } + if r.Method == "OPTIONS" { + setAccessControl(rsp) + rsp.WriteHeader(http.StatusOK) + return nil + } + + fsm, err := parseUploadLinkHeaders(r) + if err != nil { + return err + } + + if err := doUpload(rsp, r, fsm, false); err != nil { + formatJSONError(rsp, err) + return err + } + + return nil +} + +func parseUploadLinkHeaders(r *http.Request) (*recvData, *appError) { + tokenLen := 36 + parts := strings.Split(r.URL.Path[1:], "/") + if len(parts) < 2 { + msg := "Invalid URL" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + if len(parts[1]) < tokenLen { + msg := "Invalid URL" + return nil, &appError{nil, msg, http.StatusBadRequest} + } + token := parts[1][:tokenLen] + + info, appErr := queryShareLinkInfo(token, "upload") + if appErr != nil { + return nil, appErr + } + + repoID := info.RepoID + parentDir := normalizeUTF8Path(info.ParentDir) + + status, err := repomgr.GetRepoStatus(repoID) + if err != nil { + return nil, &appError{err, "", http.StatusInternalServerError} + } + if status != repomgr.RepoStatusNormal && status != -1 { + msg := "Repo status not writable." 
+ return nil, &appError{nil, msg, http.StatusBadRequest} + } + + user, _ := repomgr.GetRepoOwner(repoID) + + fsm := new(recvData) + + fsm.parentDir = parentDir + fsm.tokenType = "upload-link" + fsm.repoID = repoID + fsm.user = user + fsm.rstart = -1 + fsm.rend = -1 + fsm.fsize = -1 + + ranges := r.Header.Get("Content-Range") + if ranges != "" { + parseContentRange(ranges, fsm) + } + + return fsm, nil +} +*/ + +type ShareLinkInfo struct { + RepoID string `json:"repo_id"` + FilePath string `json:"file_path"` + ParentDir string `json:"parent_dir"` + ShareType string `json:"share_type"` +} + +func queryShareLinkInfo(token, opType string) (*ShareLinkInfo, *appError) { + claims := SeahubClaims{ + time.Now().Add(time.Second * 300).Unix(), + true, + jwt.RegisteredClaims{}, + } + + jwtToken := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), &claims) + tokenString, err := jwtToken.SignedString([]byte(seahubPK)) + if err != nil { + err := fmt.Errorf("failed to sign jwt token: %v", err) + return nil, &appError{err, "", http.StatusInternalServerError} + } + url := fmt.Sprintf("%s?token=%s&type=%s", seahubURL+"/share-link-info/", token, opType) + header := map[string][]string{ + "Authorization": {"Token " + tokenString}, + } + status, body, err := utils.HttpCommon("GET", url, header, nil) + if err != nil { + err := fmt.Errorf("failed to get share link info: %v", err) + return nil, &appError{err, "", http.StatusInternalServerError} + } + if status != http.StatusOK { + msg := "Link token not found" + return nil, &appError{nil, msg, http.StatusForbidden} + } + + info := new(ShareLinkInfo) + err = json.Unmarshal(body, &info) + if err != nil { + err := fmt.Errorf("failed to decode share link info: %v", err) + return nil, &appError{err, "", http.StatusInternalServerError} + } + + return info, nil +} + +func accessLinkCB(rsp http.ResponseWriter, r *http.Request) *appError { + if seahubPK == "" { + err := fmt.Errorf("no seahub private key is configured") + return &appError{err, "", http.StatusNotFound} + } + + parts := strings.Split(r.URL.Path[1:], "/") + if len(parts) < 2 { + msg := "Invalid URL" + return &appError{nil, msg, http.StatusBadRequest} + } + token := parts[1] + info, appErr := queryShareLinkInfo(token, "file") + if appErr != nil { + return appErr + } + + if info.FilePath == "" { + msg := "Internal server error\n" + err := fmt.Errorf("failed to get file_path by token %s", token) + return &appError{err, msg, http.StatusInternalServerError} + } + if info.ShareType != "f" { + msg := "Link type mismatch" + return &appError{nil, msg, http.StatusBadRequest} + } + + repoID := info.RepoID + filePath := normalizeUTF8Path(info.FilePath) + fileName := filepath.Base(filePath) + op := "download-link" + + ranges := r.Header["Range"] + byteRanges := strings.Join(ranges, "") + + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Bad repo id\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + user, _ := repomgr.GetRepoOwner(repoID) + + fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, filePath) + if err != nil { + msg := "Invalid file_path\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + // Check for file changes by comparing the ETag in the If-None-Match header with the file ID. Set no-cache to allow clients to validate file changes before using the cache. 
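	// Illustrative exchange (a sketch, not part of the handler): a client that has
	// cached the file revalidates with a conditional request such as
	//     GET /f/<link-token> HTTP/1.1
	//     If-None-Match: <file-id>
	// and receives "304 Not Modified" while the file is unchanged; otherwise it
	// receives "200 OK" with "ETag: <file-id>" and "Cache-Control: no-cache".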
+ etag := r.Header.Get("If-None-Match") + if etag == fileID { + return &appError{nil, "", http.StatusNotModified} + } + + rsp.Header().Set("ETag", fileID) + rsp.Header().Set("Cache-Control", "no-cache") + + var cryptKey *seafileCrypt + if repo.IsEncrypted { + key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion) + if err != nil { + return err + } + cryptKey = key + } + + exists, _ := fsmgr.Exists(repo.StoreID, fileID) + if !exists { + msg := "Invalid file id" + return &appError{nil, msg, http.StatusBadRequest} + } + + if !repo.IsEncrypted && len(byteRanges) != 0 { + if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil { + return err + } + } else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil { + return err + } + + return nil +} + +/* +func accessDirLinkCB(rsp http.ResponseWriter, r *http.Request) *appError { + if seahubPK == "" { + err := fmt.Errorf("no seahub private key is configured") + return &appError{err, "", http.StatusNotFound} + } + + parts := strings.Split(r.URL.Path[1:], "/") + if len(parts) < 2 { + msg := "Invalid URL" + return &appError{nil, msg, http.StatusBadRequest} + } + token := parts[1] + info, appErr := queryShareLinkInfo(token, "dir") + if appErr != nil { + return appErr + } + + repoID := info.RepoID + parentDir := normalizeUTF8Path(info.ParentDir) + op := "download-link" + + repo := repomgr.Get(repoID) + if repo == nil { + msg := "Bad repo id\n" + return &appError{nil, msg, http.StatusBadRequest} + } + user, _ := repomgr.GetRepoOwner(repoID) + + filePath := r.URL.Query().Get("p") + if filePath == "" { + err := r.ParseForm() + if err != nil { + msg := "Invalid form\n" + return &appError{nil, msg, http.StatusBadRequest} + } + parentDir := r.FormValue("parent_dir") + if parentDir == "" { + msg := "Invalid parent_dir\n" + return &appError{nil, msg, http.StatusBadRequest} + } + parentDir = normalizeUTF8Path(parentDir) + parentDir = getCanonPath(parentDir) + dirents := r.FormValue("dirents") + if dirents == "" { + msg := "Invalid dirents\n" + return &appError{nil, msg, http.StatusBadRequest} + } + // opStr:=r.FormVale("op") + list, err := jsonToDirentList(repo, parentDir, dirents) + if err != nil { + log.Warnf("failed to parse dirent list: %v", err) + msg := "Invalid dirents\n" + return &appError{nil, msg, http.StatusBadRequest} + } + if len(list) == 0 { + msg := "Invalid dirents\n" + return &appError{nil, msg, http.StatusBadRequest} + } + + obj := make(map[string]interface{}) + if len(list) == 1 { + dent := list[0] + op = "download-dir-link" + obj["dir_name"] = dent.Name + obj["obj_id"] = dent.ID + } else { + op = "download-multi-link" + obj["parent_dir"] = parentDir + var fileList []string + for _, dent := range list { + fileList = append(fileList, dent.Name) + } + obj["file_list"] = fileList + } + data, err := json.Marshal(obj) + if err != nil { + err := fmt.Errorf("failed to encode zip obj: %v", err) + return &appError{err, "", http.StatusInternalServerError} + } + if err := downloadZipFile(rsp, r, string(data), repoID, user, op); err != nil { + return err + } + return nil + } + + // file path is not empty string + if _, ok := r.Header["If-Modified-Since"]; ok { + return &appError{nil, "", http.StatusNotModified} + } + + filePath = normalizeUTF8Path(filePath) + fullPath := filepath.Join(parentDir, filePath) + fileName := filepath.Base(filePath) + + fileID, _, err := fsmgr.GetObjIDByPath(repo.StoreID, repo.RootID, fullPath) + if err != nil { + msg := "Invalid file_path\n" + return &appError{nil, 
msg, http.StatusBadRequest} + } + rsp.Header().Set("ETag", fileID) + + now := time.Now() + rsp.Header().Set("Last-Modified", now.Format("Mon, 2 Jan 2006 15:04:05 GMT")) + rsp.Header().Set("Cache-Control", "max-age=3600") + + ranges := r.Header["Range"] + byteRanges := strings.Join(ranges, "") + + var cryptKey *seafileCrypt + if repo.IsEncrypted { + key, err := parseCryptKey(rsp, repoID, user, repo.EncVersion) + if err != nil { + return err + } + cryptKey = key + } + + exists, _ := fsmgr.Exists(repo.StoreID, fileID) + if !exists { + msg := "Invalid file id" + return &appError{nil, msg, http.StatusBadRequest} + } + + if !repo.IsEncrypted && len(byteRanges) != 0 { + if err := doFileRange(rsp, r, repo, fileID, fileName, op, byteRanges, user); err != nil { + return err + } + } else if err := doFile(rsp, r, repo, fileID, fileName, op, cryptKey, user); err != nil { + return err + } + + return nil +} + +func jsonToDirentList(repo *repomgr.Repo, parentDir, dirents string) ([]*fsmgr.SeafDirent, error) { + var list []string + err := json.Unmarshal([]byte(dirents), &list) + if err != nil { + return nil, err + } + + dir, err := fsmgr.GetSeafdirByPath(repo.StoreID, repo.RootID, parentDir) + if err != nil { + return nil, err + } + + direntHash := make(map[string]*fsmgr.SeafDirent) + for _, dent := range dir.Entries { + direntHash[dent.Name] = dent + } + + var direntList []*fsmgr.SeafDirent + for _, path := range list { + normPath := normalizeUTF8Path(path) + if normPath == "" || normPath == "/" { + return nil, fmt.Errorf("Invalid download file name: %s\n", normPath) + } + dent, ok := direntHash[normPath] + if !ok { + return nil, fmt.Errorf("failed to get dient for %s in dir %s in repo %s", normPath, parentDir, repo.StoreID) + } + direntList = append(direntList, dent) + } + + return direntList, nil +} +*/ + func removeFileopExpireCache() { deleteBlockMaps := func(key interface{}, value interface{}) bool { if blkMap, ok := value.(*blockMap); ok { diff --git a/fileserver/fileserver.go b/fileserver/fileserver.go index f5c7c239..409b0fc6 100644 --- a/fileserver/fileserver.go +++ b/fileserver/fileserver.go @@ -278,12 +278,14 @@ func loadSeahubPK() { scanner := bufio.NewScanner(file) - pkRe, err := regexp.Compile(`SECRET_KEY\\s*=\\s*'([^']*)'`) + pkExp := "SECRET_KEY\\s*=\\s*'([^']*)'" + pkRe, err := regexp.Compile(pkExp) if err != nil { log.Warnf("Failed to compile regex: %v", err) return } - siteRootRe, err := regexp.Compile(`SITE_ROOT\\s*=\\s*'([^']*)'`) + siteRootExpr := "SITE_ROOT\\s*=\\s*'([^']*)'" + siteRootRe, err := regexp.Compile(siteRootExpr) if err != nil { log.Warnf("Failed to compile regex: %v", err) return @@ -302,9 +304,12 @@ func loadSeahubPK() { } } if siteRoot != "" { - seahubURL = fmt.Sprintf("http://127.0.0.1:8000%sapi/v2.1/internal/user-list/", siteRoot) + seahubURL = fmt.Sprintf("http://127.0.0.1:8000%sapi/v2.1/internal", siteRoot) } else { - seahubURL = ("http://127.0.0.1:8000/api/v2.1/internal/user-list/") + seahubURL = ("http://127.0.0.1:8000/api/v2.1/internal") + } + if seahubPK == "" { + log.Warnf("No seahub private key is configured") } } @@ -507,6 +512,12 @@ func newHTTPRouter() *mux.Router { r.Handle("/update-aj/{.*}", appHandler(updateAjaxCB)) r.Handle("/upload-blks-api/{.*}", appHandler(uploadBlksAPICB)) r.Handle("/upload-raw-blks-api/{.*}", appHandler(uploadRawBlksAPICB)) + + // links api + //r.Handle("/u/{.*}", appHandler(uploadLinkCB)) + r.Handle("/f/{.*}", appHandler(accessLinkCB)) + //r.Handle("/d/{.*}", appHandler(accessDirLinkCB)) + // file syncing api 
r.Handle("/repo/{repoid:[\\da-z]{8}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{4}-[\\da-z]{12}}/permission-check{slash:\\/?}", appHandler(permissionCheckCB)) diff --git a/fileserver/merge.go b/fileserver/merge.go index c451200a..d57a74af 100644 --- a/fileserver/merge.go +++ b/fileserver/merge.go @@ -429,7 +429,8 @@ func postGetNickName(modifier string) string { return "" } - status, body, err := utils.HttpCommon("POST", seahubURL, header, bytes.NewReader(data)) + url := seahubURL + "/user-list/" + status, body, err := utils.HttpCommon("POST", url, header, bytes.NewReader(data)) if err != nil { return "" } diff --git a/lib/Makefile.am b/lib/Makefile.am index a2e1a559..d0b34aea 100644 --- a/lib/Makefile.am +++ b/lib/Makefile.am @@ -13,7 +13,7 @@ BUILT_SOURCES = gensource ## source file rules seafile_object_define = repo.vala commit.vala dirent.vala dir.vala \ - task.vala branch.vala crypt.vala webaccess.vala copy-task.vala ccnetobj.vala search-result.vala + task.vala branch.vala crypt.vala webaccess.vala seahub.vala copy-task.vala ccnetobj.vala search-result.vala seafile_object_gen = $(seafile_object_define:.vala=.c) diff --git a/lib/seahub.vala b/lib/seahub.vala new file mode 100644 index 00000000..230a0691 --- /dev/null +++ b/lib/seahub.vala @@ -0,0 +1,10 @@ +namespace Seafile { + +public class ShareLinkInfo : Object { + public string repo_id { set; get; } + public string file_path { set; get; } + public string parent_dir { set; get; } + public string share_type { set; get; } +} + +} diff --git a/server/access-file.c b/server/access-file.c index 8641ffe1..b36a2c4a 100644 --- a/server/access-file.c +++ b/server/access-file.c @@ -30,7 +30,6 @@ #define FILE_TYPE_MAP_DEFAULT_LEN 1 #define BUFFER_SIZE 1024 * 64 -#define MULTI_DOWNLOAD_FILE_PREFIX "documents-export-" struct file_type_map { char *suffix; @@ -1120,6 +1119,15 @@ set_etag (evhtp_request_t *req, evhtp_kvs_add_kv (req->headers_out, kv); } +static void +set_no_cache (evhtp_request_t *req) +{ + evhtp_kv_t *kv; + + kv = evhtp_kv_new ("Cache-Control", "no-cache", 1, 1); + evhtp_kvs_add_kv (req->headers_out, kv); +} + static gboolean can_use_cached_content (evhtp_request_t *req) { @@ -1189,6 +1197,7 @@ access_zip_cb (evhtp_request_t *req, void *arg) g_object_get (info, "obj_id", &info_str, NULL); if (!info_str) { seaf_warning ("Invalid obj_id for token: %s.\n", token); + error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } @@ -1196,6 +1205,7 @@ access_zip_cb (evhtp_request_t *req, void *arg) info_obj = json_loadb (info_str, strlen(info_str), 0, &jerror); if (!info_obj) { seaf_warning ("Failed to parse obj_id field: %s for token: %s.\n", jerror.text, token); + error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } @@ -1211,6 +1221,7 @@ access_zip_cb (evhtp_request_t *req, void *arg) filename = g_strconcat (MULTI_DOWNLOAD_FILE_PREFIX, date_str, NULL); } else { seaf_warning ("No dir_name or file_list in obj_id for token: %s.\n", token); + error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } @@ -1220,6 +1231,7 @@ access_zip_cb (evhtp_request_t *req, void *arg) g_object_get (info, "repo_id", &repo_id, NULL); seaf_warning ("Failed to get zip file path for %s in repo %.8s, token:[%s].\n", filename, repo_id, token); + error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; goto out; } @@ -1236,6 +1248,7 @@ access_zip_cb (evhtp_request_t *req, void *arg) int ret = start_download_zip_file (req, token, filename, zip_file_path, repo_id, user, token_type); if (ret < 0) { 
seaf_warning ("Failed to start download zip file: %s for token: %s", filename, token); + error = "Internal server error\n"; error_code = EVHTP_RES_SERVERR; } @@ -1262,6 +1275,88 @@ access_zip_cb (evhtp_request_t *req, void *arg) } } +/* +static void +access_zip_link_cb (evhtp_request_t *req, void *arg) +{ + char *token; + char *user = NULL; + char *zip_file_path; + char *zip_file_name; + const char *repo_id = NULL; + const char *task_id = NULL; + const char *error = NULL; + int error_code; + SeafileShareLinkInfo *info = NULL; + + char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); + if (g_strv_length (parts) != 2) { + error = "Invalid URL\n"; + error_code = EVHTP_RES_BADREQ; + goto out; + } + + token = parts[1]; + + task_id = evhtp_kv_find (req->uri->query, "task_id"); + if (!task_id) { + error = "No task_id\n"; + error_code = EVHTP_RES_BADREQ; + goto out; + } + + info = http_tx_manager_query_share_link_info (token, "dir"); + if (!info) { + error = "Access token not found\n"; + error_code = EVHTP_RES_FORBIDDEN; + goto out; + } + + repo_id = seafile_share_link_info_get_repo_id (info); + user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); + + zip_file_path = zip_download_mgr_get_zip_file_path (seaf->zip_download_mgr, task_id); + if (!zip_file_path) { + seaf_warning ("Failed to get zip file path in repo %.8s, task id:[%s].\n", repo_id, task_id); + error = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto out; + } + zip_file_name = zip_download_mgr_get_zip_file_name (seaf->zip_download_mgr, task_id); + if (!zip_file_name) { + seaf_warning ("Failed to get zip file name in repo %.8s, task id:[%s].\n", repo_id, task_id); + error = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto out; + } + + if (can_use_cached_content (req)) { + // Clean zip progress related resource + zip_download_mgr_del_zip_progress (seaf->zip_download_mgr, task_id); + goto out; + } + + int ret = start_download_zip_file (req, task_id, zip_file_name, zip_file_path, repo_id, user, "download-multi-link"); + if (ret < 0) { + seaf_warning ("Failed to start download zip file: %s for task: %s", zip_file_name, task_id); + error = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + } + +out: + g_strfreev (parts); + if (info) + g_object_unref (info); + if (user) + g_free (user); + + if (error) { + evbuffer_add_printf(req->buffer_out, "%s\n", error); + evhtp_send_reply(req, error_code); + } +} +*/ + static void access_cb(evhtp_request_t *req, void *arg) { @@ -1550,12 +1645,488 @@ access_blks_cb(evhtp_request_t *req, void *arg) evhtp_send_reply(req, error_code); } +static void +access_link_cb(evhtp_request_t *req, void *arg) +{ + SeafRepo *repo = NULL; + char *error_str = NULL; + char *token = NULL; + char *rpath = NULL; + char *filename = NULL; + char *file_id = NULL; + char *user = NULL; + char *norm_file_path = NULL; + const char *repo_id = NULL; + const char *file_path = NULL; + const char *share_type = NULL; + const char *byte_ranges = NULL; + int error_code = EVHTP_RES_BADREQ; + + SeafileCryptKey *key = NULL; + SeafileShareLinkInfo *info = NULL; + GError *error = NULL; + + if (!seaf->seahub_pk) { + seaf_warning ("No seahub private key is configured.\n"); + evhtp_send_reply(req, EVHTP_RES_NOTFOUND); + return; + } + + /* Skip the first '/'. 
*/ + char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); + if (!parts || g_strv_length (parts) < 2 || + strcmp (parts[0], "f") != 0) { + error_str = "Invalid URL\n"; + goto out; + } + + token = parts[1]; + + info = http_tx_manager_query_share_link_info (token, "file"); + if (!info) { + error_str = "Link token not found\n"; + error_code = EVHTP_RES_FORBIDDEN; + goto out; + } + + repo_id = seafile_share_link_info_get_repo_id (info); + file_path = seafile_share_link_info_get_file_path (info); + if (!file_path) { + error_str = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + seaf_warning ("Failed to get file_path by token %s\n", token); + goto out; + } + share_type = seafile_share_link_info_get_share_type (info); + if (g_strcmp0 (share_type, "f") != 0) { + error_str = "Link type mismatch"; + goto out; + } + + norm_file_path = normalize_utf8_path(file_path); + rpath = format_dir_path (norm_file_path); + filename = g_path_get_basename (rpath); + + repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); + if (!repo) { + error_str = "Bad repo id\n"; + goto out; + } + user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); + + file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, rpath, &error); + if (!file_id) { + error_str = "Invalid file_path\n"; + if (error) + g_clear_error(&error); + goto out; + } + + const char *etag = evhtp_kv_find (req->headers_in, "If-None-Match"); + if (g_strcmp0 (etag, file_id) == 0) { + evhtp_send_reply (req, EVHTP_RES_NOTMOD); + error_code = EVHTP_RES_OK; + goto out; + } + set_etag (req, file_id); + set_no_cache (req); + + byte_ranges = evhtp_kv_find (req->headers_in, "Range"); + + if (repo->encrypted) { + key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr, + repo_id, user); + if (!key) { + error_str = "Repo is encrypted. 
Please provide password to view it."; + goto out; + } + } + + if (!seaf_fs_manager_object_exists (seaf->fs_mgr, + repo->store_id, repo->version, file_id)) { + error_str = "Invalid file id\n"; + goto out; + } + + if (!repo->encrypted && byte_ranges) { + if (do_file_range (req, repo, file_id, filename, "download-link", byte_ranges, user) < 0) { + error_str = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto out; + } + } else if (do_file(req, repo, file_id, filename, "download-link", key, user) < 0) { + error_str = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto out; + } + + error_code = EVHTP_RES_OK; + +out: + g_strfreev (parts); + g_free (user); + g_free (norm_file_path); + g_free (rpath); + g_free (filename); + g_free (file_id); + if (repo != NULL) + seaf_repo_unref (repo); + if (key != NULL) + g_object_unref (key); + if (info != NULL) + g_object_unref (info); + + if (error_code != EVHTP_RES_OK) { + evbuffer_add_printf(req->buffer_out, "%s\n", error_str); + evhtp_send_reply(req, error_code); + } +} + +/* +static GList * +json_to_dirent_list (SeafRepo *repo, const char *parent_dir, const char *dirents) +{ + json_t *array; + json_error_t jerror; + int i; + int len; + const char *tmp_file_name; + char *file_name = NULL; + GList *dirent_list = NULL, *p = NULL; + SeafDir *dir; + SeafDirent *dirent; + GError *error = NULL; + + array = json_loadb (dirents, strlen(dirents), 0, &jerror); + if (!array) { + seaf_warning ("Failed to parse download data: %s.\n", jerror.text); + return NULL; + } + len = json_array_size (array); + if (len == 0) { + seaf_warning ("Invalid download data, miss download file name.\n"); + json_decref (array); + return NULL; + } + + dir = seaf_fs_manager_get_seafdir_by_path (seaf->fs_mgr, repo->store_id, + repo->version, repo->root_id, parent_dir, &error); + if (!dir) { + if (error) { + seaf_warning ("Failed to get dir %s repo %.8s: %s.\n", + parent_dir, repo->store_id, error->message); + g_clear_error(&error); + } else { + seaf_warning ("dir %s doesn't exist in repo %.8s.\n", + parent_dir, repo->store_id); + } + json_decref (array); + return NULL; + } + + GHashTable *dirent_hash = g_hash_table_new(g_str_hash, g_str_equal); + for (p = dir->entries; p; p = p->next) { + SeafDirent *d = p->data; + g_hash_table_insert(dirent_hash, d->name, d); + } + + for (i = 0; i < len; i++) { + tmp_file_name = json_string_value (json_array_get (array, i)); + file_name = normalize_utf8_path(tmp_file_name); + if (strcmp (file_name, "") == 0 || strchr (file_name, '/') != NULL) { + seaf_warning ("Invalid download file name: %s.\n", file_name); + if (dirent_list) { + g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); + dirent_list = NULL; + } + g_free (file_name); + break; + } + + dirent = g_hash_table_lookup (dirent_hash, file_name); + if (!dirent) { + seaf_warning ("Failed to get dirent for %s in dir %s in repo %.8s.\n", + file_name, parent_dir, repo->store_id); + if (dirent_list) { + g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); + dirent_list = NULL; + } + g_free (file_name); + break; + } + + dirent_list = g_list_prepend (dirent_list, seaf_dirent_dup(dirent)); + g_free (file_name); + } + + g_hash_table_unref(dirent_hash); + json_decref (array); + seaf_dir_free (dir); + return dirent_list; +} + +// application/x-www-form-urlencoded +// parent_dir=/sub&dirents=[a.md, suba] +static char * +get_form_field (const char *body_str, const char *field_name) +{ + char * value = NULL; + char * result = NULL; + char * start = 
strstr(body_str, field_name); + // find pos of start + if (start) { + // skip field and '=' + start += strlen(field_name) + 1; + + // find pos of '&' + char * end = strchr(start, '&'); + if (end == NULL) { + end = start + strlen(start); + } + + value = g_strndup(start, end - start); + } + if (!value) { + return NULL; + } + result = g_uri_unescape_string (value, NULL); + g_free (value); + return result; +} +*/ + +/* +static void +access_dir_link_cb(evhtp_request_t *req, void *arg) +{ + SeafRepo *repo = NULL; + char *error_str = NULL; + char *token = NULL; + char *r_parent_dir = NULL; + char *fullpath = NULL; + char *file_id = NULL; + char *filename = NULL; + char *norm_parent_dir = NULL; + char *norm_path = NULL; + char *user = NULL; + char *tmp_parent_dir = NULL; + char *dirents = NULL; + const char *repo_id = NULL; + const char *parent_dir = NULL; + const char *path= NULL; + const char *byte_ranges = NULL; + int error_code = EVHTP_RES_BADREQ; + + SeafileCryptKey *key = NULL; + SeafileShareLinkInfo *info = NULL; + GError *error = NULL; + + if (!seaf->seahub_pk) { + seaf_warning ("No seahub private key is configured.\n"); + evhtp_send_reply(req, EVHTP_RES_NOTFOUND); + return; + } + + // Skip the first '/'. + char **parts = g_strsplit (req->uri->path->full + 1, "/", 0); + if (!parts || g_strv_length (parts) < 2 || + strcmp (parts[0], "d") != 0) { + error_str = "Invalid URL\n"; + goto on_error; + } + + token = parts[1]; + + if (g_strv_length (parts) >= 4) { + if (strcmp (parts[2], "zip-task") != 0) { + error_str = "Invalid URL\n"; + goto on_error; + } + char *task_id = parts[3]; + char *progress = zip_download_mgr_query_zip_progress (seaf->zip_download_mgr, task_id, NULL); + if (!progress) { + error_str = "No zip progress\n"; + goto on_error; + } + evbuffer_add_printf (req->buffer_out, "%s", progress); + evhtp_headers_add_header ( + req->headers_out, + evhtp_header_new("Content-Type", "application/json; charset=utf-8", 1, 1)); + evhtp_send_reply (req, EVHTP_RES_OK); + g_free (progress); + goto success; + } + + info = http_tx_manager_query_share_link_info (token, "dir"); + if (!info) { + error_str = "Link token not found\n"; + error_code = EVHTP_RES_FORBIDDEN; + goto on_error; + } + + repo_id = seafile_share_link_info_get_repo_id (info); + + repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); + if (!repo) { + error_str = "Bad repo id\n"; + goto on_error; + } + user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); + + path = evhtp_kv_find (req->uri->query, "p"); + if (!path) { + int len = evbuffer_get_length (req->buffer_in); + if (len <= 0) { + error_str = "Invalid request body\n"; + goto on_error; + } + char *body = g_new0 (char, len); + evbuffer_remove(req->buffer_in, body, len); + tmp_parent_dir = get_form_field (body, "parent_dir"); + if (!tmp_parent_dir) { + g_free (body); + error_str = "Invalid parent_dir\n"; + goto on_error; + } + + dirents = get_form_field (body, "dirents"); + if (!dirents) { + g_free (body); + g_free (tmp_parent_dir); + error_str = "Invalid dirents\n"; + goto on_error; + } + g_free (body); + + norm_parent_dir = normalize_utf8_path (tmp_parent_dir); + r_parent_dir = format_dir_path (norm_parent_dir); + GList *dirent_list = json_to_dirent_list (repo, r_parent_dir, dirents); + if (!dirent_list) { + error_str = "Invalid dirents\n"; + goto on_error; + } + + char *task_id = NULL; + if (g_list_length(dirent_list) == 1) { + task_id = zip_download_mgr_start_zip_task_v2 (seaf->zip_download_mgr, repo_id, "download-dir-link", user, dirent_list); + } else { + 
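            /* More than one entry: they are packed into a single zip whose
             * file name is MULTI_DOWNLOAD_FILE_PREFIX ("documents-export-")
             * plus the current date, as set in zip_download_mgr_start_zip_task_v2(). */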
task_id = zip_download_mgr_start_zip_task_v2 (seaf->zip_download_mgr, repo_id, "download-multi-link", user, dirent_list); + } + if (!task_id) { + g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); + error_str = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto on_error; + } + evbuffer_add_printf (req->buffer_out, "{\"task_id\": \"%s\"}", task_id); + evhtp_headers_add_header ( + req->headers_out, + evhtp_header_new("Content-Type", "application/json; charset=utf-8", 1, 1)); + evhtp_send_reply (req, EVHTP_RES_OK); + g_free (task_id); + goto success; + } + + if (can_use_cached_content (req)) { + goto success; + } + + parent_dir = seafile_share_link_info_get_parent_dir (info); + norm_parent_dir = normalize_utf8_path (parent_dir); + norm_path = normalize_utf8_path (path); + r_parent_dir = format_dir_path (norm_parent_dir); + fullpath = g_build_filename(r_parent_dir, norm_path, NULL); + filename = g_path_get_basename (fullpath); + + file_id = seaf_fs_manager_get_seafile_id_by_path (seaf->fs_mgr, repo->store_id, repo->version, repo->root_id, fullpath, &error); + if (!file_id) { + error_str = "Invalid file_path\n"; + if (error) + g_clear_error(&error); + goto on_error; + } + set_etag (req, file_id); + + byte_ranges = evhtp_kv_find (req->headers_in, "Range"); + + if (repo->encrypted) { + key = seaf_passwd_manager_get_decrypt_key (seaf->passwd_mgr, + repo_id, user); + if (!key) { + error_str = "Repo is encrypted. Please provide password to view it."; + goto on_error; + } + } + + if (!seaf_fs_manager_object_exists (seaf->fs_mgr, + repo->store_id, repo->version, file_id)) { + error_str = "Invalid file id\n"; + goto on_error; + } + + if (!repo->encrypted && byte_ranges) { + if (do_file_range (req, repo, file_id, filename, "download-link", byte_ranges, user) < 0) { + error_str = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto on_error; + } + } else if (do_file(req, repo, file_id, filename, "download-link", key, user) < 0) { + error_str = "Internal server error\n"; + error_code = EVHTP_RES_SERVERR; + goto on_error; + } + +success: + g_strfreev (parts); + g_free (tmp_parent_dir); + g_free (dirents); + g_free (user); + g_free (norm_parent_dir); + g_free (norm_path); + g_free (r_parent_dir); + g_free (fullpath); + g_free (filename); + g_free (file_id); + if (repo != NULL) + seaf_repo_unref (repo); + if (key != NULL) + g_object_unref (key); + if (info) + g_object_unref (info); + + return; + +on_error: + g_strfreev (parts); + g_free (tmp_parent_dir); + g_free (dirents); + g_free (user); + g_free (norm_parent_dir); + g_free (norm_path); + g_free (r_parent_dir); + g_free (fullpath); + g_free (filename); + g_free (file_id); + if (repo != NULL) + seaf_repo_unref (repo); + if (key != NULL) + g_object_unref (key); + if (info != NULL) + g_object_unref (info); + + evbuffer_add_printf(req->buffer_out, "%s\n", error_str); + evhtp_send_reply(req, error_code); +} +*/ + int access_file_init (evhtp_t *htp) { evhtp_set_regex_cb (htp, "^/files/.*", access_cb, NULL); evhtp_set_regex_cb (htp, "^/blks/.*", access_blks_cb, NULL); evhtp_set_regex_cb (htp, "^/zip/.*", access_zip_cb, NULL); + evhtp_set_regex_cb (htp, "^/f/.*", access_link_cb, NULL); + //evhtp_set_regex_cb (htp, "^/d/.*", access_dir_link_cb, NULL); return 0; } diff --git a/server/http-tx-mgr.c b/server/http-tx-mgr.c index d6b5303d..0204d2e0 100644 --- a/server/http-tx-mgr.c +++ b/server/http-tx-mgr.c @@ -184,27 +184,13 @@ recv_response (void *contents, size_t size, size_t nmemb, void *userp) * the server sometimes 
takes more than 45 seconds to calculate the result, * the client will time out. */ -int -http_get (Connection *conn, const char *url, const char *token, - int *rsp_status, char **rsp_content, gint64 *rsp_size, - HttpRecvCallback callback, void *cb_data, - gboolean timeout) +static int +http_get_common (CURL *curl, const char *url, const char *token, + int *rsp_status, char **rsp_content, gint64 *rsp_size, + HttpRecvCallback callback, void *cb_data, + gboolean timeout) { - char *token_header; - struct curl_slist *headers = NULL; int ret = 0; - CURL *curl; - - curl = conn->curl; - - headers = curl_slist_append (headers, "User-Agent: Seafile/"SEAFILE_CLIENT_VERSION" ("USER_AGENT_OS")"); - - if (token) { - token_header = g_strdup_printf ("Seafile-Repo-Token: %s", token); - headers = curl_slist_append (headers, token_header); - g_free (token_header); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - } curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L); @@ -260,10 +246,8 @@ http_get (Connection *conn, const char *url, const char *token, out: if (ret < 0) { - conn->release = TRUE; g_free (rsp.content); } - curl_slist_free_all (headers); return ret; } @@ -495,6 +479,7 @@ http_tx_manager_get_nickname (const char *modifier) char *rsp_content = NULL; char *nickname = NULL; gint64 rsp_size; + char *url = NULL; jwt_token = gen_jwt_token (); if (!jwt_token) { @@ -528,7 +513,8 @@ http_tx_manager_get_nickname (const char *modifier) g_free (token_header); curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - ret = http_post_common (curl, seaf->seahub_url, jwt_token, req_content, strlen(req_content), + url = g_strdup_printf("%s/user-list/", seaf->seahub_url); + ret = http_post_common (curl, url, jwt_token, req_content, strlen(req_content), &rsp_status, &rsp_content, &rsp_size, TRUE, 1); if (ret < 0) { conn->release = TRUE; @@ -543,10 +529,112 @@ http_tx_manager_get_nickname (const char *modifier) nickname = parse_nickname (rsp_content, rsp_size); out: + g_free (url); g_free (jwt_token); g_free (req_content); g_free (rsp_content); + curl_slist_free_all (headers); connection_pool_return_connection (seaf->seahub_conn_pool, conn); return nickname; } + +static SeafileShareLinkInfo * +parse_share_link_info (const char *rsp_content, int rsp_size) +{ + json_t *object; + json_error_t jerror; + size_t n; + int i; + const char *repo_id = NULL; + const char *file_path = NULL; + const char *parent_dir = NULL; + const char *share_type = NULL; + SeafileShareLinkInfo *info = NULL; + + object = json_loadb (rsp_content, rsp_size, 0, &jerror); + if (!object) { + seaf_warning ("Parse response failed: %s.\n", jerror.text); + return NULL; + } + + repo_id = json_object_get_string_member (object, "repo_id"); + if (!repo_id) { + seaf_warning ("Failed to find repo_id in json.\n"); + goto out; + } + file_path = json_object_get_string_member (object, "file_path"); + parent_dir = json_object_get_string_member (object, "parent_dir"); + share_type = json_object_get_string_member (object, "share_type"); + + info = g_object_new (SEAFILE_TYPE_SHARE_LINK_INFO, + "repo_id", repo_id, + "file_path", file_path, + "parent_dir", parent_dir, + "share_type", share_type, + NULL); + +out: + json_decref (object); + return info; +} + +SeafileShareLinkInfo * +http_tx_manager_query_share_link_info (const char *token, const char *type) +{ + Connection *conn = NULL; + char *token_header; + struct curl_slist *headers = NULL; + int ret = 0; + CURL *curl; + int rsp_status; + char *jwt_token = NULL; + char 
*rsp_content = NULL; + gint64 rsp_size; + SeafileShareLinkInfo *info = NULL; + char *url = NULL; + + jwt_token = gen_jwt_token (); + if (!jwt_token) { + return NULL; + } + + conn = connection_pool_get_connection (seaf->seahub_conn_pool); + if (!conn) { + g_free (jwt_token); + seaf_warning ("Failed to get connection: out of memory.\n"); + return NULL; + } + + curl = conn->curl; + headers = curl_slist_append (headers, "User-Agent: Seafile/"SEAFILE_CLIENT_VERSION" ("USER_AGENT_OS")"); + token_header = g_strdup_printf ("Authorization: Token %s", jwt_token); + headers = curl_slist_append (headers, token_header); + headers = curl_slist_append (headers, "Content-Type: application/json"); + g_free (token_header); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + url = g_strdup_printf("%s/share-link-info/?token=%s&type=%s", seaf->seahub_url, token, type); + ret = http_get_common (curl, url, jwt_token, &rsp_status, + &rsp_content, &rsp_size, NULL, NULL, TRUE); + if (ret < 0) { + conn->release = TRUE; + goto out; + } + + if (rsp_status != HTTP_OK) { + seaf_warning ("Failed to query access token from seahub: %d.\n", + rsp_status); + } + + info = parse_share_link_info (rsp_content, rsp_size); + +out: + g_free (url); + g_free (jwt_token); + g_free (rsp_content); + curl_slist_free_all (headers); + connection_pool_return_connection (seaf->seahub_conn_pool, conn); + + return info; +} diff --git a/server/http-tx-mgr.h b/server/http-tx-mgr.h index 45cb7e1a..3db2bdd7 100644 --- a/server/http-tx-mgr.h +++ b/server/http-tx-mgr.h @@ -48,4 +48,7 @@ http_tx_manager_init (); char * http_tx_manager_get_nickname (const char *modifier); + +SeafileShareLinkInfo * +http_tx_manager_query_share_link_info (const char *token, const char *type); #endif diff --git a/server/upload-file.c b/server/upload-file.c index cc057b43..c85fe439 100755 --- a/server/upload-file.c +++ b/server/upload-file.c @@ -1810,6 +1810,14 @@ update_ajax_cb(evhtp_request_t *req, void *arg) return; } +/* +static void +upload_link_cb(evhtp_request_t *req, void *arg) +{ + return upload_api_cb (req, arg); +} +*/ + static evhtp_res upload_finish_cb (evhtp_request_t *req, void *arg) { @@ -2641,6 +2649,155 @@ upload_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg) return EVHTP_RES_OK; } +/* +static evhtp_res +upload_link_headers_cb (evhtp_request_t *req, evhtp_headers_t *hdr, void *arg) +{ + char **parts = NULL; + char *token = NULL; + const char *repo_id = NULL, *parent_dir = NULL; + char *r_parent_dir = NULL; + char *norm_parent_dir = NULL; + char *user = NULL; + char *boundary = NULL; + gint64 content_len; + char *progress_id = NULL; + char *err_msg = NULL; + RecvFSM *fsm = NULL; + Progress *progress = NULL; + int error_code = EVHTP_RES_BADREQ; + SeafileShareLinkInfo *info = NULL; + + if (!seaf->seahub_pk) { + seaf_warning ("No seahub private key is configured.\n"); + return EVHTP_RES_NOTFOUND; + } + + if (evhtp_request_get_method(req) == htp_method_OPTIONS) { + return EVHTP_RES_OK; + } + + token = req->uri->path->file; + if (!token) { + seaf_debug ("[upload] No token in url.\n"); + err_msg = "No token in url"; + goto err; + } + + parts = g_strsplit (req->uri->path->full + 1, "/", 0); + if (!parts || g_strv_length (parts) < 2) { + err_msg = "Invalid URL"; + goto err; + } + + info = http_tx_manager_query_access_token (token, "upload"); + if (!info) { + err_msg = "Access token not found\n"; + error_code = EVHTP_RES_FORBIDDEN; + goto err; + } + repo_id = seafile_share_link_info_get_repo_id (info); + parent_dir = 
seafile_share_link_info_get_parent_dir (info); + if (!parent_dir) { + err_msg = "No parent_dir\n"; + goto err; + } + norm_parent_dir = normalize_utf8_path (parent_dir); + r_parent_dir = format_dir_path (norm_parent_dir); + + user = seaf_repo_manager_get_repo_owner (seaf->repo_mgr, repo_id); + + boundary = get_boundary (hdr); + if (!boundary) { + err_msg = "Wrong boundary in url"; + goto err; + } + + if (get_progress_info (req, hdr, &content_len, &progress_id) < 0) { + err_msg = "No progress info"; + goto err; + } + + if (progress_id != NULL) { + pthread_mutex_lock (&pg_lock); + if (g_hash_table_lookup (upload_progress, progress_id)) { + pthread_mutex_unlock (&pg_lock); + err_msg = "Duplicate progress id.\n"; + goto err; + } + pthread_mutex_unlock (&pg_lock); + } + + gint64 rstart = -1; + gint64 rend = -1; + gint64 fsize = -1; + if (!parse_range_val (hdr, &rstart, &rend, &fsize)) { + seaf_warning ("Invalid Seafile-Content-Range value.\n"); + err_msg = "Invalid Seafile-Content-Range"; + goto err; + } + + fsm = g_new0 (RecvFSM, 1); + fsm->boundary = boundary; + fsm->repo_id = g_strdup (repo_id); + fsm->parent_dir = r_parent_dir; + fsm->user = user; + fsm->token_type = "upload-link"; + fsm->rstart = rstart; + fsm->rend = rend; + fsm->fsize = fsize; + fsm->line = evbuffer_new (); + fsm->form_kvs = g_hash_table_new_full (g_str_hash, g_str_equal, + g_free, g_free); + // const char *need_idx_progress = evhtp_kv_find (req->uri->query, "need_idx_progress"); + // if (g_strcmp0(need_idx_progress, "true") == 0) + // fsm->need_idx_progress = TRUE; + fsm->need_idx_progress = FALSE; + + if (progress_id != NULL) { + progress = g_new0 (Progress, 1); + progress->size = content_len; + fsm->progress_id = progress_id; + fsm->progress = progress; + + pthread_mutex_lock (&pg_lock); + g_hash_table_insert (upload_progress, g_strdup(progress_id), progress); + pthread_mutex_unlock (&pg_lock); + } + + // Set up per-request hooks, so that we can read file data piece by piece. + evhtp_set_hook (&req->hooks, evhtp_hook_on_read, upload_read_cb, fsm); + evhtp_set_hook (&req->hooks, evhtp_hook_on_request_fini, upload_finish_cb, fsm); + // Set arg for upload_cb or update_cb. + req->cbarg = fsm; + + g_free (norm_parent_dir); + g_strfreev (parts); + g_object_unref (info); + + return EVHTP_RES_OK; + +err: + // Don't receive any data before the connection is closed. + // evhtp_request_pause (req); + + // Set keepalive to 0. This will cause evhtp to close the + // connection after sending the reply. 
+ req->keepalive = 0; + send_error_reply (req, error_code, err_msg); + + g_free (norm_parent_dir); + g_free (r_parent_dir); + g_free (user); + g_free (boundary); + g_free (progress_id); + g_strfreev (parts); + if (info) + g_object_unref (info); + return EVHTP_RES_OK; +} +*/ + static void idx_progress_cb(evhtp_request_t *req, void *arg) { @@ -2757,6 +2914,10 @@ upload_file_init (evhtp_t *htp, const char *http_temp_dir) cb = evhtp_set_regex_cb (htp, "^/update-aj/.*", update_ajax_cb, NULL); evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_headers_cb, NULL); + // upload links + // cb = evhtp_set_regex_cb (htp, "^/u/.*", upload_link_cb, NULL); + //evhtp_set_hook(&cb->hooks, evhtp_hook_on_headers, upload_link_headers_cb, NULL); + evhtp_set_regex_cb (htp, "^/upload_progress.*", upload_progress_cb, NULL); evhtp_set_regex_cb (htp, "^/idx_progress.*", idx_progress_cb, NULL); diff --git a/server/zip-download-mgr.c b/server/zip-download-mgr.c index 30376ba5..ba821f47 100644 --- a/server/zip-download-mgr.c +++ b/server/zip-download-mgr.c @@ -572,6 +572,90 @@ zip_download_mgr_start_zip_task (ZipDownloadMgr *mgr, return ret; } +/* +#define TOKEN_LEN 36 +static char * +gen_new_token (GHashTable *token_hash) +{ + char uuid[37]; + char *token; + + while (1) { + gen_uuid_inplace (uuid); + token = g_strndup(uuid, TOKEN_LEN); + + // Make sure the new token doesn't conflict with an existing one. + if (g_hash_table_lookup (token_hash, token) != NULL) + g_free (token); + else + return token; + } +} + +char * +zip_download_mgr_start_zip_task_v2 (ZipDownloadMgr *mgr, + const char *repo_id, + const char *operation, + const char *user, + GList *dirent_list) +{ + SeafRepo *repo = NULL; + char *token = NULL; + char *task_id = NULL; + char *filename = NULL; + DownloadObj *obj; + Progress *progress; + ZipDownloadMgrPriv *priv = mgr->priv; + + repo = seaf_repo_manager_get_repo(seaf->repo_mgr, repo_id); + if (!repo) { + seaf_warning ("Failed to get repo %s\n", repo_id); + return NULL; + } + + obj = g_new0 (DownloadObj, 1); + obj->repo = repo; + obj->user = g_strdup (user); + + if (strcmp (operation, "download-dir") == 0 || + strcmp (operation, "download-dir-link") == 0) { + obj->type = DOWNLOAD_DIR; + SeafDirent *dent = dirent_list->data; + obj->dir_name = g_strdup (dent->name); + obj->internal = g_strdup (dent->id); + filename = g_strdup (obj->dir_name); + g_list_free_full (dirent_list, (GDestroyNotify)seaf_dirent_free); + } else { + obj->type = DOWNLOAD_MULTI; + obj->dir_name = g_strdup(""); + obj->internal = dirent_list; + time_t now = time(NULL); + char date_str[11]; + strftime(date_str, sizeof(date_str), "%Y-%m-%d", localtime(&now)); + filename = g_strconcat (MULTI_DOWNLOAD_FILE_PREFIX, date_str, NULL); + } + + progress = g_new0 (Progress, 1); + // Set to real total in worker thread. Here to just prevent the client from thinking + // the zip has been finished too early. 
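    // While total is still this placeholder value, a client polling the zip
    // progress endpoint sees something like {"zipped": 0, "total": 1}; the
    // worker thread later replaces it with the real entry count.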
+ progress->total = 1; + progress->expire_ts = time(NULL) + PROGRESS_TTL; + progress->zip_file_name = filename; + obj->progress = progress; + + pthread_mutex_lock (&priv->progress_lock); + token = gen_new_token (priv->progress_store); + g_hash_table_replace (priv->progress_store, token, progress); + pthread_mutex_unlock (&priv->progress_lock); + obj->token = g_strdup (token); + task_id = g_strdup (token); + + g_thread_pool_push (priv->zip_tpool, obj, NULL); + + return task_id; +} +*/ + static Progress * get_progress_obj (ZipDownloadMgrPriv *priv, const char *token) { @@ -636,6 +720,21 @@ zip_download_mgr_get_zip_file_path (struct ZipDownloadMgr *mgr, return progress->zip_file_path; } +/* +char * +zip_download_mgr_get_zip_file_name (struct ZipDownloadMgr *mgr, + const char *token) +{ + Progress *progress; + + progress = get_progress_obj (mgr->priv, token); + if (!progress) { + return NULL; + } + return progress->zip_file_name; +} +*/ + void zip_download_mgr_del_zip_progress (ZipDownloadMgr *mgr, const char *token) diff --git a/server/zip-download-mgr.h b/server/zip-download-mgr.h index e11d7660..7643f0da 100644 --- a/server/zip-download-mgr.h +++ b/server/zip-download-mgr.h @@ -5,6 +5,8 @@ #include "seafile-object.h" +#define MULTI_DOWNLOAD_FILE_PREFIX "documents-export-" + struct ZipDownloadMgrPriv; typedef struct ZipDownloadMgr { @@ -20,6 +22,13 @@ zip_download_mgr_start_zip_task (ZipDownloadMgr *mgr, SeafileWebAccess *info, GError **error); +char * +zip_download_mgr_start_zip_task_v2 (ZipDownloadMgr *mgr, + const char *repo_id, + const char *operation, + const char *user, + GList *dirent_list); + char * zip_download_mgr_query_zip_progress (ZipDownloadMgr *mgr, const char *token, GError **error); @@ -28,6 +37,10 @@ char * zip_download_mgr_get_zip_file_path (ZipDownloadMgr *mgr, const char *token); +char * +zip_download_mgr_get_zip_file_name (ZipDownloadMgr *mgr, + const char *token); + void zip_download_mgr_del_zip_progress (ZipDownloadMgr *mgr, const char *token); diff --git a/tests/conf/seahub_settings.py b/tests/conf/seahub_settings.py new file mode 100644 index 00000000..b7eab820 --- /dev/null +++ b/tests/conf/seahub_settings.py @@ -0,0 +1,2 @@ +SECRET_KEY='122h5qj(4&n2712ybr$0mn8x!#sz&(w2w*-zrxe&$!yrzbu9' +SITE_ROOT= '/seahub/'
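
For reference, a minimal Go sketch of how a request to the new internal share-link-info endpoint is authenticated, assuming the claim layout implied by SeahubClaims in fileop.go (a short-lived expiry plus an internal-request flag); the struct, field names, and variables below are illustrative, not part of the patch:

package main

import (
	"fmt"
	"net/http"
	"time"

	jwt "github.com/golang-jwt/jwt/v5"
)

// illustrativeClaims mirrors the shape implied by SeahubClaims: an expiry
// timestamp and an "internal" flag. Field names and JSON tags are assumptions.
type illustrativeClaims struct {
	Exp        int64 `json:"exp"`
	IsInternal bool  `json:"is_internal"`
	jwt.RegisteredClaims
}

func main() {
	// SECRET_KEY and SITE_ROOT as in tests/conf/seahub_settings.py.
	secret := "122h5qj(4&n2712ybr$0mn8x!#sz&(w2w*-zrxe&$!yrzbu9"
	base := "http://127.0.0.1:8000/seahub/api/v2.1/internal"

	// Sign a 5-minute HS256 token, as queryShareLinkInfo does.
	claims := illustrativeClaims{
		Exp:        time.Now().Add(300 * time.Second).Unix(),
		IsInternal: true,
	}
	token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), &claims)
	signed, err := token.SignedString([]byte(secret))
	if err != nil {
		panic(err)
	}

	// Resolve a download share link token ("type=file"); the response body is
	// JSON carrying repo_id, file_path, parent_dir and share_type.
	url := fmt.Sprintf("%s/share-link-info/?token=%s&type=%s", base, "<link-token>", "file")
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Authorization", "Token "+signed)

	rsp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer rsp.Body.Close()
	fmt.Println(rsp.Status)
}

The C side takes the same route: http_tx_manager_query_share_link_info() sends the request through the seahub connection pool with an "Authorization: Token <jwt>" header and parses the same four JSON fields.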