diff --git a/.gitignore b/.gitignore
index 900164b4..2e04ca4b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,9 @@
+*~
+*#
+*.log
+*.gz
+/seafdav.conf
+/seafdav.fcgi.conf
.cache
.coverage
.eggs
diff --git a/.travis.yml b/.travis.yml
index 55959d0c..c82ae023 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,52 +1,31 @@
-# sudo: false
+dist: bionic
language: python
-
-matrix:
- include:
- - python: "2.7"
- env: TOXENV=lint-py27
- - python: "3.7"
- env: TOXENV=black-check,lint-py3
- dist: xenial # only while we're using python 3.7
- - python: "2.7"
- env: TOXENV=py27
- # - python: "3.4" # EOL 2019-03-18
- # env: TOXENV=py34
- - python: "3.5"
- env: TOXENV=py35
- - python: "3.6"
- env: TOXENV=py36
- - python: "3.7"
- env: TOXENV=py37
- dist: xenial
- - python: "3.8"
- env: TOXENV=py38
- - python: "3.7-dev"
- env: TOXENV=py37
- - python: "3.8-dev"
- env: TOXENV=py38
- allow_failures:
- - python: "3.7-dev"
- env: TOXENV=py37
- - python: "3.8-dev"
- env: TOXENV=py38
-
+python:
+- "3.6"
+compiler:
+- gcc
+addons:
+ apt:
+ packages:
+ - valac
+ - uuid-dev
+ - libevent-dev
+ - libarchive-dev
+ - intltool
+ - libjansson-dev
+ - libonig-dev
+ - libfuse-dev
+ - net-tools
+ - libglib2.0-dev
+ - sqlite3
+ - libsqlite3-dev
+ - libonig-dev
+ - libcurl4-openssl-dev
before_install:
- # See issue #80: litmus fails to build on travis
- # The branch 'travis-litmus' still has this enabled to investigate...
-# - sudo apt-get install libneon27-dev
-# - ./install_litmus.sh
-
-services:
- - redis-server
-
+ - chmod +x ci/install-deps.sh
+ - chmod +x ci/functests.sh
+ - pip install -r ./ci/requirements.txt
install:
- - travis_retry pip install -U pip setuptools
- - travis_retry pip install -U tox coveralls coverage
-
+- "./ci/install-deps.sh"
script:
- - travis_retry tox
-
-after_success:
- - coverage combine
- - coveralls
+- "./ci/functests.sh init && ./ci/functests.sh runserver && ./ci/functests.sh test"
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
index 675ace27..c7897f48 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,9 @@
The MIT License
Copyright (c) 2009-2020 Martin Wendt, (Original PyFileServer (c) 2005 Ho Chun Wei)
+Copyright (c) 2012-present Seafile Ltd.
+
+Seafile webdav server is based on WsgiDAV.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..ac10dfd2
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,6 @@
+all: seafdav.tar.gz
+
+seafdav.tar.gz:
+ git archive HEAD wsgidav | gzip > seafdav.tar.gz
+clean:
+ rm -f *.gz
diff --git a/README.md b/README.md
index c0cf722f..fbdb1ed6 100644
--- a/README.md
+++ b/README.md
@@ -1,78 +1,31 @@
-# ![logo](https://raw.githubusercontent.com/mar10/wsgidav/master/doc/logo.png) WsgiDAV
-[![Build Status](https://travis-ci.org/mar10/wsgidav.svg?branch=master)](https://travis-ci.org/mar10/wsgidav)
-[![Latest Version](https://img.shields.io/pypi/v/wsgidav.svg)](https://pypi.python.org/pypi/WsgiDAV/)
-[![License](https://img.shields.io/pypi/l/wsgidav.svg)](https://github.com/mar10/wsgidav/blob/master/LICENSE)
-[![Documentation Status](https://readthedocs.org/projects/wsgidav/badge/?version=latest)](http://wsgidav.readthedocs.io/)
-[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
-[![StackOverflow: WsgiDAV](https://img.shields.io/badge/StackOverflow-WsgiDAV-blue.svg)](https://stackoverflow.com/questions/tagged/WsgiDAV)
+# Seafile WebDAV Server [![Build Status](https://secure.travis-ci.org/haiwen/seafdav.svg?branch=master)](http://travis-ci.org/haiwen/seafdav)
-A generic and extendable [WebDAV](http://www.ietf.org/rfc/rfc4918.txt) server
-written in Python and based on [WSGI](http://www.python.org/dev/peps/pep-3333/).
+This is the WebDAV server for seafile.
-Main features:
+See [Seafile Server Manual](http://manual.seafile.com/extension/webdav.html) for details.
- - WsgiDAV is a stand-alone WebDAV server with SSL support, that can be
- installed and run as Python command line script on Linux, OSX, and Windows:
- ```
- $ pip install wsgidav cheroot
- $ wsgidav --host=0.0.0.0 --port=8080 --root=/tmp
- WARNING: share '/' will allow anonymous access.
- Running WsgiDAV/2.2.2 Cheroot/5.5.0 Python/3.4.2
- Serving on http://0.0.0.0:8080 ...
- ```
- Run `wsgidav --help` for a list of available options.
- **Note:** The syntax changed slightly with v3.0.
- - **Note:** python-pam is needed if using pam-login on Linux or OSX:
- ```
- $ pip install python-pam
- $ wsgidav --auth=pam-login --host=0.0.0.0 --port=8080 --root=/tmp
- ```
- - **Note:** Windows users may prefer the
- [MSI Installer](https://github.com/mar10/wsgidav/releases/latest)
- (see Assets section).
- - WebDAV is a superset of HTTP, so WsgiDAV is also a performant, multi-threaded
- web server with SSL support.
- - WsgiDAV is also a Python library that implements the WSGI protocol and can
- be run behind any WSGI compliant web server.
- - WsgiDAV is implemented as a configurable stack of WSGI middleware
- applications.
- Its open architecture allows to extend the functionality and integrate
- WebDAV services into your project.
- Typical use cases are:
- - Expose data structures as virtual, editable file systems.
- - Allow online editing of MS Office documents.
+# Running
+There is a template for running seafdav:
+- run.sh.template: This is for running seafdav on the default 8080 port with a built-in CherryPy server.
-## Status
+To run on 8080 port:
-[![Latest Version](https://img.shields.io/pypi/v/wsgidav.svg)](https://pypi.python.org/pypi/WsgiDAV/)
-See the ([change log](https://github.com/mar10/wsgidav/blob/master/CHANGELOG.md)) for details.
+```
+cp run.sh.template run.sh
+```
-**Note:** Release 3.0 introduces some refactorings and breaking changes.
- See the ([change log](https://github.com/mar10/wsgidav/blob/master/CHANGELOG.md)) for details.
+Then change CCNET_CONF_DIR and SEAFILE_CONF_DIR to your Seafile server's settings.
+# Testing
-## More info
-
- * [Read The Docs](http://wsgidav.rtfd.org) for details.
- * [Discussion Group](https://groups.google.com/forum/#!forum/wsgidav)
- * [Stackoverflow](http://stackoverflow.com/questions/tagged/wsgidav)
-
-
-## Credits
-
-Contributors:
-
- * WsgiDAV is a [refactored version](https://github.com/mar10/wsgidav/blob/master/doc/changelog04.md)
- of [PyFileServer 0.2](https://github.com/cwho/pyfileserver),
- Copyright (c) 2005 Ho Chun Wei.
- Chun gave his approval to change the license from LGPL to MIT-License for
- this project.
- *
- * Markus Majer for providing the logo (a mixture of the international
- maritime signal flag for 'W (Whiskey)' and a dove.)
-
-
-Any kind of feedback is very welcome!
-Have fun :-)
-Martin
+- start local seafile server
+- start local seahub server (While seafdav itself doesn't require seahub, we use seahub webapi as a driver for testing)
+- start seafdav server
+- create a test user `test@seafiletest.com` with password `test`
+- Run the tests
+```
+export CCNET_CONF_DIR=/path/to/ccnet
+export SEAFILE_CONF_DIR=/path/to/seafile-data
+./ci/functests.sh test
+```
diff --git a/ci/functests.sh b/ci/functests.sh
new file mode 100755
index 00000000..d40f5a8a
--- /dev/null
+++ b/ci/functests.sh
@@ -0,0 +1,83 @@
+set -e
+if [ $# -lt "1" ]; then
+ echo
+ echo "Usage: ./functests.sh {init|runserver|test}"
+ echo
+ exit 1
+fi
+if [ ${TRAVIS} ] ;then
+ set -x
+ CCNET_CONF_DIR="/tmp/seafile-server/tests/conf"
+ SEAFILE_CONF_DIR="/tmp/seafile-server/tests/conf/seafile-data"
+ PYTHONPATH="/usr/local/lib/python3.6/site-packages:/tmp/seafobj:/tmp/seafile-server/tests/conf/seafile-data/:${PYTHONPATH}"
+ export PYTHONPATH
+ export CCNET_CONF_DIR
+ export SEAFILE_CONF_DIR
+
+fi
+
+function start_server() {
+ seaf-server -c /tmp/seafile-server/tests/conf -d /tmp/seafile-server/tests/conf/seafile-data -f -l - &
+ sleep 2
+}
+
+function init() {
+ cat > /tmp/seafile-server/tests/conf/ccnet.conf << EOF
+[General]
+USER_NAME = server
+ID = 8e4b13b49ca79f35732d9f44a0804940d985627c
+NAME = server
+SERVICE_URL = http://127.0.0.1
+
+[Network]
+PORT = 10002
+
+[Client]
+PORT = 9999
+
+[Database]
+CREATE_TABLES = true
+EOF
+ mkdir /tmp/seafile-server/tests/conf/seafile-data
+ touch /tmp/seafile-server/tests/conf/seafile-data/seafile.conf
+ touch /tmp/seafile-server/tests/conf/seafile-data/seahub_settings.py
+ cat > /tmp/seafile-server/tests/conf/seafile-data/seafile.conf << EOF
+[database]
+create_tables = true
+EOF
+ touch ${CCNET_CONF_DIR}/seafile.ini
+ cat > ${CCNET_CONF_DIR}/seafile.ini << EOF
+/tmp/seafile-server/tests/conf/seafile-data
+EOF
+ start_server
+ python -c "from seaserv import ccnet_api as api;api.add_emailuser('test@seafiletest.com','test',0,1)"
+}
+
+function start_seafdav() {
+ if [ ${TRAVIS} ]; then
+ cd ${TRAVIS_BUILD_DIR}
+ python -m wsgidav.server.server_cli --host=127.0.0.1 --port=8080 --root=/ --server=gunicorn &
+ sleep 5
+ fi
+}
+
+function run_tests() {
+ cd seafdav_tests
+ py.test
+}
+
+case $1 in
+ "init")
+ init
+ ;;
+ "runserver")
+ start_seafdav
+ ;;
+ "test")
+ run_tests
+ ;;
+ *)
+ echo "unknow command \"$1\""
+ ;;
+esac
+
diff --git a/ci/install-deps.sh b/ci/install-deps.sh
new file mode 100755
index 00000000..738a6838
--- /dev/null
+++ b/ci/install-deps.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -e -x
+
+git clone --depth=1 --branch=master git://github.com/haiwen/libevhtp /tmp/libevhtp
+cd /tmp/libevhtp
+cmake -DEVHTP_DISABLE_SSL=ON -DEVHTP_BUILD_SHARED=OFF .
+make -j2
+sudo make install
+cd -
+
+git clone --depth=1 --branch=master git://github.com/haiwen/libsearpc /tmp/libsearpc
+cd /tmp/libsearpc
+./autogen.sh
+./configure
+make -j2
+sudo make install
+cd -
+
+git clone --depth=1 --branch=master git://github.com/haiwen/seafile-server /tmp/seafile-server
+cd /tmp/seafile-server
+./autogen.sh
+./configure
+make -j2
+sudo make install
+cd -
+
+sudo ldconfig
+
+git clone --depth=1 --branch=master git://github.com/haiwen/seafobj /tmp/seafobj
diff --git a/ci/requirements.txt b/ci/requirements.txt
new file mode 100644
index 00000000..4fccdbe4
--- /dev/null
+++ b/ci/requirements.txt
@@ -0,0 +1,16 @@
+termcolor>=1.1.0
+requests>=2.8.0
+pytest>=3.3.2
+backports.functools_lru_cache>=1.4
+tenacity>=4.8.0
+defusedxml~=0.5
+Jinja2~=2.10
+jsmin~=2.2
+python-pam~=1.8
+PyYAML~=5.1
+six~=1.12
+gunicorn
+future
+lxml
+sqlalchemy
+pyjson5
diff --git a/requirements.txt b/requirements.txt
index 4caf8bba..8f7d0255 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,3 +4,5 @@ json5~=0.8.5
python-pam~=1.8
PyYAML~=5.1
six~=1.13
+lxml
+sqlalchemy
diff --git a/run.sh.template b/run.sh.template
new file mode 100644
index 00000000..dcfb9ea6
--- /dev/null
+++ b/run.sh.template
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+export CCNET_CONF_DIR=/data/data/ccnet
+export SEAFILE_CONF_DIR=/data/data/seafile-data
+
+TOP_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))")
+
+cd "$TOP_DIR"
+
+python -m wsgidav.server.run_server --host=0.0.0.0 --port=8080 --root=/ --server=gunicorn
diff --git a/seafdav_tests/client.py b/seafdav_tests/client.py
new file mode 100644
index 00000000..6891108c
--- /dev/null
+++ b/seafdav_tests/client.py
@@ -0,0 +1,70 @@
+#coding: UTF-8
+
+from easywebdav3 import easywebdav
+import os
+import io
+import posixpath
+from seaserv import seafile_api
+
+USER = os.environ.get('SEAFILE_TEST_USERNAME', 'test@seafiletest.com')
+PASSWORD = os.environ.get('SEAFILE_TEST_PASSWORD', 'test')
+
+def get_webapi_client():
+ apiclient = seafile_api.connect('http://127.0.0.1:8000', USER, PASSWORD)
+ return apiclient
+
+class SeafDavClient(object):
+ """Wrapper around easywebdav to provide common operations on seafile webdav
+ server.
+
+ Davfs2 would be a better option, but it's not supported on travis ci.
+ """
+ server = '127.0.0.1'
+ port = 8080
+ user = USER
+ password = PASSWORD
+
+ def __init__(self):
+ self._dav = easywebdav.Client(self.server, port=self.port,
+ username=self.user,
+ password=self.password)
+
+ def list_repos(self):
+ return [e for e in self._dav.ls('/') if e.name != '/']
+
+ def repo_listdir(self, repo, path='/'):
+ repo_name = repo.get('name')
+ path = posixpath.join('/', repo_name, path.lstrip('/'))
+ if not path.endswith('/'):
+ path += '/'
+ entries = self._dav.ls(path)
+ # the file entries list also contains the path itself, we just filter it
+ # out for convenience
+ return [e for e in entries if e.name != path]
+
+ def repo_mkdir(self, repo, parentdir, dirname, safe=False):
+ repo_name = repo.get('name')
+ fullpath = posixpath.join('/', repo_name, parentdir.lstrip('/'), dirname)
+ self._dav.mkdir(fullpath, safe)
+
+ def repo_getfile(self, repo, path):
+ fobj = io.BytesIO()
+ repo_name = repo.get('name')
+ fullpath = posixpath.join('/', repo_name, path.lstrip('/'))
+ self._dav.download(fullpath, fobj)
+ return fobj.getvalue()
+
+ def repo_uploadfile(self, repo, localpath_or_fileobj, path):
+ repo_name = repo.get('name')
+ fullpath = posixpath.join('/', repo_name, path.lstrip('/'))
+ self._dav.upload(localpath_or_fileobj, fullpath)
+
+ def repo_removedir(self, repo, path):
+ repo_name = repo.get('name')
+ fullpath = posixpath.join('/', repo_name, path.lstrip('/'))
+ self._dav.rmdir(fullpath)
+
+ def repo_removefile(self, repo, path):
+ repo_name = repo.get('name')
+ fullpath = posixpath.join('/', repo_name, path.lstrip('/'))
+ self._dav.delete(fullpath)
diff --git a/seafdav_tests/data/test.txt b/seafdav_tests/data/test.txt
new file mode 100644
index 00000000..9daeafb9
--- /dev/null
+++ b/seafdav_tests/data/test.txt
@@ -0,0 +1 @@
+test
diff --git a/seafdav_tests/easywebdav3/__init__.py b/seafdav_tests/easywebdav3/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/seafdav_tests/easywebdav3/easywebdav.py b/seafdav_tests/easywebdav3/easywebdav.py
new file mode 100644
index 00000000..73c41ed6
--- /dev/null
+++ b/seafdav_tests/easywebdav3/easywebdav.py
@@ -0,0 +1,181 @@
+import requests
+import platform
+from numbers import Number
+import xml.etree.cElementTree as xml
+from collections import namedtuple
+from http.client import responses as HTTP_CODES
+from urllib.parse import urlparse
+
+DOWNLOAD_CHUNK_SIZE_BYTES = 1 * 1024 * 1024
+
+class WebdavException(Exception):
+ pass
+
+class ConnectionFailed(WebdavException):
+ pass
+
+
+def codestr(code):
+ return HTTP_CODES.get(code, 'UNKNOWN')
+
+
+File = namedtuple('File', ['name', 'size', 'mtime', 'ctime', 'contenttype'])
+
+
+def prop(elem, name, default=None):
+ child = elem.find('.//{DAV:}' + name)
+ return default if child is None else child.text
+
+
+def elem2file(elem):
+ return File(
+ prop(elem, 'href'),
+ int(prop(elem, 'getcontentlength', 0)),
+ prop(elem, 'getlastmodified', ''),
+ prop(elem, 'creationdate', ''),
+ prop(elem, 'getcontenttype', ''),
+ )
+
+
+class OperationFailed(WebdavException):
+ _OPERATIONS = dict(
+ HEAD = "get header",
+ GET = "download",
+ PUT = "upload",
+ DELETE = "delete",
+ MKCOL = "create directory",
+ PROPFIND = "list directory",
+ )
+
+ def __init__(self, method, path, expected_code, actual_code):
+ self.method = method
+ self.path = path
+ self.expected_code = expected_code
+ self.actual_code = actual_code
+ operation_name = self._OPERATIONS[method]
+ self.reason = 'Failed to {operation_name} "{path}"'.format(**locals())
+ expected_codes = (expected_code,) if isinstance(expected_code, Number) else expected_code
+ expected_codes_str = ", ".join('{0} {1}'.format(code, codestr(code)) for code in expected_codes)
+ actual_code_str = codestr(actual_code)
+ msg = '''\
+{self.reason}.
+ Operation : {method} {path}
+ Expected code : {expected_codes_str}
+ Actual code : {actual_code} {actual_code_str}'''.format(**locals())
+ super(OperationFailed, self).__init__(msg)
+
+class Client(object):
+ def __init__(self, host, port=0, auth=None, username=None, password=None,
+ protocol='http', verify_ssl=True, path=None, cert=None):
+ if not port:
+ port = 443 if protocol == 'https' else 80
+ self.baseurl = '{0}://{1}:{2}'.format(protocol, host, port)
+ if path:
+ self.baseurl = '{0}/{1}'.format(self.baseurl, path)
+ self.cwd = '/'
+ self.session = requests.session()
+ self.session.verify = verify_ssl
+ self.session.stream = True
+
+ if cert:
+ self.session.cert = cert
+
+ if auth:
+ self.session.auth = auth
+ elif username and password:
+ self.session.auth = (username, password)
+
+ def _send(self, method, path, expected_code, **kwargs):
+ url = self._get_url(path)
+ response = self.session.request(method, url, allow_redirects=False, **kwargs)
+ if isinstance(expected_code, Number) and response.status_code != expected_code \
+ or not isinstance(expected_code, Number) and response.status_code not in expected_code:
+ raise OperationFailed(method, path, expected_code, response.status_code)
+ return response
+
+ def _get_url(self, path):
+ path = str(path).strip()
+ if path.startswith('/'):
+ return self.baseurl + path
+ return "".join((self.baseurl, self.cwd, path))
+
+ def cd(self, path):
+ path = path.strip()
+ if not path:
+ return
+ stripped_path = '/'.join(part for part in path.split('/') if part) + '/'
+ if stripped_path == '/':
+ self.cwd = stripped_path
+ elif path.startswith('/'):
+ self.cwd = '/' + stripped_path
+ else:
+ self.cwd += stripped_path
+
+ def mkdir(self, path, safe=False):
+ expected_codes = 201 if not safe else (201, 301, 405)
+ self._send('MKCOL', path, expected_codes)
+
+ def mkdirs(self, path):
+ dirs = [d for d in path.split('/') if d]
+ if not dirs:
+ return
+ if path.startswith('/'):
+ dirs[0] = '/' + dirs[0]
+ old_cwd = self.cwd
+ try:
+ for dir in dirs:
+ try:
+ self.mkdir(dir, safe=True)
+ except Exception as e:
+ if e.actual_code == 409:
+ raise
+ finally:
+ self.cd(dir)
+ finally:
+ self.cd(old_cwd)
+
+ def rmdir(self, path, safe=False):
+ path = str(path).rstrip('/') + '/'
+ expected_codes = 204 if not safe else (204, 404)
+ self._send('DELETE', path, expected_codes)
+
+ def delete(self, path):
+ self._send('DELETE', path, 204)
+
+ def upload(self, local_path_or_fileobj, remote_path):
+ if isinstance(local_path_or_fileobj, str):
+ with open(local_path_or_fileobj, 'rb') as f:
+ self._upload(f, remote_path)
+ else:
+ self._upload(local_path_or_fileobj, remote_path)
+
+ def _upload(self, fileobj, remote_path):
+ self._send('PUT', remote_path, (200, 201, 204), data=fileobj)
+
+ def download(self, remote_path, local_path_or_fileobj):
+ response = self._send('GET', remote_path, 200, stream=True)
+ if isinstance(local_path_or_fileobj, str):
+ with open(local_path_or_fileobj, 'wb') as f:
+ self._download(f, response)
+ else:
+ self._download(local_path_or_fileobj, response)
+
+ def _download(self, fileobj, response):
+ for chunk in response.iter_content(DOWNLOAD_CHUNK_SIZE_BYTES):
+ fileobj.write(chunk)
+
+ def ls(self, remote_path='.'):
+ headers = {'Depth': '1'}
+ response = self._send('PROPFIND', remote_path, (207, 301), headers=headers)
+
+ # Redirect
+ if response.status_code == 301:
+ url = urlparse(response.headers['location'])
+ return self.ls(url.path)
+
+ tree = xml.fromstring(response.content)
+ return [elem2file(elem) for elem in tree.findall('{DAV:}response')]
+
+ def exists(self, remote_path):
+ response = self._send('HEAD', remote_path, (200, 301, 404))
+ return True if response.status_code != 404 else False
diff --git a/seafdav_tests/test_webdav.py b/seafdav_tests/test_webdav.py
new file mode 100644
index 00000000..ce3f8f78
--- /dev/null
+++ b/seafdav_tests/test_webdav.py
@@ -0,0 +1,261 @@
+#coding: UTF-8
+
+import time
+import os
+import io
+import unittest
+import posixpath
+import random
+import string
+from functools import wraps
+from contextlib import contextmanager
+from client import SeafDavClient, USER, PASSWORD
+from easywebdav3.easywebdav import OperationFailed as WebDAVOperationFailed
+from seaserv import seafile_api as api
+
+davclient = SeafDavClient()
+TEST_REPO = None
+
+def randstring(length=20):
+ return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
+
+def dav_basename(f):
+ if isinstance(f, str):
+ path = f
+ else:
+ path = f.name
+ return posixpath.basename(path.rstrip('/'))
+
+@contextmanager
+def tmp_repo(name=None, desc=None):
+ """Create a temporary repo for test before the function executes, and delete
+ the repo after that.
+
+ Usage:
+
+ with tmp_repo() as repo:
+ ... do things with repo ...
+ """
+ name = name or randstring()
+ desc = desc or randstring()
+ repo_id = api.create_repo(name, desc, USER, enc_version=None)
+ repo = {"id" : repo_id, "name" : name}
+ try:
+ yield repo
+ finally:
+ pass
+ #api.remove_repo(repo_id)
+
+def use_tmp_repo(func):
+ """Create a temporary repo for test before the function executes, and delete
+ the repo after that.
+
+ Typical usage:
+
+ @use_tmp_repo
+ def test_file_ops():
+ repo = TEST_REPO
+ ... use `repo` to do things ...
+ """
+ @wraps(func)
+ def wrapper(*a, **kw):
+ with tmp_repo() as _repo:
+ global TEST_REPO
+ TEST_REPO = _repo
+ func(*a, **kw)
+ return wrapper
+
+class SeafDAVTestCase(unittest.TestCase):
+ def test_list_repos(self):
+ """Test list repos in the top level."""
+ def verify_repos_count(n=None):
+ entries = davclient.list_repos()
+ if n is not None:
+ self.assertHasLen(entries, n)
+ return entries
+
+ nrepos = len(verify_repos_count())
+
+ with tmp_repo() as repo:
+ entries = verify_repos_count(nrepos + 1)
+ self.assertIn(repo.get('name'), [dav_basename(f) for f in entries])
+
+ def test_file_ops(self):
+ """Test list/add/remove files and folders"""
+ @use_tmp_repo
+ def _test_under_path(path):
+ repo = TEST_REPO
+ path = path.rstrip('/')
+ #sdir = repo.get_dir('/')
+ parent_dir = '/'
+ if path:
+ dirs = [p for p in path.split('/') if p]
+ for d in dirs:
+ api.post_dir(repo.get('id'), parent_dir, d, USER)
+ parent_dir = parent_dir + d + '/'
+ entries = davclient.repo_listdir(repo, path)
+ self.assertEmpty(entries)
+
+ # delete non existent folder from webapi
+ dirname = 'folder-%s' % randstring()
+ api.del_file(repo.get('id'), parent_dir, dirname, USER)
+ entries = davclient.repo_listdir(repo, parent_dir)
+ self.assertEmpty(entries)
+
+ #delete non existent file from webapi
+ fname = 'uploaded-file-%s.txt' % randstring()
+ api.del_file(repo.get('id'), parent_dir, fname, USER)
+ entries = davclient.repo_listdir(repo, parent_dir)
+ self.assertEmpty(entries)
+
+ # create a folder from webapi and list it in webdav
+ dirname = 'folder-%s' % randstring()
+ api.post_dir(repo.get('id'), parent_dir, dirname, USER)
+
+ entries = davclient.repo_listdir(repo, parent_dir)
+ self.assertHasLen(entries, 1)
+ sfolder = entries[0]
+ self.assertEqual(dav_basename(sfolder), dirname)
+
+ # create a file from webapi and list it in webdav
+ testfpath = os.path.join(os.path.dirname(__file__), 'data', 'test.txt')
+ with open(testfpath, 'rb') as fp:
+ testfcontent = fp.read()
+ fname = 'uploaded-file-%s.txt' % randstring()
+ api.post_file(repo.get('id'), testfpath, parent_dir, fname, USER)
+ entries = davclient.repo_listdir(repo, parent_dir)
+ self.assertHasLen(entries, 2)
+ downloaded_file = davclient.repo_getfile(repo, posixpath.join(parent_dir, fname))
+ assert downloaded_file == testfcontent
+
+ # create a folder through webdav, and check it in webapi
+ dirname = 'another-level1-folder-%s' % randstring(10)
+ davclient.repo_mkdir(repo, parent_dir, dirname)
+ entries = api.list_dir_by_path(repo.get('id'), parent_dir)
+ self.assertHasLen(entries, 3)
+ davdir = [e for e in entries if e.obj_name == dirname][0]
+ self.assertEqual(davdir.obj_name, dirname)
+
+ # create an existing folder through webdav
+ davclient.repo_mkdir(repo, parent_dir, dirname, True)
+ entries = api.list_dir_by_path(repo.get('id'), parent_dir)
+ self.assertHasLen(entries, 3)
+
+ # upload a file through webdav, and check it in webapi
+ fname = 'uploaded-file-%s' % randstring()
+ repo_fpath = posixpath.join(parent_dir, fname)
+ davclient.repo_uploadfile(repo, testfpath, repo_fpath)
+ entries = api.list_dir_by_path(repo.get('id'), parent_dir)
+ self.assertHasLen(entries, 4)
+
+ # upload an existing file through webdav
+ repo_fpath = posixpath.join(parent_dir, fname)
+ davclient.repo_uploadfile(repo, testfpath, repo_fpath)
+ entries = api.list_dir_by_path(repo.get('id'), parent_dir)
+ self.assertHasLen(entries, 4)
+
+ # remove a dir through webdav
+ self.assertIn(dirname, [dirent.obj_name for dirent in \
+ api.list_dir_by_path(repo.get('id'), parent_dir)])
+ davclient.repo_removedir(repo, os.path.join(parent_dir, dirname))
+ entries = api.list_dir_by_path(repo.get('id'), parent_dir)
+ self.assertHasLen(entries, 3)
+ self.assertNotIn(dirname, [dirent.obj_name for dirent in entries])
+
+ # remove a file through webdav
+ self.assertIn(fname, [dirent.obj_name for dirent in \
+ api.list_dir_by_path(repo.get('id'), parent_dir)])
+ davclient.repo_removefile(repo, os.path.join(parent_dir, fname))
+ entries = api.list_dir_by_path(repo.get('id'), parent_dir)
+ self.assertHasLen(entries, 2)
+ self.assertNotIn(fname, [dirent.obj_name for dirent in entries])
+
+ _test_under_path('/')
+ _test_under_path('/level1-folder-%s' % randstring(10))
+ _test_under_path('/level1-folder-%s/level2-folder-%s' %
+ (randstring(5), randstring(5)))
+
+ def test_copy_move(self):
+ """Test copy/move files and folders."""
+ # XXX: python-easywebdav does not support webdav COPY/MOVE operation yet.
+ # with tmp_repo() as ra:
+ # with tmp_repo() as rb:
+ # roota = ra.get_dir('/')
+ # rootb = rb.get_dir('/')
+ pass
+
+ def test_repo_name_conflict(self):
+ """Test the case when multiple repos have the same name"""
+ repo_name = randstring(length=20)
+ with tmp_repo(name=repo_name) as ra:
+ with tmp_repo(name=repo_name) as rb:
+ davrepos = davclient.list_repos()
+ repos = [r for r in davrepos if dav_basename(r).startswith(repo_name)]
+ self.assertHasLen(repos, 2)
+ repos = sorted(repos, key = lambda x: x.name)
+ if rb.get('id') < ra.get('id'):
+ rb, ra = ra, rb
+ self.assertEqual(dav_basename(repos[0]), '%s-%s' % (repo_name, ra.get('id')[:6]))
+ self.assertEqual(dav_basename(repos[1]), '%s-%s' % (repo_name, rb.get('id')[:6]))
+
+ @use_tmp_repo
+ def test_quota_check(self):
+ """Assert the user storage quota should not be exceeded"""
+ assert api.set_user_quota(USER, 0) >= 0
+ repo = TEST_REPO
+ testfn = 'test.txt'
+ testfpath = os.path.join(os.path.dirname(__file__), 'data', testfn)
+ testfilesize = os.stat(testfpath).st_size
+ api.post_file(repo.get('id'), testfpath, '/', '%s' % randstring(), USER)
+
+ _wait_repo_size_recompute(repo, testfilesize)
+ with _set_quota(USER, testfilesize):
+ with self.assertRaises(WebDAVOperationFailed) as cm:
+ davclient.repo_uploadfile(repo, testfpath, '/%s' % randstring())
+ self.assertEqual(cm.exception.actual_code, 403,
+ 'the operation should fail because quota is full')
+
+ # Attempts to create empty files should also fail
+ with self.assertRaises(WebDAVOperationFailed) as cm:
+ empty_fileobj = io.BytesIO()
+ davclient.repo_uploadfile(repo, empty_fileobj, '/%s' % randstring())
+ self.assertEqual(cm.exception.actual_code, 403,
+ 'the operation should fail because quota is full')
+
+ # After the quota restored, the upload should succeed
+ repo_fpath = '/%s' % randstring()
+ davclient.repo_uploadfile(repo, testfpath, repo_fpath)
+ with open(testfpath, 'rb') as fp:
+ assert fp.read() == davclient.repo_getfile(repo, repo_fpath)
+
+ def assertHasLen(self, obj, expected_length):
+ actuallen = len(obj)
+ msg = 'Expected length is %s, but actual lenght is %s' % (expected_length, actuallen)
+ self.assertEqual(actuallen, expected_length, msg)
+
+ def assertEmpty(self, obj):
+ self.assertHasLen(obj, 0)
+
+@contextmanager
+def _set_quota(user, quota):
+ """Set the quota of the user to the given value, and restore the old value when exit"""
+ oldquota = api.get_user_quota(user)
+ if api.set_user_quota(user, quota) < 0:
+ raise RuntimeError('failed to change user quota')
+ assert api.get_user_quota(user) == quota
+ try:
+ yield
+ finally:
+ api.set_user_quota(user, oldquota)
+
+
+def _wait_repo_size_recompute(repo, size, maxretry=30):
+ reposize = api.get_repo_size(repo.get('id'))
+ retry = 0
+ while reposize != size:
+ if retry >= maxretry:
+ assert False, 'repo size not recomputed in %s seconds' % maxretry
+ retry += 1
+ print('computed = %s, expected = %s' % (reposize, size))
+ time.sleep(1)
+ reposize = api.get_repo_size(repo.get('id'))
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 00000000..bf854a8b
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,3 @@
+requests>=2.3.0
+nose
+pytest
diff --git a/wsgidav/dc/domain_controller.py b/wsgidav/dc/domain_controller.py
new file mode 100644
index 00000000..7f613121
--- /dev/null
+++ b/wsgidav/dc/domain_controller.py
@@ -0,0 +1,170 @@
+import os
+import posixpath
+import seahub_settings
+from seaserv import ccnet_api as api
+from pysearpc import SearpcError
+from wsgidav.dc.seaf_utils import CCNET_CONF_DIR, SEAFILE_CENTRAL_CONF_DIR, multi_tenancy_enabled
+from wsgidav.dc import seahub_db
+import wsgidav.util as util
+from wsgidav.dc.base_dc import BaseDomainController
+from sqlalchemy.sql import exists
+# basic_auth_user, get_domain_realm, require_authentication
+_logger = util.get_module_logger(__name__)
+
+# the block size for the cipher object; must be 16, 24, or 32 for AES
+BLOCK_SIZE = 32
+
+import base64
+PADDING = b'{'
+
+# An encrypted block size must be a multiple of 16
+pad = lambda s: s + (16 - len(s) % 16) * PADDING
+# encrypt with AES, encode with base64
+EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
+
+class SeafileDomainController(BaseDomainController):
+
+ def __init__(self, wsgidav_app, config):
+ self.session_cls = seahub_db.init_db_session_class()
+
+ def __repr__(self):
+ return self.__class__.__name__
+
+ def supports_http_digest_auth(self):
+ # We have access to a plaintext password (or stored hash)
+ return True
+
+ def get_domain_realm(self, inputURL, environ):
+ return "Seafile Authentication"
+
+ def require_authentication(self, realmname, envrion):
+ return True
+
+ def isRealmUser(self, realmname, username, environ):
+ return True
+
+ def getRealmUserPassword(self, realmname, username, environ):
+ """
+ Not applicable to seafile.
+ """
+ return ""
+
+ def basic_auth_user(self, realmname, username, password, environ):
+ if "'" in username:
+ return False
+
+ try:
+ ccnet_email = None
+ session = None
+ if self.session_cls:
+ session = self.session_cls()
+
+ user = api.get_emailuser(username)
+ if user:
+ ccnet_email = user.email
+ else:
+ if session:
+ profile_profile = seahub_db.Base.classes.profile_profile
+ q = session.query(profile_profile.user).filter(profile_profile.contact_email==username)
+ res = q.first()
+ if res:
+ ccnet_email = res[0]
+
+ if not ccnet_email:
+ _logger.warning('User %s doesn\'t exist', username)
+ return False
+
+ enable_webdav_secret = False
+ if hasattr(seahub_settings, 'ENABLE_WEBDAV_SECRET'):
+ enable_webdav_secret = seahub_settings.ENABLE_WEBDAV_SECRET
+
+ enable_two_factor_auth = False
+ if session and enableTwoFactorAuth(session, ccnet_email):
+ enable_two_factor_auth = True
+
+ if not enable_webdav_secret and enable_two_factor_auth:
+ _logger.warning("Two factor auth is enabled, no access to webdav.")
+ return False
+ elif enable_webdav_secret and enable_two_factor_auth:
+ if not validateSecret(session, password, ccnet_email):
+ return False
+ elif not enable_webdav_secret and not enable_two_factor_auth:
+ if api.validate_emailuser(ccnet_email, password) != 0:
+ return False
+ else:
+ if not validateSecret(session, password, ccnet_email) and \
+ api.validate_emailuser(ccnet_email, password) != 0:
+ return False
+
+ username = ccnet_email
+ except Exception as e:
+ _logger.warning('Failed to login: %s', e)
+ return False
+ finally:
+ if session:
+ session.close()
+
+ try:
+ user = api.get_emailuser_with_import(username)
+ if user.role == 'guest':
+ environ['seafile.is_guest'] = True
+ else:
+ environ['seafile.is_guest'] = False
+ except Exception as e:
+ _logger.exception('get_emailuser')
+
+ if multi_tenancy_enabled():
+ try:
+ orgs = api.get_orgs_by_user(username)
+ if orgs:
+ environ['seafile.org_id'] = orgs[0].org_id
+ except Exception as e:
+ _logger.exception('get_orgs_by_user')
+ pass
+
+ environ["http_authenticator.username"] = username
+
+ return True
+
+def validateSecret(session, password, ccnet_email):
+ if not session:
+ return False
+ from Crypto.Cipher import AES
+ secret = seahub_settings.SECRET_KEY[:BLOCK_SIZE]
+ cipher = AES.new(secret.encode('utf8'), AES.MODE_ECB)
+ encoded_str = 'aes$' + EncodeAES(cipher, password.encode('utf8')).decode('utf8')
+ options_useroptions = seahub_db.Base.classes.options_useroptions
+ q = session.query(options_useroptions.email)
+ q = q.filter(options_useroptions.email==ccnet_email,
+ options_useroptions.option_val==encoded_str)
+ res = q.first()
+ if not res:
+ return False
+ return True
+
+def enableTwoFactorAuth(session, email):
+ enable_settings_via_web = True
+ if hasattr(seahub_settings, 'ENABLE_SETTINGS_VIA_WEB'):
+ enable_settings_via_web = seahub_settings.ENABLE_SETTINGS_VIA_WEB
+
+ global_two_factor_auth = False
+ if enable_settings_via_web:
+ constance_config = seahub_db.Base.classes.constance_config
+ q = session.query(constance_config.value).filter(constance_config.constance_key=='ENABLE_TWO_FACTOR_AUTH')
+ res = q.first()
+ if res:
+ if res[0] == 'gAJLAS4=':
+ global_two_factor_auth = True
+ else:
+ return False
+ elif hasattr(seahub_settings, 'ENABLE_TWO_FACTOR_AUTH'):
+ global_two_factor_auth = seahub_settings.ENABLE_TWO_FACTOR_AUTH
+
+ if global_two_factor_auth:
+ two_factor_staticdevice = seahub_db.Base.classes.two_factor_staticdevice
+ two_factor_totpdevice = seahub_db.Base.classes.two_factor_totpdevice
+ if session.query(exists().where(two_factor_staticdevice.user==email)).scalar() \
+ or session.query(exists().where(two_factor_totpdevice.user==email)).scalar():
+ return True
+
+ return False
diff --git a/wsgidav/dc/seaf_utils.py b/wsgidav/dc/seaf_utils.py
new file mode 100644
index 00000000..8d05947c
--- /dev/null
+++ b/wsgidav/dc/seaf_utils.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import os
+import configparser
+import wsgidav.util as util
+
+_logger = util.get_module_logger(__name__)
+
+
+def _load_path_from_env(key, check=True):
+ v = os.environ.get(key, '')
+ if not v:
+ if check:
+ raise ImportError(
+ "seaf_util cannot be imported, because environment variable %s is undefined." % key)
+ else:
+ return None
+ return os.path.normpath(os.path.expanduser(v))
+
+CCNET_CONF_DIR = _load_path_from_env('CCNET_CONF_DIR')
+SEAFILE_CONF_DIR = _load_path_from_env('SEAFILE_CONF_DIR')
+SEAFILE_CENTRAL_CONF_DIR = _load_path_from_env(
+ 'SEAFILE_CENTRAL_CONF_DIR', check=False)
+
+_multi_tenancy_enabled = None
+
+
def multi_tenancy_enabled():
    """Return whether ``[general] multi_tenancy`` is enabled in seafile.conf.

    The value is computed once and cached in the module-level
    ``_multi_tenancy_enabled``.  Any error while reading the config is
    logged and treated as "disabled" so startup never fails here.
    """
    global _multi_tenancy_enabled
    if _multi_tenancy_enabled is None:
        _multi_tenancy_enabled = False
        try:
            cp = configparser.ConfigParser()
            # Prefer the central conf dir when it is configured.
            cp.read(
                os.path.join(SEAFILE_CENTRAL_CONF_DIR if SEAFILE_CENTRAL_CONF_DIR else SEAFILE_CONF_DIR, 'seafile.conf'))
            if cp.has_option('general', 'multi_tenancy'):
                _multi_tenancy_enabled = cp.getboolean(
                    'general', 'multi_tenancy')
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit /
            # KeyboardInterrupt; narrowed to Exception.
            _logger.exception('failed to read multi_tenancy')
    return _multi_tenancy_enabled
diff --git a/wsgidav/dc/seahub_db.py b/wsgidav/dc/seahub_db.py
new file mode 100644
index 00000000..2ae72a82
--- /dev/null
+++ b/wsgidav/dc/seahub_db.py
@@ -0,0 +1,100 @@
+from urllib.parse import quote_plus
+
+from sqlalchemy import create_engine
+from sqlalchemy.event import contains as has_event_listener, listen as add_event_listener
+from sqlalchemy.exc import DisconnectionError
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.pool import Pool
+from sqlalchemy.ext.automap import automap_base
+
+Base = automap_base()
+
+import wsgidav.util as util
+_logger = util.get_module_logger(__name__)
+
def init_db_session_class():
    """Create a sessionmaker bound to the seahub database.

    Returns None when seahub settings cannot be imported or the engine
    cannot be initialised.
    """
    try:
        _logger.info('Init seahub database...')
        engine = create_seahub_db_engine()
        Base.prepare(engine, reflect=True)
        return sessionmaker(bind=engine)
    except ImportError:
        # seahub_settings not importable: run without a seahub DB.
        return None
    except Exception as e:
        _logger.warning('Failed to init seahub db: %s.', e)
        return None
+
def create_seahub_db_engine():
    """Build a SQLAlchemy engine for the seahub database.

    Connection parameters come from seahub_settings.DATABASES['default'].
    Only MySQL and PostgreSQL are supported; on an unusable configuration a
    warning is logged and None is returned.
    """
    import seahub_settings
    db_infos = seahub_settings.DATABASES['default']

    engine_name = db_infos.get('ENGINE')
    if engine_name == 'django.db.backends.mysql':
        db_type = "mysql"
        default_port = '3306'
    elif engine_name in ('django.db.backends.postgresql_psycopg2',
                         'django.db.backends.postgresql'):
        db_type = "pgsql"
        # BUG FIX: the old code fell back to MySQL's 3306 for postgres;
        # PostgreSQL's default port is 5432.
        default_port = '5432'
    else:
        _logger.warning('Failed to init seahub db, only mysql and postgres db supported.')
        return

    db_host = db_infos.get('HOST', '127.0.0.1')
    db_port = int(db_infos.get('PORT', default_port))
    db_name = db_infos.get('NAME')
    if not db_name:
        _logger.warning('Failed to init seahub db, db name is not set.')
        return
    db_user = db_infos.get('USER')
    if not db_user:
        _logger.warning('Failed to init seahub db, db user is not set.')
        return
    db_passwd = db_infos.get('PASSWORD')

    if db_type == "mysql":
        db_url = "mysql+pymysql://%s:%s@%s:%s/%s?charset=utf8" % (db_user, quote_plus(db_passwd), db_host, db_port, db_name)
    else:
        db_url = "postgresql://%s:%s@%s:%s/%s" % (db_user, quote_plus(db_passwd), db_host, db_port, db_name)

    # Add pool recycle, or mysql connection will be closed by mysqld if idle
    # for too long.
    kwargs = dict(pool_recycle=300, echo=False, echo_pool=False)

    engine = create_engine(db_url, **kwargs)
    if not has_event_listener(Pool, 'checkout', ping_connection):
        # We use has_event_listener to double check in case we call create_engine
        # multipe times in the same process.
        add_event_listener(Pool, 'checkout', ping_connection)

    return engine
+
# This is used to fix the problem of "MySQL has gone away" that happens when
# mysql server is restarted or the pooled connections are closed by the mysql
# server beacause being idle for too long.
#
# See http://stackoverflow.com/a/17791117/1467959
def ping_connection(dbapi_connection, connection_record, connection_proxy): # pylint: disable=unused-argument
    """Pool 'checkout' listener: verify a pooled connection is still alive.

    Runs a cheap "SELECT 1"; on failure disposes every cached connection of
    the pool and raises DisconnectionError so SQLAlchemy opens a fresh one.
    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
        cursor.close()
    except Exception:
        # BUG FIX: previously called the undefined name `logger` (the module
        # logger is `_logger`), so a dead connection raised NameError here.
        # Also narrowed the bare `except:`.
        _logger.info('fail to ping database server, disposing all cached connections')
        connection_proxy._pool.dispose() # pylint: disable=protected-access

        # Raise DisconnectionError so the pool would create a new connection
        raise DisconnectionError()
diff --git a/wsgidav/seafile_dav_provider.py b/wsgidav/seafile_dav_provider.py
new file mode 100644
index 00000000..278f0837
--- /dev/null
+++ b/wsgidav/seafile_dav_provider.py
@@ -0,0 +1,887 @@
+from wsgidav.dav_error import DAVError, HTTP_BAD_REQUEST, HTTP_FORBIDDEN, \
+ HTTP_NOT_FOUND, HTTP_INTERNAL_ERROR
+from wsgidav.dav_provider import DAVProvider, DAVCollection, DAVNonCollection
+from threading import Timer, Lock
+
+import wsgidav.util as util
+import os
+import time
+import posixpath
+import unicodedata
+
+import tempfile
+
+from seaserv import seafile_api, CALC_SHARE_USAGE
+from pysearpc import SearpcError
+from seafobj import commit_mgr, fs_mgr
+from seafobj.fs import SeafFile, SeafDir
+from seafobj.blocks import block_mgr
+from wsgidav.dc.seaf_utils import SEAFILE_CONF_DIR
+
+__docformat__ = "reStructuredText"
+
+_logger = util.get_module_logger(__name__)
+
+NEED_PROGRESS = 0
+SYNCHRONOUS = 1
+
+INFINITE_QUOTA = -2
+
def sort_repo_list(repos):
    """Return *repos* as a new list ordered by repository id (ascending)."""
    def _repo_id(repo):
        return repo.id
    return sorted(repos, key=_repo_id)
+
class BlockMap(object):
    """Cached list of block sizes for one file, plus a freshness timestamp.

    Instances live in SeafileProvider.block_map keyed by file obj_id and are
    purged when their timestamp is older than a day.
    """
    def __init__(self):
        # Size in bytes of each content block, in block order.
        self.block_sizes = []
        # Last time this entry was created/used; drives cache expiry.
        self.timestamp = time.time()
+
class SeafileStream(object):
    '''Implements basic file-like interface.

    Reads a SeafFile by lazily loading its content blocks from block_mgr.
    Cursor state: ``block`` holds the currently loaded block's bytes (or
    None), ``block_idx`` the index of that block, ``block_offset`` the read
    position inside it.
    '''
    def __init__(self, file_obj, block_map, block_map_lock):
        self.file_obj = file_obj
        self.block = None
        self.block_idx = 0
        self.block_offset = 0
        # Shared BlockMap cache (see SeafileProvider), guarded by the lock;
        # lets seek() skip blocks by size without loading their contents.
        self.block_map = block_map
        self.block_map_lock = block_map_lock

    def read(self, size):
        """Return up to *size* bytes from the current position."""
        remain = size
        blocks = self.file_obj.blocks
        ret = b''

        while True:
            if not self.block:
                # Load the next block on demand; stop at end of file.
                if self.block_idx == len(blocks):
                    break
                self.block = block_mgr.load_block(self.file_obj.store_id,
                                                  self.file_obj.version,
                                                  blocks[self.block_idx])

            if self.block_offset + remain >= len(self.block):
                # Request spans past this block: take its tail and move on.
                self.block_idx += 1
                ret += self.block[self.block_offset:]
                remain -= (len(self.block) - self.block_offset)
                self.block = None
                self.block_offset = 0
            else:
                # Request is satisfied inside the current block.
                ret += self.block[self.block_offset:self.block_offset+remain]
                self.block_offset += remain
                remain = 0

            if remain == 0:
                break

        return ret

    def close(self):
        # Nothing to release; blocks are managed by block_mgr.
        pass

    def seek(self, pos):
        """Position the cursor at absolute offset *pos* (used for Range requests)."""
        self.block = None
        self.block_idx = 0
        self.block_offset = 0

        current_pos = pos
        if current_pos == 0:
            return

        with self.block_map_lock:
            if self.file_obj.obj_id not in self.block_map:
                # First seek on this file: stat every block once and cache
                # the sizes so later seeks are cheap.
                block_map = BlockMap()
                for i in range(len(self.file_obj.blocks)):
                    block_size = block_mgr.stat_block(self.file_obj.store_id, self.file_obj.version, self.file_obj.blocks[i])
                    block_map.block_sizes.append(block_size)
                self.block_map[self.file_obj.obj_id] = block_map
            block_map = self.block_map[self.file_obj.obj_id]
            block_map.timestamp = time.time()

            # Walk block sizes to find the block containing `pos` and the
            # offset within it.
            while current_pos > 0:
                if self.block_idx == len(self.file_obj.blocks):
                    break
                block_size = block_map.block_sizes[self.block_idx]
                if current_pos >= block_size:
                    self.block_idx += 1
                    current_pos -= block_size
                    self.block_offset = 0
                else:
                    self.block_offset = current_pos
                    current_pos = 0
+
+#===============================================================================
+# SeafileResource
+#===============================================================================
class SeafileResource(DAVNonCollection):
    """WebDAV file resource backed by a Seafile file object (SeafFile).

    Reads stream block-by-block via SeafileStream; writes are staged into a
    temp file and committed through seafile_api on end_write().
    """
    def __init__(self, path, repo, rel_path, obj, environ, block_map={}, block_map_lock=None):
        # NOTE(review): block_map={} is a shared mutable default -- every
        # instance created without an explicit map (e.g. via
        # SeafDirResource.get_member) shares one dict.  Confirm this sharing
        # is intended; the provider path passes its own map + lock.
        super(SeafileResource, self).__init__(path, environ)
        self.repo = repo                  # repo the file lives in
        self.rel_path = rel_path          # path of the file inside the repo
        self.obj = obj                    # SeafFile object
        self.username = environ.get("http_authenticator.username", "")
        self.org_id = environ.get("seafile.org_id", "")
        self.is_guest = environ.get("seafile.is_guest", False)
        self.tmpfile_path = None          # staging file used between begin_write/end_write
        self.owner = None
        self.block_map = block_map
        self.block_map_lock = block_map_lock

    # Getter methods for standard live properties
    def get_content_length(self):
        return self.obj.size
    def get_content_type(self):
#        (mimetype, _mimeencoding) = mimetypes.guess_type(self.path)
#        print "mimetype(%s): %r, %r" % (self.path, mimetype, _mimeencoding)
#        if not mimetype:
#            mimetype = "application/octet-stream"
#        print "mimetype(%s): return %r" % (self.path, mimetype)
#        return mimetype
        return util.guess_mime_type(self.path)
    def get_creation_date(self):
#        return int(time.time())
        # Seafile does not track creation dates.
        return None
    def get_display_name(self):
        return self.name
    def get_etag(self):
        # The content-addressed object id doubles as a strong ETag.
        return self.obj.obj_id

    def get_last_modified(self):
        """Return the file's mtime, falling back to a per-directory RPC for
        v0 repos (where dirents carry no mtime)."""
        cached_mtime = getattr(self.obj, 'last_modified', None)
        if cached_mtime:
            return cached_mtime

        if self.obj.mtime > 0:
            return self.obj.mtime

        # XXX: What about not return last modified for files in v0 repos,
        # since they can be too expensive sometimes?
        parent, filename = os.path.split(self.rel_path)
        try:
            mtimes = seafile_api.get_files_last_modified(self.repo.id, parent, -1)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)
        for mtime in mtimes:
            if (mtime.file_name == filename):
                return mtime.last_modified

        return None

    def support_etag(self):
        return True
    def support_ranges(self):
        # Range requests are honoured via SeafileStream.seek().
        return True

    def get_content(self):
        """Open content as a stream for reading.

        See DAVResource.getContent()
        """
        assert not self.is_collection
        return SeafileStream(self.obj, self.block_map, self.block_map_lock)

    def check_repo_owner_quota(self, isnewfile=True, contentlength=-1):
        """Check if the upload would cause the user quota be exceeded

        `contentlength` is only positive when the client does not use "transfer-encode: chunking"

        Return True if the quota would not be exceeded, otherwise return False.
        """
        try:
            if contentlength <= 0:
                # When client use "transfer-encode: chunking", the content length
                # is not included in the request headers
                if isnewfile:
                    return seafile_api.check_quota(self.repo.id) >= 0
                else:
                    return True
            else:
                # Overwriting: only the size delta counts against quota.
                delta = contentlength - self.obj.size
                return seafile_api.check_quota(self.repo.id, delta) >= 0
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

    def begin_write(self, content_type=None, isnewfile=True, contentlength=-1):
        """Open content as a stream for writing.

        See DAVResource.beginWrite()
        """
        assert not self.is_collection
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        try:
            if seafile_api.check_permission_by_path(self.repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        if not self.check_repo_owner_quota(isnewfile, contentlength):
            raise DAVError(HTTP_FORBIDDEN, "The quota of the repo owner is exceeded")

        # Stage the upload in a temp file; it is committed in end_write().
        fd, path = tempfile.mkstemp(dir=self.provider.tmpdir)
        self.tmpfile_path = path
        return os.fdopen(fd, "wb")

    def end_write(self, with_errors, isnewfile=True):
        """Commit the staged temp file into the repo (unless errors occurred);
        the temp file is always removed."""
        try:
            if not with_errors:
                parent, filename = os.path.split(self.rel_path)
                contentlength = os.stat(self.tmpfile_path).st_size
                # Re-check quota now that the true upload size is known
                # (chunked uploads have no Content-Length up front).
                if not self.check_repo_owner_quota(isnewfile=isnewfile, contentlength=contentlength):
                    if self.tmpfile_path:
                        try:
                            os.unlink(self.tmpfile_path)
                        finally:
                            self.tmpfile_path = None
                    raise DAVError(HTTP_FORBIDDEN, "The quota of the repo owner is exceeded")
                seafile_api.put_file(self.repo.id, self.tmpfile_path, parent, filename,
                                     self.username, None)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)
        finally:
            if self.tmpfile_path:
                try:
                    os.unlink(self.tmpfile_path)
                finally:
                    self.tmpfile_path = None

    def handle_delete(self):
        """Delete this file; deleting an already-missing file succeeds."""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        try:
            if seafile_api.check_permission_by_path(self.repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            file_id = seafile_api.get_file_id_by_path(self.repo.id, self.rel_path)
            if file_id is None:
                # Already gone: treat as success (idempotent DELETE).
                return True

            parent, filename = os.path.split(self.rel_path)
            seafile_api.del_file(self.repo.id, parent, filename, self.username)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        return True

    def handle_move(self, dest_path):
        """MOVE to *dest_path* ("/<repo name>/<path in repo>"), overwriting an
        existing destination file (GoodReader et al. require overwrite)."""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        parts = dest_path.strip("/").split("/", 1)
        if len(parts) <= 1:
            raise DAVError(HTTP_BAD_REQUEST)
        repo_name = parts[0]
        rel_path = parts[1]

        dest_dir, dest_file = os.path.split(rel_path)
        dest_repo = getRepoByName(repo_name, self.username, self.org_id, self.is_guest)
        if dest_repo.id is None:
            raise DAVError(HTTP_BAD_REQUEST)

        try:
            # NOTE(review): this checks permission on dest_repo but with the
            # SOURCE path (self.rel_path), not the destination path -- verify
            # whether the destination path was intended here.
            if seafile_api.check_permission_by_path(dest_repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            src_dir, src_file = os.path.split(self.rel_path)

            if not seafile_api.is_valid_filename(dest_repo.id, dest_file):
                raise DAVError(HTTP_BAD_REQUEST)

            # some clients such as GoodReader requires "overwrite" semantics
            file_id_dest = seafile_api.get_file_id_by_path(dest_repo.id, rel_path)
            if file_id_dest != None:
                seafile_api.del_file(dest_repo.id, dest_dir, dest_file, self.username)

            seafile_api.move_file(self.repo.id, src_dir, src_file,
                                  dest_repo.id, dest_dir, dest_file, 1, self.username, NEED_PROGRESS, SYNCHRONOUS)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        return True

    def handle_copy(self, dest_path, depth_infinity):
        """COPY to *dest_path* ("/<repo name>/<path in repo>")."""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        parts = dest_path.strip("/").split("/", 1)
        if len(parts) <= 1:
            raise DAVError(HTTP_BAD_REQUEST)
        repo_name = parts[0]
        rel_path = parts[1]

        dest_dir, dest_file = os.path.split(rel_path)
        dest_repo = getRepoByName(repo_name, self.username, self.org_id, self.is_guest)
        if dest_repo.id is None:
            raise DAVError(HTTP_BAD_REQUEST)

        try:
            # NOTE(review): as in handle_move, the permission check uses the
            # source path against the destination repo -- confirm.
            if seafile_api.check_permission_by_path(dest_repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            src_dir, src_file = os.path.split(self.rel_path)
            if not src_file:
                raise DAVError(HTTP_BAD_REQUEST)

            if not seafile_api.is_valid_filename(dest_repo.id, dest_file):
                raise DAVError(HTTP_BAD_REQUEST)

            seafile_api.copy_file(self.repo.id, src_dir, src_file,
                                  dest_repo.id, dest_dir, dest_file, self.username, NEED_PROGRESS, SYNCHRONOUS)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        return True
+
+#===============================================================================
+# SeafDirResource
+#===============================================================================
class SeafDirResource(DAVCollection):
    """WebDAV collection backed by a Seafile directory object (SeafDir)."""
    def __init__(self, path, repo, rel_path, obj, environ):
        super(SeafDirResource, self).__init__(path, environ)
        self.repo = repo                  # repo the directory lives in
        self.rel_path = rel_path          # path of the directory inside the repo
        self.obj = obj                    # SeafDir object
        self.username = environ.get("http_authenticator.username", "")
        self.org_id = environ.get("seafile.org_id", "")
        self.is_guest = environ.get("seafile.is_guest", False)

    # Getter methods for standard live properties
    def get_creation_date(self):
#        return int(time.time())
        # Seafile does not track creation dates.
        return None
    def get_display_name(self):
        return self.name
    def get_directory_info(self):
        return None
    def get_etag(self):
        # Content-addressed dir object id serves as a strong ETag.
        return self.obj.obj_id
    def get_last_modified(self):
#        return int(time.time())
        return None

    def get_member_names(self):
        """Names of all immediate children (subdirs first, then files)."""
        namelist = []
        for e in self.obj.dirs:
            namelist.append(e[0])
        for e in self.obj.files:
            namelist.append(e[0])
        return namelist

    def get_member(self, name):
        """Resolve one child by name; raises 404 when it does not exist."""
        member_rel_path = "/".join([self.rel_path, name])
        member_path = "/".join([self.path, name])
        member = self.obj.lookup(name)

        if not member:
            raise DAVError(HTTP_NOT_FOUND)

        # NOTE(review): resources built here do not receive the provider's
        # block_map/lock, so they fall back to SeafileResource's default --
        # confirm this is intended.
        if isinstance(member, SeafFile):
            return SeafileResource(member_path, self.repo, member_rel_path, member, self.environ)
        else:
            return SeafDirResource(member_path, self.repo, member_rel_path, member, self.environ)

    def get_member_list(self):
        """Build all child resources in one pass, attaching mtimes.

        For v0 repos dirents carry no mtime, so mtimes are fetched in one
        RPC per directory; v1 dirents carry mtime directly.
        """
        member_list = []
        d = self.obj

        if d.version == 0:
            file_mtimes = []
            try:
                file_mtimes = seafile_api.get_files_last_modified(self.repo.id, self.rel_path, -1)
            except SearpcError as e:
                raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

            mtimes = {}
            for entry in file_mtimes:
                mtimes[entry.file_name] = entry.last_modified
        for name, dent in d.dirents.items():
            member_path = posixpath.join(self.path, name)
            member_rel_path = posixpath.join(self.rel_path, name)

            if dent.is_dir():
                obj = fs_mgr.load_seafdir(d.store_id, d.version, dent.id)
                res = SeafDirResource(member_path, self.repo, member_rel_path, obj, self.environ)
            elif dent.is_file():
                obj = fs_mgr.load_seafile(d.store_id, d.version, dent.id)
                res = SeafileResource(member_path, self.repo, member_rel_path, obj, self.environ)
            else:
                # Neither file nor dir (e.g. unsupported entry type): skip.
                continue

            if d.version == 1:
                obj.last_modified = dent.mtime
            else:
                obj.last_modified = mtimes[name]

            member_list.append(res)

        return member_list

    # --- Read / write ---------------------------------------------------------
    def create_empty_resource(self, name):
        """Create an empty (length-0) resource.

        See DAVResource.createEmptyResource()
        """
        assert not "/" in name
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        try:
            if seafile_api.check_permission_by_path(self.repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            if seafile_api.check_quota(self.repo.id) < 0:
                raise DAVError(HTTP_FORBIDDEN, "The quota of the repo owner is exceeded")
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        try:
            seafile_api.post_empty_file(self.repo.id, self.rel_path, name, self.username)
        except SearpcError as e:
            if e.msg == 'Invalid file name':
                raise DAVError(HTTP_BAD_REQUEST, e.msg)
            # An existing file is tolerated (PUT-to-existing is overwrite).
            if e.msg != 'file already exists':
                raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        try:
            # Repo was updated, can't use self.repo
            repo = seafile_api.get_repo(self.repo.id)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)
        if not repo:
            raise DAVError(HTTP_INTERNAL_ERROR)

        member_rel_path = "/".join([self.rel_path, name])
        member_path = "/".join([self.path, name])
        obj = resolveRepoPath(repo, member_rel_path)
        if not obj or not isinstance(obj, SeafFile):
            raise DAVError(HTTP_INTERNAL_ERROR)

        return SeafileResource(member_path, repo, member_rel_path, obj, self.environ)

    def create_collection(self, name):
        """Create a new collection as member of self.

        See DAVResource.createCollection()
        """
        assert not "/" in name
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        try:
            if seafile_api.check_permission_by_path(self.repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            if not seafile_api.is_valid_filename(self.repo.id, name):
                raise DAVError(HTTP_BAD_REQUEST)

            seafile_api.post_dir(self.repo.id, self.rel_path, name, self.username)
        except SearpcError as e:
            # MKCOL on an existing dir is tolerated.
            if e.msg != 'file already exists':
                raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

    def handle_delete(self):
        """Delete this directory; the repo root itself cannot be deleted."""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        try:
            if seafile_api.check_permission_by_path(self.repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            parent, filename = os.path.split(self.rel_path)
            # Can't delete repo root
            if not filename:
                raise DAVError(HTTP_BAD_REQUEST)

            seafile_api.del_file(self.repo.id, parent, filename, self.username)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        return True

    def handle_move(self, dest_path):
        """MOVE this directory to *dest_path* ("/<repo name>/<path in repo>")."""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        parts = dest_path.strip("/").split("/", 1)
        if len(parts) <= 1:
            raise DAVError(HTTP_BAD_REQUEST)
        repo_name = parts[0]
        rel_path = parts[1]

        dest_dir, dest_file = os.path.split(rel_path)
        dest_repo = getRepoByName(repo_name, self.username, self.org_id, self.is_guest)

        if dest_repo.id is None or self.rel_path is None or self.username is None:
            raise DAVError(HTTP_BAD_REQUEST)

        try:
            # NOTE(review): permission is checked on dest_repo with the
            # SOURCE path (self.rel_path) -- confirm intent.
            if seafile_api.check_permission_by_path(dest_repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            src_dir, src_file = os.path.split(self.rel_path)
            if not src_file:
                raise DAVError(HTTP_BAD_REQUEST)

            if not seafile_api.is_valid_filename(dest_repo.id, dest_file):
                raise DAVError(HTTP_BAD_REQUEST)

            seafile_api.move_file(self.repo.id, src_dir, src_file,
                                  dest_repo.id, dest_dir, dest_file, 0, self.username, NEED_PROGRESS, SYNCHRONOUS)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        return True

    def handle_copy(self, dest_path, depth_infinity):
        """COPY this directory to *dest_path* ("/<repo name>/<path in repo>")."""
        if self.provider.readonly:
            raise DAVError(HTTP_FORBIDDEN)

        parts = dest_path.strip("/").split("/", 1)
        if len(parts) <= 1:
            raise DAVError(HTTP_BAD_REQUEST)
        repo_name = parts[0]
        rel_path = parts[1]

        dest_dir, dest_file = os.path.split(rel_path)
        dest_repo = getRepoByName(repo_name, self.username, self.org_id, self.is_guest)

        if dest_repo.id is None or self.rel_path is None or self.username is None:
            raise DAVError(HTTP_BAD_REQUEST)

        try:
            # NOTE(review): same source-path-vs-dest-repo check as handle_move.
            if seafile_api.check_permission_by_path(dest_repo.id, self.rel_path, self.username) != "rw":
                raise DAVError(HTTP_FORBIDDEN)

            src_dir, src_file = os.path.split(self.rel_path)
            if not src_file:
                raise DAVError(HTTP_BAD_REQUEST)

            if not seafile_api.is_valid_filename(dest_repo.id, dest_file):
                raise DAVError(HTTP_BAD_REQUEST)

            seafile_api.copy_file(self.repo.id, src_dir, src_file,
                                  dest_repo.id, dest_dir, dest_file, self.username, NEED_PROGRESS, SYNCHRONOUS)
        except SearpcError as e:
            raise DAVError(HTTP_INTERNAL_ERROR, e.msg)

        return True
+
class RootResource(DAVCollection):
    """Virtual root collection listing every repo the user can access."""

    def __init__(self, username, environ, show_repo_id):
        super(RootResource, self).__init__("/", environ)
        self.username = username
        self.show_repo_id = show_repo_id
        self.org_id = environ.get('seafile.org_id', '')
        self.is_guest = environ.get('seafile.is_guest', False)

    # Getter methods for standard live properties
    def get_creation_date(self):
        # The root is synthetic; it has no creation date.
        return None

    def get_display_name(self):
        return ""

    def get_directory_info(self):
        return None

    def get_etag(self):
        return None

    def get_last_modified(self):
        # BUG FIX: was misnamed `getLastModified` (camelCase), so it never
        # overrode the wsgidav 3.x snake_case hook; renamed to match the
        # convention used by every other resource in this file.
        return None

    def _group_repos_by_name(self):
        """Map repo name -> list of accessible repos bearing that name."""
        name_hash = {}
        for r in getAccessibleRepos(self.username, self.org_id, self.is_guest):
            # BUG FIX: get_member_names() previously did `name_hash[r.name]`
            # before the key existed, raising KeyError for the first repo of
            # every name.
            name_hash.setdefault(r.name, []).append(r)
        return name_hash

    def get_member_names(self):
        namelist = []
        for r_list in self._group_repos_by_name().values():
            if len(r_list) == 1:
                namelist.append(r_list[0].name)
            else:
                # Same-named repos are disambiguated with an id suffix.
                for repo in sort_repo_list(r_list):
                    namelist.append(repo.name + "-" + repo.id[:6])
        return namelist

    def get_member(self, name):
        repo = getRepoByName(name, self.username, self.org_id, self.is_guest)
        return self._createRootRes(repo, name)

    def get_member_list(self):
        """
        Overwrite this method for better performance.
        The default implementation call getMemberNames() then call getMember()
        for each name. This calls getAccessibleRepos() for too many times.
        """
        member_list = []
        for r_list in self._group_repos_by_name().values():
            if len(r_list) == 1:
                repo = r_list[0]
                unique_name = repo.name
                if self.show_repo_id:
                    unique_name = repo.name + "-" + repo.id[:6]
                member_list.append(self._createRootRes(repo, unique_name))
            else:
                for repo in sort_repo_list(r_list):
                    unique_name = repo.name + "-" + repo.id[:6]
                    member_list.append(self._createRootRes(repo, unique_name))
        return member_list

    def _createRootRes(self, repo, name):
        obj = get_repo_root_seafdir(repo)
        return SeafDirResource("/"+name, repo, "", obj, self.environ)

    # --- Read / write ---------------------------------------------------------
    # The root is synthetic: every mutating operation is refused.

    def create_empty_resource(self, name):
        raise DAVError(HTTP_FORBIDDEN)

    def create_collection(self, name):
        raise DAVError(HTTP_FORBIDDEN)

    def handle_delete(self):
        raise DAVError(HTTP_FORBIDDEN)

    def handle_move(self, dest_path):
        raise DAVError(HTTP_FORBIDDEN)

    def handle_copy(self, dest_path, depth_infinity):
        raise DAVError(HTTP_FORBIDDEN)
+
+
+#===============================================================================
+# SeafileProvider
+#===============================================================================
class SeafileProvider(DAVProvider):
    """DAVProvider serving Seafile repos; root lists repos, children map to
    SeafDirResource / SeafileResource."""

    def __init__(self, show_repo_id, readonly=False):
        super(SeafileProvider, self).__init__()
        self.readonly = readonly
        self.show_repo_id = show_repo_id
        # Staging area for uploads (see SeafileResource.begin_write).
        self.tmpdir = os.path.join(SEAFILE_CONF_DIR, "webdavtmp")
        # obj_id -> BlockMap cache shared with SeafileStream, plus its lock.
        self.block_map = {}
        self.block_map_lock = Lock()
        self.clean_block_map_task_started = False
        if not os.access(self.tmpdir, os.F_OK):
            os.mkdir(self.tmpdir)

    def clean_block_map_per_hour(self):
        """Purge BlockMap entries unused for >= 24h, then reschedule itself
        to run again in an hour.

        NOTE(review): despite the name, the expiry threshold is a day
        (3600*24); only the rescheduling interval is hourly.  The Timer
        thread is not marked daemon -- confirm shutdown behavior.
        """
        delete_items = []
        with self.block_map_lock:
            for obj_id, block in self.block_map.items():
                if time.time() - block.timestamp >= 3600*24:
                    delete_items.append(obj_id)
            for i in range(len(delete_items)):
                self.block_map.pop(delete_items[i])
        t = Timer(3600, self.clean_block_map_per_hour)
        t.start()

    def __repr__(self):
        rw = "Read-Write"
        if self.readonly:
            rw = "Read-Only"
        return "%s for Seafile (%s)" % (self.__class__.__name__, rw)


    def get_resource_inst(self, path, environ):
        """Return info dictionary for path.

        See DAVProvider.getResourceInst()
        """

        # start the scheduled task of cleaning up the block map here,
        # because __init__ runs in a separate process.
        if not self.clean_block_map_task_started:
            self.clean_block_map_task_started = True
            self.clean_block_map_per_hour()

        self._count_get_resource_inst += 1

        username = environ.get("http_authenticator.username", "")
        org_id = environ.get("seafile.org_id", "")
        is_guest = environ.get("seafile.is_guest", False)

        if path == "/" or path == "":
            return RootResource(username, environ, self.show_repo_id)

        path = path.rstrip("/")
        try:
            repo, rel_path, obj = resolvePath(path, username, org_id, is_guest)
        except DAVError as e:
            if e.value == HTTP_NOT_FOUND:
                # Missing resources map to "no instance", not an error page.
                return None
            raise

        if isinstance(obj, SeafDir):
            return SeafDirResource(path, repo, rel_path, obj, environ)
        return SeafileResource(path, repo, rel_path, obj, environ, self.block_map, self.block_map_lock)
+
def resolvePath(path, username, org_id, is_guest):
    """Resolve "/<repo name>/<path>" to (repo, rel_path, SeafDir|SeafFile).

    Raises DAVError(HTTP_NOT_FOUND) when a component is missing or a file
    appears before the last segment; HTTP_BAD_REQUEST on an empty path.
    """
    # Normalize to NFC so composed/decomposed Unicode names compare equal.
    path = unicodedata.normalize('NFC', path)
    segments = path.strip("/").split("/")
    if len(segments) == 0:
        raise DAVError(HTTP_BAD_REQUEST)
    repo_name = segments.pop(0)

    repo = getRepoByName(repo_name, username, org_id, is_guest)

    rel_path = ""
    obj = get_repo_root_seafdir(repo)

    n_segs = len(segments)
    i = 0
    parent = None
    for segment in segments:
        parent = obj
        obj = parent.lookup(segment)

        # A file may only be the final segment of the path.
        if not obj or (isinstance(obj, SeafFile) and i != n_segs-1):
            raise DAVError(HTTP_NOT_FOUND)

        rel_path += "/" + segment
        i += 1

    if parent:
        # Attach the dirent mtime so resources can report last-modified
        # without an extra RPC.
        obj.mtime = parent.lookup_dent(segment).mtime

    return (repo, rel_path, obj)
+
def resolveRepoPath(repo, path):
    """Walk *path* inside *repo* and return the SeafDir/SeafFile it names.

    Returns None when any component is missing or a file shows up before
    the last segment.
    """
    normalized = unicodedata.normalize('NFC', path)
    segments = normalized.strip("/").split("/")
    last = len(segments) - 1

    obj = get_repo_root_seafdir(repo)
    for idx, segment in enumerate(segments):
        obj = obj.lookup(segment)
        if not obj:
            return None
        if isinstance(obj, SeafFile) and idx != last:
            # Files cannot have children: a non-final file segment is invalid.
            return None

    return obj
+
def get_repo_root_seafdir(repo):
    """Load the SeafDir for the root of *repo*'s current head commit."""
    root_id = commit_mgr.get_commit_root_id(repo.id, repo.version, repo.head_cmmt_id)
    return fs_mgr.load_seafdir(repo.store_id, repo.version, root_id)
+
def getRepoByName(repo_name, username, org_id, is_guest):
    """Resolve *repo_name* to an accessible repo.

    Accepts either the plain repo name or the disambiguated
    "<name>-<id prefix>" form; raises DAVError(HTTP_NOT_FOUND) when
    neither matches.
    """
    repos = getAccessibleRepos(username, org_id, is_guest)

    # Exact name match wins.
    for repo in repos:
        if repo.name == repo_name:
            return repo

    # Fall back to the "<name>-<first 6 chars of id>" disambiguated form.
    for repo in repos:
        if repo.name + "-" + repo.id[:6] == repo_name:
            return repo

    raise DAVError(HTTP_NOT_FOUND)
+
def getAccessibleRepos(username, org_id, is_guest):
    """Collect every unencrypted repo *username* can reach.

    Sources: owned repos, repos shared to the user, group repos and (for
    non-guests) public repos.  Each RPC failure is logged and that source
    is skipped.  Returns the deduplicated repos (dict values keyed by
    repo_id).
    """
    all_repos = {}

    def addRepo(repo):
        # Dedupe by id; encrypted repos cannot be served over WebDAV.
        if all_repos.get(repo.repo_id):
            return
        if not repo.encrypted:
            all_repos[repo.repo_id] = repo

    # BUG FIX: each list now defaults to empty.  Previously a SearpcError
    # only logged a warning, leaving the variable unbound, so the following
    # `for` loop raised UnboundLocalError.
    owned_repos = []
    try:
        owned_repos = get_owned_repos(username, org_id)
    except SearpcError as e:
        util.warn("Failed to list owned repos: %s" % e.msg)

    for orepo in owned_repos:
        if orepo:
            # store_id is used by seafobj to access fs object.
            # repo's store_id is equal to repo_id except virtual_repo.
            orepo.store_id = orepo.repo_id
            addRepo(orepo)

    shared_repos = []
    try:
        shared_repos = get_share_in_repo_list(username, org_id)
    except SearpcError as e:
        util.warn("Failed to list shared repos: %s" % e.msg)

    for srepo in shared_repos:
        if srepo:
            addRepo(srepo)

    group_repos = []
    try:
        group_repos = get_group_repos(username, org_id)
    except SearpcError as e:
        util.warn("Failed to get groups for %s" % username)

    for grepo in group_repos:
        if grepo:
            addRepo(grepo)

    for prepo in list_inner_pub_repos(username, org_id, is_guest):
        if prepo:
            addRepo(prepo)

    return all_repos.values()
+
def get_group_repos(username, org_id):
    """List repos shared with *username* through groups (org-aware)."""
    if not org_id:
        return seafile_api.get_group_repos_by_user(username)
    return seafile_api.get_org_group_repos_by_user(username, org_id)
+
def get_owned_repos(username, org_id):
    """List repos owned by *username* (org-aware)."""
    if not org_id:
        return seafile_api.get_owned_repo_list(username)
    return seafile_api.get_org_owned_repo_list(org_id, username)
+
def get_share_in_repo_list(username, org_id):
    """List repos directly shared to *username* (org-aware, no paging)."""
    if not org_id:
        return seafile_api.get_share_in_repo_list(username, -1, -1)
    return seafile_api.get_org_share_in_repo_list(org_id, username, -1, -1)
+
def list_inner_pub_repos(username, org_id, is_guest):
    """List public ("inner pub") repos; guest users see none."""
    if is_guest:
        return []
    if org_id:
        return seafile_api.list_org_inner_pub_repos(org_id)
    return seafile_api.get_inner_pub_repo_list()
diff --git a/wsgidav/server/server_cli.py b/wsgidav/server/server_cli.py
index 7c76afa5..8da4664a 100644
--- a/wsgidav/server/server_cli.py
+++ b/wsgidav/server/server_cli.py
@@ -40,6 +40,8 @@
from wsgidav.fs_dav_provider import FilesystemProvider
from wsgidav.wsgidav_app import WsgiDAVApp
from wsgidav.xml_tools import use_lxml
+from wsgidav.dc.domain_controller import SeafileDomainController
+from wsgidav.seafile_dav_provider import SeafileProvider
import argparse
import copy
@@ -171,6 +173,16 @@ def _init_command_line_options():
help="used by 'cheroot' server if SSL certificates are configured "
"(default: builtin).",
)
+ parser.add_argument(
+ "--pid",
+ dest="pidfile",
+ help="PID file path",
+ )
+ parser.add_argument(
+ "--log-file",
+ dest="log_file",
+ help="log file path",
+ )
qv_group = parser.add_mutually_exclusive_group()
qv_group.add_argument(
@@ -264,6 +276,58 @@ def _init_command_line_options():
return cmdLineOpts, parser
+def _loadSeafileSettings(config):
+    """Force Seafile auth/provider settings and apply seafdav.conf options.
+
+    Reads the file named by the SEAFDAV_CONF environment variable (if
+    set) and honors only: share_name, workers, timeout, show_repo_id.
+    Everything else in that file is ignored here.
+    """
+    # Seafile cannot support digest auth, since plain text password is needed.
+    config['http_authenticator'] = {
+        'accept_basic': True,
+        'accept_digest': False,
+        'default_to_digest': False,
+        'domain_controller': SeafileDomainController
+    }
+
+    ##### a sample seafdav.conf, we only care: "share_name", "workers", "timeout"
+    # [WEBDAV]
+    # enabled = true
+    # port = 8080
+    # share_name = /seafdav
+    # workers = 5
+    # timeout = 1200
+    ##### a sample seafdav.conf
+
+    # Defaults used when no config file exists or an option is absent.
+    share_name = '/'
+    workers = 5
+    timeout = 1200
+    show_repo_id = False
+
+    seafdav_conf = os.environ.get('SEAFDAV_CONF')
+    if seafdav_conf and os.path.exists(seafdav_conf):
+        import configparser
+        cp = configparser.ConfigParser()
+        cp.read(seafdav_conf)
+        section_name = 'WEBDAV'
+
+        if cp.has_option(section_name, 'share_name'):
+            share_name = cp.get(section_name, 'share_name')
+        if cp.has_option(section_name, 'workers'):
+            # getint: gunicorn expects numeric workers, not a raw string.
+            workers = cp.getint(section_name, 'workers')
+        if cp.has_option(section_name, 'timeout'):
+            timeout = cp.getint(section_name, 'timeout')
+        if cp.has_option(section_name, 'show_repo_id'):
+            # getboolean accepts true/yes/on/1 (case-insensitive).
+            show_repo_id = cp.getboolean(section_name, 'show_repo_id')
+
+    # Setup provider mapping for Seafile. E.g. /seafdav -> seafile provider.
+    config['provider_mapping'] = {share_name: SeafileProvider(show_repo_id=show_repo_id)}
+    config['workers'] = workers
+    config['timeout'] = timeout
def _read_config_file(config_file, verbose):
"""Read configuration file options into a dictionary."""
@@ -336,6 +400,9 @@ def _init_config():
print("Running without configuration file.")
# Command line overrides file
+ if cli_opts.get("log_file"):
+ log_file = cli_opts.get("log_file")
+ config['log_file'] = log_file
if cli_opts.get("port"):
config["port"] = cli_opts.get("port")
if cli_opts.get("host"):
@@ -363,6 +430,12 @@ def _init_config():
if not config["provider_mapping"]:
parser.error("No DAV provider defined.")
+ _loadSeafileSettings(config)
+
+ pid_file = cli_opts.get("pidfile")
+ if pid_file:
+ pid_file = os.path.abspath(pid_file)
+ config["pidfile"] = pid_file
# Quick-configuration of DomainController
auth = cli_opts.get("auth")
auth_conf = config.get("http_authenticator", {})
@@ -428,6 +501,34 @@ def _init_config():
return config
+import gunicorn.app.base
+
+class GunicornApplication(gunicorn.app.base.BaseApplication):
+    """Embed gunicorn: serve the given WSGI app with an options mapping."""
+
+    def __init__(self, app, options=None):
+        self.options = options or {}
+        self.application = app
+        super().__init__()
+
+    def load_config(self):
+        # Push only options gunicorn recognizes; skip unset (None) ones.
+        for name, value in self.options.items():
+            if name in self.cfg.settings and value is not None:
+                self.cfg.set(name.lower(), value)
+
+    def load(self):
+        return self.application
+
+def _run_gunicorn(app, config, mode):
+    """Serve ``app`` through embedded gunicorn (threaded worker pool)."""
+    options = {
+        'bind': '{}:{}'.format(config.get('host'), config.get('port')),
+        'threads': config.get('workers'),
+        'pidfile': config.get('pidfile'),
+        'timeout': config.get('timeout'),
+    }
+    GunicornApplication(app, options).run()
+
def _run_paste(app, config, mode):
"""Run WsgiDAV using paste.httpserver, if Paste is installed.
@@ -791,6 +892,7 @@ def _run_ext_wsgiutils(app, config, mode):
SUPPORTED_SERVERS = {
+ "gunicorn": _run_gunicorn,
"paste": _run_paste,
"gevent": _run_gevent,
"cheroot": _run_cheroot,
diff --git a/wsgidav/util.py b/wsgidav/util.py
index c602f0c1..9dba2492 100644
--- a/wsgidav/util.py
+++ b/wsgidav/util.py
@@ -194,13 +194,17 @@ def init_logging(config):
"logger_format",
"%(asctime)s.%(msecs)03d - <%(thread)d> %(name)-27s %(levelname)-8s: %(message)s",
)
-
+ log_file = config.get('log_file', None)
+ if not log_file:
+ myHandler = logging.StreamHandler(sys.stdout)
+ else:
+ myHandler = logging.FileHandler(log_file)
formatter = logging.Formatter(logger_format, logger_date_format)
# Define handlers
- consoleHandler = logging.StreamHandler(sys.stdout)
+ #consoleHandler = logging.StreamHandler(sys.stdout)
# consoleHandler = logging.StreamHandler(sys.stderr)
- consoleHandler.setFormatter(formatter)
+ myHandler.setFormatter(formatter)
# consoleHandler.setLevel(logging.DEBUG)
# Add the handlers to the base logger
@@ -232,7 +236,7 @@ def init_logging(config):
pass
logger.removeHandler(hdlr)
- logger.addHandler(consoleHandler)
+ logger.addHandler(myHandler)
if verbose >= 3:
for e in enable_loggers: