2013-08-29 16:55:36 +02:00
|
|
|
# This file is part of Radicale Server - Calendar Server
|
2015-02-07 17:26:20 +01:00
|
|
|
# Copyright © 2014 Jean-Marc Martins
|
2016-04-07 19:02:52 +02:00
|
|
|
# Copyright © 2012-2016 Guillaume Ayoub
|
2013-08-29 16:55:36 +02:00
|
|
|
#
|
|
|
|
# This library is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This library is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Radicale. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
"""
|
2016-04-07 19:02:52 +02:00
|
|
|
Storage backends.
|
|
|
|
|
2016-04-10 02:37:43 +02:00
|
|
|
This module loads the storage backend, according to the storage configuration.
|
2016-04-07 19:02:52 +02:00
|
|
|
|
|
|
|
Default storage uses one folder per collection and one file per collection
|
|
|
|
entry.
|
2013-08-29 16:55:36 +02:00
|
|
|
|
|
|
|
"""
|
|
|
|
|
2014-09-15 17:42:33 +02:00
|
|
|
import json
|
2016-04-07 19:02:52 +02:00
|
|
|
import os
|
|
|
|
import posixpath
|
2013-08-29 16:55:36 +02:00
|
|
|
import shutil
|
2016-05-21 00:38:42 +02:00
|
|
|
import stat
|
|
|
|
import threading
|
2016-04-07 19:02:52 +02:00
|
|
|
import time
|
2014-09-15 17:42:33 +02:00
|
|
|
from contextlib import contextmanager
|
2016-04-10 02:37:43 +02:00
|
|
|
from hashlib import md5
|
2016-04-22 11:37:02 +09:00
|
|
|
from importlib import import_module
|
2016-04-10 02:08:07 +02:00
|
|
|
from uuid import uuid4
|
2016-03-31 19:57:40 +02:00
|
|
|
|
2016-04-10 02:08:07 +02:00
|
|
|
import vobject
|
|
|
|
|
2016-05-21 00:38:42 +02:00
|
|
|
if os.name == "nt":
    # Windows: the fcntl module is POSIX-only, so file locking is done
    # through the kernel32 LockFileEx API via ctypes.
    import ctypes
    import ctypes.wintypes
    import msvcrt

    # Flag for LockFileEx requesting an exclusive (write) lock.
    LOCKFILE_EXCLUSIVE_LOCK = 2
    # ULONG_PTR is pointer-sized: 32 bits on a 32-bit build, 64 otherwise.
    if ctypes.sizeof(ctypes.c_void_p) == 4:
        ULONG_PTR = ctypes.c_uint32
    else:
        ULONG_PTR = ctypes.c_uint64

    class Overlapped(ctypes.Structure):
        """ctypes mirror of the Win32 OVERLAPPED structure for LockFileEx."""
        _fields_ = [("internal", ULONG_PTR),
                    ("internal_high", ULONG_PTR),
                    ("offset", ctypes.wintypes.DWORD),
                    ("offset_high", ctypes.wintypes.DWORD),
                    ("h_event", ctypes.wintypes.HANDLE)]

    # Declare the LockFileEx prototype so ctypes converts arguments safely.
    lock_file_ex = ctypes.windll.kernel32.LockFileEx
    lock_file_ex.argtypes = [ctypes.wintypes.HANDLE,
                             ctypes.wintypes.DWORD,
                             ctypes.wintypes.DWORD,
                             ctypes.wintypes.DWORD,
                             ctypes.wintypes.DWORD,
                             ctypes.POINTER(Overlapped)]
    lock_file_ex.restype = ctypes.wintypes.BOOL
elif os.name == "posix":
    # POSIX: flock-based locking is available through the fcntl module.
    import fcntl
|
|
|
|
|
2016-04-07 19:02:52 +02:00
|
|
|
|
2016-04-22 11:37:02 +09:00
|
|
|
def load(configuration, logger):
    """Return the Collection class configured for this storage backend.

    The "storage"/"type" option selects the backend: "multifilesystem"
    maps to the built-in ``Collection``, anything else is treated as a
    module name whose ``Collection`` attribute is used.  The chosen class
    is wrapped in a fresh subclass so that attaching ``configuration``
    and ``logger`` never mutates the original class.
    """
    storage_type = configuration.get("storage", "type")
    if storage_type != "multifilesystem":
        base_class = import_module(storage_type).Collection
    else:
        base_class = Collection

    class CollectionCopy(base_class):
        """Collection copy, avoids overriding the original class attributes."""

    CollectionCopy.configuration = configuration
    CollectionCopy.logger = logger
    return CollectionCopy
|
2016-04-07 19:02:52 +02:00
|
|
|
|
2013-08-29 16:55:36 +02:00
|
|
|
|
2016-04-11 20:11:35 +02:00
|
|
|
# MIME types used when serving a serialized collection, keyed by its tag.
MIMETYPES = {"VADDRESSBOOK": "text/vcard", "VCALENDAR": "text/calendar"}
|
2016-04-10 02:37:43 +02:00
|
|
|
|
2013-08-29 16:55:36 +02:00
|
|
|
|
2016-04-11 20:11:35 +02:00
|
|
|
def get_etag(text):
    """Return the ETag for a collection or item.

    The ETag is the MD5 hex digest of the UTF-8 encoded ``text``,
    wrapped in double quotes as required by HTTP.
    """
    digest = md5(text.encode("utf-8")).hexdigest()
    return '"{}"'.format(digest)
|
2016-04-10 02:08:07 +02:00
|
|
|
|
|
|
|
|
|
|
|
def sanitize_path(path):
    """Normalize ``path`` to an absolute path rooted at "/".

    Rebuilding the path component by component drops empty, "." and ".."
    segments, so the result can never escape upward.  A trailing slash on
    the input is preserved on the output.
    """
    had_trailing_slash = path.endswith("/")
    sanitized = "/"
    for component in posixpath.normpath(path).split("/"):
        if component in ("", ".", ".."):
            continue
        sanitized = posixpath.join(sanitized, component)
        if sanitized.endswith("/"):
            had_trailing_slash = False
    return sanitized + ("/" if had_trailing_slash else "")
|
|
|
|
|
|
|
|
|
2016-04-09 22:44:34 +02:00
|
|
|
def is_safe_filesystem_path_component(path):
    """Check whether ``path`` is a single, safely joinable path component.

    Rejects empty values, drive prefixes, anything containing a
    directory separator, and the "." / ".." pseudo-components.
    """
    if not path:
        return False
    if os.path.splitdrive(path)[0]:
        return False
    if os.path.split(path)[0]:
        return False
    return path not in (os.curdir, os.pardir)
|
2016-04-09 22:44:34 +02:00
|
|
|
|
|
|
|
|
2016-04-11 20:11:35 +02:00
|
|
|
def path_to_filesystem(root, *paths):
    """Securely map ``root`` plus the given ``paths`` to a local path.

    Every component of every path is validated with
    ``is_safe_filesystem_path_component``; a ``ValueError`` is raised
    for anything that could escape ``root``.
    """
    safe_path = sanitize_path(root)
    for sanitized in (sanitize_path(one_path).strip("/") for one_path in paths):
        if not sanitized:
            continue
        for component in sanitized.split("/"):
            if not is_safe_filesystem_path_component(component):
                raise ValueError("Unsafe path")
            safe_path = os.path.join(safe_path, component)
    return safe_path
|
|
|
|
|
|
|
|
|
2016-04-11 20:11:35 +02:00
|
|
|
class Item:
    """Wrap a vobject component together with its storage metadata.

    Unknown attribute lookups are delegated to the wrapped component, so
    an ``Item`` can be used wherever the raw vobject item is expected.
    """

    def __init__(self, collection, item, href, last_modified=None):
        """Remember the owning collection, wrapped item, href and mtime."""
        self.collection = collection
        self.item = item
        self.href = href
        self.last_modified = last_modified

    def __getattr__(self, attr):
        # Fall through to the wrapped vobject component for anything the
        # Item itself does not define.
        return getattr(self.item, attr)

    @property
    def etag(self):
        """ETag computed from the item's serialized form."""
        return get_etag(self.serialize())
|
|
|
|
|
2016-04-10 02:08:07 +02:00
|
|
|
|
2016-04-22 11:37:02 +09:00
|
|
|
class BaseCollection:
    """Abstract interface that every storage backend must implement."""

    # Both attributes are overridden on the subclass created by "load".
    configuration = None
    logger = None

    def __init__(self, path, principal=False):
        """Initialize the collection.

        ``path`` must be the normalized relative path of the collection,
        using the slash as the folder delimiter, with no leading nor
        trailing slash.

        """
        raise NotImplementedError

    @classmethod
    def discover(cls, path, depth="1"):
        """Discover a list of collections under the given ``path``.

        If ``depth`` is "0", only the actual object under ``path`` is
        returned.  Any other value is treated as "1": the containing
        object and its direct children are included in the result.

        The ``path`` is relative.

        """
        raise NotImplementedError

    @property
    def etag(self):
        """ETag derived from the serialized collection."""
        return get_etag(self.serialize())

    @classmethod
    def create_collection(cls, href, collection=None, tag=None):
        """Create a collection.

        ``collection`` is a list of vobject components.

        ``tag`` is the type of collection (VCALENDAR or VADDRESSBOOK).
        When omitted, it is guessed from the collection content.

        """
        raise NotImplementedError

    def list(self):
        """List collection items."""
        raise NotImplementedError

    def get(self, href):
        """Fetch a single item."""
        raise NotImplementedError

    def get_multi(self, hrefs):
        """Fetch multiple items, ignoring duplicate hrefs.

        Functionally similar to ``get``, but might bring performance
        benefits on some storages when used cleverly.

        """
        yield from (self.get(unique_href) for unique_href in set(hrefs))

    def has(self, href):
        """Check if an item exists by its href.

        Functionally similar to ``get``, but might bring performance
        benefits on some storages when used cleverly.

        """
        item = self.get(href)
        return item is not None

    def upload(self, href, vobject_item):
        """Upload a new item."""
        raise NotImplementedError

    def update(self, href, vobject_item, etag=None):
        """Update an item.

        Functionally similar to ``delete`` plus ``upload``, but might
        bring performance benefits on some storages when used cleverly.

        """
        self.delete(href, etag)
        self.upload(href, vobject_item)

    def delete(self, href=None, etag=None):
        """Delete an item.

        When ``href`` is ``None``, delete the collection.

        """
        raise NotImplementedError

    @contextmanager
    def at_once(self):
        """Set a context manager buffering the reads and writes."""
        # TODO: use in code
        yield

    def get_meta(self, key):
        """Get metadata value for collection."""
        raise NotImplementedError

    def set_meta(self, key, value):
        """Set metadata value for collection."""
        raise NotImplementedError

    @property
    def last_modified(self):
        """Get the HTTP-datetime of when the collection was modified."""
        raise NotImplementedError

    def serialize(self):
        """Get the unicode string representing the whole collection."""
        raise NotImplementedError

    @classmethod
    @contextmanager
    def acquire_lock(cls, mode):
        """Set a context manager to lock the whole storage.

        ``mode`` must either be "r" for shared access or "w" for
        exclusive access.

        """
        raise NotImplementedError
|
|
|
|
|
2016-04-22 11:37:02 +09:00
|
|
|
|
|
|
|
class Collection(BaseCollection):
    """Collection stored in several files per calendar."""

    def __init__(self, path, principal=False):
        """Initialize from the sanitized relative ``path``.

        The first path component (if any) is taken as the owner.
        """
        folder = os.path.expanduser(
            self.configuration.get("storage", "filesystem_folder"))
        # path should already be sanitized
        self.path = sanitize_path(path).strip("/")
        self.storage_encoding = self.configuration.get("encoding", "stock")
        self._filesystem_path = path_to_filesystem(folder, self.path)
        split_path = self.path.split("/")
        if len(split_path) > 1:
            # URL with at least one folder
            self.owner = split_path[0]
        else:
            self.owner = None
        self.is_principal = principal

    @classmethod
    def discover(cls, path, depth="1"):
        """Yield the collection at ``path`` and, unless ``depth`` is "0",
        its items and direct sub-collections."""
        # path == None means wrong URL
        if path is None:
            return

        # path should already be sanitized
        sane_path = sanitize_path(path).strip("/")
        attributes = sane_path.split("/")
        if not attributes:
            return

        # Try to guess if the path leads to a collection or an item
        folder = os.path.expanduser(
            cls.configuration.get("storage", "filesystem_folder"))
        if not os.path.isdir(path_to_filesystem(folder, sane_path)):
            # path is not a collection
            if os.path.isfile(path_to_filesystem(folder, sane_path)):
                # path is an item
                attributes.pop()
            elif os.path.isdir(path_to_filesystem(folder, *attributes[:-1])):
                # path parent is a collection
                attributes.pop()
            # TODO: else: return?

        path = "/".join(attributes)

        # A collection directly below the root belongs to a principal.
        principal = len(attributes) <= 1
        collection = cls(path, principal)
        yield collection
        if depth != "0":
            # TODO: fix this
            items = list(collection.list())
            if items:
                for item in items:
                    yield collection.get(item[0])
            _, directories, _ = next(os.walk(collection._filesystem_path))
            for sub_path in directories:
                full_path = os.path.join(collection._filesystem_path, sub_path)
                # NOTE(review): path_to_filesystem is called here with a
                # single absolute path; os.path.exists(full_path) looks
                # like the intent — confirm.
                if os.path.exists(path_to_filesystem(full_path)):
                    yield cls(posixpath.join(path, sub_path))

    @classmethod
    def create_collection(cls, href, collection=None, tag=None):
        """Create the folder for ``href`` and fill it from ``collection``.

        When ``tag`` is not given it is guessed from the first component.
        Calendar components are split into one uploaded item each.
        """
        folder = os.path.expanduser(
            cls.configuration.get("storage", "filesystem_folder"))
        path = path_to_filesystem(folder, href)
        if not os.path.exists(path):
            os.makedirs(path)
        if not tag and collection:
            tag = collection[0].name
        self = cls(href)
        if tag == "VCALENDAR":
            self.set_meta("tag", "VCALENDAR")
            if collection:
                collection, = collection
                for content in ("vevent", "vtodo", "vjournal"):
                    if content in collection.contents:
                        for item in getattr(collection, "%s_list" % content):
                            new_collection = vobject.iCalendar()
                            new_collection.add(item)
                            self.upload(uuid4().hex, new_collection)
        elif tag == "VCARD":
            self.set_meta("tag", "VADDRESSBOOK")
            if collection:
                for card in collection:
                    self.upload(uuid4().hex, card)
        return self

    def list(self):
        """Yield ``(href, etag)`` for every item file in the collection."""
        try:
            hrefs = os.listdir(self._filesystem_path)
        except IOError:
            # Missing or unreadable folder: behave as an empty collection.
            return

        for href in hrefs:
            path = os.path.join(self._filesystem_path, href)
            # Skip the ".props" metadata file and sub-folders.
            if not href.endswith(".props") and os.path.isfile(path):
                with open(path, encoding=self.storage_encoding) as fd:
                    yield href, get_etag(fd.read())

    def get(self, href):
        """Return the ``Item`` stored at ``href``, or None if absent or
        the name cannot be mapped safely to the filesystem."""
        if not href:
            return
        href = href.strip("{}").replace("/", "_")
        if is_safe_filesystem_path_component(href):
            path = os.path.join(self._filesystem_path, href)
            if os.path.isfile(path):
                with open(path, encoding=self.storage_encoding) as fd:
                    text = fd.read()
                last_modified = time.strftime(
                    "%a, %d %b %Y %H:%M:%S GMT",
                    time.gmtime(os.path.getmtime(path)))
                return Item(self, vobject.readOne(text), href, last_modified)
        else:
            self.logger.debug(
                "Can't tranlate name safely to filesystem, "
                "skipping component: %s", href)

    def has(self, href):
        """Check existence of ``href`` by attempting to fetch it."""
        return self.get(href) is not None

    def upload(self, href, vobject_item):
        """Write a new item; returns the Item, or None if ``href`` is
        unsafe or the file already exists."""
        # TODO: use returned object in code
        if is_safe_filesystem_path_component(href):
            path = path_to_filesystem(self._filesystem_path, href)
            if not os.path.exists(path):
                item = Item(self, vobject_item, href)
                with open(path, "w", encoding=self.storage_encoding) as fd:
                    fd.write(item.serialize())
                return item
        else:
            self.logger.debug(
                "Can't tranlate name safely to filesystem, "
                "skipping component: %s", href)

    def update(self, href, vobject_item, etag=None):
        """Overwrite an existing item when ``etag`` (if given) matches."""
        # TODO: use etag in code and test it here
        # TODO: use returned object in code
        if is_safe_filesystem_path_component(href):
            path = path_to_filesystem(self._filesystem_path, href)
            if os.path.exists(path):
                with open(path, encoding=self.storage_encoding) as fd:
                    text = fd.read()
                if not etag or etag == get_etag(text):
                    item = Item(self, vobject_item, href)
                    with open(path, "w", encoding=self.storage_encoding) as fd:
                        fd.write(item.serialize())
                    return item
        else:
            self.logger.debug(
                "Can't tranlate name safely to filesystem, "
                "skipping component: %s", href)

    def delete(self, href=None, etag=None):
        """Delete the item at ``href``, or the whole collection (folder
        plus ".props" file) when ``href`` is None.

        NOTE(review): a mismatching ``etag`` silently deletes nothing —
        confirm callers expect no error in that case.
        """
        # TODO: use etag in code and test it here
        # TODO: use returned object in code
        if href is None:
            # Delete the collection
            if os.path.isdir(self._filesystem_path):
                shutil.rmtree(self._filesystem_path)
            props_path = self._filesystem_path + ".props"
            if os.path.isfile(props_path):
                os.remove(props_path)
            return
        elif is_safe_filesystem_path_component(href):
            # Delete an item
            path = path_to_filesystem(self._filesystem_path, href)
            if os.path.isfile(path):
                with open(path, encoding=self.storage_encoding) as fd:
                    text = fd.read()
                if not etag or etag == get_etag(text):
                    os.remove(path)
                    return
        else:
            self.logger.debug(
                "Can't tranlate name safely to filesystem, "
                "skipping component: %s", href)

    @contextmanager
    def at_once(self):
        """Context manager buffering the reads and writes (no-op)."""
        # TODO: use a file locker
        yield

    def get_meta(self, key):
        """Return the metadata value for ``key`` from the ".props" file,
        or None when the file does not exist."""
        props_path = self._filesystem_path + ".props"
        if os.path.exists(props_path):
            with open(props_path, encoding=self.storage_encoding) as prop:
                return json.load(prop).get(key)

    def set_meta(self, key, value):
        """Store ``key`` in the ".props" file; a falsy ``value`` removes
        the key instead."""
        props_path = self._filesystem_path + ".props"
        properties = {}
        if os.path.exists(props_path):
            with open(props_path, encoding=self.storage_encoding) as prop:
                properties.update(json.load(prop))

        if value:
            properties[key] = value
        else:
            properties.pop(key, None)

        with open(props_path, "w+", encoding=self.storage_encoding) as prop:
            json.dump(properties, prop)

    @property
    def last_modified(self):
        """HTTP-datetime of the newest mtime among the folder and its
        files."""
        last = max([os.path.getmtime(self._filesystem_path)] + [
            os.path.getmtime(os.path.join(self._filesystem_path, filename))
            for filename in os.listdir(self._filesystem_path)] or [0])
        return time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(last))

    def serialize(self):
        """Serialize all items into one text body according to the
        collection tag; empty string for an untagged collection."""
        items = []
        for href in os.listdir(self._filesystem_path):
            path = os.path.join(self._filesystem_path, href)
            if os.path.isfile(path) and not path.endswith(".props"):
                with open(path, encoding=self.storage_encoding) as fd:
                    items.append(vobject.readOne(fd.read()))
        if self.get_meta("tag") == "VCALENDAR":
            collection = vobject.iCalendar()
            for item in items:
                for content in ("vevent", "vtodo", "vjournal"):
                    if content in item.contents:
                        collection.add(getattr(item, content))
                        break
            return collection.serialize()
        elif self.get_meta("tag") == "VADDRESSBOOK":
            return "".join([item.serialize() for item in items])
        return ""

    # In-process reader/writer lock state; a precondition for the
    # inter-process file lock taken in acquire_lock.
    _lock = threading.Condition()
    _readers = 0
    _writer = False

    @classmethod
    @contextmanager
    def acquire_lock(cls, mode):
        """Lock the whole storage: "r" for shared, "w" for exclusive."""
        def condition():
            # Readers may enter while no writer holds the lock; a writer
            # needs the storage completely free.
            if mode == "r":
                return not cls._writer
            else:
                return not cls._writer and cls._readers == 0

        if mode not in ("r", "w"):
            raise ValueError("Invalid lock mode: %s" % mode)
        # Use a primitive lock which only works within one process as a
        # precondition for inter-process file-based locking
        with cls._lock:
            cls._lock.wait_for(condition)
            if mode == "r":
                cls._readers += 1
                # notify additional potential readers
                cls._lock.notify()
            else:
                cls._writer = True
        folder = os.path.expanduser(
            cls.configuration.get("storage", "filesystem_folder"))
        if not os.path.exists(folder):
            os.makedirs(folder, exist_ok=True)
        lock_path = os.path.join(folder, "Radicale.lock")
        # NOTE(review): lock_file is never closed explicitly; the OS lock
        # is released when the file object is garbage-collected — confirm
        # this is intended.
        lock_file = open(lock_path, "w+")
        # set access rights to a necessary minimum to prevent locking by
        # arbitrary users
        try:
            os.chmod(lock_path, stat.S_IWUSR | stat.S_IRUSR)
        except OSError:
            cls.logger.debug("Failed to set permissions on lock file")
        if os.name == "nt":
            handle = msvcrt.get_osfhandle(lock_file.fileno())
            flags = LOCKFILE_EXCLUSIVE_LOCK if mode == "w" else 0
            overlapped = Overlapped()
            if not lock_file_ex(handle, flags, 0, 1, 0, overlapped):
                cls.logger.debug("Locking not supported")
        elif os.name == "posix":
            operation = fcntl.LOCK_EX if mode == "w" else fcntl.LOCK_SH
            # According to documentation flock() is emulated with fcntl() on
            # some platforms. fcntl() locks are not associated with an open
            # file descriptor. The same file can be locked multiple times
            # within the same process and if any fd of the file is closed,
            # all locks are released.
            # flock() does not work on NFS shares.
            try:
                fcntl.flock(lock_file.fileno(), operation)
            except OSError:
                cls.logger.debug("Locking not supported")
        yield
        with cls._lock:
            if mode == "r":
                cls._readers -= 1
            else:
                cls._writer = False
            cls._lock.notify()
|