#!/usr/bin/python3
+import difflib
import logging
import os.path
import re
+import urllib.parse
from . import misc
+from . import util
from .decorators import *
class Wiki(misc.Object):
if res:
return Page(self.backend, res.id, data=res)
def make_path(self, page, path):
	"""
		Resolves a wiki link target relative to the given page
		and returns a normalised path.
	"""
	if path.startswith("/"):
		# Absolute links are taken as they are
		resolved = path

	elif path.startswith("./"):
		# Links relative to the current page (one level down)
		resolved = os.path.join(page, path)

	else:
		# Any other relative links live next to the current page
		resolved = os.path.join(os.path.dirname(page), path)

	# Collapse ".", ".." and duplicate slashes
	return os.path.normpath(resolved)
+
def page_exists(self, path):
	"""
		Returns True if a page exists at the given path
		and has not been deleted.
	"""
	p = self.get_page(path)

	# Page must have been found and not deleted
	return p and not p.was_deleted()
+
def get_page_title(self, page, default=None):
	"""
		Returns the title of the given page.

		Titles are cached in memcache; on a cache miss the page is
		looked up and the result is stored in the cache.

		Fix: the caller-supplied default was silently ignored by the
		cached code path; it is now honoured before falling back to
		the basename of the page path.
	"""
	# Try to retrieve title from cache
	title = self.memcache.get("wiki:title:%s" % page)
	if title:
		return title

	# If the title has not been in the cache, we will
	# have to look it up
	doc = self.get_page(page)
	if doc:
		title = doc.title
	else:
		# Fall back to the caller-supplied default, or the basename
		title = default if default is not None else os.path.basename(page)

	# Save in cache for forever
	self.memcache.set("wiki:title:%s" % page, title)

	return title
def get_page(self, page, revision=None):
	"""
		Returns the page at the given path.

		Fix: the revision argument was accepted but never used —
		every caller silently got the latest revision. A given
		revision timestamp now selects that exact revision.
	"""
	page = Page.sanitise_page_name(page)

	# Fetch a specific revision
	if revision:
		return self._get_page("SELECT * FROM wiki WHERE page = %s \
			AND timestamp = %s", page, revision)

	# Fetch the latest revision
	return self._get_page("SELECT * FROM wiki WHERE page = %s \
		ORDER BY timestamp DESC LIMIT 1", page)
def get_recent_changes(self, account, limit=None):
	"""
		Yields the most recently changed pages that the given
		account is allowed to see, newest first.

		Stops after limit pages when a limit is given; otherwise
		all matching pages are yielded.

		Fix: "limit -= 1" raised TypeError whenever limit was left
		at its default of None; the counter is now only decremented
		when a limit was actually given.
	"""
	pages = self._get_pages("SELECT * FROM wiki \
		ORDER BY timestamp DESC")

	for page in pages:
		# Skip any pages the account must not see
		if not page.check_acl(account):
			continue

		yield page

		# Stop once enough pages have been returned
		if limit is not None:
			limit -= 1
			if limit <= 0:
				break
def create_page(self, page, author, content, changes=None, address=None):
	"""
		Stores a new revision of a page and returns it.

		The cached title is refreshed and all watchers of the page
		(except the author) are notified by email.
	"""
	name = Page.sanitise_page_name(page)

	# Write the new revision to the database
	revision = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
		VALUES(%s, %s, %s, %s, %s) RETURNING *", name, author.uid, content or None, changes, address)

	# Keep the cached title up to date
	self.memcache.set("wiki:title:%s" % revision.page, revision.title)

	# Let all watchers know (but not the author)
	revision._send_watcher_emails(excludes=[author])

	return revision
def delete_page(self, page, author, **kwargs):
	"""
		Deletes a page by storing a blank revision on top of it.

		Fix: the guard was missing its return statement, so the
		blank revision was created only when the page did NOT exist
		— the exact opposite of the documented intent.
	"""
	# Do nothing if the page does not exist
	if not self.get_page(page):
		return

	# Just creates a blank last version of the page
	self.create_page(page, author=author, content=None, **kwargs)
def make_breadcrumbs(self, url):
	"""
		Returns a list of (path, title) tuples for every ancestor
		of the given URL, from the root down to the parent.
	"""
	# Split and strip all empty elements (double slashes)
	crumbs = [c for c in url.split("/") if c]

	breadcrumbs = []
	for i in range(1, len(crumbs)):
		prefix = "/".join(crumbs[:i])
		breadcrumbs.append(
			("/%s" % prefix, self.get_page_title(prefix, os.path.basename(prefix)))
		)

	return breadcrumbs
+
def search(self, query, account=None, limit=None):
	"""
		Performs a full-text search and returns the matching pages
		that the given account may read, best matches first.
	"""
	query = util.parse_search_query(query)

	res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
		LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
		WHERE search_index.document @@ to_tsquery('english', %s) \
		ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
		query, query)

	matches = []
	for candidate in res:
		# Drop anything the user must not see
		if candidate.check_acl(account):
			matches.append(candidate)

		# Stop as soon as we have collected enough pages
		if limit and len(matches) >= limit:
			break

	return matches
+
def refresh(self):
	"""
		Rebuilds the full-text search index.

		Needs to be called after a page has been changed.
	"""
	self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
+
def get_watchlist(self, account):
	"""
		Returns all pages on the given account's watchlist,
		sorted in page order.
	"""
	watched = self._get_pages(
		"WITH pages AS (SELECT * FROM wiki_current \
			LEFT JOIN wiki ON wiki_current.id = wiki.id) \
			SELECT * FROM wiki_watchlist watchlist \
			LEFT JOIN pages ON watchlist.page = pages.page \
			WHERE watchlist.uid = %s",
		account.uid,
	)

	return sorted(watched)
+
+ # ACL
+
def check_acl(self, page, account):
	"""
		Returns True if the given account may access the page.

		The longest matching ACL path wins; when no ACL matches at
		all, access is granted to everybody.
	"""
	res = self.db.query("SELECT * FROM wiki_acls \
		WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)

	for row in res:
		# Anonymous users are never allowed through an ACL
		if not account:
			return False

		# Permitted if the account belongs to any allowed group
		return any(group in account.groups for group in row.groups)

	# If no ACLs are found, we permit access
	return True
+
+ # Files
+
def _get_files(self, query, *args):
	"""
		Runs the query and yields one File object per result row.
	"""
	for row in self.db.query(query, *args):
		yield File(self.backend, row.id, data=row)
+
def _get_file(self, query, *args):
	"""
		Runs the query and returns the first result as a File
		object, or None if nothing matched.
	"""
	row = self.db.get(query, *args)

	if row:
		return File(self.backend, row.id, data=row)
+
def get_files(self, path):
	"""
		Returns all (not deleted) files in the given directory,
		sorted by file name.
	"""
	return list(self._get_files("SELECT * FROM wiki_files \
		WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path))
+
def get_file_by_path(self, path, revision=None):
	"""
		Returns the file at the given path — either the latest
		version, or the revision that was current at the given
		revision timestamp.
	"""
	directory, filename = os.path.dirname(path), os.path.basename(path)

	if revision:
		# Fetch a specific revision
		return self._get_file("SELECT * FROM wiki_files \
			WHERE path = %s AND filename = %s AND created_at <= %s \
			ORDER BY created_at DESC LIMIT 1", directory, filename, revision)

	# Fetch latest version
	return self._get_file("SELECT * FROM wiki_files \
		WHERE path = %s AND filename = %s AND deleted_at IS NULL",
		directory, filename)
+
def get_file_by_path_and_filename(self, path, filename):
	"""
		Returns the latest (not deleted) file with the given name
		in the given directory, or None.
	"""
	return self._get_file("SELECT * FROM wiki_files \
		WHERE path = %s AND filename = %s AND deleted_at IS NULL",
		path, filename)
+
def upload(self, path, filename, data, mimetype, author, address):
	"""
		Stores an uploaded file and returns the new File object.

		An existing file with the same name is marked as deleted
		first; the payload is deduplicated by its MD5 digest.
	"""
	# Replace any existing files
	existing = self.get_file_by_path_and_filename(path, filename)
	if existing:
		existing.delete(author)

	# Upload the blob first
	blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
		ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
		RETURNING id", data, "MD5")

	# Create entry for file
	return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
		mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
		filename, author.uid, address, mimetype, blob.id, len(data))
+
def render(self, path, text):
	"""
		Renders the given wiki markup in the context of path.
	"""
	return WikiRenderer(self.backend, path).render(text)
class Page(misc.Object):
self.id = id
self.data = data
def __repr__(self):
	"""Debug representation showing page path and revision timestamp."""
	return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)
+
def __eq__(self, other):
	"""
		Two Page objects are equal when they reference the same
		database row.

		Fix: for non-Page operands this implicitly returned None;
		it now returns NotImplemented so Python can try the other
		operand's comparison.
	"""
	if isinstance(other, self.__class__):
		return self.id == other.id

	return NotImplemented

def __hash__(self):
	# Defining __eq__ disables the inherited hash and made pages
	# unhashable; restore it based on the same key equality uses
	return hash(self.id)
def __lt__(self, other):
if isinstance(other, self.__class__):
if self.page == other.page:
def url(self):
return self.page
@property
def full_url(self):
	"""The absolute URL of this page on the public wiki."""
	return "https://wiki.ipfire.org%s" % self.url
+
@property
def page(self):
	"""The path of this page (delegates to the database row)."""
	return self.data.page
@property
def title(self):
	"""The page title: the first headline, or the basename of the path."""
	return self._title or os.path.basename(self.page[1:])
@property
def _title(self):
	"""
		Extracts the title from the first markdown line if it is a
		"#" headline; returns None otherwise.

		Fixes: empty markdown (a deleted page renders to "" whose
		splitlines() is []) raised IndexError; and the optional
		trailing " #" group could never match after the greedy
		"(.*)", so closing hashes were never stripped.
	"""
	lines = self.markdown.splitlines()

	# Deleted/empty pages have no markdown and hence no title
	if not lines:
		return None

	# Find first H1 headline in markdown
	m = re.match(r"^#\s*(.*?)(?:\s+#)?$", lines[0])
	if m:
		return m.group(1)
if self.data.author_uid:
return self.backend.accounts.get_by_uid(self.data.author_uid)
- def _render(self, text):
- logging.debug("Rendering %s" % self)
-
- patterns = (
- (r"\[\[([\w\d\/]+)(?:\|([\w\d\s]+))\]\]", r"/\1", r"\2", None, None),
- (r"\[\[([\w\d\/\-]+)\]\]", r"/\1", r"\1", self.backend.wiki.get_page_title, r"\1"),
- )
-
- for pattern, link, title, repl, args in patterns:
- replacements = []
-
- for match in re.finditer(pattern, text):
- l = match.expand(link)
- t = match.expand(title)
-
- if callable(repl):
- t = repl(match.expand(args)) or t
-
- replacements.append((match.span(), t or l, l))
-
- # Apply all replacements
- for (start, end), t, l in reversed(replacements):
- text = text[:start] + "[%s](%s)" % (t, l) + text[end:]
-
- # Borrow this from the blog
- return self.backend.blog._render_text(text, lang="markdown")
-
@property
def markdown(self):
	"""The markdown source of this revision ("" for deleted pages)."""
	return self.data.markdown or ""
@property
def html(self):
	"""This revision rendered to HTML."""
	return self.backend.wiki.render(self.page, self.markdown)
@property
def timestamp(self):
	"""When this revision was created."""
	return self.data.timestamp
def was_deleted(self):
	"""Returns True if this revision marks the page as deleted (no content)."""
	return not self.markdown
@lazy_property
def breadcrumbs(self):
	"""The (path, title) trail of this page's ancestors."""
	return self.backend.wiki.make_breadcrumbs(self.page)
def is_latest_revision(self):
	"""Returns True if this object is the most recent revision of its page."""
	return self.get_latest_revision() == self
+
def get_latest_revision(self):
	"""
		Returns the most recent revision of this page, or None.

		Fix: the old code assigned get_revisions() to an unused
		local and then returned the raw _get_pages(...) iterable,
		so is_latest_revision() compared a Page against an iterable
		and was always False. This now mirrors the working pattern
		of File.get_latest_revision()/get_revisions().
	"""
	revisions = self.get_revisions()

	# Return the first object (revisions are sorted newest first)
	for revision in revisions:
		return revision

def get_revisions(self):
	"""
		Returns all revisions of this page, newest first.
	"""
	return self.backend.wiki._get_pages("SELECT * FROM wiki \
		WHERE page = %s ORDER BY timestamp DESC", self.page)
@lazy_property
def previous_revision(self):
	"""The revision directly before this one, or None for the first."""
	return self.backend.wiki._get_page("SELECT * FROM wiki \
		WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
		LIMIT 1", self.page, self.timestamp)
+
@property
def changes(self):
	"""The change message of this revision."""
	return self.data.changes
# ACL

def check_acl(self, account):
	"""Returns True if the given account may access this page."""
	return self.backend.wiki.check_acl(self.page, account)
+
# Sidebar
@lazy_property
parts = self.page.split("/")
while parts:
- sidebar = self.backend.wiki.get_page(os.path.join(*parts, "sidebar"))
+ sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
if sidebar:
return sidebar
parts.pop()
+
+ # Watchers
+
@lazy_property
def diff(self):
	"""A unified diff against the previous revision (None if there is none)."""
	if self.previous_revision:
		delta = difflib.unified_diff(
			self.previous_revision.markdown.splitlines(),
			self.markdown.splitlines(),
		)

		return "\n".join(delta)
+
@property
def watchers(self):
	"""Yields all accounts that watch this page."""
	res = self.db.query("SELECT uid FROM wiki_watchlist \
		WHERE page = %s", self.page)

	for row in res:
		# Search for account by UID and skip if none was found
		account = self.backend.accounts.get_by_uid(row.uid)

		if account:
			yield account
+
def is_watched_by(self, account):
	"""Returns True if the given account watches this page."""
	res = self.db.get("SELECT 1 FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)

	return bool(res)
+
def add_watcher(self, account):
	"""Puts the given account on this page's watchlist (idempotent)."""
	# Nothing to do if the account is already watching
	if self.is_watched_by(account):
		return

	self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
		VALUES(%s, %s)", self.page, account.uid)
+
def remove_watcher(self, account):
	"""Takes the given account off this page's watchlist."""
	self.db.execute("DELETE FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)
+
def _send_watcher_emails(self, excludes=None):
	"""
		Sends a "page changed" email to every watcher of this page
		that may read it, except the accounts listed in excludes.

		Fix: the default for excludes was a mutable list literal;
		replaced with the None-sentinel idiom.
	"""
	excludes = excludes or []

	# Nothing to do if there was no previous revision
	if not self.previous_revision:
		return

	for watcher in self.watchers:
		# Skip everyone who is excluded
		if watcher in excludes:
			logging.debug("Excluding %s" % watcher)
			continue

		# Check permissions
		if not self.backend.wiki.check_acl(self.page, watcher):
			logging.debug("Watcher %s does not have permissions" % watcher)
			continue

		logging.debug("Sending watcher email to %s" % watcher)

		# Compose message
		self.backend.messages.send_template("wiki/messages/page-changed",
			recipients=[watcher], page=self, priority=-10)
+
def restore(self, author, address):
	"""Re-creates this (older) revision's content as the newest revision."""
	message = "Restore to revision from %s" % self.timestamp.isoformat()

	return self.backend.wiki.create_page(self.page,
		author, self.markdown, changes=message, address=address)
+
+
class File(misc.Object):
	"""
		A file (attachment) stored under a wiki path.
	"""
	def init(self, id, data):
		self.id = id
		self.data = data

	def __eq__(self, other):
		# Files are equal when they reference the same database row.
		# Fix: return NotImplemented (not an implicit None) for
		# foreign operand types so Python can try the other side.
		if isinstance(other, self.__class__):
			return self.id == other.id

		return NotImplemented

	def __hash__(self):
		# Defining __eq__ disables the inherited hash; restore it
		return hash(self.id)

	@property
	def url(self):
		"""The full path of this file including its filename."""
		return os.path.join(self.path, self.filename)

	@property
	def path(self):
		"""The directory this file lives in."""
		return self.data.path

	@property
	def filename(self):
		return self.data.filename

	@property
	def mimetype(self):
		return self.data.mimetype

	@property
	def size(self):
		"""The size of the payload in bytes."""
		return self.data.size

	@lazy_property
	def author(self):
		"""The account that uploaded this file, or None."""
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	@property
	def created_at(self):
		return self.data.created_at

	def delete(self, author=None):
		"""Marks this file as deleted (the blob is kept)."""
		self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
			WHERE id = %s", author.uid if author else None, self.id)

	@property
	def deleted_at(self):
		return self.data.deleted_at

	def get_latest_revision(self):
		"""Returns the most recent revision of this file, or None."""
		revisions = self.get_revisions()

		# Return first object
		for rev in revisions:
			return rev

	def get_revisions(self):
		"""
			Returns all file revisions under this path, newest first.

			NOTE(review): this filters by path only, not by filename,
			so it returns revisions of *all* files in the directory —
			confirm whether filename should be part of the WHERE clause.
		"""
		revisions = self.backend.wiki._get_files("SELECT * FROM wiki_files \
			WHERE path = %s ORDER BY created_at DESC", self.path)

		return list(revisions)

	def is_pdf(self):
		return self.mimetype in ("application/pdf", "application/x-pdf")

	def is_image(self):
		return self.mimetype.startswith("image/")

	@lazy_property
	def blob(self):
		"""The raw file data as bytes, or None if the blob is missing."""
		res = self.db.get("SELECT data FROM wiki_blobs \
			WHERE id = %s", self.data.blob_id)

		if res:
			return bytes(res.data)

	def get_thumbnail(self, size):
		"""
			Returns this image scaled to the given size, cached in
			memcache forever (keyed by file revision and size).
		"""
		cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))

		# Try to fetch the data from the cache
		thumbnail = self.memcache.get(cache_key)
		if thumbnail:
			return thumbnail

		# Generate the thumbnail
		thumbnail = util.generate_thumbnail(self.blob, size)

		# Put it into the cache for forever
		self.memcache.set(cache_key, thumbnail)

		return thumbnail
+
+
class WikiRenderer(misc.Object):
	"""
		Renders wiki markdown to HTML and post-processes links and
		images relative to a wiki path.
	"""
	# URL schemas that are treated as external links
	schemas = (
		"ftp://",
		"git://",
		"http://",
		"https://",
		"rsync://",
		"sftp://",
		"ssh://",
		"webcal://",
	)

	# Links
	links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")

	# Images
	images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")

	def init(self, path):
		# The wiki path relative links are resolved against
		self.path = path

	def _render_link(self, m):
		"""
			Rewrites a rendered <a> tag: mailto/external links are
			marked as external, everything else is resolved as an
			internal wiki path.
		"""
		url, text = m.groups()

		# Emails
		if "@" in url:
			# Strip mailto:
			if url.startswith("mailto:"):
				url = url[7:]

			return """<a class="link-external" href="mailto:%s">%s</a>""" % \
				(url, text or url)

		# External Links
		for schema in self.schemas:
			if url.startswith(schema):
				return """<a class="link-external" href="%s">%s</a>""" % \
					(url, text or url)

		# Everything else must be an internal link
		path = self.backend.wiki.make_path(self.path, url)

		return """<a href="%s">%s</a>""" % \
			(path, text or self.backend.wiki.get_page_title(path))

	def _render_image(self, m):
		"""
			Rewrites a rendered <img> tag into a <figure> with an
			optional caption; internal images are resolved against
			the wiki path and scaled down by default.

			Fix: the alt attribute interpolated possibly-None values
			(producing a literal "None" in the HTML) and the second
			template inconsistently used the caption as alt text.
		"""
		alt_text, url, caption = m.groups()

		# Skip any absolute and external URLs
		if url.startswith(("/", "https://", "http://")):
			return """<figure class="figure"><img src="%s" class="figure-img img-fluid rounded" alt="%s">
				<figcaption class="figure-caption">%s</figcaption></figure>
			""" % (url, alt_text or "", caption or "")

		# Try to split query string
		url, delimiter, qs = url.partition("?")

		# Parse query arguments
		args = urllib.parse.parse_qs(qs)

		# Build absolute path
		url = self.backend.wiki.make_path(self.path, url)

		# Find image
		file = self.backend.wiki.get_file_by_path(url)
		if not file or not file.is_image():
			return "<!-- Could not find image %s in %s -->" % (url, self.path)

		# Scale down the image if not already done
		if "s" not in args:
			args["s"] = "920"

		return """<figure class="figure"><img src="%s?%s" class="figure-img img-fluid rounded" alt="%s">
			<figcaption class="figure-caption">%s</figcaption></figure>
		""" % (url, urllib.parse.urlencode(args), alt_text or "", caption or "")

	def render(self, text):
		"""
			Renders the markdown and post-processes links and images.
		"""
		logging.debug("Rendering %s" % self.path)

		# Borrow this from the blog
		text = self.backend.blog._render_text(text, lang="markdown")

		# Postprocess links
		text = self.links.sub(self._render_link, text)

		# Postprocess images to <figure>
		text = self.images.sub(self._render_image, text)

		return text