#!/usr/bin/python3
-import PIL
-import io
+import difflib
import logging
import os.path
import re
if res:
return Page(self.backend, res.id, data=res)
def make_path(self, page, path):
	"""Resolve a link target against the page it appears on.

	Absolute targets ("/...") are kept as they are, "./"-prefixed
	targets resolve below the page itself, and all other targets
	resolve relative to the page's parent. The result is normalised.
	"""
	if path.startswith("/"):
		# Absolute links need no resolution
		resolved = path

	elif path.startswith("./"):
		# Relative links one level below the current page
		resolved = os.path.join(page, path)

	else:
		# Everything else is relative to the page's parent directory
		resolved = os.path.join(os.path.dirname(page), path)

	# Normalise the final path
	return os.path.normpath(resolved)
+
def page_exists(self, path):
	"""Check whether a non-deleted page exists at the given path."""
	existing = self.get_page(path)

	# The page must be found and must not be a deletion marker
	return existing and not existing.was_deleted()
+
def get_page_title(self, page, default=None):
	"""Return the title of a page.

	Looks in the cache first, then at the page itself. Falls back
	to the caller-supplied default (or the basename of the page
	path) when the page cannot be found.
	"""
	# Try to retrieve title from cache
	title = self.memcache.get("wiki:title:%s" % page)
	if title:
		return title

	# If the title has not been in the cache, we will
	# have to look it up
	doc = self.get_page(page)
	if doc:
		title = doc.title

		# Save in cache for forever
		self.memcache.set("wiki:title:%s" % page, title)

		return title

	# Fix: the rewrite silently dropped the "default" argument - honour
	# it again. The fallback is caller-specific, so it must not be
	# written to the shared cache.
	return default or os.path.basename(page)
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
def get_recent_changes(self, account, limit=None):
pages = self._get_pages("SELECT * FROM wiki \
- WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
ORDER BY timestamp DESC")
for page in pages:
def create_page(self, page, author, content, changes=None, address=None):
	"""Insert a new revision for a page and notify its watchers."""
	name = Page.sanitise_page_name(page)

	# Store the new revision in the database
	new_page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
		VALUES(%s, %s, %s, %s, %s) RETURNING *", name, author.uid, content or None, changes, address)

	# Keep the title cache in sync
	self.memcache.set("wiki:title:%s" % new_page.page, new_page.title)

	# Notify everybody watching this page, except the author
	new_page._send_watcher_emails(excludes=[author])

	return new_page
+
def delete_page(self, page, author, **kwargs):
# Do nothing if the page does not exist
if not self.get_page(page):
ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
query, query)
+ pages = []
for page in res:
# Skip any pages the user doesn't have permission for
if not page.check_acl(account):
continue
# Return any other pages
- yield page
+ pages.append(page)
- limit -= 1
- if not limit:
+ # Break when we have found enough pages
+ if limit and len(pages) >= limit:
break
+ return pages
+
def refresh(self):
	"""Rebuild the search index.

	Needs to be called after a page has been changed.
	"""
	self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
def get_watchlist(self, account):
	"""Return all pages on the given account's watchlist, sorted."""
	watched = self._get_pages(
		"WITH pages AS (SELECT * FROM wiki_current \
		LEFT JOIN wiki ON wiki_current.id = wiki.id) \
		SELECT * FROM wiki_watchlist watchlist \
		LEFT JOIN pages ON watchlist.page = pages.page \
		WHERE watchlist.uid = %s",
		account.uid,
	)

	return sorted(watched)
+
# ACL
def check_acl(self, page, account):
return list(files)
def get_file_by_path(self, path, revision=None):
	"""Look up a file by its full path, optionally at a past revision."""
	path, filename = os.path.dirname(path), os.path.basename(path)

	if revision:
		# A specific revision: the newest entry created at or before it
		return self._get_file("SELECT * FROM wiki_files \
			WHERE path = %s AND filename = %s AND created_at <= %s \
			ORDER BY created_at DESC LIMIT 1", path, filename, revision)

	# Otherwise return the current, not deleted version
	return self._get_file("SELECT * FROM wiki_files \
		WHERE path = %s AND filename = %s AND deleted_at IS NULL",
		path, filename)
+
def get_file_by_path_and_filename(self, path, filename):
	"""Return the current (not deleted) file with this path and filename."""
	query = "SELECT * FROM wiki_files \
		WHERE path = %s AND filename = %s AND deleted_at IS NULL"

	return self._get_file(query, path, filename)
def upload(self, path, filename, data, mimetype, author, address):
	"""Store a new file, replacing any existing file of the same name."""
	# An existing file with this name is marked as deleted first
	existing = self.get_file_by_path_and_filename(path, filename)
	if existing:
		existing.delete(author)

	# Store the blob, de-duplicated by digest
	blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
		ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
		RETURNING id", data, "MD5")

	# Record the file itself, referencing the blob
	return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
		mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
		filename, author.uid, address, mimetype, blob.id, len(data))
def render(self, path, text):
	"""Render wiki markdown for the page at the given path into HTML."""
	renderer = WikiRenderer(self.backend, path)

	return renderer.render(text)
class Page(misc.Object):
def url(self):
return self.page
@property
def full_url(self):
	"""Absolute URL of this page on the public wiki."""
	return "https://wiki.ipfire.org%s" % self.url
+
@property
def page(self):
	"""The page's path as stored in the database row."""
	return self.data.page
@property
def title(self):
	"""Title of the page: the first headline if present, else the basename."""
	heading = self._title

	return heading if heading else os.path.basename(self.page[1:])
@property
def _title(self):
	"""Extract the title from the first markdown line, if it is a headline.

	Returns None when the page has no markdown or no leading headline.
	"""
	lines = self.markdown.splitlines()

	# Deleted/empty pages have no markdown at all; indexing [0] on the
	# empty list raised IndexError in the previous version
	if not lines:
		return None

	# Non-greedy capture so the optional trailing " #" is actually
	# stripped (the greedy ".*" made that group unreachable)
	m = re.match(r"^#\s*(.*?)( #)?$", lines[0])
	if m:
		return m.group(1)
if self.data.author_uid:
return self.backend.accounts.get_by_uid(self.data.author_uid)
- def _render(self, text):
- logging.debug("Rendering %s" % self)
-
- # Link images
- replacements = []
- for match in re.finditer(r"!\[(.*)\]\((.*)\)", text):
- alt_text, url = match.groups()
-
- # Skip any absolute and external URLs
- if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
- continue
-
- # Try to split query string
- url, delimiter, qs = url.partition("?")
-
- # Parse query arguments
- args = urllib.parse.parse_qs(qs)
-
- # Find image
- file = self.backend.wiki.find_image(self.page, url)
- if not file:
- continue
-
- # Scale down the image if not already done
- if not "s" in args:
- args["s"] = "768"
-
- # Format URL
- url = "%s?%s" % (file.url, urllib.parse.urlencode(args))
-
- replacements.append((match.span(), file, alt_text, url))
-
- # Apply all replacements
- for (start, end), file, alt_text, url in reversed(replacements):
- text = text[:start] + "[![%s](%s)](%s?action=detail)" % (alt_text, url, file.url) + text[end:]
-
- # Add wiki links
- patterns = (
- (r"\[\[([\w\d\/]+)(?:\|([\w\d\s]+))\]\]", r"/\1", r"\2", None, None),
- (r"\[\[([\w\d\/\-]+)\]\]", r"/\1", r"\1", self.backend.wiki.get_page_title, r"\1"),
- )
-
- for pattern, link, title, repl, args in patterns:
- replacements = []
-
- for match in re.finditer(pattern, text):
- l = match.expand(link)
- t = match.expand(title)
-
- if callable(repl):
- t = repl(match.expand(args)) or t
-
- replacements.append((match.span(), t or l, l))
-
- # Apply all replacements
- for (start, end), t, l in reversed(replacements):
- text = text[:start] + "[%s](%s)" % (t, l) + text[end:]
-
- # Borrow this from the blog
- return self.backend.blog._render_text(text, lang="markdown")
-
@property
def markdown(self):
	"""Markdown source of this revision, or an empty string."""
	source = self.data.markdown

	return source if source else ""
@property
def html(self):
	"""This revision's markdown rendered to HTML."""
	wiki = self.backend.wiki

	return wiki.render(self.page, self.markdown)
@property
def timestamp(self):
	"""Timestamp of this revision (from the database row)."""
	return self.data.timestamp
def was_deleted(self):
	"""Return True if this revision represents a deletion (no content)."""
	return not self.markdown
@lazy_property
def breadcrumbs(self):
	"""Breadcrumb trail for this page, computed once per instance."""
	return self.backend.wiki.make_breadcrumbs(self.page)
def is_latest_revision(self):
	"""Return True if no newer revision of this page exists."""
	return self == self.get_latest_revision()
+
def get_latest_revision(self):
revisions = self.get_revisions()
parts.pop()
+ # Watchers
+
@lazy_property
def diff(self):
	"""Unified diff against the previous revision, or None if there is none."""
	previous = self.previous_revision
	if previous:
		lines = difflib.unified_diff(
			previous.markdown.splitlines(),
			self.markdown.splitlines(),
		)

		return "\n".join(lines)
+
@property
def watchers(self):
	"""Yield all accounts that watch this page."""
	rows = self.db.query("SELECT uid FROM wiki_watchlist \
		WHERE page = %s", self.page)

	for row in rows:
		account = self.backend.accounts.get_by_uid(row.uid)

		# Ignore UIDs that no longer resolve to an account
		if account:
			yield account
+
def is_watched_by(self, account):
	"""Return True if the account has this page on its watchlist."""
	res = self.db.get("SELECT 1 FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)

	# Collapse the verbose if/return-True/return-False into one boolean
	return bool(res)
+
def add_watcher(self, account):
	"""Put this page on the account's watchlist (no-op if already on it)."""
	# Avoid duplicate watchlist entries
	if self.is_watched_by(account):
		return

	self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
		VALUES(%s, %s)", self.page, account.uid)
+
def remove_watcher(self, account):
	"""Take this page off the account's watchlist."""
	self.db.execute("DELETE FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)
+
def _send_watcher_emails(self, excludes=None):
	"""Notify all watchers of this page about the change.

	excludes: accounts that must not be notified (e.g. the author).
	"""
	# Fix: avoid the shared mutable default argument ([])
	excludes = excludes or []

	# Nothing to do if there was no previous revision
	if not self.previous_revision:
		return

	for watcher in self.watchers:
		# Skip everyone who is excluded
		if watcher in excludes:
			logging.debug("Excluding %s" % watcher)
			continue

		# Check permissions
		if not self.backend.wiki.check_acl(self.page, watcher):
			logging.debug("Watcher %s does not have permissions" % watcher)
			continue

		logging.debug("Sending watcher email to %s" % watcher)

		# Compose message
		self.backend.messages.send_template("wiki/messages/page-changed",
			recipients=[watcher], page=self, priority=-10)
+
def restore(self, author, address):
	"""Create a new revision that restores this revision's content."""
	changes = "Restore to revision from %s" % self.timestamp.isoformat()

	return self.backend.wiki.create_page(self.page, author,
		self.markdown, changes=changes, address=address)
+
class File(misc.Object):
def init(self, id, data):
	# Keep the database row and its ID on the instance
	self.data = data
	self.id = id
def __eq__(self, other):
	"""Files are equal when they share the same database ID."""
	if isinstance(other, self.__class__):
		return self.id == other.id

	# Fix: fall back to NotImplemented instead of implicitly returning
	# None, so Python can try the reflected comparison
	return NotImplemented
+
@property
def url(self):
	"""URL path of this file: its directory joined with its filename."""
	return os.path.join(self.path, self.filename)
def created_at(self):
	"""Creation timestamp of this file revision (from the database row)."""
	return self.data.created_at
def delete(self, author=None):
	"""Mark this file as deleted, optionally recording who deleted it."""
	deleted_by = author.uid if author else None

	self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
		WHERE id = %s", deleted_by, self.id)
+
@property
def deleted_at(self):
	"""Deletion timestamp of this file (from the database row)."""
	return self.data.deleted_at
+
def get_latest_revision(self):
	"""Return the newest revision of this file, or None if there is none."""
	# get_revisions() sorts newest first, so the first entry is the latest
	return next(iter(self.get_revisions()), None)
+
def get_revisions(self):
	"""Return all file revisions stored under this path, newest first.

	NOTE(review): this filters by path only, not by filename - confirm
	whether revisions of sibling files should really be included.
	"""
	revisions = self.backend.wiki._get_files("SELECT * FROM wiki_files \
		WHERE path = %s ORDER BY created_at DESC", self.path)

	return list(revisions)
+
def is_pdf(self):
	"""Return True when the file's MIME type marks it as a PDF document."""
	return self.mimetype in {"application/pdf", "application/x-pdf"}
return thumbnail
# Generate the thumbnail
- thumbnail = self._generate_thumbnail(size)
+ thumbnail = util.generate_thumbnail(self.blob, size)
# Put it into the cache for forever
self.memcache.set(cache_key, thumbnail)
return thumbnail
- def _generate_thumbnail(self, size):
- image = PIL.Image.open(io.BytesIO(self.blob))
- # Resize the image to the desired resolution
- image.thumbnail((size, size), PIL.Image.ANTIALIAS)
class WikiRenderer(misc.Object):
	"""Renders wiki markdown to HTML and post-processes links and images."""

	# URL schemas that are treated as external links
	schemas = (
		"ftp://",
		"git://",
		"http://",
		"https://",
		"rsync://",
		"sftp://",
		"ssh://",
		"webcal://",
	)

	# Links
	links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")

	# Images
	images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")

	def init(self, path):
		# Path of the page being rendered; relative links resolve against it
		self.path = path

	def _render_link(self, m):
		"""Rewrite a rendered <a> tag: mark external links, resolve internal ones."""
		url, text = m.groups()

		# Emails
		if "@" in url:
			# Strip mailto:
			if url.startswith("mailto:"):
				url = url[7:]

			return """<a class="link-external" href="mailto:%s">%s</a>""" % \
				(url, text or url)

		# External Links
		for schema in self.schemas:
			if url.startswith(schema):
				return """<a class="link-external" href="%s">%s</a>""" % \
					(url, text or url)

		# Everything else must be an internal link
		path = self.backend.wiki.make_path(self.path, url)

		return """<a href="%s">%s</a>""" % \
			(path, text or self.backend.wiki.get_page_title(path))

	def _render_image(self, m):
		"""Rewrite a rendered <img> tag into a <figure> with optional caption."""
		alt_text, url, caption = m.groups()

		# Skip any absolute and external URLs
		# (alt_text may be None since its regex group is optional - fall
		# back to "" so the output never contains a literal "None")
		if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
			return """<figure class="figure"><img src="%s" class="figure-img img-fluid rounded" alt="%s">
	<figcaption class="figure-caption">%s</figcaption></figure>
""" % (url, alt_text or "", caption or "")

		# Try to split query string
		url, delimiter, qs = url.partition("?")

		# Parse query arguments
		args = urllib.parse.parse_qs(qs)

		# Build absolute path
		url = self.backend.wiki.make_path(self.path, url)

		# Find image
		file = self.backend.wiki.get_file_by_path(url)
		if not file or not file.is_image():
			return "<!-- Could not find image %s in %s -->" % (url, self.path)

		# Scale down the image if not already done
		if not "s" in args:
			args["s"] = "920"

		# Fix: the alt attribute used the caption (possibly None, rendered
		# as "None") - use the alt text, consistent with the branch above
		return """<figure class="figure"><img src="%s?%s" class="figure-img img-fluid rounded" alt="%s">
	<figcaption class="figure-caption">%s</figcaption></figure>
""" % (url, urllib.parse.urlencode(args), alt_text or "", caption or "")

	def render(self, text):
		"""Render markdown to HTML, then post-process links and images."""
		logging.debug("Rendering %s" % self.path)

		# Borrow this from the blog
		text = self.backend.blog._render_text(text, lang="markdown")

		# Postprocess links
		text = self.links.sub(self._render_link, text)

		# Postprocess images to <figure>
		text = self.images.sub(self._render_image, text)

		return text