#!/usr/bin/python3
import difflib
+import hashlib
import logging
+import markdown
+import markdown.extensions
+import markdown.preprocessors
import os.path
import re
-import tornado.gen
import urllib.parse
from . import misc
from . import util
from .decorators import *
-INTERWIKIS = {
- "google" : ("https://www.google.com/search?q=%(url)s", None, "fab fa-google"),
- "rfc" : ("https://tools.ietf.org/html/rfc%(name)s", "RFC %s", None),
- "wp" : ("https://en.wikipedia.org/wiki/%(name)s", None, "fab fa-wikipedia-w"),
-}
-
class Wiki(misc.Object):
def _get_pages(self, query, *args):
res = self.db.query(query, *args)
if res:
return Page(self.backend, res.id, data=res)
+ def __iter__(self):
+ return self._get_pages(
+ "SELECT wiki.* FROM wiki_current current \
+ LEFT JOIN wiki ON current.id = wiki.id \
+ WHERE current.deleted IS FALSE \
+ ORDER BY page",
+ )
+
def make_path(self, page, path):
# Nothing to do for absolute links
if path.startswith("/"):
# Normalise links
return os.path.normpath(path)
- def get_page_title(self, page, default=None):
- # Try to retrieve title from cache
- title = self.memcache.get("wiki:title:%s" % page)
- if title:
- return title
+ def page_exists(self, path):
+ page = self.get_page(path)
- # If the title has not been in the cache, we will
- # have to look it up
+ # Page must have been found and not deleted
+ return page and not page.was_deleted()
+
+ def get_page_title(self, page, default=None):
doc = self.get_page(page)
if doc:
title = doc.title
else:
title = os.path.basename(page)
- # Save in cache for forever
- self.memcache.set("wiki:title:%s" % page, title)
-
return title
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
- assert page
+
+ # Split the path into parts
+ parts = page.split("/")
+
+ # Check if this is an action
+ if any(part.startswith("_") for part in parts):
+ return
if revision:
return self._get_page("SELECT * FROM wiki WHERE page = %s \
page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)
- # Update cache
- self.memcache.set("wiki:title:%s" % page.page, page.title)
+ # Store any linked files
+ page._store_linked_files()
# Send email to all watchers
page._send_watcher_emails(excludes=[author])
return ret
def search(self, query, account=None, limit=None):
- query = util.parse_search_query(query)
-
res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
- WHERE search_index.document @@ to_tsquery('english', %s) \
- ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
+ WHERE search_index.document @@ websearch_to_tsquery('english', %s) \
+ ORDER BY ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC",
query, query)
pages = []
self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
def get_watchlist(self, account):
- pages = self._get_pages(
- "WITH pages AS (SELECT * FROM wiki_current \
- LEFT JOIN wiki ON wiki_current.id = wiki.id) \
- SELECT * FROM wiki_watchlist watchlist \
- LEFT JOIN pages ON watchlist.page = pages.page \
- WHERE watchlist.uid = %s",
- account.uid,
+ pages = self._get_pages("""
+ WITH pages AS (
+ SELECT
+ *
+ FROM
+ wiki_current
+ LEFT JOIN
+ wiki ON wiki_current.id = wiki.id
+ )
+
+ SELECT
+ *
+ FROM
+ wiki_watchlist watchlist
+ JOIN
+ pages ON watchlist.page = pages.page
+ WHERE
+ watchlist.uid = %s
+ """, account.uid,
)
return sorted(pages)
# If user is in a matching group, we grant permission
for group in row.groups:
- if group in account.groups:
+ if account.is_member_of_group(group):
return True
# Otherwise access is not permitted
return list(files)
- def get_file_by_path(self, path):
+ def get_file_by_path(self, path, revision=None):
path, filename = os.path.dirname(path), os.path.basename(path)
+ if revision:
+ # Fetch a specific revision
+ return self._get_file("SELECT * FROM wiki_files \
+ WHERE path = %s AND filename = %s AND created_at <= %s \
+ ORDER BY created_at DESC LIMIT 1", path, filename, revision)
+
+ # Fetch latest version
return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND deleted_at IS NULL", path, filename)
+ WHERE path = %s AND filename = %s AND deleted_at IS NULL",
+ path, filename)
+
+ def get_file_by_path_and_filename(self, path, filename):
+ return self._get_file("SELECT * FROM wiki_files \
+ WHERE path = %s AND filename = %s AND deleted_at IS NULL",
+ path, filename)
def upload(self, path, filename, data, mimetype, author, address):
+ # Replace any existing files
+ file = self.get_file_by_path_and_filename(path, filename)
+ if file:
+ file.delete(author)
+
# Upload the blob first
- blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data)
+ blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
+ ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
+ RETURNING id", data, "MD5")
# Create entry for file
return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
filename, author.uid, address, mimetype, blob.id, len(data))
def render(self, path, text):
- r = WikiRenderer(self.backend, path)
-
- return r.render(text)
+ return WikiRenderer(self.backend, path, text)
class Page(misc.Object):
if isinstance(other, self.__class__):
return self.id == other.id
+ return NotImplemented
+
def __lt__(self, other):
if isinstance(other, self.__class__):
if self.page == other.page:
return self.page < other.page
+ return NotImplemented
+
@staticmethod
def sanitise_page_name(page):
if not page:
@property
def url(self):
- return self.page
+ return "/docs%s" % self.page
@property
def full_url(self):
- return "https://wiki.ipfire.org%s" % self.url
+ return "https://www.ipfire.org%s" % self.url
@property
def page(self):
# Find first H1 headline in markdown
markdown = self.markdown.splitlines()
- m = re.match(r"^# (.*)( #)?$", markdown[0])
+ m = re.match(r"^#\s*(.*?)(?:\s+#+)?\s*$", markdown[0])
if m:
return m.group(1)
@property
def html(self):
- return self.backend.wiki.render(self.page, self.markdown)
+ lines = []
+
+ # Strip off the first line if it contains a heading (as it will be shown separately)
+ for i, line in enumerate(self.markdown.splitlines()):
+ if i == 0 and line.startswith("#"):
+ continue
+
+ lines.append(line)
+
+ renderer = self.backend.wiki.render(self.page, "\n".join(lines))
+
+ return renderer.html
+
+ # Linked Files
+
+ @property
+ def files(self):
+ renderer = self.backend.wiki.render(self.page, self.markdown)
+
+ return renderer.files
+
+ def _store_linked_files(self):
+ self.db.executemany("INSERT INTO wiki_linked_files(page_id, path) \
+ VALUES(%s, %s)", ((self.id, file) for file in self.files))
@property
def timestamp(self):
return self.data.timestamp
def was_deleted(self):
- return self.markdown is None
+ return not self.markdown
@lazy_property
def breadcrumbs(self):
return self.backend.wiki.make_breadcrumbs(self.page)
+ def is_latest_revision(self):
+ return self.get_latest_revision() == self
+
def get_latest_revision(self):
revisions = self.get_revisions()
def check_acl(self, account):
return self.backend.wiki.check_acl(self.page, account)
- # Sidebar
-
- @lazy_property
- def sidebar(self):
- parts = self.page.split("/")
-
- while parts:
- sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
- if sidebar:
- return sidebar
-
- parts.pop()
-
# Watchers
@lazy_property
# Compose message
self.backend.messages.send_template("wiki/messages/page-changed",
- recipients=[watcher], page=self, priority=-10)
+ account=watcher, page=self, priority=-10)
+
+ def restore(self, author, address, comment=None):
+ changes = "Restore to revision from %s" % self.timestamp.isoformat()
+
+ # Append comment
+ if comment:
+ changes = "%s: %s" % (changes, comment)
+
+ return self.backend.wiki.create_page(self.page,
+ author, self.markdown, changes=changes, address=address)
class File(misc.Object):
self.id = id
self.data = data
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ return self.id == other.id
+
+ return NotImplemented
+
@property
def url(self):
- return os.path.join(self.path, self.filename)
+ return "/docs%s" % os.path.join(self.path, self.filename)
@property
def path(self):
def created_at(self):
return self.data.created_at
+ def delete(self, author=None):
+ if not self.can_be_deleted():
+ raise RuntimeError("Cannot delete %s" % self)
+
+ self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
+ WHERE id = %s", author.uid if author else None, self.id)
+
+ def can_be_deleted(self):
+ # Cannot be deleted if still in use
+ if self.pages:
+ return False
+
+ # Can be deleted
+ return True
+
+ @property
+ def deleted_at(self):
+ return self.data.deleted_at
+
+ def get_latest_revision(self):
+ revisions = self.get_revisions()
+
+ # Return first object
+ for rev in revisions:
+ return rev
+
+ def get_revisions(self):
+ revisions = self.backend.wiki._get_files("SELECT * FROM wiki_files \
+ WHERE path = %s AND filename = %s ORDER BY created_at DESC", self.path, self.filename)
+
+ return list(revisions)
+
def is_pdf(self):
return self.mimetype in ("application/pdf", "application/x-pdf")
def is_image(self):
return self.mimetype.startswith("image/")
+ def is_vector_image(self):
+ return self.mimetype in ("image/svg+xml",)
+
+ def is_bitmap_image(self):
+ return self.is_image() and not self.is_vector_image()
+
@lazy_property
def blob(self):
res = self.db.get("SELECT data FROM wiki_blobs \
if res:
return bytes(res.data)
- def get_thumbnail(self, size):
- cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))
+ async def get_thumbnail(self, size):
+ assert self.is_bitmap_image()
+
+ cache_key = "-".join((
+ self.path,
+ util.normalize(self.filename),
+ self.created_at.isoformat(),
+ "%spx" % size,
+ ))
# Try to fetch the data from the cache
- thumbnail = self.memcache.get(cache_key)
+ thumbnail = await self.backend.cache.get(cache_key)
if thumbnail:
return thumbnail
thumbnail = util.generate_thumbnail(self.blob, size)
# Put it into the cache for forever
- self.memcache.set(cache_key, thumbnail)
+ await self.backend.cache.set(cache_key, thumbnail)
return thumbnail
+ @property
+ def pages(self):
+ """
+ Returns a list of all pages this file is linked by
+ """
+ pages = self.backend.wiki._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_linked_files
+ JOIN
+ wiki_current ON wiki_linked_files.page_id = wiki_current.id
+ LEFT JOIN
+ wiki ON wiki_linked_files.page_id = wiki.id
+ WHERE
+ wiki_linked_files.path = %s
+ ORDER BY
+ wiki.page
+ """, os.path.join(self.path, self.filename),
+ )
-class WikiRenderer(misc.Object):
- # Wiki links
- wiki_link = re.compile(r"\[\[([\w\d\/\-\.]+)(?:\|(.+?))?\]\]")
-
- # External links
- external_link = re.compile(r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))?\]\]")
+ return list(pages)
- # Interwiki links e.g. [[wp>IPFire]]
- interwiki_link = re.compile(r"\[\[(\w+)>(.+?)(?:\|(.+?))?\]\]")
- # Mail link
- email_link = re.compile(r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)(?:\|(.+?))?\]\]")
+class WikiRenderer(misc.Object):
+ schemas = (
+ "ftp://",
+ "git://",
+ "http://",
+ "https://",
+ "rsync://",
+ "sftp://",
+ "ssh://",
+ "webcal://",
+ )
+
+ # Links
+ _links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")
# Images
- images = re.compile(r"<img src=\"(.*?)\" alt=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
+ _images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
- def init(self, path):
+ def init(self, path, text):
self.path = path
-
- def _render_wiki_link(self, m):
- path, alias = m.groups()
-
- path = self.backend.wiki.make_path(self.path, path)
-
- return """<a href="%s">%s</a>""" % (
- path,
- alias or self.backend.wiki.get_page_title(path),
+ self.text = text
+
+ # Markdown Renderer
+ self.renderer = markdown.Markdown(
+ extensions=[
+ LinkedFilesExtractorExtension(),
+ PrettyLinksExtension(),
+ "codehilite",
+ "fenced_code",
+ "footnotes",
+ "nl2br",
+ "sane_lists",
+ "tables",
+ "toc",
+ ],
)
- def _render_external_link(self, m):
- url, alias = m.groups()
-
- return """<a class="link-external" href="%s">%s</a>""" % (url, alias or url)
-
- def _render_interwiki_link(self, m):
- wiki = m.group(1)
- if not wiki:
- return
-
- # Retrieve URL
- try:
- url, repl, icon = INTERWIKIS[wiki]
- except KeyError:
- logging.warning("Invalid interwiki: %s" % wiki)
- return
-
- # Name of the page
- name = m.group(2)
-
- # Expand URL
- url = url % {
- "name" : name,
- "url" : urllib.parse.quote(name),
- }
+ # Render!
+ self.html = self._render()
- # Get alias (if present)
- alias = m.group(3)
+ def _render_link(self, m):
+ url, text = m.groups()
- if not alias and repl:
- alias = repl % name
+ # External Links
+ for schema in self.schemas:
+ if url.startswith(schema):
+ return """<a class="link-external" href="%s">%s</a>""" % \
+ (url, text or url)
- # Put everything together
- s = []
+ # Emails
+ if "@" in url:
+ # Strip mailto:
+ if url.startswith("mailto:"):
+ url = url[7:]
- if icon:
- s.append("<span class=\"%s\"></span>" % icon)
+ return """<a class="link-external" href="mailto:%s">%s</a>""" % \
+ (url, text or url)
- s.append("""<a class="link-external" href="%s">%s</a>""" % (url, alias or name))
+ # Everything else must be an internal link
+ path = self.backend.wiki.make_path(self.path, url)
- return " ".join(s)
-
- def _render_email_link(self, m):
- address, alias = m.groups()
-
- return """<a class="link-external" href="mailto:%s">%s</a>""" \
- % (address, alias or address)
+ return """<a href="/docs%s">%s</a>""" % \
+ (path, text or self.backend.wiki.get_page_title(path))
def _render_image(self, m):
- url, alt_text, caption = m.groups()
+ alt_text, url, caption = m.groups()
+
+ # Compute a hash over the URL
+ h = hashlib.new("md5")
+ h.update(url.encode())
+ id = h.hexdigest()
+
+ html = """
+ <div class="columns is-centered">
+ <div class="column is-8">
+ <figure class="image modal-trigger" data-target="%(id)s">
+ <img src="/docs%(url)s" alt="%(caption)s">
+
+ <figcaption class="figure-caption">%(caption)s</figcaption>
+ </figure>
+
+ <div class="modal is-large" id="%(id)s">
+ <div class="modal-background"></div>
+
+ <div class="modal-content">
+ <p class="image">
+ <img src="/docs%(plain_url)s?s=1920" alt="%(caption)s"
+ loading="lazy">
+ </p>
+ </div>
+
+ <button class="modal-close is-large" aria-label="close"></button>
+ </div>
+ </div>
+ </div>
+ """
# Skip any absolute and external URLs
if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
- return """<figure class="figure"><img src="%s" class="figure-img img-fluid rounded" alt="%s">
- <figcaption class="figure-caption">%s</figcaption></figure>
- """ % (url, alt_text, caption or "")
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "plain_url" : url,
+ "url" : url,
+ }
# Try to split query string
url, delimiter, qs = url.partition("?")
args = urllib.parse.parse_qs(qs)
# Build absolute path
- url = self.backend.wiki.make_path(self.path, url)
+ plain_url = url = self.backend.wiki.make_path(self.path, url)
# Find image
file = self.backend.wiki.get_file_by_path(url)
if not "s" in args:
args["s"] = "920"
- return """<figure class="figure"><img src="%s?%s" class="figure-img img-fluid rounded" alt="%s">
- <figcaption class="figure-caption">%s</figcaption></figure>
- """ % (url, urllib.parse.urlencode(args), caption, caption or "")
+ # Append arguments to the URL
+ if args:
+ url = "%s?%s" % (url, urllib.parse.urlencode(args))
- def render(self, text):
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "plain_url" : plain_url,
+ "url" : url,
+ }
+
+ def _render(self):
logging.debug("Rendering %s" % self.path)
- # Handle wiki links
- text = self.wiki_link.sub(self._render_wiki_link, text)
+ # Render...
+ text = self.renderer.convert(self.text)
- # Handle interwiki links
- text = self.interwiki_link.sub(self._render_interwiki_link, text)
+ # Postprocess links
+ text = self._links.sub(self._render_link, text)
- # Handle external links
- text = self.external_link.sub(self._render_external_link, text)
+ # Postprocess images to <figure>
+ text = self._images.sub(self._render_image, text)
- # Handle email links
- text = self.email_link.sub(self._render_email_link, text)
+ return text
- # Borrow this from the blog
- text = self.backend.blog._render_text(text, lang="markdown")
+ @lazy_property
+ def files(self):
+ """
+ A list of all linked files that have been part of the rendered markup
+ """
+ files = []
- # Postprocess images to <figure>
- text = self.images.sub(self._render_image, text)
+ for url in self.renderer.files:
+ # Skip external images
+ if url.startswith("https://") or url.startswith("http://"):
+ continue
- return text
+ # Make the URL absolute
+ url = self.backend.wiki.make_path(self.path, url)
+
+ # Check if this is a file (it could also just be a page)
+ file = self.backend.wiki.get_file_by_path(url)
+ if file:
+ files.append(url)
+
+ return files
+
+
+class PrettyLinksExtension(markdown.extensions.Extension):
+ def extendMarkdown(self, md):
+ # Create links to Bugzilla
+ md.preprocessors.register(BugzillaLinksPreprocessor(md), "bugzilla", 10)
+
+ # Create links to CVE
+ md.preprocessors.register(CVELinksPreprocessor(md), "cve", 10)
+
+
+class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
+ regex = re.compile(r"#(\d{5,})")
+
+ def run(self, lines):
+ for line in lines:
+ yield self.regex.sub(r"[#\1](https://bugzilla.ipfire.org/show_bug.cgi?id=\1)", line)
+
+
+class CVELinksPreprocessor(markdown.preprocessors.Preprocessor):
+ regex = re.compile(r"CVE[\s\-](\d{4}\-\d+)")
+
+ def run(self, lines):
+ for line in lines:
+ yield self.regex.sub(r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)", line)
+
+
+class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
+ """
+ Finds all Linked Files
+ """
+ def run(self, root):
+ self.md.files = []
+
+ # Find all images and store the URLs
+ for image in root.findall(".//img"):
+ src = image.get("src")
+
+ self.md.files.append(src)
+
+ # Find all links
+ for link in root.findall(".//a"):
+ href = link.get("href")
+
+ self.md.files.append(href)
+
+
+class LinkedFilesExtractorExtension(markdown.extensions.Extension):
+ def extendMarkdown(self, md):
+ md.treeprocessors.register(LinkedFilesExtractor(md), "linked-files-extractor", 10)