#!/usr/bin/python3
import difflib
+import hashlib
import logging
+import markdown
+import markdown.extensions
+import markdown.preprocessors
import os.path
import re
-import tornado.gen
import urllib.parse
from . import misc
from . import util
from .decorators import *
-INTERWIKIS = {
- "google" : ("https://www.google.com/search?q=%(url)s", None, "fab fa-google"),
- "rfc" : ("https://tools.ietf.org/html/rfc%(name)s", "RFC %s", None),
- "wp" : ("https://en.wikipedia.org/wiki/%(name)s", None, "fab fa-wikipedia-w"),
-}
-
class Wiki(misc.Object):
def _get_pages(self, query, *args):
res = self.db.query(query, *args)
if res:
return Page(self.backend, res.id, data=res)
- def get_page_title(self, page, default=None):
- # Try to retrieve title from cache
- title = self.memcache.get("wiki:title:%s" % page)
- if title:
- return title
+ def __iter__(self):
+ # Iterate over all current, non-deleted wiki pages, ordered by path
+ return self._get_pages(
+ "SELECT wiki.* FROM wiki_current current \
+ LEFT JOIN wiki ON current.id = wiki.id \
+ WHERE current.deleted IS FALSE \
+ ORDER BY page",
+ )
- # If the title has not been in the cache, we will
- # have to look it up
+ def make_path(self, page, path):
+ # Resolve a (possibly relative) link target against the page it
+ # appears on, returning a normalised absolute wiki path
+
+ # Nothing to do for absolute links
+ if path.startswith("/"):
+ pass
+
+ # Relative links (one-level down)
+ elif path.startswith("./"):
+ path = os.path.join(page, path)
+
+ # All other relative links
+ # (relative to the directory that contains the page)
+ else:
+ p = os.path.dirname(page)
+ path = os.path.join(p, path)
+
+ # Normalise links
+ return os.path.normpath(path)
+
+ def page_exists(self, path):
+ # A page exists when it can be fetched and its latest revision
+ # has not been deleted
+ page = self.get_page(path)
+
+ # Page must have been found and not deleted
+ return page and not page.was_deleted()
+
+ def get_page_title(self, page, default=None):
doc = self.get_page(page)
if doc:
title = doc.title
else:
title = os.path.basename(page)
- # Save in cache for forever
- self.memcache.set("wiki:title:%s" % page, title)
-
return title
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
- assert page
+
+ # Split the path into parts
+ parts = page.split("/")
+
+ # Check if this is an action
+ if any((part.startswith("_") for part in parts)):
+ return
if revision:
return self._get_page("SELECT * FROM wiki WHERE page = %s \
def get_recent_changes(self, account, limit=None):
pages = self._get_pages("SELECT * FROM wiki \
- WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
ORDER BY timestamp DESC")
for page in pages:
page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)
- # Update cache
- self.memcache.set("wiki:title:%s" % page.page, page.title)
+ # Store any linked files
+ page._store_linked_files()
# Send email to all watchers
page._send_watcher_emails(excludes=[author])
return ret
def search(self, query, account=None, limit=None):
- query = util.parse_search_query(query)
-
res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
- WHERE search_index.document @@ to_tsquery('english', %s) \
- ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
+ WHERE search_index.document @@ websearch_to_tsquery('english', %s) \
+ ORDER BY ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC",
query, query)
pages = []
"""
self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
+ def get_watchlist(self, account):
+ # Return the current revision of every page on the given
+ # account's watchlist, sorted using Page ordering
+ pages = self._get_pages("""
+ WITH pages AS (
+ SELECT
+ *
+ FROM
+ wiki_current
+ LEFT JOIN
+ wiki ON wiki_current.id = wiki.id
+ )
+
+ SELECT
+ *
+ FROM
+ wiki_watchlist watchlist
+ JOIN
+ pages ON watchlist.page = pages.page
+ WHERE
+ watchlist.uid = %s
+ """, account.uid,
+ )
+
+ return sorted(pages)
+
# ACL
def check_acl(self, page, account):
# If user is in a matching group, we grant permission
for group in row.groups:
- if group in account.groups:
+ if account.is_member_of_group(group):
return True
# Otherwise access is not permitted
return list(files)
- def get_file_by_path(self, path):
+ def get_file_by_path(self, path, revision=None):
+ # Return the file stored at path; if a revision (timestamp) is
+ # given, return the version that was current at that time
path, filename = os.path.dirname(path), os.path.basename(path)
+ if revision:
+ # Fetch a specific revision
+ # (the newest entry created at or before the given timestamp)
+ return self._get_file("SELECT * FROM wiki_files \
+ WHERE path = %s AND filename = %s AND created_at <= %s \
+ ORDER BY created_at DESC LIMIT 1", path, filename, revision)
+
+ # Fetch latest version
+ return self._get_file("SELECT * FROM wiki_files \
+ WHERE path = %s AND filename = %s AND deleted_at IS NULL",
+ path, filename)
+
+ def get_file_by_path_and_filename(self, path, filename):
return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND deleted_at IS NULL", path, filename)
+ WHERE path = %s AND filename = %s AND deleted_at IS NULL",
+ path, filename)
def upload(self, path, filename, data, mimetype, author, address):
+ # Replace any existing files
+ # (the old file is soft-deleted so its history is preserved)
+ file = self.get_file_by_path_and_filename(path, filename)
+ if file:
+ file.delete(author)
+
# Upload the blob first
- blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data)
+ # Deduplicate blobs by their MD5 digest: on conflict, reuse the
+ # existing row instead of storing the same data twice
+ blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
+ ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
+ RETURNING id", data, "MD5")
# Create entry for file
return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
filename, author.uid, address, mimetype, blob.id, len(data))
- def find_image(self, path, filename):
- for p in (path, os.path.dirname(path)):
- file = self.get_file_by_path(os.path.join(p, filename))
-
- if file and file.is_image():
- return file
+ def render(self, path, text):
+ return WikiRenderer(self.backend, path, text)
class Page(misc.Object):
- # External links
- external_link = re.compile(r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))\]\]")
-
- # Interwiki links e.g. [[wp>IPFire]]
- interwiki_link = re.compile(r"\[\[(\w+)>(.+?)(?:\|(.+?))?\]\]")
-
def init(self, id, data=None):
self.id = id
self.data = data
if isinstance(other, self.__class__):
return self.id == other.id
+ return NotImplemented
+
def __lt__(self, other):
if isinstance(other, self.__class__):
if self.page == other.page:
return self.page < other.page
+ return NotImplemented
+
@staticmethod
def sanitise_page_name(page):
if not page:
@property
def url(self):
- return self.page
+ return "/docs%s" % self.page
@property
def full_url(self):
- return "https://wiki.ipfire.org%s" % self.url
+ return "https://www.ipfire.org%s" % self.url
@property
def page(self):
# Find first H1 headline in markdown
markdown = self.markdown.splitlines()
- m = re.match(r"^# (.*)( #)?$", markdown[0])
+ m = re.match(r"^#\s*(.*)( #)?$", markdown[0])
if m:
return m.group(1)
if self.data.author_uid:
return self.backend.accounts.get_by_uid(self.data.author_uid)
- def _render_external_link(self, m):
- url, alias = m.groups()
-
- return """<a class="link-external" href="%s">%s</a>""" % (url, alias or url)
-
- def _render_interwiki_link(self, m):
- wiki = m.group(1)
- if not wiki:
- return
-
- # Retrieve URL
- try:
- url, repl, icon = INTERWIKIS[wiki]
- except KeyError:
- logging.warning("Invalid interwiki: %s" % wiki)
- return
-
- # Name of the page
- name = m.group(2)
-
- # Expand URL
- url = url % {
- "name" : name,
- "url" : urllib.parse.quote(name),
- }
-
- # Get alias (if present)
- alias = m.group(3)
-
- if not alias and repl:
- alias = repl % name
-
- # Put everything together
- s = []
-
- if icon:
- s.append("<span class=\"%s\"></span>" % icon)
-
- s.append("""<a class="link-external" href="%s">%s</a>""" % (url, alias or name))
-
- return " ".join(s)
-
- def _render(self, text):
- logging.debug("Rendering %s" % self)
-
- # Link images
- replacements = []
- for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", text):
- alt_text, url = match.groups()
-
- # Skip any absolute and external URLs
- if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
- continue
-
- # Try to split query string
- url, delimiter, qs = url.partition("?")
+ @property
+ def markdown(self):
+ # Raw markdown source of this revision; empty string when unset
+ return self.data.markdown or ""
- # Parse query arguments
- args = urllib.parse.parse_qs(qs)
+ @property
+ def html(self):
+ lines = []
- # Find image
- file = self.backend.wiki.find_image(self.page, url)
- if not file:
+ # Strip off the first line if it contains a heading (as it will be shown separately)
+ for i, line in enumerate(self.markdown.splitlines()):
+ if i == 0 and line.startswith("#"):
continue
- # Scale down the image if not already done
- if not "s" in args:
- args["s"] = "768"
-
- # Format URL
- url = "%s?%s" % (file.url, urllib.parse.urlencode(args))
-
- replacements.append((match.span(), file, alt_text, url))
-
- # Apply all replacements
- for (start, end), file, alt_text, url in reversed(replacements):
- text = text[:start] + "[![%s](%s)](%s?action=detail)" % (alt_text, url, file.url) + text[end:]
-
- # Handle interwiki links
- text = self.interwiki_link.sub(self._render_interwiki_link, text)
-
- # Handle external links
- text = self.external_link.sub(self._render_external_link, text)
-
- # Add wiki links
- patterns = (
- (r"\[\[([\w\d\/\-\.]+)(?:\|(.+?))\]\]", r"\1", r"\2", None, True),
- (r"\[\[([\w\d\/\-\.]+)\]\]", r"\1", r"\1", self.backend.wiki.get_page_title, True),
-
- # Mail
- (r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)\]\]",
- r"\1", r"\1", None, False),
- )
+ lines.append(line)
- for pattern, link, title, repl, internal in patterns:
- replacements = []
+ renderer = self.backend.wiki.render(self.page, "\n".join(lines))
- for match in re.finditer(pattern, text):
- l = match.expand(link)
- t = match.expand(title)
+ return renderer.html
- if internal:
- # Allow relative links
- if not l.startswith("/"):
- l = os.path.join(self.page, l)
-
- # Normalise links
- l = os.path.normpath(l)
-
- if callable(repl):
- t = repl(l) or t
-
- replacements.append((match.span(), t or l, l))
-
- # Apply all replacements
- for (start, end), t, l in reversed(replacements):
- text = text[:start] + "[%s](%s)" % (t, l) + text[end:]
-
- # Borrow this from the blog
- return self.backend.blog._render_text(text, lang="markdown")
+ # Linked Files
@property
- def markdown(self):
- return self.data.markdown or ""
+ def files(self):
+ renderer = self.backend.wiki.render(self.page, self.markdown)
- @property
- def html(self):
- return self._render(self.markdown)
+ return renderer.files
+
+ def _store_linked_files(self):
+ self.db.executemany("INSERT INTO wiki_linked_files(page_id, path) \
+ VALUES(%s, %s)", ((self.id, file) for file in self.files))
@property
def timestamp(self):
return self.data.timestamp
def was_deleted(self):
- return self.markdown is None
+ return not self.markdown
@lazy_property
def breadcrumbs(self):
return self.backend.wiki.make_breadcrumbs(self.page)
+ def is_latest_revision(self):
+ # True when this object equals the newest revision of the page
+ return self.get_latest_revision() == self
+
def get_latest_revision(self):
revisions = self.get_revisions()
def check_acl(self, account):
return self.backend.wiki.check_acl(self.page, account)
- # Sidebar
-
- @lazy_property
- def sidebar(self):
- parts = self.page.split("/")
-
- while parts:
- sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
- if sidebar:
- return sidebar
-
- parts.pop()
-
# Watchers
@lazy_property
logging.debug("Excluding %s" % watcher)
continue
+ # Check permissions
+ if not self.backend.wiki.check_acl(self.page, watcher):
+ logging.debug("Watcher %s does not have permissions" % watcher)
+ continue
+
logging.debug("Sending watcher email to %s" % watcher)
# Compose message
self.backend.messages.send_template("wiki/messages/page-changed",
- recipients=[watcher], page=self, priority=-10)
+ account=watcher, page=self, priority=-10)
+
+ def restore(self, author, address, comment=None):
+ # Re-publish this (historic) revision as a new, latest revision
+ changes = "Restore to revision from %s" % self.timestamp.isoformat()
+
+ # Append comment
+ if comment:
+ changes = "%s: %s" % (changes, comment)
+
+ return self.backend.wiki.create_page(self.page,
+ author, self.markdown, changes=changes, address=address)
class File(misc.Object):
self.id = id
self.data = data
+ def __eq__(self, other):
+ # Two File objects are equal when they reference the same database row
+ if isinstance(other, self.__class__):
+ return self.id == other.id
+
+ # Defer comparison with foreign types
+ return NotImplemented
+
@property
def url(self):
- return os.path.join(self.path, self.filename)
+ return "/docs%s" % os.path.join(self.path, self.filename)
@property
def path(self):
def created_at(self):
return self.data.created_at
+ def delete(self, author=None):
+ # Soft-delete this file; raises RuntimeError while it is still
+ # linked by any page (see can_be_deleted)
+ if not self.can_be_deleted():
+ raise RuntimeError("Cannot delete %s" % self)
+
+ # Mark the row as deleted instead of removing it, keeping history
+ self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
+ WHERE id = %s", author.uid if author else None, self.id)
+
+ def can_be_deleted(self):
+ # A file may only be deleted when no current page links to it
+ # Cannot be deleted if still in use
+ if self.pages:
+ return False
+
+ # Can be deleted
+ return True
+
+ @property
+ def deleted_at(self):
+ # Timestamp of the soft deletion (NULL/None while the file is alive)
+ return self.data.deleted_at
+
+ def get_latest_revision(self):
+ # Revisions are ordered newest-first, so the first one is the latest
+ revisions = self.get_revisions()
+
+ # Return first object
+ for rev in revisions:
+ return rev
+
+ def get_revisions(self):
+ # All revisions of this file (same path and filename), newest first
+ revisions = self.backend.wiki._get_files("SELECT * FROM wiki_files \
+ WHERE path = %s AND filename = %s ORDER BY created_at DESC", self.path, self.filename)
+
+ return list(revisions)
+
def is_pdf(self):
return self.mimetype in ("application/pdf", "application/x-pdf")
def is_image(self):
return self.mimetype.startswith("image/")
+ def is_vector_image(self):
+ # Currently only SVG counts as a vector image
+ return self.mimetype in ("image/svg+xml",)
+
+ def is_bitmap_image(self):
+ # Any image that is not a vector image (i.e. can be thumbnailed)
+ return self.is_image() and not self.is_vector_image()
+
@lazy_property
def blob(self):
res = self.db.get("SELECT data FROM wiki_blobs \
if res:
return bytes(res.data)
- def get_thumbnail(self, size):
- cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))
+ async def get_thumbnail(self, size):
+ # Thumbnails are only generated for bitmap images
+ assert self.is_bitmap_image()
+
+ # Cache key identifies file, revision (created_at) and size
+ cache_key = "-".join((
+ self.path,
+ util.normalize(self.filename),
+ self.created_at.isoformat(),
+ "%spx" % size,
+ ))
# Try to fetch the data from the cache
- thumbnail = self.memcache.get(cache_key)
+ thumbnail = await self.backend.cache.get(cache_key)
if thumbnail:
return thumbnail
thumbnail = util.generate_thumbnail(self.blob, size)
# Put it into the cache for forever
- self.memcache.set(cache_key, thumbnail)
+ await self.backend.cache.set(cache_key, thumbnail)
return thumbnail
+
+ @property
+ def pages(self):
+ """
+ Returns a list of all pages this file is linked by
+ """
+ # Only links from the *current* revision of a page count
+ # (wiki_linked_files joins against wiki_current)
+ pages = self.backend.wiki._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_linked_files
+ JOIN
+ wiki_current ON wiki_linked_files.page_id = wiki_current.id
+ LEFT JOIN
+ wiki ON wiki_linked_files.page_id = wiki.id
+ WHERE
+ wiki_linked_files.path = %s
+ ORDER BY
+ wiki.page
+ """, os.path.join(self.path, self.filename),
+ )
+
+ return list(pages)
+
+
+class WikiRenderer(misc.Object):
+ """
+ Renders the markdown of a single wiki page to HTML and keeps
+ track of all files that the rendered markup links to.
+ """
+ # URL schemas that are treated as external links
+ schemas = (
+ "ftp://",
+ "git://",
+ "http://",
+ "https://",
+ "rsync://",
+ "sftp://",
+ "ssh://",
+ "webcal://",
+ )
+
+ # Links
+ _links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")
+
+ # Images
+ _images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
+
+ def init(self, path, text):
+ # path is the wiki path of the page being rendered,
+ # text is its raw markdown source
+ self.path = path
+ self.text = text
+
+ # Markdown Renderer
+ self.renderer = markdown.Markdown(
+ extensions=[
+ LinkedFilesExtractorExtension(),
+ PrettyLinksExtension(),
+ "codehilite",
+ "fenced_code",
+ "footnotes",
+ "nl2br",
+ "sane_lists",
+ "tables",
+ "toc",
+ ],
+ )
+
+ # Render!
+ # (eagerly, so self.html is available right after construction)
+ self.html = self._render()
+
+ def _render_link(self, m):
+ # Post-process a single rendered <a> tag
+ url, text = m.groups()
+
+ # External Links
+ for schema in self.schemas:
+ if url.startswith(schema):
+ return """<a class="link-external" href="%s">%s</a>""" % \
+ (url, text or url)
+
+ # Emails
+ if "@" in url:
+ # Strip mailto:
+ if url.startswith("mailto:"):
+ url = url[7:]
+
+ return """<a class="link-external" href="mailto:%s">%s</a>""" % \
+ (url, text or url)
+
+ # Everything else must be an internal link
+ # (resolved relative to the current page; falls back to the
+ # page title when the link carries no text)
+ path = self.backend.wiki.make_path(self.path, url)
+
+ return """<a href="/docs%s">%s</a>""" % \
+ (path, text or self.backend.wiki.get_page_title(path))
+
+ def _render_image(self, m):
+ # Post-process a single rendered <img> tag into a <figure>
+ # with a click-to-zoom modal
+ alt_text, url, caption = m.groups()
+
+ # Compute a hash over the URL
+ # (used as a stable, unique id for the modal element)
+ h = hashlib.new("md5")
+ h.update(url.encode())
+ id = h.hexdigest()
+
+ html = """
+ <div class="columns is-centered">
+ <div class="column is-8">
+ <figure class="image modal-trigger" data-target="%(id)s">
+ <img src="/docs%(url)s" alt="%(caption)s">
+
+ <figcaption class="figure-caption">%(caption)s</figcaption>
+ </figure>
+
+ <div class="modal is-large" id="%(id)s">
+ <div class="modal-background"></div>
+
+ <div class="modal-content">
+ <p class="image">
+ <img src="/docs%(plain_url)s?s=1920" alt="%(caption)s"
+ loading="lazy">
+ </p>
+ </div>
+
+ <button class="modal-close is-large" aria-label="close"></button>
+ </div>
+ </div>
+ </div>
+ """
+
+ # Skip any absolute and external URLs
+ if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "plain_url" : url,
+ "url" : url,
+ }
+
+ # Try to split query string
+ url, delimiter, qs = url.partition("?")
+
+ # Parse query arguments
+ args = urllib.parse.parse_qs(qs)
+
+ # Build absolute path
+ plain_url = url = self.backend.wiki.make_path(self.path, url)
+
+ # Find image
+ # (leave an HTML comment behind if it cannot be found)
+ file = self.backend.wiki.get_file_by_path(url)
+ if not file or not file.is_image():
+ return "<!-- Could not find image %s in %s -->" % (url, self.path)
+
+ # Scale down the image if not already done
+ if not "s" in args:
+ args["s"] = "920"
+
+ # Append arguments to the URL
+ if args:
+ url = "%s?%s" % (url, urllib.parse.urlencode(args))
+
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "plain_url" : plain_url,
+ "url" : url,
+ }
+
+ def _render(self):
+ logging.debug("Rendering %s" % self.path)
+
+ # Render...
+ text = self.renderer.convert(self.text)
+
+ # Postprocess links
+ text = self._links.sub(self._render_link, text)
+
+ # Postprocess images to <figure>
+ text = self._images.sub(self._render_image, text)
+
+ return text
+
+ @lazy_property
+ def files(self):
+ """
+ A list of all linked files that have been part of the rendered markup
+ """
+ files = []
+
+ # self.renderer.files is collected by LinkedFilesExtractor during convert()
+ for url in self.renderer.files:
+ # Skip external images
+ if url.startswith("https://") or url.startswith("http://"):
+ continue
+
+ # Make the URL absolute
+ url = self.backend.wiki.make_path(self.path, url)
+
+ # Check if this is a file (it could also just be a page)
+ file = self.backend.wiki.get_file_by_path(url)
+ if file:
+ files.append(url)
+
+ return files
+
+
+class PrettyLinksExtension(markdown.extensions.Extension):
+ """
+ Markdown extension that turns plain-text bug and CVE references
+ into links before the markdown is parsed.
+ """
+ def extendMarkdown(self, md):
+ # Create links to Bugzilla
+ md.preprocessors.register(BugzillaLinksPreprocessor(md), "bugzilla", 10)
+
+ # Create links to CVE
+ md.preprocessors.register(CVELinksPreprocessor(md), "cve", 10)
+
+
+class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
+ # Matches #NNNNN with five or more digits
+ regex = re.compile(r"(?:#(\d{5,}))", re.I)
+
+ def run(self, lines):
+ # Replace every bug reference with a markdown link to Bugzilla
+ for line in lines:
+ yield self.regex.sub(r"[#\1](https://bugzilla.ipfire.org/show_bug.cgi?id=\1)", line)
+
+
+class CVELinksPreprocessor(markdown.preprocessors.Preprocessor):
+ # Matches "CVE-YYYY-NNNN" or "CVE YYYY-NNNN"
+ regex = re.compile(r"(?:CVE)[\s\-](\d{4}\-\d+)")
+
+ def run(self, lines):
+ # Replace every CVE reference with a markdown link to MITRE
+ for line in lines:
+ yield self.regex.sub(r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)", line)
+
+
+class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
+ """
+ Finds all Linked Files
+ """
+ # NOTE(review): relies on the markdown package importing its
+ # treeprocessors submodule internally; consider importing
+ # markdown.treeprocessors explicitly at the top of the file
+ def run(self, root):
+ # Collect targets on the Markdown instance so the renderer can
+ # read them after convert()
+ self.md.files = []
+
+ # Find all images and store the URLs
+ for image in root.findall(".//img"):
+ src = image.get("src")
+
+ self.md.files.append(src)
+
+ # Find all links
+ for link in root.findall(".//a"):
+ href = link.get("href")
+
+ self.md.files.append(href)
+
+
+class LinkedFilesExtractorExtension(markdown.extensions.Extension):
+ """
+ Registers LinkedFilesExtractor which records every image source
+ and link target seen while rendering.
+ """
+ def extendMarkdown(self, md):
+ md.treeprocessors.register(LinkedFilesExtractor(md), "linked-files-extractor", 10)