#!/usr/bin/python3
import difflib
import hashlib
import logging
import os.path
import re
import urllib.parse

import markdown
import markdown.extensions
import markdown.preprocessors
import markdown.treeprocessors
return Page(self.backend, res.id, data=res)
def __iter__(self):
- return self._get_pages(
- "SELECT wiki.* FROM wiki_current current \
- LEFT JOIN wiki ON current.id = wiki.id \
- WHERE current.deleted IS FALSE \
- ORDER BY page",
+ return self._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_current current
+ LEFT JOIN
+ wiki ON current.id = wiki.id
+ WHERE
+ current.deleted IS FALSE
+ ORDER BY page
+ """,
)
def make_path(self, page, path):
# Normalise links
return os.path.normpath(path)
- def page_exists(self, path):
- page = self.get_page(path)
+ def _make_url(self, path):
+ """
+ Composes the URL out of the path
+ """
+ # Remove any leading slashes (if present)
+ path = path.removeprefix("/")
- # Page must have been found and not deleted
- return page and not page.was_deleted()
+ return os.path.join("/docs", path)
def get_page_title(self, page, default=None):
- # Try to retrieve title from cache
- title = self.memcache.get("wiki:title:%s" % page)
- if title:
- return title
-
- # If the title has not been in the cache, we will
- # have to look it up
doc = self.get_page(page)
if doc:
title = doc.title
else:
title = os.path.basename(page)
- # Save in cache for forever
- self.memcache.set("wiki:title:%s" % page, title)
-
return title
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
- assert page
+
+ # Split the path into parts
+ parts = page.split("/")
+
+ # Check if this is an action
+ if any((part.startswith("_") for part in parts)):
+ return
if revision:
return self._get_page("SELECT * FROM wiki WHERE page = %s \
page = Page.sanitise_page_name(page)
# Write page to the database
- page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
- VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)
+ page = self._get_page("""
+ INSERT INTO
+ wiki
+ (
+ page,
+ author_uid,
+ markdown,
+ changes,
+ address
+ ) VALUES (
+ %s, %s, %s, %s, %s
+ )
+ RETURNING *
+ """, page, author.uid, content or None, changes, address,
+ )
- # Update cache
- self.memcache.set("wiki:title:%s" % page.page, page.title)
+ # Store any linked files
+ page._store_linked_files()
# Send email to all watchers
page._send_watcher_emails(excludes=[author])
# Just creates a blank last version of the page
self.create_page(page, author=author, content=None, **kwargs)
- def make_breadcrumbs(self, url):
- # Split and strip all empty elements (double slashes)
- parts = list(e for e in url.split("/") if e)
-
+ def make_breadcrumbs(self, path):
ret = []
- for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
- ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))
- return ret
+ while path:
+ # Cut off everything after the last slash
+ path, _, _ = path.rpartition("/")
+
+ # Do not include the root
+ if not path:
+ break
+
+ # Find the page
+ page = self.get_page(path)
+
+ # Append the URL and title to the output
+ ret.append((
+ page.url if page else self._make_url(path),
+ page.title if page else os.path.basename(path),
+ ))
+
+ # Return the breadcrumbs in order
+ return reversed(ret)
def search(self, query, account=None, limit=None):
- res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
- LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
- WHERE search_index.document @@ websearch_to_tsquery('english', %s) \
- ORDER BY ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC",
- query, query)
+ res = self._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_search_index search_index
+ LEFT JOIN
+ wiki ON search_index.wiki_id = wiki.id
+ WHERE
+ search_index.document @@ websearch_to_tsquery('english', %s)
+ ORDER BY
+ ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC
+ """, query, query,
+ )
pages = []
for page in res:
"""
Needs to be called after a page has been changed
"""
- self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
+ self.db.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY wiki_search_index")
def get_watchlist(self, account):
- pages = self._get_pages(
- "WITH pages AS (SELECT * FROM wiki_current \
- LEFT JOIN wiki ON wiki_current.id = wiki.id) \
- SELECT * FROM wiki_watchlist watchlist \
- LEFT JOIN pages ON watchlist.page = pages.page \
- WHERE watchlist.uid = %s",
- account.uid,
+ pages = self._get_pages("""
+ WITH pages AS (
+ SELECT
+ *
+ FROM
+ wiki_current
+ LEFT JOIN
+ wiki ON wiki_current.id = wiki.id
+ )
+
+ SELECT
+ *
+ FROM
+ wiki_watchlist watchlist
+ JOIN
+ pages ON watchlist.page = pages.page
+ WHERE
+ watchlist.uid = %s
+ """, account.uid,
)
return sorted(pages)
# ACL
def check_acl(self, page, account):
- res = self.db.query("SELECT * FROM wiki_acls \
- WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)
+ res = self.db.query("""
+ SELECT
+ *
+ FROM
+ wiki_acls
+ WHERE
+ %s ILIKE (path || '%%')
+ ORDER BY
+ LENGTH(path) DESC
+ LIMIT 1
+ """, page,
+ )
for row in res:
# Access not permitted when user is not logged in
return File(self.backend, res.id, data=res)
def get_files(self, path):
- files = self._get_files("SELECT * FROM wiki_files \
- WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path)
+ files = self._get_files("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ deleted_at IS NULL
+ ORDER BY filename
+ """, path,
+ )
return list(files)
if revision:
# Fetch a specific revision
- return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND created_at <= %s \
- ORDER BY created_at DESC LIMIT 1", path, filename, revision)
+ return self._get_file("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ filename = %s
+ AND
+ created_at <= %s
+ ORDER BY
+ created_at DESC
+ LIMIT 1
+ """, path, filename, revision,
+ )
# Fetch latest version
- return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND deleted_at IS NULL",
- path, filename)
+ return self._get_file("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ filename = %s
+ AND
+ deleted_at IS NULL
+ """, path, filename,
+ )
def get_file_by_path_and_filename(self, path, filename):
- return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND deleted_at IS NULL",
- path, filename)
+ return self._get_file("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ filename = %s
+ AND
+ deleted_at IS NULL
+ """, path, filename,
+ )
def upload(self, path, filename, data, mimetype, author, address):
# Replace any existing files
file.delete(author)
# Upload the blob first
- blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
- ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
- RETURNING id", data, "MD5")
+ blob = self.db.get("""
+ INSERT INTO
+ wiki_blobs(data)
+ VALUES
+ (%s)
+ ON CONFLICT
+ (digest(data, %s))
+ DO UPDATE
+ SET data = EXCLUDED.data
+ RETURNING id
+ """, data, "MD5",
+ )
# Create entry for file
- return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
- mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
- filename, author.uid, address, mimetype, blob.id, len(data))
-
- def render(self, path, text):
- r = WikiRenderer(self.backend, path)
+ return self._get_file("""
+ INSERT INTO
+ wiki_files
+ (
+ path,
+ filename,
+ author_uid,
+ address,
+ mimetype,
+ blob_id,
+ size
+ ) VALUES (
+ %s, %s, %s, %s, %s, %s, %s
+ )
+ RETURNING *
+ """, path, filename, author.uid, address, mimetype, blob.id, len(data),
+ )
- return r.render(text)
+ def render(self, path, text, **kwargs):
+ return WikiRenderer(self.backend, path, text, **kwargs)
class Page(misc.Object):
if isinstance(other, self.__class__):
return self.id == other.id
+ return NotImplemented
+
def __lt__(self, other):
if isinstance(other, self.__class__):
if self.page == other.page:
return self.page < other.page
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(self.page)
+
@staticmethod
def sanitise_page_name(page):
if not page:
@property
def url(self):
- return self.page
+ return self.backend.wiki._make_url(self.page)
@property
def full_url(self):
- return "https://wiki.ipfire.org%s" % self.url
+ return "https://www.ipfire.org%s" % self.url
@property
def page(self):
@property
def html(self):
- return self.backend.wiki.render(self.page, self.markdown)
+ lines = []
+
+ # Strip off the first line if it contains a heading (as it will be shown separately)
+ for i, line in enumerate(self.markdown.splitlines()):
+ if i == 0 and line.startswith("#"):
+ continue
+
+ lines.append(line)
+
+ renderer = self.backend.wiki.render(self.page, "\n".join(lines), revision=self.timestamp)
+
+ return renderer.html
+
+ # Linked Files
+
+ @property
+ def files(self):
+ renderer = self.backend.wiki.render(self.page, self.markdown, revision=self.timestamp)
+
+ return renderer.files
+
+ def _store_linked_files(self):
+ self.db.executemany("INSERT INTO wiki_linked_files(page_id, path) \
+ VALUES(%s, %s)", ((self.id, file) for file in self.files))
@property
def timestamp(self):
def check_acl(self, account):
return self.backend.wiki.check_acl(self.page, account)
- # Sidebar
-
- @lazy_property
- def sidebar(self):
- parts = self.page.split("/")
-
- while parts:
- sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
- if sidebar:
- return sidebar
-
- parts.pop()
-
# Watchers
@lazy_property
self.backend.messages.send_template("wiki/messages/page-changed",
account=watcher, page=self, priority=-10)
- def restore(self, author, address):
+ def restore(self, author, address, comment=None):
changes = "Restore to revision from %s" % self.timestamp.isoformat()
+ # Append comment
+ if comment:
+ changes = "%s: %s" % (changes, comment)
+
return self.backend.wiki.create_page(self.page,
author, self.markdown, changes=changes, address=address)
if isinstance(other, self.__class__):
return self.id == other.id
+ return NotImplemented
+
@property
def url(self):
- return os.path.join(self.path, self.filename)
+ return "/docs%s" % os.path.join(self.path, self.filename)
@property
def path(self):
def created_at(self):
return self.data.created_at
+ timestamp = created_at
+
def delete(self, author=None):
+ if not self.can_be_deleted():
+ raise RuntimeError("Cannot delete %s" % self)
+
self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
WHERE id = %s", author.uid if author else None, self.id)
+ def can_be_deleted(self):
+ # Cannot be deleted if still in use
+ if self.pages:
+ return False
+
+ # Can be deleted
+ return True
+
@property
def deleted_at(self):
return self.data.deleted_at
def is_image(self):
return self.mimetype.startswith("image/")
+ def is_vector_image(self):
+ return self.mimetype in ("image/svg+xml",)
+
+ def is_bitmap_image(self):
+ return self.is_image() and not self.is_vector_image()
+
@lazy_property
def blob(self):
res = self.db.get("SELECT data FROM wiki_blobs \
if res:
return bytes(res.data)
- def get_thumbnail(self, size):
- cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))
+ async def get_thumbnail(self, size, format=None):
+ assert self.is_bitmap_image()
+
+ # Let thumbnails live in the cache for up to 24h
+ ttl = 24 * 3600
+
+ cache_key = ":".join((
+ "wiki",
+ "thumbnail",
+ self.path,
+ util.normalize(self.filename),
+ self.created_at.isoformat(),
+ format or "N/A",
+ "%spx" % size,
+ ))
# Try to fetch the data from the cache
- thumbnail = self.memcache.get(cache_key)
+ async with await self.backend.cache.pipeline() as p:
+ # Fetch the key
+ await p.get(cache_key)
+
+ # Reset the TTL
+ await p.expire(cache_key, ttl)
+
+ # Execute the pipeline
+ thumbnail, _ = await p.execute()
+
+ # Return the cached value
if thumbnail:
return thumbnail
# Generate the thumbnail
- thumbnail = util.generate_thumbnail(self.blob, size)
+ thumbnail = util.generate_thumbnail(self.blob, size, format=format, quality=95)
- # Put it into the cache for forever
- self.memcache.set(cache_key, thumbnail)
+ # Put it into the cache for 24h
+ await self.backend.cache.set(cache_key, thumbnail, ttl)
return thumbnail
+ @property
+ def pages(self):
+ """
+ Returns a list of all pages this file is linked by
+ """
+ pages = self.backend.wiki._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_linked_files
+ JOIN
+ wiki_current ON wiki_linked_files.page_id = wiki_current.id
+ LEFT JOIN
+ wiki ON wiki_linked_files.page_id = wiki.id
+ WHERE
+ wiki_linked_files.path = %s
+ ORDER BY
+ wiki.page
+ """, os.path.join(self.path, self.filename),
+ )
+
+ return list(pages)
+
class WikiRenderer(misc.Object):
schemas = (
)
# Links
- links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")
+ _links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")
# Images
- images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
+ _images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
- def init(self, path):
+ def init(self, path, text, revision=None):
self.path = path
+ self.text = text
+
+ # Optionally, the revision of the rendered page
+ self.revision = revision
+
+ # Markdown Renderer
+ self.renderer = Markdown(
+ self.backend,
+ extensions=[
+ LinkedFilesExtractorExtension(),
+ PrettyLinksExtension(),
+ "codehilite",
+ "fenced_code",
+ "footnotes",
+ "nl2br",
+ "sane_lists",
+ "tables",
+ "toc",
+ ],
+ )
+
+ # Render!
+ self.html = self._render()
def _render_link(self, m):
url, text = m.groups()
+ # Treat linkes starting with a double slash as absolute
+ if url.startswith("//"):
+ # Remove the double-lash
+ url = url.removeprefix("/")
+
+ # Return a link
+ return """<a href="%s">%s</a>""" % (url, text or url)
+
+ # External Links
+ for schema in self.schemas:
+ if url.startswith(schema):
+ return """<a class="link-external" href="%s">%s</a>""" % \
+ (url, text or url)
+
# Emails
if "@" in url:
# Strip mailto:
return """<a class="link-external" href="mailto:%s">%s</a>""" % \
(url, text or url)
- # External Links
- for schema in self.schemas:
- if url.startswith(schema):
- return """<a class="link-external" href="%s">%s</a>""" % \
- (url, text or url)
-
# Everything else must be an internal link
path = self.backend.wiki.make_path(self.path, url)
- return """<a href="%s">%s</a>""" % \
+ return """<a href="/docs%s">%s</a>""" % \
(path, text or self.backend.wiki.get_page_title(path))
def _render_image(self, m):
alt_text, url, caption = m.groups()
- # Skip any absolute and external URLs
- if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
- return """<figure class="figure"><img src="%s" class="figure-img img-fluid rounded" alt="%s">
- <figcaption class="figure-caption">%s</figcaption></figure>
- """ % (url, alt_text, caption or "")
+ # Compute a hash over the URL
+ h = hashlib.new("md5")
+ h.update(url.encode())
+ id = h.hexdigest()
+
+ html = """
+ <div class="columns is-centered">
+ <div class="column is-8">
+ <figure class="image modal-trigger" data-target="%(id)s">
+ <img src="/docs%(url)s?s=960&%(args)s" alt="%(caption)s">
+
+ <figcaption class="figure-caption">%(caption)s</figcaption>
+ </figure>
+
+ <div class="modal is-large" id="%(id)s">
+ <div class="modal-background"></div>
+
+ <div class="modal-content">
+ <p class="image">
+ <img src="/docs%(url)s?s=2048&%(args)s" alt="%(caption)s"
+ loading="lazy">
+ </p>
+
+ <a class="button is-small" href="/docs%(url)s?action=detail">
+ <span class="icon">
+ <i class="fa-solid fa-circle-info"></i>
+ </span>
+ </a>
+ </div>
+
+ <button class="modal-close is-large" aria-label="close"></button>
+ </div>
+ </div>
+ </div>
+ """
# Try to split query string
url, delimiter, qs = url.partition("?")
# Parse query arguments
args = urllib.parse.parse_qs(qs)
+ # Skip any absolute and external URLs
+ if url.startswith("https://") or url.startswith("http://"):
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "url" : url,
+ "args" : args,
+ }
+
# Build absolute path
url = self.backend.wiki.make_path(self.path, url)
# Find image
- file = self.backend.wiki.get_file_by_path(url)
+ file = self.backend.wiki.get_file_by_path(url, revision=self.revision)
if not file or not file.is_image():
return "<!-- Could not find image %s in %s -->" % (url, self.path)
- # Scale down the image if not already done
- if not "s" in args:
- args["s"] = "920"
+ # Remove any requested size
+ if "s" in args:
+ del args["s"]
+
+ # Link the image that has been the current version at the time of the page edit
+ if file:
+ args["revision"] = file.timestamp
- return """<figure class="figure"><img src="%s?%s" class="figure-img img-fluid rounded" alt="%s">
- <figcaption class="figure-caption">%s</figcaption></figure>
- """ % (url, urllib.parse.urlencode(args), caption, caption or "")
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "url" : url,
+ "args" : urllib.parse.urlencode(args),
+ }
- def render(self, text):
+ def _render(self):
logging.debug("Rendering %s" % self.path)
- # Borrow this from the blog
- text = self.backend.blog._render_text(text, lang="markdown")
+ # Render...
+ text = self.renderer.convert(self.text)
# Postprocess links
- text = self.links.sub(self._render_link, text)
+ text = self._links.sub(self._render_link, text)
# Postprocess images to <figure>
- text = self.images.sub(self._render_image, text)
+ text = self._images.sub(self._render_image, text)
return text
+
+ @lazy_property
+ def files(self):
+ """
+ A list of all linked files that have been part of the rendered markup
+ """
+ files = []
+
+ for url in self.renderer.files:
+ # Skip external images
+ if url.startswith("https://") or url.startswith("http://"):
+ continue
+
+ # Make the URL absolute
+ url = self.backend.wiki.make_path(self.path, url)
+
+ # Check if this is a file (it could also just be a page)
+ file = self.backend.wiki.get_file_by_path(url)
+ if file:
+ files.append(url)
+
+ return files
+
+
class Markdown(markdown.Markdown):
	"""
	A Markdown renderer that carries a reference to the backend,
	so that extensions (e.g. the user-mention preprocessor) can
	reach application state through ``md.backend``.
	"""
	def __init__(self, backend, *args, **kwargs):
		# Keep a handle to the backend for extensions
		self.backend = backend

		# Perform the regular Markdown setup
		super().__init__(*args, **kwargs)
+
+
class PrettyLinksExtension(markdown.extensions.Extension):
	"""
	Registers the preprocessors that expand shorthand notation
	(Bugzilla bug IDs, CVE identifiers and @mentions) into links.
	"""
	def extendMarkdown(self, md):
		preprocessors = (
			# Create links to Bugzilla
			("bugzilla", BugzillaLinksPreprocessor),

			# Create links to CVE
			("cve", CVELinksPreprocessor),

			# Link mentioned users
			("user-mention", UserMentionPreprocessor),
		)

		# Register them all with the same priority
		for name, cls in preprocessors:
			md.preprocessors.register(cls(md), name, 10)
+
+
class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns references like "#12345" into links to Bugzilla.
	"""
	# Bug IDs are five or more digits. (The pattern contains no letters,
	# so a case-insensitivity flag would be a no-op, and no alternation,
	# so no extra grouping is needed.)
	regex = re.compile(r"#(\d{5,})")

	def run(self, lines):
		# Rewrite every line, replacing bug references with Markdown links
		for line in lines:
			yield self.regex.sub(r"[#\1](https://bugzilla.ipfire.org/show_bug.cgi?id=\1)", line)
+
+
class CVELinksPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns CVE identifiers (e.g. "CVE-2024-1234") into links to MITRE.
	"""
	# Accepts "CVE-YYYY-N..." as well as "CVE YYYY-N..."
	regex = re.compile(r"(?:CVE)[\s\-](\d{4}\-\d+)")

	def run(self, lines):
		# Replace all CVE identifiers on every line with Markdown links
		yield from (
			self.regex.sub(r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)", line)
			for line in lines
		)
+
+
class UserMentionPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns "@handle" mentions into links to the user's profile page.
	"""
	# \B requires that the "@" is NOT preceded by a word character, so
	# standalone mentions like "@jdoe" (at the start of a line or after
	# whitespace/punctuation) match, while email-like text such as
	# "john@example.org" does not. The previous pattern used \b, which
	# has exactly the opposite effect: it ONLY matched when a word
	# character came directly before the "@" and therefore never linked
	# a standalone mention.
	regex = re.compile(r"\B@(\w+)")

	def run(self, lines):
		for line in lines:
			yield self.regex.sub(self._replace, line)

	def _replace(self, m):
		# Fetch the user's handle
		uid, = m.groups()

		# Fetch the user
		user = self.md.backend.accounts.get_by_uid(uid)

		# If the user was not found, we put back the matched text
		if not user:
			return m.group(0)

		# Link the user
		return "[%s](//users/%s)" % (user, user.uid)
+
+
class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
	"""
	Walks the rendered document tree and collects the targets of all
	images and links on the Markdown instance as ``md.files``.
	"""
	def __init__(self, *args, **kwargs):
		super().__init__(*args, **kwargs)

		# Start with an empty list of linked files
		self.md.files = []

	def run(self, root):
		# Remember the source of every embedded image
		self.md.files.extend(
			image.get("src") for image in root.findall(".//img")
		)

		# Remember the target of every link
		self.md.files.extend(
			link.get("href") for link in root.findall(".//a")
		)
+
+
class LinkedFilesExtractorExtension(markdown.extensions.Extension):
	"""
	Registers the LinkedFilesExtractor treeprocessor so that linked
	files are collected whenever a page is rendered.
	"""
	def extendMarkdown(self, md):
		# Collect all linked files after the document tree has been built
		extractor = LinkedFilesExtractor(md)

		md.treeprocessors.register(extractor, "linked-files-extractor", 10)