#!/usr/bin/python3
import difflib
import hashlib
import logging
import markdown
import markdown.extensions
import markdown.preprocessors
import markdown.treeprocessors
import os.path
import re
import urllib.parse
return Page(self.backend, res.id, data=res)
def __iter__(self):
- return self._get_pages(
- "SELECT wiki.* FROM wiki_current current \
- LEFT JOIN wiki ON current.id = wiki.id \
- WHERE current.deleted IS FALSE \
- ORDER BY page",
+ return self._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_current current
+ LEFT JOIN
+ wiki ON current.id = wiki.id
+ WHERE
+ current.deleted IS FALSE
+ ORDER BY page
+ """,
)
def make_path(self, page, path):
return page and not page.was_deleted()
def get_page_title(self, page, default=None):
- # Try to retrieve title from cache
- title = self.memcache.get("wiki:title:%s" % page)
- if title:
- return title
-
- # If the title has not been in the cache, we will
- # have to look it up
doc = self.get_page(page)
if doc:
title = doc.title
else:
title = os.path.basename(page)
- # Save in cache for forever
- self.memcache.set("wiki:title:%s" % page, title)
-
return title
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
# Write page to the database
- page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
- VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)
+ page = self._get_page("""
+ INSERT INTO
+ wiki
+ (
+ page,
+ author_uid,
+ markdown,
+ changes,
+ address
+ ) VALUES (
+ %s, %s, %s, %s, %s
+ )
+ RETURNING *
+ """, page, author.uid, content or None, changes, address,
+ )
- # Update cache
- self.memcache.set("wiki:title:%s" % page.page, page.title)
+ # Store any linked files
+ page._store_linked_files()
# Send email to all watchers
page._send_watcher_emails(excludes=[author])
return ret
def search(self, query, account=None, limit=None):
- res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
- LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
- WHERE search_index.document @@ websearch_to_tsquery('english', %s) \
- ORDER BY ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC",
- query, query)
+ res = self._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_search_index search_index
+ LEFT JOIN
+ wiki ON search_index.wiki_id = wiki.id
+ WHERE
+ search_index.document @@ websearch_to_tsquery('english', %s)
+ ORDER BY
+ ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC
+ """, query, query,
+ )
pages = []
for page in res:
# ACL
def check_acl(self, page, account):
- res = self.db.query("SELECT * FROM wiki_acls \
- WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)
+ res = self.db.query("""
+ SELECT
+ *
+ FROM
+ wiki_acls
+ WHERE
+ %s ILIKE (path || '%%')
+ ORDER BY
+ LENGTH(path) DESC
+ LIMIT 1
+ """, page,
+ )
for row in res:
# Access not permitted when user is not logged in
return File(self.backend, res.id, data=res)
def get_files(self, path):
- files = self._get_files("SELECT * FROM wiki_files \
- WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path)
+ files = self._get_files("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ deleted_at IS NULL
+ ORDER BY filename
+ """, path,
+ )
return list(files)
if revision:
# Fetch a specific revision
- return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND created_at <= %s \
- ORDER BY created_at DESC LIMIT 1", path, filename, revision)
+ return self._get_file("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ filename = %s
+ AND
+ created_at <= %s
+ ORDER BY
+ created_at DESC
+ LIMIT 1
+ """, path, filename, revision,
+ )
# Fetch latest version
- return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND deleted_at IS NULL",
- path, filename)
+ return self._get_file("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ filename = %s
+ AND
+ deleted_at IS NULL
+ """, path, filename,
+ )
def get_file_by_path_and_filename(self, path, filename):
- return self._get_file("SELECT * FROM wiki_files \
- WHERE path = %s AND filename = %s AND deleted_at IS NULL",
- path, filename)
+ return self._get_file("""
+ SELECT
+ *
+ FROM
+ wiki_files
+ WHERE
+ path = %s
+ AND
+ filename = %s
+ AND
+ deleted_at IS NULL
+ """, path, filename,
+ )
def upload(self, path, filename, data, mimetype, author, address):
# Replace any existing files
file.delete(author)
# Upload the blob first
- blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
- ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
- RETURNING id", data, "MD5")
+ blob = self.db.get("""
+ INSERT INTO
+ wiki_blobs(data)
+ VALUES
+ (%s)
+ ON CONFLICT
+ (digest(data, %s))
+ DO UPDATE
+ SET data = EXCLUDED.data
+ RETURNING id
+ """, data, "MD5",
+ )
# Create entry for file
- return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
- mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
- filename, author.uid, address, mimetype, blob.id, len(data))
-
- def render(self, path, text):
- r = WikiRenderer(self.backend, path)
+ return self._get_file("""
+ INSERT INTO
+ wiki_files
+ (
+ path,
+ filename,
+ author_uid,
+ address,
+ mimetype,
+ blob_id,
+ size
+ ) VALUES (
+ %s, %s, %s, %s, %s, %s, %s
+ )
+ RETURNING *
+ """, path, filename, author.uid, address, mimetype, blob.id, len(data),
+ )
- return r.render(text)
+ def render(self, path, text, **kwargs):
+ return WikiRenderer(self.backend, path, text, **kwargs)
class Page(misc.Object):
@property
def url(self):
- return urllib.parse.urljoin("/docs", self.page)
+ return "/docs%s" % self.page
@property
def full_url(self):
- return "https://www.ipfire.org/docs%s" % self.url
+ return "https://www.ipfire.org%s" % self.url
@property
def page(self):
lines.append(line)
- return self.backend.wiki.render(self.page, "\n".join(lines))
+ renderer = self.backend.wiki.render(self.page, "\n".join(lines), revision=self.timestamp)
+
+ return renderer.html
+
+ # Linked Files
+
+ @property
+ def files(self):
+ renderer = self.backend.wiki.render(self.page, self.markdown, revision=self.timestamp)
+
+ return renderer.files
+
+ def _store_linked_files(self):
+ self.db.executemany("INSERT INTO wiki_linked_files(page_id, path) \
+ VALUES(%s, %s)", ((self.id, file) for file in self.files))
@property
def timestamp(self):
def check_acl(self, account):
return self.backend.wiki.check_acl(self.page, account)
- # Sidebar
-
- @lazy_property
- def sidebar(self):
- parts = self.page.split("/")
-
- while parts:
- sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
- if sidebar:
- return sidebar
-
- parts.pop()
-
# Watchers
@lazy_property
if isinstance(other, self.__class__):
return self.id == other.id
+ return NotImplemented
+
@property
def url(self):
- return os.path.join(self.path, self.filename)
+ return "/docs%s" % os.path.join(self.path, self.filename)
@property
def path(self):
def created_at(self):
return self.data.created_at
+ timestamp = created_at
+
def delete(self, author=None):
+ if not self.can_be_deleted():
+ raise RuntimeError("Cannot delete %s" % self)
+
self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
WHERE id = %s", author.uid if author else None, self.id)
+ def can_be_deleted(self):
+ # Cannot be deleted if still in use
+ if self.pages:
+ return False
+
+ # Can be deleted
+ return True
+
@property
def deleted_at(self):
return self.data.deleted_at
if res:
return bytes(res.data)
- def get_thumbnail(self, size):
+ async def get_thumbnail(self, size):
assert self.is_bitmap_image()
- cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))
+ cache_key = "-".join((
+ self.path,
+ util.normalize(self.filename),
+ self.created_at.isoformat(),
+ "%spx" % size,
+ ))
# Try to fetch the data from the cache
- thumbnail = self.memcache.get(cache_key)
+ thumbnail = await self.backend.cache.get(cache_key)
if thumbnail:
return thumbnail
thumbnail = util.generate_thumbnail(self.blob, size)
# Put it into the cache for forever
- self.memcache.set(cache_key, thumbnail)
+ await self.backend.cache.set(cache_key, thumbnail)
return thumbnail
+ @property
+ def pages(self):
+ """
+ Returns a list of all pages this file is linked by
+ """
+ pages = self.backend.wiki._get_pages("""
+ SELECT
+ wiki.*
+ FROM
+ wiki_linked_files
+ JOIN
+ wiki_current ON wiki_linked_files.page_id = wiki_current.id
+ LEFT JOIN
+ wiki ON wiki_linked_files.page_id = wiki.id
+ WHERE
+ wiki_linked_files.path = %s
+ ORDER BY
+ wiki.page
+ """, os.path.join(self.path, self.filename),
+ )
+
+ return list(pages)
+
class WikiRenderer(misc.Object):
schemas = (
)
# Links
- links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")
+ _links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")
# Images
- images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
+ _images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")
- def init(self, path):
+ def init(self, path, text, revision=None):
self.path = path
+ self.text = text
+
+ # Optionally, the revision of the rendered page
+ self.revision = revision
+
+ # Markdown Renderer
+ self.renderer = markdown.Markdown(
+ extensions=[
+ LinkedFilesExtractorExtension(),
+ PrettyLinksExtension(),
+ "codehilite",
+ "fenced_code",
+ "footnotes",
+ "nl2br",
+ "sane_lists",
+ "tables",
+ "toc",
+ ],
+ )
+
+ # Render!
+ self.html = self._render()
def _render_link(self, m):
url, text = m.groups()
def _render_image(self, m):
alt_text, url, caption = m.groups()
+ # Compute a hash over the URL
+ h = hashlib.new("md5")
+ h.update(url.encode())
+ id = h.hexdigest()
+
html = """
<div class="columns is-centered">
<div class="column is-8">
- <figure class="image">
- <img src="/docs%s" alt="%s">
- <figcaption class="figure-caption">%s</figcaption>
+ <figure class="image modal-trigger" data-target="%(id)s">
+ <img src="/docs%(url)s?s=640&%(args)s" alt="%(caption)s">
+
+ <figcaption class="figure-caption">%(caption)s</figcaption>
</figure>
+
+ <div class="modal is-large" id="%(id)s">
+ <div class="modal-background"></div>
+
+ <div class="modal-content">
+ <p class="image">
+ <img src="/docs%(url)s?s=1920&%(args)s" alt="%(caption)s"
+ loading="lazy">
+ </p>
+
+ <a class="button is-small" href="/docs%(url)s?action=detail">
+ <span class="icon">
+ <i class="fa-solid fa-circle-info"></i>
+ </span>
+ </a>
+ </div>
+
+ <button class="modal-close is-large" aria-label="close"></button>
+ </div>
</div>
</div>
"""
# Skip any absolute and external URLs
- if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
- return html % (url, alt_text, caption or "")
+ if url.startswith("https://") or url.startswith("http://"):
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "plain_url" : url,
+ "url" : url,
+ }
# Try to split query string
url, delimiter, qs = url.partition("?")
url = self.backend.wiki.make_path(self.path, url)
# Find image
- file = self.backend.wiki.get_file_by_path(url)
+ file = self.backend.wiki.get_file_by_path(url, revision=self.revision)
if not file or not file.is_image():
return "<!-- Could not find image %s in %s -->" % (url, self.path)
- # Scale down the image if not already done
- if not "s" in args:
- args["s"] = "920"
+ # Remove any requested size
+ if "s" in args:
+ del args["s"]
- # Append arguments to the URL
- if args:
- url = "%s?%s" % (url, urllib.parse.urlencode(args))
+ # Link the image that has been the current version at the time of the page edit
+ if file:
+ args["revision"] = file.timestamp
- return html % (url, caption, caption or "")
+ return html % {
+ "caption" : caption or "",
+ "id" : id,
+ "url" : url,
+ "args" : urllib.parse.urlencode(args),
+ }
- def render(self, text):
+ def _render(self):
logging.debug("Rendering %s" % self.path)
- # Borrow this from the blog
- text = self.backend.blog._render_text(text, lang="markdown")
+ # Render...
+ text = self.renderer.convert(self.text)
# Postprocess links
- text = self.links.sub(self._render_link, text)
+ text = self._links.sub(self._render_link, text)
# Postprocess images to <figure>
- text = self.images.sub(self._render_image, text)
+ text = self._images.sub(self._render_image, text)
return text
+
+ @lazy_property
+ def files(self):
+ """
+ A list of all linked files that have been part of the rendered markup
+ """
+ files = []
+
+ for url in self.renderer.files:
+ # Skip external images
+ if url.startswith("https://") or url.startswith("http://"):
+ continue
+
+ # Make the URL absolute
+ url = self.backend.wiki.make_path(self.path, url)
+
+ # Check if this is a file (it could also just be a page)
+ file = self.backend.wiki.get_file_by_path(url)
+ if file:
+ files.append(url)
+
+ return files
+
+
class PrettyLinksExtension(markdown.extensions.Extension):
	"""
	Installs the preprocessors that turn plain bug and CVE
	references into clickable links.
	"""
	def extendMarkdown(self, md):
		# Register each preprocessor with the same priority
		for name, preprocessor in (
			("bugzilla", BugzillaLinksPreprocessor),
			("cve", CVELinksPreprocessor),
		):
			md.preprocessors.register(preprocessor(md), name, 10)
+
+
class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns #12345 (five or more digits) into a link to IPFire's Bugzilla.
	"""
	# Dropped re.I - the pattern contains no letters, so the flag had no effect
	regex = re.compile(r"(?:#(\d{5,}))")

	def run(self, lines):
		# The Markdown API expects run() to return the modified list of
		# lines; the previous generator was single-use and would yield
		# nothing if iterated a second time
		return [
			self.regex.sub(r"[#\1](https://bugzilla.ipfire.org/show_bug.cgi?id=\1)", line)
			for line in lines
		]
+
+
class CVELinksPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns CVE references (e.g. "CVE-2021-1234" or "CVE 2021-1234")
	into links to the MITRE CVE database.
	"""
	regex = re.compile(r"(?:CVE)[\s\-](\d{4}\-\d+)")

	def run(self, lines):
		# The Markdown API expects run() to return the modified list of
		# lines; the previous generator was single-use and would yield
		# nothing if iterated a second time
		return [
			self.regex.sub(r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)", line)
			for line in lines
		]
+
+
class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
	"""
	Finds all Linked Files

	Collects the src/href attributes of all images and links in the
	rendered tree into md.files.
	"""
	def run(self, root):
		self.md.files = []

		# Find all images and store the URLs
		for image in root.findall(".//img"):
			src = image.get("src")

			# Element.get() returns None for missing attributes - skip those
			if src:
				self.md.files.append(src)

		# Find all links
		for link in root.findall(".//a"):
			href = link.get("href")

			if href:
				self.md.files.append(href)
+
+
class LinkedFilesExtractorExtension(markdown.extensions.Extension):
	"""
	Registers the LinkedFilesExtractor tree processor so that every
	render collects the URLs of all referenced files.
	"""
	def extendMarkdown(self, md):
		extractor = LinkedFilesExtractor(md)

		md.treeprocessors.register(extractor, "linked-files-extractor", 10)