#!/usr/bin/python3
import difflib
import hashlib
import logging
import markdown
import markdown.extensions
import markdown.preprocessors
import markdown.treeprocessors
import os.path
import re
import urllib.parse
if res:
return Page(self.backend, res.id, data=res)
def __iter__(self):
    """
    Iterates over all current (i.e. not deleted) wiki pages,
    ordered by their path.
    """
    return self._get_pages(
        "SELECT wiki.* FROM wiki_current current \
            LEFT JOIN wiki ON current.id = wiki.id \
            WHERE current.deleted IS FALSE \
            ORDER BY page",
    )
+
def make_path(self, page, path):
    """
    Resolves *path*, as used in a link on *page*, into an
    absolute, normalised wiki path.
    """
    # Absolute links are taken as they are; everything else is relative
    if not path.startswith("/"):
        # "./" links point below the current page, any other
        # relative link is resolved against the parent directory
        base = page if path.startswith("./") else os.path.dirname(page)

        path = os.path.join(base, path)

    # Collapse ".", ".." and duplicate separators
    return os.path.normpath(path)
+
def page_exists(self, path):
    """
    Returns True when a page exists at *path* and has not been deleted.
    """
    doc = self.get_page(path)

    # Nothing found at all
    if not doc:
        return doc

    # Found, but it must not have been deleted
    return not doc.was_deleted()
+
def get_page_title(self, page, default=None):
    """
    Returns the title of *page*.

    Falls back to *default* — or, when no default is given, to the
    basename of the page path — if the page cannot be found.
    """
    doc = self.get_page(page)
    if doc:
        return doc.title

    # Fix: the "default" parameter was silently ignored; honour it again
    return default or os.path.basename(page)
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
- assert page
+
+ # Split the path into parts
+ parts = page.split("/")
+
+ # Check if this is an action
+ if any((part.startswith("_") for part in parts)):
+ return
if revision:
return self._get_page("SELECT * FROM wiki WHERE page = %s \
def get_recent_changes(self, account, limit=None):
pages = self._get_pages("SELECT * FROM wiki \
- WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
ORDER BY timestamp DESC")
for page in pages:
def create_page(self, page, author, content, changes=None, address=None):
    """
    Creates a new revision of *page*, written by *author*.

    Stores the revision, remembers any files the markup links to, and
    notifies all watchers (except the author) by email.
    """
    page = Page.sanitise_page_name(page)

    # Write page to the database
    page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
        VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)

    # Store any linked files
    page._store_linked_files()

    # Send email to all watchers
    page._send_watcher_emails(excludes=[author])

    return page
+
def delete_page(self, page, author, **kwargs):
# Do nothing if the page does not exist
if not self.get_page(page):
return ret
def search(self, query, account=None, limit=None):
    """
    Performs a full-text search, returning a list of matching pages,
    best matches first.

    Pages the given *account* may not read are filtered out; at most
    *limit* pages are returned (all matches when limit is falsy).
    """
    res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
        LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
        WHERE search_index.document @@ websearch_to_tsquery('english', %s) \
        ORDER BY ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC",
        query, query)

    pages = []
    for page in res:
        # Skip any pages the user doesn't have permission for
        if not page.check_acl(account):
            continue

        # Return any other pages
        pages.append(page)

        # Break when we have found enough pages
        if limit and len(pages) >= limit:
            break

    return pages
+
def refresh(self):
    """
    Needs to be called after a page has been changed,
    so that the search index reflects the new content.
    """
    self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
def get_watchlist(self, account):
    """
    Returns all current pages on *account*'s watchlist, sorted.
    """
    pages = self._get_pages("""
        WITH pages AS (
            SELECT
                *
            FROM
                wiki_current
            LEFT JOIN
                wiki ON wiki_current.id = wiki.id
        )

        SELECT
            *
        FROM
            wiki_watchlist watchlist
        JOIN
            pages ON watchlist.page = pages.page
        WHERE
            watchlist.uid = %s
        """, account.uid,
    )

    return sorted(pages)
+
# ACL
def check_acl(self, page, account):
# If user is in a matching group, we grant permission
for group in row.groups:
- if group in account.groups:
+ if account.is_member_of_group(group):
return True
# Otherwise access is not permitted
return list(files)
def get_file_by_path(self, path, revision=None):
    """
    Returns the file at *path* — the latest version, or, when
    *revision* (a timestamp) is given, the version that was current
    at that time.
    """
    path, filename = os.path.dirname(path), os.path.basename(path)

    if revision:
        # Fetch a specific revision
        # NOTE(review): this branch does not filter on deleted_at, so it
        # may return a since-deleted version — confirm that is intended
        return self._get_file("SELECT * FROM wiki_files \
            WHERE path = %s AND filename = %s AND created_at <= %s \
            ORDER BY created_at DESC LIMIT 1", path, filename, revision)

    # Fetch latest version
    return self._get_file("SELECT * FROM wiki_files \
        WHERE path = %s AND filename = %s AND deleted_at IS NULL",
        path, filename)

def get_file_by_path_and_filename(self, path, filename):
    # Like get_file_by_path(), but with directory and filename already split
    return self._get_file("SELECT * FROM wiki_files \
        WHERE path = %s AND filename = %s AND deleted_at IS NULL",
        path, filename)
def upload(self, path, filename, data, mimetype, author, address):
    """
    Stores *data* as a new file at path/filename, replacing any
    existing file of the same name.
    """
    # Replace any existing files
    # NOTE(review): File.delete() raises RuntimeError when the file is
    # still linked by a page — confirm replacing a linked file is intended
    file = self.get_file_by_path_and_filename(path, filename)
    if file:
        file.delete(author)

    # Upload the blob first (deduplicated on the MD5 digest of the data)
    blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) \
        ON CONFLICT (digest(data, %s)) DO UPDATE SET data = EXCLUDED.data \
        RETURNING id", data, "MD5")

    # Create entry for file
    return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
        mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
        filename, author.uid, address, mimetype, blob.id, len(data))
- def find_image(self, path, filename):
- for p in (path, os.path.dirname(path)):
- file = self.get_file_by_path(os.path.join(p, filename))
-
- if file and file.is_image():
- return file
def render(self, path, text):
    # Renders *text* (markdown) in the context of *path*; the returned
    # WikiRenderer exposes the result as .html and the linked files as .files
    return WikiRenderer(self.backend, path, text)
class Page(misc.Object):
self.id = id
self.data = data
def __repr__(self):
    # e.g. <Page /some/page 2021-01-01 00:00:00>
    return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)

def __eq__(self, other):
    # Revisions are equal when they share the same database ID
    if isinstance(other, self.__class__):
        return self.id == other.id

    return NotImplemented
+
def __lt__(self, other):
if isinstance(other, self.__class__):
if self.page == other.page:
return self.page < other.page
+ return NotImplemented
+
@staticmethod
def sanitise_page_name(page):
if not page:
@property
def url(self):
    # Path under which this page is served
    return "/docs%s" % self.page

@property
def full_url(self):
    # Absolute URL (assumes the canonical host — TODO confirm)
    return "https://www.ipfire.org%s" % self.url
@property
def page(self):
@property
def title(self):
    # Prefer the first H1 heading of the markup, fall back to the basename
    return self._title or os.path.basename(self.page[1:])
@property
def _title(self):
    """
    Extracts the title from the first line of the markdown when it
    is an H1 heading; returns None otherwise.
    """
    # "lines" instead of "markdown" to avoid shadowing the imported module
    lines = self.markdown.splitlines()

    # Fix: deleted/empty pages have no content at all and used to
    # raise IndexError on lines[0]
    if not lines:
        return None

    # Find first H1 headline in markdown; any closing hashes are
    # stripped (the previous greedy group never matched them)
    m = re.match(r"^#\s*(.*?)(?:\s+#+)?\s*$", lines[0])
    if m:
        return m.group(1)
if self.data.author_uid:
return self.backend.accounts.get_by_uid(self.data.author_uid)
@property
def markdown(self):
    # Raw markdown source of this revision ("" for deleted/empty pages)
    return self.data.markdown or ""
@property
def html(self):
    """
    This page rendered to HTML.

    The leading heading is dropped from the markup because the title
    is shown separately by the templates.
    """
    lines = []

    # Strip off the first line if it contains a heading (as it will be shown separately)
    for i, line in enumerate(self.markdown.splitlines()):
        if i == 0 and line.startswith("#"):
            continue

        lines.append(line)

    renderer = self.backend.wiki.render(self.page, "\n".join(lines))

    return renderer.html
# Linked Files

@property
def files(self):
    # All files that are referenced by this page's markup
    renderer = self.backend.wiki.render(self.page, self.markdown)

    return renderer.files

def _store_linked_files(self):
    # Remember which files this revision links to, so that files
    # in use cannot be deleted
    self.db.executemany("INSERT INTO wiki_linked_files(page_id, path) \
        VALUES(%s, %s)", ((self.id, file) for file in self.files))
@property
def timestamp(self):
    # When this revision was created
    return self.data.timestamp

def was_deleted(self):
    # A revision without any content marks a deleted page
    return not self.markdown
@lazy_property
def breadcrumbs(self):
    # Parent pages, for navigation
    return self.backend.wiki.make_breadcrumbs(self.page)

def is_latest_revision(self):
    # True when no newer revision of this page exists
    return self.get_latest_revision() == self
+
def get_latest_revision(self):
revisions = self.get_revisions()
return self.backend.wiki._get_pages("SELECT * FROM wiki \
WHERE page = %s ORDER BY timestamp DESC", self.page)
@lazy_property
def previous_revision(self):
    # The revision that this one replaced (None for the first revision)
    return self.backend.wiki._get_page("SELECT * FROM wiki \
        WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
        LIMIT 1", self.page, self.timestamp)

@property
def changes(self):
    # The commit message of this revision
    return self.data.changes
def check_acl(self, account):
    # Whether *account* may access this page
    return self.backend.wiki.check_acl(self.page, account)

# Diff

@lazy_property
def diff(self):
    # Unified diff against the previous revision (None for the first one)
    if self.previous_revision:
        diff = difflib.unified_diff(
            self.previous_revision.markdown.splitlines(),
            self.markdown.splitlines(),
        )

        return "\n".join(diff)
+
# Watchers

@property
def watchers(self):
    """
    Yields all accounts that watch this page.
    """
    res = self.db.query("SELECT uid FROM wiki_watchlist \
        WHERE page = %s", self.page)

    for row in res:
        # Search for account by UID and skip if none was found
        account = self.backend.accounts.get_by_uid(row.uid)
        if not account:
            continue

        # Return the account
        yield account

def is_watched_by(self, account):
    # Returns True when *account* has this page on its watchlist
    res = self.db.get("SELECT 1 FROM wiki_watchlist \
        WHERE page = %s AND uid = %s", self.page, account.uid)

    if res:
        return True

    return False

def add_watcher(self, account):
    # Puts this page on *account*'s watchlist (no-op if already watching)
    if self.is_watched_by(account):
        return

    self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
        VALUES(%s, %s)", self.page, account.uid)

def remove_watcher(self, account):
    # Takes this page off *account*'s watchlist
    self.db.execute("DELETE FROM wiki_watchlist \
        WHERE page = %s AND uid = %s", self.page, account.uid)
+
def _send_watcher_emails(self, excludes=None):
    """
    Emails everybody who watches this page about the change.

    Accounts listed in *excludes* (e.g. the author) are skipped.
    """
    # Fix: a mutable default argument ([]) is shared between all calls
    if excludes is None:
        excludes = []

    # Nothing to do if there was no previous revision
    if not self.previous_revision:
        return

    for watcher in self.watchers:
        # Skip everyone who is excluded
        if watcher in excludes:
            logging.debug("Excluding %s" % watcher)
            continue

        # Check permissions
        if not self.backend.wiki.check_acl(self.page, watcher):
            logging.debug("Watcher %s does not have permissions" % watcher)
            continue

        logging.debug("Sending watcher email to %s" % watcher)

        # Compose message
        self.backend.messages.send_template("wiki/messages/page-changed",
            account=watcher, page=self, priority=-10)
def restore(self, author, address, comment=None):
    """
    Restores this (older) revision by creating a new revision
    with the same content.
    """
    changes = "Restore to revision from %s" % self.timestamp.isoformat()

    # Append comment
    if comment:
        changes = "%s: %s" % (changes, comment)

    return self.backend.wiki.create_page(self.page,
        author, self.markdown, changes=changes, address=address)
class File(misc.Object):
self.id = id
self.data = data
def __eq__(self, other):
    """
    Files are equal when they share the same database ID.
    """
    # Defer to the other operand for foreign types
    if not isinstance(other, self.__class__):
        return NotImplemented

    return self.id == other.id
+
@property
def url(self):
    # Path under which this file is served
    return "/docs%s" % os.path.join(self.path, self.filename)
@property
def path(self):
def created_at(self):
return self.data.created_at
def delete(self, author=None):
    """
    Soft-deletes this file (the blob is kept for older revisions).

    Raises RuntimeError when the file is still linked by any page.
    """
    if not self.can_be_deleted():
        raise RuntimeError("Cannot delete %s" % self)

    self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
        WHERE id = %s", author.uid if author else None, self.id)

def can_be_deleted(self):
    # Cannot be deleted if still in use
    if self.pages:
        return False

    # Can be deleted
    return True

@property
def deleted_at(self):
    # Timestamp of the soft-delete (None while the file is alive)
    return self.data.deleted_at
+
def get_latest_revision(self):
    """
    Returns the most recent revision of this file, or None.
    """
    revisions = self.get_revisions()

    # Revisions are ordered newest-first, so the head is the latest
    return revisions[0] if revisions else None

def get_revisions(self):
    """
    Returns all revisions of this file, newest first.
    """
    rows = self.backend.wiki._get_files("SELECT * FROM wiki_files \
        WHERE path = %s AND filename = %s ORDER BY created_at DESC", self.path, self.filename)

    return list(rows)
+
def is_pdf(self):
    # Either of the two common PDF mimetypes
    return self.mimetype in {"application/pdf", "application/x-pdf"}

def is_image(self):
    # Any image type, vector or bitmap
    return self.mimetype.startswith("image/")

def is_vector_image(self):
    # SVG is the only vector format we handle
    return self.mimetype == "image/svg+xml"

def is_bitmap_image(self):
    # Bitmap images are the ones that can be thumbnailed
    if self.is_vector_image():
        return False

    return self.is_image()
+
@lazy_property
def blob(self):
res = self.db.get("SELECT data FROM wiki_blobs \
if res:
return bytes(res.data)
async def get_thumbnail(self, size):
    """
    Returns (and caches) a thumbnail of this image, scaled to *size* pixels.

    Raises ValueError when called for anything but a bitmap image.
    """
    # Fix: assert is stripped under -O; validate explicitly instead
    if not self.is_bitmap_image():
        raise ValueError("%s is not a bitmap image" % self)

    cache_key = "-".join((
        self.path,
        util.normalize(self.filename),
        self.created_at.isoformat(),
        "%spx" % size,
    ))

    # Try to fetch the data from the cache
    thumbnail = await self.backend.cache.get(cache_key)
    if thumbnail:
        return thumbnail

    # Generate the thumbnail
    thumbnail = util.generate_thumbnail(self.blob, size)

    # Put it into the cache for forever
    await self.backend.cache.set(cache_key, thumbnail)

    return thumbnail
@property
def pages(self):
    """
    Returns a list of all pages this file is linked by
    """
    pages = self.backend.wiki._get_pages("""
        SELECT
            wiki.*
        FROM
            wiki_linked_files
        JOIN
            wiki_current ON wiki_linked_files.page_id = wiki_current.id
        LEFT JOIN
            wiki ON wiki_linked_files.page_id = wiki.id
        WHERE
            wiki_linked_files.path = %s
        ORDER BY
            wiki.page
        """, os.path.join(self.path, self.filename),
    )

    return list(pages)
+
+
class WikiRenderer(misc.Object):
    """
    Renders wiki markdown to HTML and keeps track of all files
    that are referenced by the rendered markup.
    """
    # URL schemas that are treated as external links
    schemas = (
        "ftp://",
        "git://",
        "http://",
        "https://",
        "rsync://",
        "sftp://",
        "ssh://",
        "webcal://",
    )

    # Links
    _links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")

    # Images
    _images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")

    def init(self, path, text):
        self.path = path
        self.text = text

        # Markdown Renderer
        self.renderer = markdown.Markdown(
            extensions=[
                LinkedFilesExtractorExtension(),
                PrettyLinksExtension(),
                "codehilite",
                "fenced_code",
                "footnotes",
                "nl2br",
                "sane_lists",
                "tables",
                "toc",
            ],
        )

        # Render!
        self.html = self._render()

    def _render_link(self, m):
        """
        Rewrites a single rendered <a> tag: external links and email
        addresses are marked as external, everything else becomes an
        internal wiki link with the page's title as default text.
        """
        url, text = m.groups()

        # External Links
        for schema in self.schemas:
            if url.startswith(schema):
                return """<a class="link-external" href="%s">%s</a>""" % \
                    (url, text or url)

        # Emails
        if "@" in url:
            # Strip mailto:
            if url.startswith("mailto:"):
                url = url[7:]

            return """<a class="link-external" href="mailto:%s">%s</a>""" % \
                (url, text or url)

        # Everything else must be an internal link
        path = self.backend.wiki.make_path(self.path, url)

        return """<a href="/docs%s">%s</a>""" % \
            (path, text or self.backend.wiki.get_page_title(path))

    def _render_image(self, m):
        """
        Replaces a rendered <img> tag by a <figure> with a caption and
        a modal that shows the image in a higher resolution.
        """
        alt_text, url, caption = m.groups()

        # Compute a hash over the URL, used as a unique element ID for the modal
        # (renamed from "id" to avoid shadowing the builtin)
        h = hashlib.new("md5")
        h.update(url.encode())
        image_id = h.hexdigest()

        html = """
            <div class="columns is-centered">
                <div class="column is-8">
                    <figure class="image modal-trigger" data-target="%(id)s">
                        <img src="/docs%(url)s" alt="%(caption)s">

                        <figcaption class="figure-caption">%(caption)s</figcaption>
                    </figure>

                    <div class="modal is-large" id="%(id)s">
                        <div class="modal-background"></div>

                        <div class="modal-content">
                            <p class="image">
                                <img src="/docs%(plain_url)s?s=1920" alt="%(caption)s"
                                    loading="lazy">
                            </p>
                        </div>

                        <button class="modal-close is-large" aria-label="close"></button>
                    </div>
                </div>
            </div>
        """

        # Skip any absolute and external URLs
        if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
            return html % {
                "caption" : caption or "",
                "id" : image_id,
                "plain_url" : url,
                "url" : url,
            }

        # Try to split query string
        url, _, qs = url.partition("?")

        # Parse query arguments
        args = urllib.parse.parse_qs(qs)

        # Build absolute path
        plain_url = url = self.backend.wiki.make_path(self.path, url)

        # Find image
        file = self.backend.wiki.get_file_by_path(url)
        if not file or not file.is_image():
            return "<!-- Could not find image %s in %s -->" % (url, self.path)

        # Scale down the image if not already done
        if "s" not in args:
            args["s"] = "920"

        # Append arguments to the URL
        if args:
            url = "%s?%s" % (url, urllib.parse.urlencode(args))

        return html % {
            "caption" : caption or "",
            "id" : image_id,
            "plain_url" : plain_url,
            "url" : url,
        }

    def _render(self):
        # Runs the markdown renderer, then post-processes links and images
        logging.debug("Rendering %s" % self.path)

        # Render...
        text = self.renderer.convert(self.text)

        # Postprocess links
        text = self._links.sub(self._render_link, text)

        # Postprocess images to <figure>
        text = self._images.sub(self._render_image, text)

        return text

    @lazy_property
    def files(self):
        """
        A list of all linked files that have been part of the rendered markup
        """
        files = []

        for url in self.renderer.files:
            # Skip external links and images
            if url.startswith("https://") or url.startswith("http://"):
                continue

            # Make the URL absolute
            url = self.backend.wiki.make_path(self.path, url)

            # Check if this is a file (it could also just be a page)
            file = self.backend.wiki.get_file_by_path(url)
            if file:
                files.append(url)

        return files
+
+
class PrettyLinksExtension(markdown.extensions.Extension):
    """
    Registers preprocessors that automatically turn bug and
    CVE references into links.
    """
    def extendMarkdown(self, md):
        # Create links to Bugzilla
        md.preprocessors.register(BugzillaLinksPreprocessor(md), "bugzilla", 10)

        # Create links to CVE
        md.preprocessors.register(CVELinksPreprocessor(md), "cve", 10)
+
+
class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
    """
    Turns references like #12345 (five or more digits) into
    links to Bugzilla.
    """
    regex = re.compile(r"(?:#(\d{5,}))", re.I)

    def run(self, lines):
        # Fix: Preprocessor.run() must return a list of lines,
        # not a generator, per the Python-Markdown extension API
        return [
            self.regex.sub(r"[#\1](https://bugzilla.ipfire.org/show_bug.cgi?id=\1)", line)
            for line in lines
        ]
+
+
class CVELinksPreprocessor(markdown.preprocessors.Preprocessor):
    """
    Turns references like "CVE-2021-1234" (or "CVE 2021-1234")
    into links to the MITRE CVE database.
    """
    regex = re.compile(r"(?:CVE)[\s\-](\d{4}\-\d+)")

    def run(self, lines):
        # Fix: Preprocessor.run() must return a list of lines,
        # not a generator, per the Python-Markdown extension API
        return [
            self.regex.sub(r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)", line)
            for line in lines
        ]
+
+
class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
    """
    Finds all Linked Files
    """
    def run(self, root):
        # Collect URLs on the Markdown instance so that the renderer
        # can pick them up after conversion
        self.md.files = []

        # Find all images and store the URLs
        for image in root.findall(".//img"):
            src = image.get("src")

            self.md.files.append(src)

        # Find all links
        for link in root.findall(".//a"):
            href = link.get("href")

            self.md.files.append(href)
- with io.BytesIO() as f:
- # If writing out the image does not work with optimization,
- # we try to write it out without any optimization.
- try:
- image.save(f, image.format, optimize=True, quality=98)
- except:
- image.save(f, image.format, quality=98)
- return f.getvalue()
class LinkedFilesExtractorExtension(markdown.extensions.Extension):
    # Hooks the LinkedFilesExtractor into the Markdown pipeline
    def extendMarkdown(self, md):
        md.treeprocessors.register(LinkedFilesExtractor(md), "linked-files-extractor", 10)