# Normalise links
return os.path.normpath(path)
- def page_exists(self, path):
- page = self.get_page(path)
+ def _make_url(self, path):
+ """
+ Composes the URL out of the path
+ """
+ # Remove any leading slashes (if present)
+ path = path.lstrip("/")
- # Page must have been found and not deleted
- return page and not page.was_deleted()
+ return os.path.join("/docs", path)
def get_page_title(self, page, default=None):
doc = self.get_page(page)
# Just creates a blank last version of the page
self.create_page(page, author=author, content=None, **kwargs)
- def make_breadcrumbs(self, url):
- # Split and strip all empty elements (double slashes)
- parts = list(e for e in url.split("/") if e)
-
+ def make_breadcrumbs(self, path):
ret = []
- for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
- ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))
- return ret
+ while path:
+ # Cut off everything after the last slash
+ path, _, _ = path.rpartition("/")
+
+ # Do not include the root
+ if not path:
+ break
+
+ # Find the page
+ page = self.get_page(path)
+
+ # Append the URL and title to the output
+ ret.append((
+ page.url if page else self._make_url(path),
+ page.title if page else os.path.basename(path),
+ ))
+
+ # Return the breadcrumbs in order (root first), as a list
+ return list(reversed(ret))
def search(self, query, account=None, limit=None):
res = self._get_pages("""
"""
Needs to be called after a page has been changed
"""
- self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
+ self.db.execute("REFRESH MATERIALIZED VIEW CONCURRENTLY wiki_search_index")
def get_watchlist(self, account):
pages = self._get_pages("""
return NotImplemented
+ def __hash__(self):
+ return hash(self.page)
+
@staticmethod
def sanitise_page_name(page):
if not page:
@property
def url(self):
- return "/docs%s" % self.page
+ return self.backend.wiki._make_url(self.page)
@property
def full_url(self):
if res:
return bytes(res.data)
- async def get_thumbnail(self, size):
+ async def get_thumbnail(self, size, format=None):
assert self.is_bitmap_image()
- cache_key = "-".join((
+ # Let thumbnails live in the cache for up to 24h
+ ttl = 24 * 3600
+
+ cache_key = ":".join((
+ "wiki",
+ "thumbnail",
self.path,
util.normalize(self.filename),
self.created_at.isoformat(),
+ format or "N/A",
"%spx" % size,
))
# Try to fetch the data from the cache
- thumbnail = await self.backend.cache.get(cache_key)
+ async with await self.backend.cache.pipeline() as p:
+ # Fetch the key
+ await p.get(cache_key)
+
+ # Reset the TTL
+ await p.expire(cache_key, ttl)
+
+ # Execute the pipeline
+ thumbnail, _ = await p.execute()
+
+ # Return the cached value
if thumbnail:
return thumbnail
# Generate the thumbnail
- thumbnail = util.generate_thumbnail(self.blob, size)
+ thumbnail = util.generate_thumbnail(self.blob, size, format=format, quality=95)
- # Put it into the cache for forever
- await self.backend.cache.set(cache_key, thumbnail)
+ # Put it into the cache for 24h
+ await self.backend.cache.set(cache_key, thumbnail, ttl)
return thumbnail
self.revision = revision
# Markdown Renderer
- self.renderer = markdown.Markdown(
+ self.renderer = Markdown(
+ self.backend,
extensions=[
LinkedFilesExtractorExtension(),
PrettyLinksExtension(),
def _render_link(self, m):
url, text = m.groups()
+ # Treat links starting with a double slash as absolute
+ if url.startswith("//"):
+ # Remove one of the slashes ("//x" -> "/x")
+ url = url.removeprefix("/")
+
+ # Return a link
+ return """<a href="%s">%s</a>""" % (url, text or url)
+
# External Links
for schema in self.schemas:
if url.startswith(schema):
<div class="columns is-centered">
<div class="column is-8">
<figure class="image modal-trigger" data-target="%(id)s">
- <img src="/docs%(url)s?s=640&%(args)s" alt="%(caption)s">
+ <img src="/docs%(url)s?s=960&%(args)s" alt="%(caption)s">
<figcaption class="figure-caption">%(caption)s</figcaption>
</figure>
<div class="modal-content">
<p class="image">
- <img src="/docs%(url)s?s=1920&%(args)s" alt="%(caption)s"
+ <img src="/docs%(url)s?s=2048&%(args)s" alt="%(caption)s"
loading="lazy">
</p>
</div>
"""
+ # Try to split query string
+ url, delimiter, qs = url.partition("?")
+
+ # Parse query arguments
+ args = urllib.parse.parse_qs(qs)
+
# Skip any absolute and external URLs
if url.startswith("https://") or url.startswith("http://"):
return html % {
"caption" : caption or "",
"id" : id,
- "plain_url" : url,
"url" : url,
+ "args" : urllib.parse.urlencode(args, doseq=True),
}
- # Try to split query string
- url, delimiter, qs = url.partition("?")
-
- # Parse query arguments
- args = urllib.parse.parse_qs(qs)
-
# Build absolute path
url = self.backend.wiki.make_path(self.path, url)
return files
+class Markdown(markdown.Markdown):
+ def __init__(self, backend, *args, **kwargs):
+ # Store the backend
+ self.backend = backend
+
+ # Call inherited setup routine
+ super().__init__(*args, **kwargs)
+
+
class PrettyLinksExtension(markdown.extensions.Extension):
def extendMarkdown(self, md):
# Create links to Bugzilla
# Create links to CVE
md.preprocessors.register(CVELinksPreprocessor(md), "cve", 10)
+ # Link mentioned users
+ md.preprocessors.register(UserMentionPreprocessor(md), "user-mention", 10)
+
class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
regex = re.compile(r"(?:#(\d{5,}))", re.I)
yield self.regex.sub(r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)", line)
+class UserMentionPreprocessor(markdown.preprocessors.Preprocessor):
+ regex = re.compile(r"\B@(\w+)")
+
+ def run(self, lines):
+ for line in lines:
+ yield self.regex.sub(self._replace, line)
+
+ def _replace(self, m):
+ # Fetch the user's handle
+ uid, = m.groups()
+
+ # Fetch the user
+ user = self.md.backend.accounts.get_by_uid(uid)
+
+ # If the user was not found, we put back the matched text
+ if not user:
+ return m.group(0)
+
+ # Link the user
+ return "[%s](//users/%s)" % (user, user.uid)
+
+
class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
"""
Finds all Linked Files
"""
- def run(self, root):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
self.md.files = []
+ def run(self, root):
# Find all images and store the URLs
for image in root.findall(".//img"):
src = image.get("src")