]> git.ipfire.org Git - ipfire.org.git/blobdiff - src/backend/wiki.py
wiki: Store title of pages in memcache
[ipfire.org.git] / src / backend / wiki.py
index 3c8b04a5d02775d84e59911699fb0d3d08855d5a..b98dcd802740eb01d894e964c6a76a9fbe01550c 100644 (file)
@@ -1,11 +1,13 @@
 #!/usr/bin/python3
 
 import PIL
+import PIL.ImageFilter
 import difflib
 import io
 import logging
 import os.path
 import re
+import tornado.gen
 import urllib.parse
 
 from . import misc
@@ -26,11 +28,23 @@ class Wiki(misc.Object):
                        return Page(self.backend, res.id, data=res)
 
        def get_page_title(self, page, default=None):
+               # Try to retrieve title from cache
+               title = self.memcache.get("wiki:title:%s" % page)
+               if title:
+                       return title
+
+               # If the title was not in the cache, we will
+               # have to look it up
                doc = self.get_page(page)
                if doc:
-                       return doc.title
+                       title = doc.title
+               else:
+                       title = os.path.basename(page)
+
+               # Save in the cache indefinitely
+               self.memcache.set("wiki:title:%s" % page, title)
 
-               return default or os.path.basename(page)
+               return title
 
        def get_page(self, page, revision=None):
                page = Page.sanitise_page_name(page)
@@ -65,6 +79,9 @@ class Wiki(misc.Object):
                page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
                        VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)
 
+               # Update cache
+               self.memcache.set("wiki:title:%s" % page.path, page.title)
+
                # Send email to all watchers
                page._send_watcher_emails(excludes=[author])
 
@@ -97,18 +114,21 @@ class Wiki(misc.Object):
                                ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
                        query, query)
 
+               pages = []
                for page in res:
                        # Skip any pages the user doesn't have permission for
                        if not page.check_acl(account):
                                continue
 
                        # Return any other pages
-                       yield page
+                       pages.append(page)
 
-                       limit -= 1
-                       if not limit:
+                       # Break when we have found enough pages
+                       if limit and len(pages) >= limit:
                                break
 
+               return pages
+
        def refresh(self):
                """
                        Needs to be called after a page has been changed
@@ -255,7 +275,7 @@ class Page(misc.Object):
 
                # Link images
                replacements = []
-               for match in re.finditer(r"!\[(.*)\]\((.*)\)", text):
+               for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", text):
                        alt_text, url = match.groups()
 
                        # Skip any absolute and external URLs
@@ -288,19 +308,35 @@ class Page(misc.Object):
 
                # Add wiki links
                patterns = (
-                       (r"\[\[([\w\d\/]+)(?:\|([\w\d\s]+))\]\]", r"/\1", r"\2", None, None),
-                       (r"\[\[([\w\d\/\-]+)\]\]", r"/\1", r"\1", self.backend.wiki.get_page_title, r"\1"),
+                       (r"\[\[([\w\d\/\-\.]+)(?:\|(.+?))\]\]", r"\1", r"\2", None, True),
+                       (r"\[\[([\w\d\/\-\.]+)\]\]", r"\1", r"\1", self.backend.wiki.get_page_title, True),
+
+                       # External links
+                       (r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))\]\]",
+                               r"\1", r"\2", None, False),
+
+                       # Mail
+                       (r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)\]\]",
+                               r"\1", r"\1", None, False),
                )
 
-               for pattern, link, title, repl, args in patterns:
+               for pattern, link, title, repl, internal in patterns:
                        replacements = []
 
                        for match in re.finditer(pattern, text):
                                l = match.expand(link)
                                t = match.expand(title)
 
+                               if internal:
+                                       # Allow relative links
+                                       if not l.startswith("/"):
+                                               l = os.path.join(self.page, l)
+
+                                       # Normalise links
+                                       l = os.path.normpath(l)
+
                                if callable(repl):
-                                       t = repl(match.expand(args)) or t
+                                       t = repl(l) or t
 
                                replacements.append((match.span(), t or l, l))
 
@@ -317,7 +353,7 @@ class Page(misc.Object):
 
        @property
        def html(self):
-               return self.data.html or self._render(self.markdown)
+               return self._render(self.markdown)
 
        @property
        def timestamp(self):
@@ -497,18 +533,39 @@ class File(misc.Object):
 
                return thumbnail
 
-       def _generate_thumbnail(self, size):
+       def _generate_thumbnail(self, size, **args):
                image = PIL.Image.open(io.BytesIO(self.blob))
 
+               # Remove any alpha-channels
+               if image.format == "JPEG" and not image.mode == "RGB":
+                       # Make a white background
+                       background = PIL.Image.new("RGBA", image.size, (255,255,255))
+
+                       # Flatten both images together
+                       flattened_image = PIL.Image.alpha_composite(background, image)
+
+                       # Remove the alpha channel
+                       image = flattened_image.convert("RGB")
+
                # Resize the image to the desired resolution
-               image.thumbnail((size, size), PIL.Image.ANTIALIAS)
+               image.thumbnail((size, size), PIL.Image.LANCZOS)
+
+               if image.format == "JPEG":
+                       # Apply a gaussian blur to make compression easier
+                       image = image.filter(PIL.ImageFilter.GaussianBlur(radius=0.05))
+
+                       # Arguments to optimise the compression
+                       args.update({
+                               "subsampling" : "4:2:0",
+                               "quality"     : 70,
+                       })
 
                with io.BytesIO() as f:
                        # If writing out the image does not work with optimization,
                        # we try to write it out without any optimization.
                        try:
-                               image.save(f, image.format, optimize=True, quality=98)
+                               image.save(f, image.format, optimize=True, **args)
                        except:
-                               image.save(f, image.format, quality=98)
+                               image.save(f, image.format, **args)
 
                        return f.getvalue()