#!/usr/bin/python3
import PIL
+import PIL.ImageFilter
+import difflib
import io
import logging
import os.path
import re
+import tornado.gen
import urllib.parse
from . import misc
return Page(self.backend, res.id, data=res)
def get_page_title(self, page, default=None):
	"""
	Returns the title of the given wiki page.

	The title is looked up in memcache first; on a miss, the page
	is fetched and its title is cached indefinitely.

	If the page does not exist, the caller-supplied default (or the
	basename of the page path) is returned WITHOUT being cached, so
	that a page created later is not shadowed by a stale fallback.
	"""
	# Try to retrieve title from cache
	title = self.memcache.get("wiki:title:%s" % page)
	if title:
		return title

	# If the title has not been in the cache, we will
	# have to look it up
	doc = self.get_page(page)
	if not doc:
		# Honour the caller's default; do not poison the cache
		return default or os.path.basename(page)

	title = doc.title

	# Save in cache for forever
	self.memcache.set("wiki:title:%s" % page, title)

	return title
def get_page(self, page, revision=None):
page = Page.sanitise_page_name(page)
def create_page(self, page, author, content, changes=None, address=None):
	"""
	Stores a new revision of a wiki page and returns it.

	The page path is sanitised, the revision is inserted into the
	database, the cached title is refreshed, and every watcher
	except the author is notified by email.
	"""
	path = Page.sanitise_page_name(page)

	# Write page to the database
	new_page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
		VALUES(%s, %s, %s, %s, %s) RETURNING *", path, author.uid, content or None, changes, address)

	# Update cache
	self.memcache.set("wiki:title:%s" % new_page.path, new_page.title)

	# Send email to all watchers
	new_page._send_watcher_emails(excludes=[author])

	return new_page
+
def delete_page(self, page, author, **kwargs):
# Do nothing if the page does not exist
if not self.get_page(page):
ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
query, query)
+ pages = []
for page in res:
# Skip any pages the user doesn't have permission for
if not page.check_acl(account):
continue
# Return any other pages
- yield page
+ pages.append(page)
- limit -= 1
- if not limit:
+ # Break when we have found enough pages
+ if limit and len(pages) >= limit:
break
+ return pages
+
def refresh(self):
"""
Needs to be called after a page has been changed
def __repr__(self):
	# Debug representation, e.g. <Page /some/page 2019-01-01 12:00:00>
	return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)
def __eq__(self, other):
	"""
	Two page revisions are equal when they share the same
	database ID.
	"""
	if isinstance(other, self.__class__):
		return self.id == other.id

	# Let Python try the reflected comparison instead of
	# implicitly returning None (which is falsy)
	return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
if self.page == other.page:
def url(self):
	# The page path doubles as its URL path on the wiki
	return self.page
@property
def full_url(self):
	# Absolute URL of this page.
	# NOTE(review): hostname is hard-coded — confirm it should not
	# come from configuration instead
	return "https://wiki.ipfire.org%s" % self.url
@property
def page(self):
return self.data.page
@property
def title(self):
	"""The page title, or the last path component as a fallback."""
	if self._title:
		return self._title

	# No explicit title: fall back to the basename of the page
	# path (with the leading "/" stripped)
	return os.path.basename(self.page[1:])
@property
def _title(self):
# Link images
replacements = []
- for match in re.finditer(r"!\[(.*)\]\((.*)\)", text):
+ for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", text):
alt_text, url = match.groups()
# Skip any absolute and external URLs
# Add wiki links
patterns = (
- (r"\[\[([\w\d\/]+)(?:\|([\w\d\s]+))\]\]", r"/\1", r"\2", None, None),
- (r"\[\[([\w\d\/\-]+)\]\]", r"/\1", r"\1", self.backend.wiki.get_page_title, r"\1"),
+ (r"\[\[([\w\d\/\-\.]+)(?:\|(.+?))\]\]", r"\1", r"\2", None, True),
+ (r"\[\[([\w\d\/\-\.]+)\]\]", r"\1", r"\1", self.backend.wiki.get_page_title, True),
+
+ # External links
+ (r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))\]\]",
+ r"\1", r"\2", None, False),
+
+ # Mail
+ (r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)\]\]",
+ r"\1", r"\1", None, False),
)
- for pattern, link, title, repl, args in patterns:
+ for pattern, link, title, repl, internal in patterns:
replacements = []
for match in re.finditer(pattern, text):
l = match.expand(link)
t = match.expand(title)
+ if internal:
+ # Allow relative links
+ if not l.startswith("/"):
+ l = os.path.join(self.page, l)
+
+ # Normalise links
+ l = os.path.normpath(l)
+
if callable(repl):
- t = repl(match.expand(args)) or t
+ t = repl(l) or t
replacements.append((match.span(), t or l, l))
@property
def markdown(self):
	"""The raw markdown source of this revision (never None)."""
	# Normalise a NULL column value to an empty string
	source = self.data.markdown
	return source or ""
@property
def html(self):
	# Always render from the markdown source; the stored html
	# column is deliberately no longer read here
	return self._render(self.markdown)
@property
def timestamp(self):
return self.backend.wiki._get_pages("SELECT * FROM wiki \
WHERE page = %s ORDER BY timestamp DESC", self.page)
@lazy_property
def previous_revision(self):
	# The most recent revision of this page that is older than this
	# one, or None if this is the first revision
	return self.backend.wiki._get_page("SELECT * FROM wiki \
		WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
		LIMIT 1", self.page, self.timestamp)
@property
def changes(self):
	# The change message the author entered for this revision
	return self.data.changes
parts.pop()
+ # Watchers
+
@lazy_property
def diff(self):
	"""
	A unified diff between the previous revision and this one,
	or None if this is the first revision of the page.
	"""
	if not self.previous_revision:
		return None

	diff = difflib.unified_diff(
		self.previous_revision.markdown.splitlines(),
		self.markdown.splitlines(),
		# splitlines() strips the newlines, so stop unified_diff()
		# from appending "\n" to its ---/+++/@@ header lines — the
		# default lineterm would double-space the joined output
		lineterm="",
	)

	return "\n".join(diff)
@property
def watchers(self):
	"""Yields every account that is watching this page."""
	res = self.db.query("SELECT uid FROM wiki_watchlist \
		WHERE page = %s", self.page)

	for row in res:
		account = self.backend.accounts.get_by_uid(row.uid)

		# Skip any UIDs that no longer resolve to an account
		if not account:
			continue

		yield account
def is_watched_by(self, account):
	"""Returns True if the given account watches this page."""
	res = self.db.get("SELECT 1 FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)

	# db.get() returns a row or None; collapse to a boolean
	return bool(res)
+
def add_watcher(self, account):
	# Puts the account on this page's watchlist (no-op if already
	# watching).
	# NOTE(review): check-then-insert is racy under concurrent
	# requests; a UNIQUE constraint with ON CONFLICT DO NOTHING
	# would be safer — verify the wiki_watchlist schema
	if self.is_watched_by(account):
		return

	self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
		VALUES(%s, %s)", self.page, account.uid)
+
def remove_watcher(self, account):
	# Takes the account off this page's watchlist (no-op if the
	# account was not watching)
	self.db.execute("DELETE FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)
+
def _send_watcher_emails(self, excludes=None):
	"""
	Sends a "page changed" notification to everybody watching
	this page.

	excludes: accounts that must not be notified (e.g. the author
	of the change). Does nothing for the first revision of a page.
	"""
	# Avoid a mutable default argument (shared across calls)
	excludes = excludes or []

	# Nothing to do if there was no previous revision
	if not self.previous_revision:
		return

	for watcher in self.watchers:
		# Skip everyone who is excluded
		if watcher in excludes:
			logging.debug("Excluding %s", watcher)
			continue

		logging.debug("Sending watcher email to %s", watcher)

		# Compose message
		self.backend.messages.send_template("wiki/messages/page-changed",
			recipients=[watcher], page=self, priority=-10)
+
class File(misc.Object):
def init(self, id, data):
return thumbnail
def _generate_thumbnail(self, size, **args):
	"""
	Renders this file's image blob as a thumbnail that fits into a
	size x size bounding box and returns the encoded image bytes.

	Extra keyword arguments are passed through to PIL's save().
	"""
	image = PIL.Image.open(io.BytesIO(self.blob))

	# Remember the source format now: operations such as filter()
	# return new images whose .format attribute is unset, which
	# would make save() fail on a BytesIO later on
	format = image.format

	# JPEG cannot store an alpha channel; flatten anything that is
	# not plain RGB onto a white background first
	if format == "JPEG" and not image.mode == "RGB":
		# alpha_composite() requires two RGBA images
		image = image.convert("RGBA")

		# Make a white background
		background = PIL.Image.new("RGBA", image.size, (255, 255, 255))

		# Flatten both images together
		flattened_image = PIL.Image.alpha_composite(background, image)

		# Remove the alpha channel
		image = flattened_image.convert("RGB")

	# Resize the image to the desired resolution
	image.thumbnail((size, size), PIL.Image.LANCZOS)

	if format == "JPEG":
		# Apply a slight gaussian blur to make compression easier
		image = image.filter(PIL.ImageFilter.GaussianBlur(radius=0.05))

		# Arguments to optimise the compression
		args.update({
			"subsampling" : "4:2:0",
			"quality"     : 70,
		})

	with io.BytesIO() as f:
		# If writing out the image does not work with optimization,
		# we try to write it out without any optimization.
		try:
			image.save(f, format, optimize=True, **args)
		except Exception:
			image.save(f, format, **args)

		return f.getvalue()