]> git.ipfire.org Git - ipfire.org.git/blob - src/backend/wiki.py
wiki: Store title of pages in memcache
[ipfire.org.git] / src / backend / wiki.py
1 #!/usr/bin/python3
2
import PIL
import PIL.Image
import PIL.ImageFilter
import difflib
import io
import logging
import os.path
import re
import tornado.gen
import urllib.parse

from . import misc
from . import util
from .decorators import *
16
class Wiki(misc.Object):
	"""
	High-level interface to the wiki: reading and writing pages and
	uploaded files, full-text search, and access control.
	"""

	def _get_pages(self, query, *args):
		# Yields a Page object for every row the query returns
		res = self.db.query(query, *args)

		for row in res:
			yield Page(self.backend, row.id, data=row)

	def _get_page(self, query, *args):
		# Returns a single Page object, or None when nothing matched
		res = self.db.get(query, *args)

		if res:
			return Page(self.backend, res.id, data=res)

	def get_page_title(self, page, default=None):
		"""
		Returns the title of a page, preferring the memcache copy.

		If the page does not exist, "default" is used, falling back
		to the basename of the page path when "default" is None.
		"""
		# Try to retrieve title from cache
		title = self.memcache.get("wiki:title:%s" % page)
		if title:
			return title

		# If the title has not been in the cache, we will
		# have to look it up
		doc = self.get_page(page)
		if doc:
			title = doc.title
		else:
			# Honour the caller-supplied default (it was previously ignored)
			title = default if default is not None else os.path.basename(page)

		# Save in cache for forever
		self.memcache.set("wiki:title:%s" % page, title)

		return title

	def get_page(self, page, revision=None):
		"""
		Returns the latest revision of a page, or the revision with
		the given timestamp. Returns None when the page does not exist.
		"""
		page = Page.sanitise_page_name(page)
		assert page

		if revision:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				AND timestamp = %s", page, revision)
		else:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				ORDER BY timestamp DESC LIMIT 1", page)

	def get_recent_changes(self, account, limit=None):
		"""
		Yields pages changed within the last four weeks that "account"
		is allowed to read, newest first. "limit" caps the number of
		results; None means unlimited.
		"""
		pages = self._get_pages("SELECT * FROM wiki \
			WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
			ORDER BY timestamp DESC")

		for page in pages:
			# Skip pages the account may not read
			if not page.check_acl(account):
				continue

			yield page

			# Stop once the limit is exhausted. The limit must only be
			# decremented when one was given ("None -= 1" raised a
			# TypeError after the first yielded page before).
			if limit is not None:
				limit -= 1
				if not limit:
					break

	def create_page(self, page, author, content, changes=None, address=None):
		"""
		Stores a new revision of "page" and notifies all watchers.
		Returns the newly created Page object.
		"""
		page = Page.sanitise_page_name(page)

		# Write page to the database
		page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
			VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)

		# Update cache (Page has no "path" attribute; the key must match
		# the one used by get_page_title, which is the page path string)
		self.memcache.set("wiki:title:%s" % page.page, page.title)

		# Send email to all watchers
		page._send_watcher_emails(excludes=[author])

		return page

	def delete_page(self, page, author, **kwargs):
		"""
		Deletes a page by storing a blank revision on top of it.
		"""
		# Do nothing if the page does not exist
		if not self.get_page(page):
			return

		# Just creates a blank last version of the page
		self.create_page(page, author=author, content=None, **kwargs)

	def make_breadcrumbs(self, url):
		"""
		Returns a list of (url, title) tuples for every ancestor of
		"url", from the root downwards (the page itself is excluded).
		"""
		# Split and strip all empty elements (double slashes)
		parts = list(e for e in url.split("/") if e)

		ret = []
		for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
			ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))

		return ret

	def search(self, query, account=None, limit=None):
		"""
		Performs a full-text search and returns up to "limit" pages
		that "account" may read, ranked by relevance.
		"""
		query = util.parse_search_query(query)

		res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
			LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
			WHERE search_index.document @@ to_tsquery('english', %s) \
			ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
			query, query)

		pages = []
		for page in res:
			# Skip any pages the user doesn't have permission for
			if not page.check_acl(account):
				continue

			# Return any other pages
			pages.append(page)

			# Break when we have found enough pages
			if limit and len(pages) >= limit:
				break

		return pages

	def refresh(self):
		"""
		Needs to be called after a page has been changed
		"""
		self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")

	# ACL

	def check_acl(self, page, account):
		"""
		Returns True when "account" may read "page".

		The longest matching ACL path wins; when no ACL matches, access
		is granted to everybody.
		"""
		res = self.db.query("SELECT * FROM wiki_acls \
			WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)

		for row in res:
			# Access not permitted when user is not logged in
			if not account:
				return False

			# If user is in a matching group, we grant permission
			for group in row.groups:
				if group in account.groups:
					return True

			# Otherwise access is not permitted
			return False

		# If no ACLs are found, we permit access
		return True

	# Files

	def _get_files(self, query, *args):
		# Yields a File object for every row the query returns
		res = self.db.query(query, *args)

		for row in res:
			yield File(self.backend, row.id, data=row)

	def _get_file(self, query, *args):
		# Returns a single File object, or None when nothing matched
		res = self.db.get(query, *args)

		if res:
			return File(self.backend, res.id, data=res)

	def get_files(self, path):
		"""
		Returns all files stored directly under "path" that have not
		been deleted, sorted by filename.
		"""
		files = self._get_files("SELECT * FROM wiki_files \
			WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path)

		return list(files)

	def get_file_by_path(self, path):
		"""
		Returns the file at the full path "path" (directory + filename),
		or None when it does not exist.
		"""
		path, filename = os.path.dirname(path), os.path.basename(path)

		return self._get_file("SELECT * FROM wiki_files \
			WHERE path = %s AND filename = %s AND deleted_at IS NULL", path, filename)

	def upload(self, path, filename, data, mimetype, author, address):
		"""
		Stores "data" as a blob and creates the file entry referencing
		it. Returns the new File object.
		"""
		# Upload the blob first
		blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data)

		# Create entry for file
		return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
			mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
			filename, author.uid, address, mimetype, blob.id, len(data))

	def find_image(self, path, filename):
		"""
		Searches for an image called "filename" in "path" and in its
		parent directory. Returns None when no image was found.
		"""
		for p in (path, os.path.dirname(path)):
			file = self.get_file_by_path(os.path.join(p, filename))

			if file and file.is_image():
				return file
202
class Page(misc.Object):
	"""
	A single revision of a wiki page.
	"""

	def init(self, id, data=None):
		self.id = id
		self.data = data

	def __repr__(self):
		return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)

	def __eq__(self, other):
		if isinstance(other, self.__class__):
			return self.id == other.id

		# Let the other operand have a try
		return NotImplemented

	def __hash__(self):
		# Defining __eq__ implicitly disables the inherited hash;
		# restore one that is consistent with __eq__ (by database id)
		return hash(self.id)

	def __lt__(self, other):
		if isinstance(other, self.__class__):
			if self.page == other.page:
				return self.timestamp < other.timestamp

			return self.page < other.page

	@staticmethod
	def sanitise_page_name(page):
		"""
		Normalises a page name: always absolute, no trailing slash,
		no double slashes. An empty name becomes "/".
		"""
		if not page:
			return "/"

		# Make sure that the page name does NOT end with a /
		if page.endswith("/"):
			page = page[:-1]

		# Make sure the page name starts with a /
		if not page.startswith("/"):
			page = "/%s" % page

		# Remove any double slashes
		page = page.replace("//", "/")

		return page

	@property
	def url(self):
		return self.page

	@property
	def full_url(self):
		return "https://wiki.ipfire.org%s" % self.url

	@property
	def page(self):
		# The path of this page (e.g. "/some/page")
		return self.data.page

	@property
	def title(self):
		# The H1 headline, or the basename of the path as fallback
		return self._title or os.path.basename(self.page[1:])

	@property
	def _title(self):
		# Extracts the title from the first line of the markdown when
		# it is an H1 headline; returns None otherwise
		if not self.markdown:
			return

		# Find first H1 headline in markdown
		markdown = self.markdown.splitlines()

		# Non-greedy match so that the optional trailing " #" is
		# actually stripped from the title (a greedy ".*" swallowed it)
		m = re.match(r"^# (.*?)( #)?$", markdown[0])
		if m:
			return m.group(1)

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	def _render(self, text):
		"""
		Pre-processes the raw markdown (inlines uploaded images and
		expands [[...]] wiki links) and renders it to HTML.
		"""
		logging.debug("Rendering %s" % self)

		# Link images
		replacements = []
		for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", text):
			alt_text, url = match.groups()

			# Skip any absolute and external URLs
			if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
				continue

			# Try to split query string
			url, delimiter, qs = url.partition("?")

			# Parse query arguments
			args = urllib.parse.parse_qs(qs)

			# Find image
			file = self.backend.wiki.find_image(self.page, url)
			if not file:
				continue

			# Scale down the image if not already done
			if not "s" in args:
				args["s"] = "768"

			# Format URL
			url = "%s?%s" % (file.url, urllib.parse.urlencode(args))

			replacements.append((match.span(), file, alt_text, url))

		# Apply all replacements (in reverse so earlier spans stay valid)
		for (start, end), file, alt_text, url in reversed(replacements):
			text = text[:start] + "[![%s](%s)](%s?action=detail)" % (alt_text, url, file.url) + text[end:]

		# Add wiki links
		patterns = (
			(r"\[\[([\w\d\/\-\.]+)(?:\|(.+?))\]\]", r"\1", r"\2", None, True),
			(r"\[\[([\w\d\/\-\.]+)\]\]", r"\1", r"\1", self.backend.wiki.get_page_title, True),

			# External links
			(r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))\]\]",
				r"\1", r"\2", None, False),

			# Mail
			(r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)\]\]",
				r"\1", r"\1", None, False),
		)

		for pattern, link, title, repl, internal in patterns:
			replacements = []

			for match in re.finditer(pattern, text):
				l = match.expand(link)
				t = match.expand(title)

				if internal:
					# Allow relative links
					if not l.startswith("/"):
						l = os.path.join(self.page, l)

					# Normalise links
					l = os.path.normpath(l)

				# Optionally look up a nicer title (e.g. the page title)
				if callable(repl):
					t = repl(l) or t

				replacements.append((match.span(), t or l, l))

			# Apply all replacements (again in reverse)
			for (start, end), t, l in reversed(replacements):
				text = text[:start] + "[%s](%s)" % (t, l) + text[end:]

		# Borrow this from the blog
		return self.backend.blog._render_text(text, lang="markdown")

	@property
	def markdown(self):
		# The raw markdown source; never None (deleted pages store NULL)
		return self.data.markdown or ""

	@property
	def html(self):
		return self._render(self.markdown)

	@property
	def timestamp(self):
		return self.data.timestamp

	def was_deleted(self):
		# Deleted pages are stored as a blank revision. The markdown
		# property never returns None, so the previous "is None" check
		# was always False; test for emptiness instead.
		return not self.markdown

	@lazy_property
	def breadcrumbs(self):
		return self.backend.wiki.make_breadcrumbs(self.page)

	def get_latest_revision(self):
		"""
		Returns the most recent revision of this page, or None.
		"""
		revisions = self.get_revisions()

		# Return first object
		for rev in revisions:
			return rev

	def get_revisions(self):
		"""
		Yields all revisions of this page, newest first.
		"""
		return self.backend.wiki._get_pages("SELECT * FROM wiki \
			WHERE page = %s ORDER BY timestamp DESC", self.page)

	@lazy_property
	def previous_revision(self):
		# The revision right before this one, or None for the first one
		return self.backend.wiki._get_page("SELECT * FROM wiki \
			WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
			LIMIT 1", self.page, self.timestamp)

	@property
	def changes(self):
		return self.data.changes

	# ACL

	def check_acl(self, account):
		return self.backend.wiki.check_acl(self.page, account)

	# Sidebar

	@lazy_property
	def sidebar(self):
		# Walks up the page hierarchy until a sidebar page is found
		parts = self.page.split("/")

		while parts:
			sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
			if sidebar:
				return sidebar

			parts.pop()

	# Watchers

	@lazy_property
	def diff(self):
		# Unified diff against the previous revision, or None when this
		# is the very first revision
		if self.previous_revision:
			diff = difflib.unified_diff(
				self.previous_revision.markdown.splitlines(),
				self.markdown.splitlines(),
			)

			return "\n".join(diff)

	@property
	def watchers(self):
		# Yields all accounts watching this page
		res = self.db.query("SELECT uid FROM wiki_watchlist \
			WHERE page = %s", self.page)

		for row in res:
			# Search for account by UID and skip if none was found
			account = self.backend.accounts.get_by_uid(row.uid)
			if not account:
				continue

			# Return the account
			yield account

	def is_watched_by(self, account):
		res = self.db.get("SELECT 1 FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

		if res:
			return True

		return False

	def add_watcher(self, account):
		if self.is_watched_by(account):
			return

		self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
			VALUES(%s, %s)", self.page, account.uid)

	def remove_watcher(self, account):
		self.db.execute("DELETE FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

	def _send_watcher_emails(self, excludes=None):
		"""
		Sends a change notification to everybody watching this page,
		except the accounts listed in "excludes".
		"""
		# Avoid the mutable-default-argument pitfall
		excludes = excludes or []

		# Nothing to do if there was no previous revision
		if not self.previous_revision:
			return

		for watcher in self.watchers:
			# Skip everyone who is excluded
			if watcher in excludes:
				logging.debug("Excluding %s" % watcher)
				continue

			logging.debug("Sending watcher email to %s" % watcher)

			# Compose message
			self.backend.messages.send_template("wiki/messages/page-changed",
				recipients=[watcher], page=self, priority=-10)
471
class File(misc.Object):
	"""
	A file (usually an image) that has been uploaded to the wiki.
	"""

	def init(self, id, data):
		self.id = id
		self.data = data

	@property
	def url(self):
		# The full path of the file (directory + filename)
		return os.path.join(self.path, self.filename)

	@property
	def path(self):
		return self.data.path

	@property
	def filename(self):
		return self.data.filename

	@property
	def mimetype(self):
		return self.data.mimetype

	@property
	def size(self):
		# Size of the blob in bytes
		return self.data.size

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	@property
	def created_at(self):
		return self.data.created_at

	def is_pdf(self):
		return self.mimetype in ("application/pdf", "application/x-pdf")

	def is_image(self):
		return self.mimetype.startswith("image/")

	@lazy_property
	def blob(self):
		# The raw file content, fetched lazily from the database
		res = self.db.get("SELECT data FROM wiki_blobs \
			WHERE id = %s", self.data.blob_id)

		if res:
			return bytes(res.data)

	def get_thumbnail(self, size):
		"""
		Returns the encoded thumbnail of this image with a maximum edge
		length of "size" pixels, using memcache when possible.
		"""
		cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))

		# Try to fetch the data from the cache
		thumbnail = self.memcache.get(cache_key)
		if thumbnail:
			return thumbnail

		# Generate the thumbnail
		thumbnail = self._generate_thumbnail(size)

		# Put it into the cache for forever
		self.memcache.set(cache_key, thumbnail)

		return thumbnail

	def _generate_thumbnail(self, size, **args):
		"""
		Decodes the blob, scales the image down to fit into a
		size x size box and returns the re-encoded image data.
		"""
		image = PIL.Image.open(io.BytesIO(self.blob))

		# Remember the original format right away: convert() and
		# filter() return images whose "format" attribute is None,
		# which would break both the JPEG check below and save() on a
		# file object that has no name to guess the format from
		format = image.format

		# Remove any alpha-channels (JPEG cannot encode them)
		if format == "JPEG" and not image.mode == "RGB":
			# Make a white background (RGBA needs four channel values)
			background = PIL.Image.new("RGBA", image.size, (255, 255, 255, 255))

			# Flatten both images together; alpha_composite requires
			# two RGBA inputs, so convert the source first
			flattened_image = PIL.Image.alpha_composite(background, image.convert("RGBA"))

			# Remove the alpha channel
			image = flattened_image.convert("RGB")

		# Resize the image to the desired resolution
		image.thumbnail((size, size), PIL.Image.LANCZOS)

		if format == "JPEG":
			# Apply a gaussian blur to make compression easier
			image = image.filter(PIL.ImageFilter.GaussianBlur(radius=0.05))

			# Arguments to optimise the compression
			args.update({
				"subsampling" : "4:2:0",
				"quality"     : 70,
			})

		with io.BytesIO() as f:
			# If writing out the image does not work with optimization,
			# we try to write it out without any optimization.
			try:
				image.save(f, format, optimize=True, **args)
			except Exception:
				image.save(f, format, **args)

			return f.getvalue()