#!/usr/bin/python3

import PIL.Image
import difflib
import io
import logging
import os.path
import re
import tornado.gen
import urllib.parse

from . import misc
from . import util
from .decorators import *

class Wiki(misc.Object):
	def _get_pages(self, query, *args):
		res = self.db.query(query, *args)

		for row in res:
			yield Page(self.backend, row.id, data=row)

	def _get_page(self, query, *args):
		res = self.db.get(query, *args)

		if res:
			return Page(self.backend, res.id, data=res)

	def get_page_title(self, page, default=None):
		doc = self.get_page(page)
		if doc:
			return doc.title

		return default or os.path.basename(page)

	def get_page(self, page, revision=None):
		page = Page.sanitise_page_name(page)
		assert page

		if revision:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				AND timestamp = %s", page, revision)
		else:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				ORDER BY timestamp DESC LIMIT 1", page)

	def get_recent_changes(self, account, limit=None):
		pages = self._get_pages("SELECT * FROM wiki \
			WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
			ORDER BY timestamp DESC")

		for page in pages:
			if not page.check_acl(account):
				continue

			yield page

			# Stop once the requested number of pages has been returned
			if limit:
				limit -= 1
				if not limit:
					break

	def create_page(self, page, author, content, changes=None, address=None):
		page = Page.sanitise_page_name(page)

		# Write page to the database
		page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
			VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)

		# Send email to all watchers
		page._send_watcher_emails(excludes=[author])

		return page

	def delete_page(self, page, author, **kwargs):
		# Do nothing if the page does not exist
		if not self.get_page(page):
			return

		# Just creates a blank last version of the page
		self.create_page(page, author=author, content=None, **kwargs)

	def make_breadcrumbs(self, url):
		# Split and strip all empty elements (double slashes)
		parts = list(e for e in url.split("/") if e)

		ret = []
		for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
			ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))

		return ret
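
	# For example, make_breadcrumbs("/products/firewall/hardware") returns the
	# parent pages only: [("/products", <title>), ("/products/firewall", <title>)],
	# where each title falls back to the path's basename if the page has no title
	# of its own. (The path is an illustrative value.)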

	def search(self, query, account=None, limit=None):
		query = util.parse_search_query(query)

		res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
			LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
			WHERE search_index.document @@ to_tsquery('english', %s) \
			ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
			query, query)

		for page in res:
			# Skip any pages the user doesn't have permission for
			if not page.check_acl(account):
				continue

			# Return any other pages
			yield page

			# Stop once the requested number of results has been returned
			if limit:
				limit -= 1
				if not limit:
					break
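
	# A call like search("captive portal", account) runs against the
	# wiki_search_index materialised view and yields pages ordered by ts_rank();
	# util.parse_search_query() is assumed to turn the raw user input into a
	# valid to_tsquery() expression (e.g. "captive & portal").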

	def refresh(self):
		"""
			Needs to be called after a page has been changed
		"""
		self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")

	# ACL

	def check_acl(self, page, account):
		res = self.db.query("SELECT * FROM wiki_acls \
			WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)

		for row in res:
			# Access not permitted when user is not logged in
			if not account:
				return False

			# If user is in a matching group, we grant permission
			for group in row.groups:
				if group in account.groups:
					return True

			# Otherwise access is not permitted
			return False

		# If no ACLs are found, we permit access
		return True
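
	# For example, an ACL row with path "/devel" and groups ["staff"] means that
	# only logged-in members of the "staff" group may access pages below /devel;
	# the longest matching path wins, and pages without any matching ACL stay
	# accessible to everyone. ("/devel" and "staff" are illustrative values.)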

	# Files

	def _get_files(self, query, *args):
		res = self.db.query(query, *args)

		for row in res:
			yield File(self.backend, row.id, data=row)

	def _get_file(self, query, *args):
		res = self.db.get(query, *args)

		if res:
			return File(self.backend, res.id, data=res)

	def get_files(self, path):
		files = self._get_files("SELECT * FROM wiki_files \
			WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path)

		return list(files)

	def get_file_by_path(self, path):
		path, filename = os.path.dirname(path), os.path.basename(path)

		return self._get_file("SELECT * FROM wiki_files \
			WHERE path = %s AND filename = %s AND deleted_at IS NULL", path, filename)
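
	# For example, get_file_by_path("/docs/network.png") is split into the
	# directory "/docs" and the filename "network.png" before the lookup;
	# deleted files (deleted_at set) are never returned.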

	def upload(self, path, filename, data, mimetype, author, address):
		# Upload the blob first
		blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data)

		# Create entry for file
		return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
			mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
			filename, author.uid, address, mimetype, blob.id, len(data))

	def find_image(self, path, filename):
		for p in (path, os.path.dirname(path)):
			file = self.get_file_by_path(os.path.join(p, filename))

			if file and file.is_image():
				return file
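
	# For example, an image "tux.png" referenced from the page "/docs/install"
	# is first looked up as "/docs/install/tux.png" and then, as a fallback, as
	# "/docs/tux.png"; non-image files are ignored.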


class Page(misc.Object):
	def init(self, id, data=None):
		self.id = id
		self.data = data

	def __repr__(self):
		return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)

	def __eq__(self, other):
		if isinstance(other, self.__class__):
			return self.id == other.id

	def __lt__(self, other):
		if isinstance(other, self.__class__):
			if self.page == other.page:
				return self.timestamp < other.timestamp

			return self.page < other.page

	@staticmethod
	def sanitise_page_name(page):
		if not page:
			return "/"

		# Make sure that the page name does NOT end with a /
		if page.endswith("/"):
			page = page[:-1]

		# Make sure the page name starts with a /
		if not page.startswith("/"):
			page = "/%s" % page

		# Remove any double slashes
		page = page.replace("//", "/")

		return page
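
	# For example, sanitise_page_name("wiki//page/") returns "/wiki/page",
	# and an empty page name is mapped to the root page "/".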

	@property
	def url(self):
		return self.page

	@property
	def full_url(self):
		return "https://wiki.ipfire.org%s" % self.url

	@property
	def page(self):
		return self.data.page

	@property
	def title(self):
		return self._title or os.path.basename(self.page[1:])

	@property
	def _title(self):
		if not self.markdown:
			return

		# Find first H1 headline in markdown
		markdown = self.markdown.splitlines()

		# Match non-greedily so that an optional closing " #" is stripped
		m = re.match(r"^# (.*?)( #)?$", markdown[0])
		if m:
			return m.group(1)

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	def _render(self, text):
		logging.debug("Rendering %s" % self)

		# Link images
		replacements = []
		for match in re.finditer(r"!\[(.*)\]\((.*)\)", text):
			alt_text, url = match.groups()

			# Skip any absolute and external URLs
			if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
				continue

			# Try to split query string
			url, delimiter, qs = url.partition("?")

			# Parse query arguments
			args = urllib.parse.parse_qs(qs)

			# Find image
			file = self.backend.wiki.find_image(self.page, url)
			if not file:
				continue

			# Scale down the image if not already done
			if "s" not in args:
				args["s"] = "768"

			# Format URL
			url = "%s?%s" % (file.url, urllib.parse.urlencode(args))

			replacements.append((match.span(), file, alt_text, url))

		# Apply all replacements
		for (start, end), file, alt_text, url in reversed(replacements):
			text = text[:start] + "[![%s](%s)](%s?action=detail)" % (alt_text, url, file.url) + text[end:]

		# Add wiki links
		patterns = (
			(r"\[\[([\w\d\/]+)(?:\|([\w\d\s]+))\]\]", r"/\1", r"\2", None),
			(r"\[\[([\w\d\/\-\.]+)\]\]", r"\1", r"\1", self.backend.wiki.get_page_title),
		)

		for pattern, link, title, repl in patterns:
			replacements = []

			for match in re.finditer(pattern, text):
				l = match.expand(link)
				t = match.expand(title)

				# Allow relative links
				if not l.startswith("/"):
					l = os.path.join(self.page, l)

				# Normalise links
				l = os.path.normpath(l)

				if callable(repl):
					t = repl(l) or t

				replacements.append((match.span(), t or l, l))

			# Apply all replacements
			for (start, end), t, l in reversed(replacements):
				text = text[:start] + "[%s](%s)" % (t, l) + text[end:]

		# Borrow this from the blog
		return self.backend.blog._render_text(text, lang="markdown")
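
	# _render() rewrites the raw markdown before handing it to the blog
	# renderer. For example, a local image reference like "![Alt](tux.png)"
	# becomes a thumbnail that links to its detail page,
	# "[![Alt](/path/tux.png?s=768)](/path/tux.png?action=detail)", and a wiki
	# link like "[[subpage]]" becomes "[<page title>](/current/page/subpage)".
	# (The paths shown here are illustrative values.)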

	@property
	def markdown(self):
		return self.data.markdown or ""

	@property
	def html(self):
		return self._render(self.markdown)

	@property
	def timestamp(self):
		return self.data.timestamp

	def was_deleted(self):
		# Deleted pages are stored as a revision without any content
		return not self.markdown

	@lazy_property
	def breadcrumbs(self):
		return self.backend.wiki.make_breadcrumbs(self.page)

	def get_latest_revision(self):
		revisions = self.get_revisions()

		# Return first object
		for rev in revisions:
			return rev

	def get_revisions(self):
		return self.backend.wiki._get_pages("SELECT * FROM wiki \
			WHERE page = %s ORDER BY timestamp DESC", self.page)

	@lazy_property
	def previous_revision(self):
		return self.backend.wiki._get_page("SELECT * FROM wiki \
			WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
			LIMIT 1", self.page, self.timestamp)

	@property
	def changes(self):
		return self.data.changes

	# ACL

	def check_acl(self, account):
		return self.backend.wiki.check_acl(self.page, account)

	# Sidebar

	@lazy_property
	def sidebar(self):
		parts = self.page.split("/")

		while parts:
			sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
			if sidebar:
				return sidebar

			parts.pop()

	# Watchers

	@lazy_property
	def diff(self):
		if self.previous_revision:
			diff = difflib.unified_diff(
				self.previous_revision.markdown.splitlines(),
				self.markdown.splitlines(),
			)

			return "\n".join(diff)

	@property
	def watchers(self):
		res = self.db.query("SELECT uid FROM wiki_watchlist \
			WHERE page = %s", self.page)

		for row in res:
			# Search for account by UID and skip if none was found
			account = self.backend.accounts.get_by_uid(row.uid)
			if not account:
				continue

			# Return the account
			yield account

	def is_watched_by(self, account):
		res = self.db.get("SELECT 1 FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

		if res:
			return True

		return False

	def add_watcher(self, account):
		if self.is_watched_by(account):
			return

		self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
			VALUES(%s, %s)", self.page, account.uid)

	def remove_watcher(self, account):
		self.db.execute("DELETE FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

	def _send_watcher_emails(self, excludes=[]):
		# Nothing to do if there was no previous revision
		if not self.previous_revision:
			return

		for watcher in self.watchers:
			# Skip everyone who is excluded
			if watcher in excludes:
				logging.debug("Excluding %s" % watcher)
				continue

			logging.debug("Sending watcher email to %s" % watcher)

			# Compose message
			self.backend.messages.send_template("wiki/messages/page-changed",
				recipients=[watcher], page=self, priority=-10)
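
	# Note that watcher notifications are only sent for edits: a freshly
	# created page has no previous revision, so _send_watcher_emails()
	# returns early and nobody is mailed.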


class File(misc.Object):
	def init(self, id, data):
		self.id = id
		self.data = data

	@property
	def url(self):
		return os.path.join(self.path, self.filename)

	@property
	def path(self):
		return self.data.path

	@property
	def filename(self):
		return self.data.filename

	@property
	def mimetype(self):
		return self.data.mimetype

	@property
	def size(self):
		return self.data.size

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	@property
	def created_at(self):
		return self.data.created_at

	def is_pdf(self):
		return self.mimetype in ("application/pdf", "application/x-pdf")

	def is_image(self):
		return self.mimetype.startswith("image/")

	@lazy_property
	def blob(self):
		res = self.db.get("SELECT data FROM wiki_blobs \
			WHERE id = %s", self.data.blob_id)

		if res:
			return bytes(res.data)

	def get_thumbnail(self, size):
		cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))

		# Try to fetch the data from the cache
		thumbnail = self.memcache.get(cache_key)
		if thumbnail:
			return thumbnail

		# Generate the thumbnail
		thumbnail = self._generate_thumbnail(size)

		# Put it into the cache forever
		self.memcache.set(cache_key, thumbnail)

		return thumbnail
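
	# For example, get_thumbnail(768) returns the encoded image data scaled to
	# at most 768x768 pixels; the result is cached in memcache under a key
	# derived from the file path, filename, upload time and requested size.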

	def _generate_thumbnail(self, size):
		image = PIL.Image.open(io.BytesIO(self.blob))

		# Resize the image to the desired resolution
		image.thumbnail((size, size), PIL.Image.ANTIALIAS)

		with io.BytesIO() as f:
			# If writing out the image does not work with optimization,
			# we try to write it out without any optimization.
			try:
				image.save(f, image.format, optimize=True, quality=98)
			except:
				image.save(f, image.format, quality=98)

			return f.getvalue()