]>
Commit | Line | Data |
---|---|---|
181d08f3 MT |
1 | #!/usr/bin/python3 |
2 | ||
import difflib
import hashlib
import logging
import os.path
import re
import urllib.parse

import markdown
import markdown.extensions
import markdown.preprocessors
import markdown.treeprocessors

from . import misc
from . import util
from .decorators import *
16 | ||
181d08f3 MT |
class Wiki(misc.Object):
	"""
	High-level interface to the wiki: pages, uploaded files,
	full-text search and access control.
	"""
	def _get_pages(self, query, *args):
		# Run the query and wrap every row into a Page object
		res = self.db.query(query, *args)

		for row in res:
			yield Page(self.backend, row.id, data=row)

	def _get_page(self, query, *args):
		# Run the query and wrap the single result (if any) into a Page object
		res = self.db.get(query, *args)

		if res:
			return Page(self.backend, res.id, data=res)

	def __iter__(self):
		# Iterate over the current revision of all pages that are not deleted
		return self._get_pages("""
			SELECT
				wiki.*
			FROM
				wiki_current current
			LEFT JOIN
				wiki ON current.id = wiki.id
			WHERE
				current.deleted IS FALSE
			ORDER BY page
			""",
		)

	def make_path(self, page, path):
		"""
		Resolves a (possibly relative) link target against the page
		it appears on and returns a normalised absolute wiki path.
		"""
		# Nothing to do for absolute links
		if path.startswith("/"):
			pass

		# Relative links (one-level down)
		elif path.startswith("./"):
			path = os.path.join(page, path)

		# All other relative links
		else:
			p = os.path.dirname(page)
			path = os.path.join(p, path)

		# Normalise links
		return os.path.normpath(path)

	def page_exists(self, path):
		"""
		Returns True if a page exists at path and has not been deleted
		"""
		page = self.get_page(path)

		# Page must have been found and not deleted
		return page and not page.was_deleted()

	def get_page_title(self, page, default=None):
		"""
		Returns the title of a page, falling back to default
		(or the basename of the path) when the page does not exist.
		"""
		doc = self.get_page(page)
		if doc:
			title = doc.title
		else:
			# Previously "default" was accepted but silently ignored
			title = default if default is not None else os.path.basename(page)

		return title

	def get_page(self, page, revision=None):
		"""
		Fetches a page (optionally at a specific revision timestamp).

		Returns None for action paths (any component starting with "_").
		"""
		page = Page.sanitise_page_name(page)

		# Split the path into parts
		parts = page.split("/")

		# Check if this is an action
		if any((part.startswith("_") for part in parts)):
			return

		if revision:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				AND timestamp = %s", page, revision)
		else:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				ORDER BY timestamp DESC LIMIT 1", page)

	def get_recent_changes(self, account, limit=None):
		"""
		Yields recently changed pages that account is allowed to read.

		If limit is given, at most that many pages are yielded.
		"""
		pages = self._get_pages("SELECT * FROM wiki \
			ORDER BY timestamp DESC")

		for page in pages:
			# Skip any pages the user has no permission for
			if not page.check_acl(account):
				continue

			yield page

			# Stop after limit pages
			# (previously "limit -= 1" raised TypeError when limit was None)
			if limit is not None:
				limit -= 1
				if not limit:
					break

	def create_page(self, page, author, content, changes=None, address=None):
		"""
		Stores a new revision of page and returns it
		"""
		page = Page.sanitise_page_name(page)

		# Write page to the database
		page = self._get_page("""
			INSERT INTO
				wiki
			(
				page,
				author_uid,
				markdown,
				changes,
				address
			) VALUES (
				%s, %s, %s, %s, %s
			)
			RETURNING *
			""", page, author.uid, content or None, changes, address,
		)

		# Store any linked files
		page._store_linked_files()

		# Send email to all watchers
		page._send_watcher_emails(excludes=[author])

		return page

	def delete_page(self, page, author, **kwargs):
		"""
		Deletes a page by storing an empty last revision
		"""
		# Do nothing if the page does not exist
		if not self.get_page(page):
			return

		# Just creates a blank last version of the page
		self.create_page(page, author=author, content=None, **kwargs)

	def make_breadcrumbs(self, url):
		"""
		Returns a list of (url, title) tuples for all parents of url
		"""
		# Split and strip all empty elements (double slashes)
		parts = list(e for e in url.split("/") if e)

		ret = []
		for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
			ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))

		return ret

	def search(self, query, account=None, limit=None):
		"""
		Full-text search over all pages, filtered by the account's
		permissions and optionally capped at limit results.
		"""
		res = self._get_pages("""
			SELECT
				wiki.*
			FROM
				wiki_search_index search_index
			LEFT JOIN
				wiki ON search_index.wiki_id = wiki.id
			WHERE
				search_index.document @@ websearch_to_tsquery('english', %s)
			ORDER BY
				ts_rank(search_index.document, websearch_to_tsquery('english', %s)) DESC
			""", query, query,
		)

		pages = []
		for page in res:
			# Skip any pages the user doesn't have permission for
			if not page.check_acl(account):
				continue

			# Return any other pages
			pages.append(page)

			# Break when we have found enough pages
			if limit and len(pages) >= limit:
				break

		return pages

	def refresh(self):
		"""
		Needs to be called after a page has been changed
		"""
		self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")

	def get_watchlist(self, account):
		"""
		Returns all pages on the watchlist of account (sorted)
		"""
		pages = self._get_pages("""
			WITH pages AS (
				SELECT
					*
				FROM
					wiki_current
				LEFT JOIN
					wiki ON wiki_current.id = wiki.id
			)

			SELECT
				*
			FROM
				wiki_watchlist watchlist
			JOIN
				pages ON watchlist.page = pages.page
			WHERE
				watchlist.uid = %s
			""", account.uid,
		)

		return sorted(pages)

	# ACL

	def check_acl(self, page, account):
		"""
		Returns True if account may read page.

		The longest matching ACL entry wins; access is granted when
		the user is a member of one of its groups. If no ACL matches,
		access is permitted.
		"""
		res = self.db.query("""
			SELECT
				*
			FROM
				wiki_acls
			WHERE
				%s ILIKE (path || '%%')
			ORDER BY
				LENGTH(path) DESC
			LIMIT 1
			""", page,
		)

		for row in res:
			# Access not permitted when user is not logged in
			if not account:
				return False

			# If user is in a matching group, we grant permission
			for group in row.groups:
				if account.is_member_of_group(group):
					return True

			# Otherwise access is not permitted
			return False

		# If no ACLs are found, we permit access
		return True

	# Files

	def _get_files(self, query, *args):
		# Run the query and wrap every row into a File object
		res = self.db.query(query, *args)

		for row in res:
			yield File(self.backend, row.id, data=row)

	def _get_file(self, query, *args):
		# Run the query and wrap the single result (if any) into a File object
		res = self.db.get(query, *args)

		if res:
			return File(self.backend, res.id, data=res)

	def get_files(self, path):
		"""
		Returns all (not deleted) files in path, ordered by filename
		"""
		files = self._get_files("""
			SELECT
				*
			FROM
				wiki_files
			WHERE
				path = %s
			AND
				deleted_at IS NULL
			ORDER BY filename
			""", path,
		)

		return list(files)

	def get_file_by_path(self, path, revision=None):
		"""
		Fetches a file by its full path, optionally the revision that
		was current at the given timestamp.
		"""
		path, filename = os.path.dirname(path), os.path.basename(path)

		if revision:
			# Fetch a specific revision
			return self._get_file("""
				SELECT
					*
				FROM
					wiki_files
				WHERE
					path = %s
				AND
					filename = %s
				AND
					created_at <= %s
				ORDER BY
					created_at DESC
				LIMIT 1
				""", path, filename, revision,
			)

		# Fetch latest version
		return self._get_file("""
			SELECT
				*
			FROM
				wiki_files
			WHERE
				path = %s
			AND
				filename = %s
			AND
				deleted_at IS NULL
			""", path, filename,
		)

	def get_file_by_path_and_filename(self, path, filename):
		"""
		Fetches the latest (not deleted) file at path/filename
		"""
		return self._get_file("""
			SELECT
				*
			FROM
				wiki_files
			WHERE
				path = %s
			AND
				filename = %s
			AND
				deleted_at IS NULL
			""", path, filename,
		)

	def upload(self, path, filename, data, mimetype, author, address):
		"""
		Stores an uploaded file (replacing any previous file of the
		same name) and returns the new File object.
		"""
		# Replace any existing files
		file = self.get_file_by_path_and_filename(path, filename)
		if file:
			file.delete(author)

		# Upload the blob first (deduplicated by MD5 digest)
		blob = self.db.get("""
			INSERT INTO
				wiki_blobs(data)
			VALUES
				(%s)
			ON CONFLICT
				(digest(data, %s))
			DO UPDATE
				SET data = EXCLUDED.data
			RETURNING id
			""", data, "MD5",
		)

		# Create entry for file
		return self._get_file("""
			INSERT INTO
				wiki_files
			(
				path,
				filename,
				author_uid,
				address,
				mimetype,
				blob_id,
				size
			) VALUES (
				%s, %s, %s, %s, %s, %s, %s
			)
			RETURNING *
			""", path, filename, author.uid, address, mimetype, blob.id, len(data),
		)

	def render(self, path, text, **kwargs):
		# Renders text as it would appear on page path
		return WikiRenderer(self.backend, path, text, **kwargs)
e2205cff | 368 | |
154f6179 | 369 | |
class Page(misc.Object):
	"""
	A single revision of a wiki page
	"""
	def init(self, id, data=None):
		self.id = id
		self.data = data

	def __repr__(self):
		return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)

	def __eq__(self, other):
		if isinstance(other, self.__class__):
			return self.id == other.id

		return NotImplemented

	def __lt__(self, other):
		if isinstance(other, self.__class__):
			if self.page == other.page:
				return self.timestamp < other.timestamp

			return self.page < other.page

		return NotImplemented

	def __hash__(self):
		return hash(self.page)

	@staticmethod
	def sanitise_page_name(page):
		"""
		Normalises a page name: a single leading slash, no trailing
		slash, and no repeated slashes anywhere in the path.
		"""
		if not page:
			return "/"

		# Collapse any repeated slashes first, so that a trailing "//"
		# is also caught by the check below
		# (a single str.replace() pass left "///" as "//")
		page = re.sub(r"/+", "/", page)

		# Make sure that the page name does NOT end with a /
		if page.endswith("/"):
			page = page[:-1]

		# Make sure the page name starts with a /
		if not page.startswith("/"):
			page = "/%s" % page

		return page

	@property
	def url(self):
		return "/docs%s" % self.page

	@property
	def full_url(self):
		return "https://www.ipfire.org%s" % self.url

	@property
	def page(self):
		return self.data.page

	@property
	def title(self):
		# Fall back to the basename of the path if there is no H1 headline
		return self._title or os.path.basename(self.page[1:])

	@property
	def _title(self):
		# Extracts the title from the first H1 headline of the markdown
		if not self.markdown:
			return

		# Find first H1 headline in markdown
		# (local renamed from "markdown" which shadowed the module)
		lines = self.markdown.splitlines()

		# Match "# Title" and strip any optional trailing hashes
		# (the previous greedy pattern never removed a trailing " #")
		m = re.match(r"^#\s*(.*?)(?:\s+#+)?\s*$", lines[0])
		if m:
			return m.group(1)

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	@property
	def markdown(self):
		# The raw markdown source (empty string for deleted pages)
		return self.data.markdown or ""

	@property
	def html(self):
		lines = []

		# Strip off the first line if it contains a heading (as it will be shown separately)
		for i, line in enumerate(self.markdown.splitlines()):
			if i == 0 and line.startswith("#"):
				continue

			lines.append(line)

		renderer = self.backend.wiki.render(self.page, "\n".join(lines), revision=self.timestamp)

		return renderer.html

	# Linked Files

	@property
	def files(self):
		# All files referenced by the rendered markup of this revision
		renderer = self.backend.wiki.render(self.page, self.markdown, revision=self.timestamp)

		return renderer.files

	def _store_linked_files(self):
		self.db.executemany("INSERT INTO wiki_linked_files(page_id, path) \
			VALUES(%s, %s)", ((self.id, file) for file in self.files))

	@property
	def timestamp(self):
		return self.data.timestamp

	def was_deleted(self):
		# Deleted pages are stored as an empty revision
		return not self.markdown

	@lazy_property
	def breadcrumbs(self):
		return self.backend.wiki.make_breadcrumbs(self.page)

	def is_latest_revision(self):
		return self.get_latest_revision() == self

	def get_latest_revision(self):
		revisions = self.get_revisions()

		# Return first object
		for rev in revisions:
			return rev

	def get_revisions(self):
		# All revisions of this page, newest first
		return self.backend.wiki._get_pages("SELECT * FROM wiki \
			WHERE page = %s ORDER BY timestamp DESC", self.page)

	@lazy_property
	def previous_revision(self):
		return self.backend.wiki._get_page("SELECT * FROM wiki \
			WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
			LIMIT 1", self.page, self.timestamp)

	@property
	def changes(self):
		return self.data.changes

	# ACL

	def check_acl(self, account):
		return self.backend.wiki.check_acl(self.page, account)

	# Watchers

	@lazy_property
	def diff(self):
		# Unified diff against the previous revision (None for the first one)
		if self.previous_revision:
			diff = difflib.unified_diff(
				self.previous_revision.markdown.splitlines(),
				self.markdown.splitlines(),
			)

			return "\n".join(diff)

	@property
	def watchers(self):
		res = self.db.query("SELECT uid FROM wiki_watchlist \
			WHERE page = %s", self.page)

		for row in res:
			# Search for account by UID and skip if none was found
			account = self.backend.accounts.get_by_uid(row.uid)
			if not account:
				continue

			# Return the account
			yield account

	def is_watched_by(self, account):
		res = self.db.get("SELECT 1 FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

		if res:
			return True

		return False

	def add_watcher(self, account):
		if self.is_watched_by(account):
			return

		self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
			VALUES(%s, %s)", self.page, account.uid)

	def remove_watcher(self, account):
		self.db.execute("DELETE FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

	def _send_watcher_emails(self, excludes=None):
		"""
		Notifies all watchers of this page about the change.

		excludes is an optional list of accounts that will not be
		notified (e.g. the author of the change).
		"""
		# Avoid a mutable default argument
		if excludes is None:
			excludes = []

		# Nothing to do if there was no previous revision
		if not self.previous_revision:
			return

		for watcher in self.watchers:
			# Skip everyone who is excluded
			if watcher in excludes:
				logging.debug("Excluding %s" % watcher)
				continue

			# Check permissions
			if not self.backend.wiki.check_acl(self.page, watcher):
				logging.debug("Watcher %s does not have permissions" % watcher)
				continue

			logging.debug("Sending watcher email to %s" % watcher)

			# Compose message
			self.backend.messages.send_template("wiki/messages/page-changed",
				account=watcher, page=self, priority=-10)

	def restore(self, author, address, comment=None):
		"""
		Restores this revision by storing it again as the newest one
		"""
		changes = "Restore to revision from %s" % self.timestamp.isoformat()

		# Append comment
		if comment:
			changes = "%s: %s" % (changes, comment)

		return self.backend.wiki.create_page(self.page,
			author, self.markdown, changes=changes, address=address)
595 | ||
f2cfd873 MT |
596 | |
class File(misc.Object):
	"""
	A file that has been uploaded to some wiki path
	"""
	def init(self, id, data):
		self.id = id
		self.data = data

	def __eq__(self, other):
		if not isinstance(other, self.__class__):
			return NotImplemented

		return self.id == other.id

	@property
	def url(self):
		# Files are served below /docs at their path + filename
		return "/docs%s" % os.path.join(self.path, self.filename)

	@property
	def path(self):
		return self.data.path

	@property
	def filename(self):
		return self.data.filename

	@property
	def mimetype(self):
		return self.data.mimetype

	@property
	def size(self):
		return self.data.size

	@lazy_property
	def author(self):
		uid = self.data.author_uid

		if uid:
			return self.backend.accounts.get_by_uid(uid)

	@property
	def created_at(self):
		return self.data.created_at

	# Alias for the upload time
	timestamp = created_at

	def delete(self, author=None):
		# Refuse to delete a file that is still in use
		if not self.can_be_deleted():
			raise RuntimeError("Cannot delete %s" % self)

		deleted_by = author.uid if author else None

		self.db.execute("UPDATE wiki_files SET deleted_at = NOW(), deleted_by = %s \
			WHERE id = %s", deleted_by, self.id)

	def can_be_deleted(self):
		# A file may only be deleted when no page links to it any more
		return not self.pages

	@property
	def deleted_at(self):
		return self.data.deleted_at

	def get_latest_revision(self):
		# Revisions are sorted newest first, so return the first one
		for revision in self.get_revisions():
			return revision

	def get_revisions(self):
		# Fetch all revisions of this file, newest first
		return list(self.backend.wiki._get_files("SELECT * FROM wiki_files \
			WHERE path = %s AND filename = %s ORDER BY created_at DESC", self.path, self.filename))

	def is_pdf(self):
		return self.mimetype in ("application/pdf", "application/x-pdf")

	def is_image(self):
		return self.mimetype.startswith("image/")

	def is_vector_image(self):
		return self.mimetype in ("image/svg+xml",)

	def is_bitmap_image(self):
		return self.is_image() and not self.is_vector_image()

	@lazy_property
	def blob(self):
		# Fetch the raw payload from the blob store
		row = self.db.get("SELECT data FROM wiki_blobs \
			WHERE id = %s", self.data.blob_id)

		if row:
			return bytes(row.data)

	async def get_thumbnail(self, size, format=None):
		"""
		Returns (and caches) a thumbnail of this bitmap image
		"""
		assert self.is_bitmap_image()

		key = ":".join((
			"wiki",
			"thumbnail",
			self.path,
			util.normalize(self.filename),
			self.created_at.isoformat(),
			format or "N/A",
			"%spx" % size,
		))

		# Try to fetch the data from the cache
		cached = await self.backend.cache.get(key)
		if cached:
			return cached

		# Generate the thumbnail
		thumbnail = util.generate_thumbnail(self.blob, size, format=format, quality=95)

		# Put it into the cache for forever
		await self.backend.cache.set(key, thumbnail)

		return thumbnail

	@property
	def pages(self):
		"""
		Returns a list of all pages this file is linked by
		"""
		pages = self.backend.wiki._get_pages("""
			SELECT
				wiki.*
			FROM
				wiki_linked_files
			JOIN
				wiki_current ON wiki_linked_files.page_id = wiki_current.id
			LEFT JOIN
				wiki ON wiki_linked_files.page_id = wiki.id
			WHERE
				wiki_linked_files.path = %s
			ORDER BY
				wiki.page
			""", os.path.join(self.path, self.filename),
		)

		return list(pages)
739 | ||
2901b734 MT |
740 | |
class WikiRenderer(misc.Object):
	"""
	Renders wiki markdown into HTML and postprocesses all links
	and images found in the output.
	"""
	schemas = (
		"ftp://",
		"git://",
		"http://",
		"https://",
		"rsync://",
		"sftp://",
		"ssh://",
		"webcal://",
	)

	# Links
	_links = re.compile(r"<a href=\"(.*?)\">(.*?)</a>")

	# Images
	_images = re.compile(r"<img alt(?:=\"(.*?)\")? src=\"(.*?)\" (?:title=\"(.*?)\" )?/>")

	def init(self, path, text, revision=None):
		self.path = path
		self.text = text

		# Optionally, the revision of the rendered page
		self.revision = revision

		# Markdown Renderer
		self.renderer = markdown.Markdown(
			extensions=[
				LinkedFilesExtractorExtension(),
				PrettyLinksExtension(),
				"codehilite",
				"fenced_code",
				"footnotes",
				"nl2br",
				"sane_lists",
				"tables",
				"toc",
			],
		)

		# Render!
		self.html = self._render()

	def _render_link(self, m):
		"""
		Rewrites a rendered <a> tag: external links and email
		addresses are marked as external, everything else is
		resolved as an internal wiki path.
		"""
		url, text = m.groups()

		# External Links
		for schema in self.schemas:
			if url.startswith(schema):
				return """<a class="link-external" href="%s">%s</a>""" % \
					(url, text or url)

		# Emails
		if "@" in url:
			# Strip mailto:
			if url.startswith("mailto:"):
				url = url[7:]

			return """<a class="link-external" href="mailto:%s">%s</a>""" % \
				(url, text or url)

		# Everything else must be an internal link
		path = self.backend.wiki.make_path(self.path, url)

		return """<a href="/docs%s">%s</a>""" % \
			(path, text or self.backend.wiki.get_page_title(path))

	def _render_image(self, m):
		"""
		Rewrites a rendered <img> tag into a <figure> with a modal
		that shows the large version of the image.
		"""
		alt_text, url, caption = m.groups()

		# Compute a hash over the URL
		h = hashlib.new("md5")
		h.update(url.encode())
		id = h.hexdigest()

		html = """
			<div class="columns is-centered">
				<div class="column is-8">
					<figure class="image modal-trigger" data-target="%(id)s">
						<img src="/docs%(url)s?s=640&%(args)s" alt="%(caption)s">

						<figcaption class="figure-caption">%(caption)s</figcaption>
					</figure>

					<div class="modal is-large" id="%(id)s">
						<div class="modal-background"></div>

						<div class="modal-content">
							<p class="image">
								<img src="/docs%(url)s?s=1920&%(args)s" alt="%(caption)s"
									loading="lazy">
							</p>

							<a class="button is-small" href="/docs%(url)s?action=detail">
								<span class="icon">
									<i class="fa-solid fa-circle-info"></i>
								</span>
							</a>
						</div>

						<button class="modal-close is-large" aria-label="close"></button>
					</div>
				</div>
			</div>
		"""

		# Try to split query string
		url, delimiter, qs = url.partition("?")

		# Parse query arguments
		args = urllib.parse.parse_qs(qs)

		# Skip any absolute and external URLs
		if url.startswith("https://") or url.startswith("http://"):
			return html % {
				"caption" : caption or "",
				"id"      : id,
				"url"     : url,
				# Encode the arguments properly
				# (previously the raw dict was interpolated into the URL)
				"args"    : urllib.parse.urlencode(args, doseq=True),
			}

		# Build absolute path
		url = self.backend.wiki.make_path(self.path, url)

		# Find image
		file = self.backend.wiki.get_file_by_path(url, revision=self.revision)
		if not file or not file.is_image():
			return "<!-- Could not find image %s in %s -->" % (url, self.path)

		# Remove any requested size
		if "s" in args:
			del args["s"]

		# Link the image that has been the current version at the time of the page edit
		args["revision"] = file.timestamp

		return html % {
			"caption" : caption or "",
			"id"      : id,
			"url"     : url,
			# doseq encodes the list values from parse_qs correctly
			"args"    : urllib.parse.urlencode(args, doseq=True),
		}

	def _render(self):
		logging.debug("Rendering %s" % self.path)

		# Render...
		text = self.renderer.convert(self.text)

		# Postprocess links
		text = self._links.sub(self._render_link, text)

		# Postprocess images to <figure>
		text = self._images.sub(self._render_image, text)

		return text

	@lazy_property
	def files(self):
		"""
		A list of all linked files that have been part of the rendered markup
		"""
		files = []

		for url in self.renderer.files:
			# Skip anchors without a target
			# (link.get("href") may have collected None)
			if not url:
				continue

			# Skip external images
			if url.startswith("https://") or url.startswith("http://"):
				continue

			# Make the URL absolute
			url = self.backend.wiki.make_path(self.path, url)

			# Check if this is a file (it could also just be a page)
			file = self.backend.wiki.get_file_by_path(url)
			if file:
				files.append(url)

		return files
5ab70651 MT |
920 | |
921 | ||
class PrettyLinksExtension(markdown.extensions.Extension):
	"""
	Registers the preprocessors that automatically link Bugzilla
	bug IDs and CVE identifiers.
	"""
	def extendMarkdown(self, md):
		# Create links to Bugzilla and to CVEs
		for name, preprocessor in (
			("bugzilla", BugzillaLinksPreprocessor),
			("cve", CVELinksPreprocessor),
		):
			md.preprocessors.register(preprocessor(md), name, 10)
929 | ||
930 | ||
class BugzillaLinksPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns "#NNNNN" (five or more digits) into a Bugzilla link
	"""
	regex = re.compile(r"(?:#(\d{5,}))", re.I)

	def run(self, lines):
		replacement = r"[#\1](https://bugzilla.ipfire.org/show_bug.cgi?id=\1)"

		for line in lines:
			yield self.regex.sub(replacement, line)
937 | ||
938 | ||
class CVELinksPreprocessor(markdown.preprocessors.Preprocessor):
	"""
	Turns "CVE-YYYY-NNNN" identifiers into links to the CVE database
	"""
	regex = re.compile(r"(?:CVE)[\s\-](\d{4}\-\d+)")

	def run(self, lines):
		replacement = r"[CVE-\1](https://cve.mitre.org/cgi-bin/cvename.cgi?name=\1)"

		for line in lines:
			yield self.regex.sub(replacement, line)
945 | ||
946 | ||
class LinkedFilesExtractor(markdown.treeprocessors.Treeprocessor):
	"""
	Finds all Linked Files
	"""
	def run(self, root):
		# Collect all image sources first, then all link targets
		self.md.files = [image.get("src") for image in root.findall(".//img")]
		self.md.files += [link.get("href") for link in root.findall(".//a")]
965 | ||
5ab70651 | 966 | |
class LinkedFilesExtractorExtension(markdown.extensions.Extension):
	"""
	Registers the LinkedFilesExtractor treeprocessor
	"""
	def extendMarkdown(self, md):
		extractor = LinkedFilesExtractor(md)

		md.treeprocessors.register(extractor, "linked-files-extractor", 10)