# src/backend/wiki.py (gitweb scrape artifacts removed from this header)
import difflib
import logging
import os.path
import re
import urllib.parse

from . import misc
from . import util
from .decorators import *
# Interwiki link targets: shortcut -> (URL pattern, optional title pattern, optional icon class).
# The URL pattern is %-interpolated with "name" (raw page name) and "url" (URL-quoted name).
# NOTE(review): the dict's opening line was missing from the scraped source — reconstructed.
INTERWIKIS = {
	"google" : ("https://www.google.com/search?q=%(url)s", None, "fab fa-google"),
	"rfc"    : ("https://tools.ietf.org/html/rfc%(name)s", "RFC %s", None),
	"wp"     : ("https://en.wikipedia.org/wiki/%(name)s", None, "fab fa-wikipedia-w"),
}
class Wiki(misc.Object):
	def _get_pages(self, query, *args):
		"""Run *query* against the database and yield a Page for every row."""
		res = self.db.query(query, *args)

		# NOTE(review): the loop line was missing from the scraped source;
		# it is required by the row-wise yield below.
		for row in res:
			yield Page(self.backend, row.id, data=row)
27 def _get_page(self
, query
, *args
):
28 res
= self
.db
.get(query
, *args
)
31 return Page(self
.backend
, res
.id, data
=res
)
33 def get_page_title(self
, page
, default
=None):
34 # Try to retrieve title from cache
35 title
= self
.memcache
.get("wiki:title:%s" % page
)
39 # If the title has not been in the cache, we will
41 doc
= self
.get_page(page
)
45 title
= os
.path
.basename(page
)
47 # Save in cache for forever
48 self
.memcache
.set("wiki:title:%s" % page
, title
)
52 def get_page(self
, page
, revision
=None):
53 page
= Page
.sanitise_page_name(page
)
57 return self
._get
_page
("SELECT * FROM wiki WHERE page = %s \
58 AND timestamp = %s", page
, revision
)
60 return self
._get
_page
("SELECT * FROM wiki WHERE page = %s \
61 ORDER BY timestamp DESC LIMIT 1", page
)
63 def get_recent_changes(self
, account
, limit
=None):
64 pages
= self
._get
_pages
("SELECT * FROM wiki \
65 WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
66 ORDER BY timestamp DESC")
69 if not page
.check_acl(account
):
78 def create_page(self
, page
, author
, content
, changes
=None, address
=None):
79 page
= Page
.sanitise_page_name(page
)
81 # Write page to the database
82 page
= self
._get
_page
("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
83 VALUES(%s, %s, %s, %s, %s) RETURNING *", page
, author
.uid
, content
or None, changes
, address
)
86 self
.memcache
.set("wiki:title:%s" % page
.page
, page
.title
)
88 # Send email to all watchers
89 page
._send
_watcher
_emails
(excludes
=[author
])
93 def delete_page(self
, page
, author
, **kwargs
):
94 # Do nothing if the page does not exist
95 if not self
.get_page(page
):
98 # Just creates a blank last version of the page
99 self
.create_page(page
, author
=author
, content
=None, **kwargs
)
101 def make_breadcrumbs(self
, url
):
102 # Split and strip all empty elements (double slashes)
103 parts
= list(e
for e
in url
.split("/") if e
)
106 for part
in ("/".join(parts
[:i
]) for i
in range(1, len(parts
))):
107 ret
.append(("/%s" % part
, self
.get_page_title(part
, os
.path
.basename(part
))))
111 def search(self
, query
, account
=None, limit
=None):
112 query
= util
.parse_search_query(query
)
114 res
= self
._get
_pages
("SELECT wiki.* FROM wiki_search_index search_index \
115 LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
116 WHERE search_index.document @@ to_tsquery('english', %s) \
117 ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
122 # Skip any pages the user doesn't have permission for
123 if not page
.check_acl(account
):
126 # Return any other pages
129 # Break when we have found enough pages
130 if limit
and len(pages
) >= limit
:
137 Needs to be called after a page has been changed
139 self
.db
.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
143 def check_acl(self
, page
, account
):
144 res
= self
.db
.query("SELECT * FROM wiki_acls \
145 WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page
)
148 # Access not permitted when user is not logged in
152 # If user is in a matching group, we grant permission
153 for group
in row
.groups
:
154 if group
in account
.groups
:
157 # Otherwise access is not permitted
160 # If no ACLs are found, we permit access
165 def _get_files(self
, query
, *args
):
166 res
= self
.db
.query(query
, *args
)
169 yield File(self
.backend
, row
.id, data
=row
)
171 def _get_file(self
, query
, *args
):
172 res
= self
.db
.get(query
, *args
)
175 return File(self
.backend
, res
.id, data
=res
)
177 def get_files(self
, path
):
178 files
= self
._get
_files
("SELECT * FROM wiki_files \
179 WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path
)
183 def get_file_by_path(self
, path
):
184 path
, filename
= os
.path
.dirname(path
), os
.path
.basename(path
)
186 return self
._get
_file
("SELECT * FROM wiki_files \
187 WHERE path = %s AND filename = %s AND deleted_at IS NULL", path
, filename
)
189 def upload(self
, path
, filename
, data
, mimetype
, author
, address
):
190 # Upload the blob first
191 blob
= self
.db
.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data
)
193 # Create entry for file
194 return self
._get
_file
("INSERT INTO wiki_files(path, filename, author_uid, address, \
195 mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path
,
196 filename
, author
.uid
, address
, mimetype
, blob
.id, len(data
))
198 def find_image(self
, path
, filename
):
199 for p
in (path
, os
.path
.dirname(path
)):
200 file = self
.get_file_by_path(os
.path
.join(p
, filename
))
202 if file and file.is_image():
class Page(misc.Object):
	# External links, e.g. [[https://example.org|Example]]
	external_link = re.compile(r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))\]\]")

	# Interwiki links e.g. [[wp>IPFire]]
	interwiki_link = re.compile(r"\[\[(\w+)>(.+?)(?:\|(.+?))?\]\]")

	# Email links, e.g. [[user@example.org|Mail me]]
	email_link = re.compile(r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)(?:\|(.+?))?\]\]")
216 def init(self
, id, data
=None):
221 return "<%s %s %s>" % (self
.__class
__.__name
__, self
.page
, self
.timestamp
)
223 def __eq__(self
, other
):
224 if isinstance(other
, self
.__class
__):
225 return self
.id == other
.id
227 def __lt__(self
, other
):
228 if isinstance(other
, self
.__class
__):
229 if self
.page
== other
.page
:
230 return self
.timestamp
< other
.timestamp
232 return self
.page
< other
.page
235 def sanitise_page_name(page
):
239 # Make sure that the page name does NOT end with a /
240 if page
.endswith("/"):
243 # Make sure the page name starts with a /
244 if not page
.startswith("/"):
247 # Remove any double slashes
248 page
= page
.replace("//", "/")
258 return "https://wiki.ipfire.org%s" % self
.url
262 return self
.data
.page
266 return self
._title
or os
.path
.basename(self
.page
[1:])
270 if not self
.markdown
:
273 # Find first H1 headline in markdown
274 markdown
= self
.markdown
.splitlines()
276 m
= re
.match(r
"^# (.*)( #)?$", markdown
[0])
282 if self
.data
.author_uid
:
283 return self
.backend
.accounts
.get_by_uid(self
.data
.author_uid
)
285 def _render_external_link(self
, m
):
286 url
, alias
= m
.groups()
288 return """<a class="link-external" href="%s">%s</a>""" % (url
, alias
or url
)
290 def _render_interwiki_link(self
, m
):
297 url
, repl
, icon
= INTERWIKIS
[wiki
]
299 logging
.warning("Invalid interwiki: %s" % wiki
)
308 "url" : urllib
.parse
.quote(name
),
311 # Get alias (if present)
314 if not alias
and repl
:
317 # Put everything together
321 s
.append("<span class=\"%s\"></span>" % icon
)
323 s
.append("""<a class="link-external" href="%s">%s</a>""" % (url
, alias
or name
))
327 def _render_email_link(self
, m
):
328 address
, alias
= m
.groups()
330 return """<a class="link-external" href="mailto:%s">%s</a>""" \
331 % (address
, alias
or address
)
333 def _render(self
, text
):
334 logging
.debug("Rendering %s" % self
)
338 for match
in re
.finditer(r
"!\[(.*?)\]\((.*?)\)", text
):
339 alt_text
, url
= match
.groups()
341 # Skip any absolute and external URLs
342 if url
.startswith("/") or url
.startswith("https://") or url
.startswith("http://"):
345 # Try to split query string
346 url
, delimiter
, qs
= url
.partition("?")
348 # Parse query arguments
349 args
= urllib
.parse
.parse_qs(qs
)
352 file = self
.backend
.wiki
.find_image(self
.page
, url
)
356 # Scale down the image if not already done
361 url
= "%s?%s" % (file.url
, urllib
.parse
.urlencode(args
))
363 replacements
.append((match
.span(), file, alt_text
, url
))
365 # Apply all replacements
366 for (start
, end
), file, alt_text
, url
in reversed(replacements
):
367 text
= text
[:start
] + "[![%s](%s)](%s?action=detail)" % (alt_text
, url
, file.url
) + text
[end
:]
369 # Handle interwiki links
370 text
= self
.interwiki_link
.sub(self
._render
_interwiki
_link
, text
)
372 # Handle external links
373 text
= self
.external_link
.sub(self
._render
_external
_link
, text
)
376 text
= self
.email_link
.sub(self
._render
_email
_link
, text
)
380 (r
"\[\[([\w\d\/\-\.]+)(?:\|(.+?))\]\]", r
"\1", r
"\2", None, True),
381 (r
"\[\[([\w\d\/\-\.]+)\]\]", r
"\1", r
"\1", self
.backend
.wiki
.get_page_title
, True),
384 for pattern
, link
, title
, repl
, internal
in patterns
:
387 for match
in re
.finditer(pattern
, text
):
388 l
= match
.expand(link
)
389 t
= match
.expand(title
)
392 # Allow relative links
393 if not l
.startswith("/"):
394 l
= os
.path
.join(self
.page
, l
)
397 l
= os
.path
.normpath(l
)
402 replacements
.append((match
.span(), t
or l
, l
))
404 # Apply all replacements
405 for (start
, end
), t
, l
in reversed(replacements
):
406 text
= text
[:start
] + "[%s](%s)" % (t
, l
) + text
[end
:]
408 # Borrow this from the blog
409 return self
.backend
.blog
._render
_text
(text
, lang
="markdown")
413 return self
.data
.markdown
or ""
417 return self
._render
(self
.markdown
)
421 return self
.data
.timestamp
423 def was_deleted(self
):
424 return self
.markdown
is None
427 def breadcrumbs(self
):
428 return self
.backend
.wiki
.make_breadcrumbs(self
.page
)
430 def get_latest_revision(self
):
431 revisions
= self
.get_revisions()
433 # Return first object
434 for rev
in revisions
:
437 def get_revisions(self
):
438 return self
.backend
.wiki
._get
_pages
("SELECT * FROM wiki \
439 WHERE page = %s ORDER BY timestamp DESC", self
.page
)
442 def previous_revision(self
):
443 return self
.backend
.wiki
._get
_page
("SELECT * FROM wiki \
444 WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
445 LIMIT 1", self
.page
, self
.timestamp
)
449 return self
.data
.changes
453 def check_acl(self
, account
):
454 return self
.backend
.wiki
.check_acl(self
.page
, account
)
460 parts
= self
.page
.split("/")
463 sidebar
= self
.backend
.wiki
.get_page("%s/sidebar" % os
.path
.join(*parts
))
473 if self
.previous_revision
:
474 diff
= difflib
.unified_diff(
475 self
.previous_revision
.markdown
.splitlines(),
476 self
.markdown
.splitlines(),
479 return "\n".join(diff
)
483 res
= self
.db
.query("SELECT uid FROM wiki_watchlist \
484 WHERE page = %s", self
.page
)
487 # Search for account by UID and skip if none was found
488 account
= self
.backend
.accounts
.get_by_uid(row
.uid
)
495 def is_watched_by(self
, account
):
496 res
= self
.db
.get("SELECT 1 FROM wiki_watchlist \
497 WHERE page = %s AND uid = %s", self
.page
, account
.uid
)
504 def add_watcher(self
, account
):
505 if self
.is_watched_by(account
):
508 self
.db
.execute("INSERT INTO wiki_watchlist(page, uid) \
509 VALUES(%s, %s)", self
.page
, account
.uid
)
511 def remove_watcher(self
, account
):
512 self
.db
.execute("DELETE FROM wiki_watchlist \
513 WHERE page = %s AND uid = %s", self
.page
, account
.uid
)
515 def _send_watcher_emails(self
, excludes
=[]):
516 # Nothing to do if there was no previous revision
517 if not self
.previous_revision
:
520 for watcher
in self
.watchers
:
521 # Skip everyone who is excluded
522 if watcher
in excludes
:
523 logging
.debug("Excluding %s" % watcher
)
526 logging
.debug("Sending watcher email to %s" % watcher
)
529 self
.backend
.messages
.send_template("wiki/messages/page-changed",
530 recipients
=[watcher
], page
=self
, priority
=-10)
class File(misc.Object):
	def init(self, id, data):
		# Database row id and the raw row itself
		# NOTE(review): body reconstructed from a partial scrape — verify upstream
		self.id = id
		self.data = data

	@property
	def url(self):
		# The wiki path of this file (directory + filename)
		return os.path.join(self.path, self.filename)
544 return self
.data
.path
548 return self
.data
.filename
552 return self
.data
.mimetype
556 return self
.data
.size
560 if self
.data
.author_uid
:
561 return self
.backend
.accounts
.get_by_uid(self
.data
.author_uid
)
564 def created_at(self
):
565 return self
.data
.created_at
568 return self
.mimetype
in ("application/pdf", "application/x-pdf")
571 return self
.mimetype
.startswith("image/")
575 res
= self
.db
.get("SELECT data FROM wiki_blobs \
576 WHERE id = %s", self
.data
.blob_id
)
579 return bytes(res
.data
)
581 def get_thumbnail(self
, size
):
582 cache_key
= "-".join((self
.path
, util
.normalize(self
.filename
), self
.created_at
.isoformat(), "%spx" % size
))
584 # Try to fetch the data from the cache
585 thumbnail
= self
.memcache
.get(cache_key
)
589 # Generate the thumbnail
590 thumbnail
= util
.generate_thumbnail(self
.blob
, size
)
592 # Put it into the cache for forever
593 self
.memcache
.set(cache_key
, thumbnail
)