#!/usr/bin/python3

import difflib
import io
import logging
import os.path
import re
import urllib.parse

import PIL.Image
import PIL.ImageFilter

from . import misc
from . import util
from .decorators import *


class Wiki(misc.Object):
	def _get_pages(self, query, *args):
		res = self.db.query(query, *args)

		for row in res:
			yield Page(self.backend, row.id, data=row)

	def _get_page(self, query, *args):
		res = self.db.get(query, *args)

		if res:
			return Page(self.backend, res.id, data=res)

	def get_page_title(self, page, default=None):
		doc = self.get_page(page)
		if doc:
			return doc.title

		return default or os.path.basename(page)

	def get_page(self, page, revision=None):
		page = Page.sanitise_page_name(page)

		# Fetch a specific revision if one was requested
		if revision:
			return self._get_page("SELECT * FROM wiki WHERE page = %s \
				AND timestamp = %s", page, revision)

		# Otherwise return the latest revision
		return self._get_page("SELECT * FROM wiki WHERE page = %s \
			ORDER BY timestamp DESC LIMIT 1", page)
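
	# For example, get_page("/download") returns the newest revision of that
	# page (or None if it has never been created), while passing a timestamp
	# as revision= returns the revision written at exactly that time.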

	def get_recent_changes(self, account, limit=None):
		pages = self._get_pages("SELECT * FROM wiki \
			WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
			ORDER BY timestamp DESC")

		for page in pages:
			# Skip any pages the user does not have permission for
			if not page.check_acl(account):
				continue

			yield page

			# Stop once enough pages have been returned
			if limit:
				limit -= 1
				if not limit:
					break

	def create_page(self, page, author, content, changes=None, address=None):
		page = Page.sanitise_page_name(page)

		# Write page to the database
		page = self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
			VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)

		# Send email to all watchers
		page._send_watcher_emails(excludes=[author])

		return page

	def delete_page(self, page, author, **kwargs):
		# Do nothing if the page does not exist
		if not self.get_page(page):
			return

		# Just creates a blank last version of the page
		self.create_page(page, author=author, content=None, **kwargs)

	def make_breadcrumbs(self, url):
		# Split and strip all empty elements (double slashes)
		parts = list(e for e in url.split("/") if e)

		ret = []
		for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
			ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))

		return ret
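
	# A small sketch of the result, assuming a page at /fireinfo/arm: the
	# breadcrumbs cover the parent pages only, e.g. [("/fireinfo", "Fireinfo")],
	# falling back to the last path component when no page title can be found.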

	def search(self, query, account=None, limit=None):
		query = util.parse_search_query(query)

		res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
			LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
			WHERE search_index.document @@ to_tsquery('english', %s) \
			ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
			query, query)

		pages = []
		for page in res:
			# Skip any pages the user doesn't have permission for
			if not page.check_acl(account):
				continue

			# Return any other pages
			pages.append(page)

			# Break when we have found enough pages
			if limit and len(pages) >= limit:
				break

		return pages

	def refresh(self):
		"""
			Needs to be called after a page has been changed
		"""
		self.db.execute("REFRESH MATERIALIZED VIEW wiki_search_index")

	def check_acl(self, page, account):
		res = self.db.query("SELECT * FROM wiki_acls \
			WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)

		for row in res:
			# Access not permitted when user is not logged in
			if not account:
				return False

			# If user is in a matching group, we grant permission
			for group in row.groups:
				if group in account.groups:
					return True

			# Otherwise access is not permitted
			return False

		# If no ACLs are found, we permit access
		return True
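
	# A minimal sketch of the behaviour, assuming a wiki_acls row with
	# path = "/devel" and groups = ["staff"]: check_acl("/devel/foo", account)
	# is True only for members of "staff" (and False for anonymous users),
	# while pages without any matching ACL row are readable by everyone.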

	def _get_files(self, query, *args):
		res = self.db.query(query, *args)

		for row in res:
			yield File(self.backend, row.id, data=row)

	def _get_file(self, query, *args):
		res = self.db.get(query, *args)

		if res:
			return File(self.backend, res.id, data=res)

	def get_files(self, path):
		files = self._get_files("SELECT * FROM wiki_files \
			WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path)

		return list(files)

	def get_file_by_path(self, path):
		path, filename = os.path.dirname(path), os.path.basename(path)

		return self._get_file("SELECT * FROM wiki_files \
			WHERE path = %s AND filename = %s AND deleted_at IS NULL", path, filename)

	def upload(self, path, filename, data, mimetype, author, address):
		# Upload the blob first
		blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data)

		# Create entry for file
		return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
			mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
			filename, author.uid, address, mimetype, blob.id, len(data))
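
	# A hedged usage sketch (the variable names are illustrative only):
	#
	#   with open("screenshot.png", "rb") as f:
	#       backend.wiki.upload("/devel", "screenshot.png", f.read(),
	#           "image/png", author=account, address=remote_address)
	#
	# The raw bytes end up in wiki_blobs, the metadata row in wiki_files.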

	def find_image(self, path, filename):
		for p in (path, os.path.dirname(path)):
			file = self.get_file_by_path(os.path.join(p, filename))

			if file and file.is_image():
				return file


class Page(misc.Object):
	def init(self, id, data=None):
		self.id   = id
		self.data = data
194 return "<%s %s %s>" % (self
.__class
__.__name
__, self
.page
, self
.timestamp
)

	def __eq__(self, other):
		if isinstance(other, self.__class__):
			return self.id == other.id

	def __lt__(self, other):
		if isinstance(other, self.__class__):
			if self.page == other.page:
				return self.timestamp < other.timestamp

			return self.page < other.page

	@staticmethod
	def sanitise_page_name(page):
		if not page:
			return "/"

		# Make sure that the page name does NOT end with a /
		if page.endswith("/"):
			page = page[:-1]

		# Make sure the page name starts with a /
		if not page.startswith("/"):
			page = "/%s" % page

		# Remove any double slashes
		page = page.replace("//", "/")

		return page
231 return "https://wiki.ipfire.org%s" % self
.url
235 return self
.data
.page
239 return self
._title
or os
.path
.basename(self
.page
[1:])

	@property
	def _title(self):
		if not self.markdown:
			return

		# Find first H1 headline in markdown
		markdown = self.markdown.splitlines()

		m = re.match(r"^# (.*)( #)?$", markdown[0])
		if m:
			return m.group(1)

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	def _render(self, text):
		logging.debug("Rendering %s" % self)

		# Link images
		replacements = []
		for match in re.finditer(r"!\[(.*?)\]\((.*?)\)", text):
			alt_text, url = match.groups()

			# Skip any absolute and external URLs
			if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
				continue

			# Try to split query string
			url, delimiter, qs = url.partition("?")

			# Parse query arguments
			args = urllib.parse.parse_qs(qs)

			# Find the image file that belongs to this page
			file = self.backend.wiki.find_image(self.page, url)
			if not file:
				continue

			# Scale down the image if not already done
			if not "s" in args:
				args["s"] = "768"

			# Rebuild the URL with the query arguments
			url = "%s?%s" % (file.url, urllib.parse.urlencode(args))

			replacements.append((match.span(), file, alt_text, url))

		# Apply all replacements
		for (start, end), file, alt_text, url in reversed(replacements):
			text = text[:start] + "[![%s](%s)](%s?action=detail)" % (alt_text, url, file.url) + text[end:]

		# Add wiki links
		patterns = (
			# Internal links with a title
			(r"\[\[([\w\d\/\-\.]+)(?:\|(.+?))\]\]", r"\1", r"\2", None, True),

			# Internal links without a title
			(r"\[\[([\w\d\/\-\.]+)\]\]", r"\1", r"\1", self.backend.wiki.get_page_title, True),

			# External links
			(r"\[\[((?:ftp|git|https?|rsync|sftp|ssh|webcal)\:\/\/.+?)(?:\|(.+?))\]\]",
				r"\1", r"\2", None, False),

			# Mail links
			(r"\[\[([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)\]\]",
				r"\1", r"\1", None, False),
		)

		for pattern, link, title, repl, internal in patterns:
			replacements = []

			for match in re.finditer(pattern, text):
				l = match.expand(link)
				t = match.expand(title)

				if internal:
					# Allow relative links
					if not l.startswith("/"):
						l = os.path.join(self.page, l)

					# Normalise the link
					l = os.path.normpath(l)

				# Call any replacement callback (e.g. to resolve page titles)
				if callable(repl):
					t = repl(l) or t

				replacements.append((match.span(), t or l, l))

			# Apply all replacements
			for (start, end), t, l in reversed(replacements):
				text = text[:start] + "[%s](%s)" % (t, l) + text[end:]

		# Borrow this from the blog
		return self.backend.blog._render_text(text, lang="markdown")

	@property
	def markdown(self):
		return self.data.markdown or ""

	@property
	def html(self):
		return self._render(self.markdown)

	@property
	def timestamp(self):
		return self.data.timestamp

	def was_deleted(self):
		# A page counts as deleted when its latest revision has no content
		return self.data.markdown is None

	@lazy_property
	def breadcrumbs(self):
		return self.backend.wiki.make_breadcrumbs(self.page)

	def get_latest_revision(self):
		revisions = self.get_revisions()

		# Return first object
		for rev in revisions:
			return rev

	def get_revisions(self):
		return self.backend.wiki._get_pages("SELECT * FROM wiki \
			WHERE page = %s ORDER BY timestamp DESC", self.page)

	@lazy_property
	def previous_revision(self):
		return self.backend.wiki._get_page("SELECT * FROM wiki \
			WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
			LIMIT 1", self.page, self.timestamp)

	@property
	def changes(self):
		return self.data.changes

	def check_acl(self, account):
		return self.backend.wiki.check_acl(self.page, account)

	@lazy_property
	def sidebar(self):
		parts = self.page.split("/")

		# Walk up the page hierarchy until a sidebar page is found
		while parts:
			sidebar = self.backend.wiki.get_page("%s/sidebar" % os.path.join(*parts))
			if sidebar:
				return sidebar

			parts.pop()

	@property
	def diff(self):
		if self.previous_revision:
			diff = difflib.unified_diff(
				self.previous_revision.markdown.splitlines(),
				self.markdown.splitlines(),
			)

			return "\n".join(diff)

	@property
	def watchers(self):
		res = self.db.query("SELECT uid FROM wiki_watchlist \
			WHERE page = %s", self.page)

		for row in res:
			# Search for account by UID and skip if none was found
			account = self.backend.accounts.get_by_uid(row.uid)
			if not account:
				continue

			yield account

	def is_watched_by(self, account):
		res = self.db.get("SELECT 1 FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

		if res:
			return True

		return False

	def add_watcher(self, account):
		# Do nothing if the account is already watching this page
		if self.is_watched_by(account):
			return

		self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
			VALUES(%s, %s)", self.page, account.uid)

	def remove_watcher(self, account):
		self.db.execute("DELETE FROM wiki_watchlist \
			WHERE page = %s AND uid = %s", self.page, account.uid)

	def _send_watcher_emails(self, excludes=[]):
		# Nothing to do if there was no previous revision
		if not self.previous_revision:
			return

		for watcher in self.watchers:
			# Skip everyone who is excluded
			if watcher in excludes:
				logging.debug("Excluding %s" % watcher)
				continue

			logging.debug("Sending watcher email to %s" % watcher)

			# Compose and send the notification message
			self.backend.messages.send_template("wiki/messages/page-changed",
				recipients=[watcher], page=self, priority=-10)


class File(misc.Object):
	def init(self, id, data):
		self.id   = id
		self.data = data

	@property
	def url(self):
		return os.path.join(self.path, self.filename)

	@property
	def path(self):
		return self.data.path

	@property
	def filename(self):
		return self.data.filename

	@property
	def mimetype(self):
		return self.data.mimetype

	@property
	def size(self):
		return self.data.size

	@lazy_property
	def author(self):
		if self.data.author_uid:
			return self.backend.accounts.get_by_uid(self.data.author_uid)

	@property
	def created_at(self):
		return self.data.created_at

	def is_pdf(self):
		return self.mimetype in ("application/pdf", "application/x-pdf")

	def is_image(self):
		return self.mimetype.startswith("image/")

	@lazy_property
	def blob(self):
		res = self.db.get("SELECT data FROM wiki_blobs \
			WHERE id = %s", self.data.blob_id)

		if res:
			return bytes(res.data)

	def get_thumbnail(self, size):
		cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))

		# Try to fetch the data from the cache
		thumbnail = self.memcache.get(cache_key)
		if thumbnail:
			return thumbnail

		# Generate the thumbnail
		thumbnail = self._generate_thumbnail(size)

		# Put it into the cache for forever
		self.memcache.set(cache_key, thumbnail)

		return thumbnail
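
	# The cache key is derived from the path, the (normalised) filename, the
	# upload timestamp and the requested size, e.g. roughly
	# "/devel-screenshot.png-2019-01-01T00:00:00-768px", so replacing a file
	# automatically invalidates its cached thumbnails.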

	def _generate_thumbnail(self, size, **args):
		image = PIL.Image.open(io.BytesIO(self.blob))

		# Remember the original format; filtered copies created below lose it
		image_format = image.format

		# Remove any alpha-channels
		if image.format == "JPEG" and not image.mode == "RGB":
			# Make a white background
			background = PIL.Image.new("RGBA", image.size, (255,255,255))

			# Flatten both images together
			flattened_image = PIL.Image.alpha_composite(background, image)

			# Remove the alpha channel
			image = flattened_image.convert("RGB")

		# Resize the image to the desired resolution
		image.thumbnail((size, size), PIL.Image.LANCZOS)

		if image.format == "JPEG":
			# Apply a gaussian blur to make compression easier
			image = image.filter(PIL.ImageFilter.GaussianBlur(radius=0.05))

			# Arguments to optimise the compression
			args.update({
				"subsampling" : "4:2:0",
			})

		with io.BytesIO() as f:
			# If writing out the image does not work with optimization,
			# we try to write it out without any optimization.
			try:
				image.save(f, image_format, optimize=True, **args)
			except Exception:
				image.save(f, image_format, **args)

			return f.getvalue()