]>
git.ipfire.org Git - ipfire.org.git/blob - src/backend/wiki.py
12 from .decorators
import *
14 class Wiki(misc
.Object
):
def _get_pages(self, query, *args):
	"""Run *query* against the database and yield each row wrapped as a Page."""
	res = self.db.query(query, *args)

	# NOTE(review): the iteration line is missing from this mangled dump;
	# reconstructed from the yield over row — verify against upstream.
	for row in res:
		yield Page(self.backend, row.id, data=row)
def _get_page(self, query, *args):
	"""Run *query* and return a single row wrapped as a Page, or None when no row matched."""
	res = self.db.get(query, *args)

	# NOTE(review): the guard line is missing from this mangled dump;
	# reconstructed so that a NULL result returns None — verify upstream.
	if res:
		return Page(self.backend, res.id, data=res)
def get_page_title(self, page, default=None):
	"""Return the title of *page*, falling back to *default* or the path basename."""
	doc = self.get_page(page)

	# NOTE(review): the success branch is missing from this mangled dump;
	# returning doc.title is reconstructed — verify upstream.
	if doc:
		return doc.title

	return default or os.path.basename(page)
def get_page(self, page, revision=None):
	"""Return *page* at a specific *revision*, or its latest revision when revision is None."""
	page = Page.sanitise_page_name(page)

	# Fetch a specific revision of this page
	if revision:
		return self._get_page("SELECT * FROM wiki WHERE page = %s \
			AND timestamp = %s", page, revision)

	# Fetch the latest revision of this page
	return self._get_page("SELECT * FROM wiki WHERE page = %s \
		ORDER BY timestamp DESC LIMIT 1", page)
def get_recent_changes(self, account, limit=None):
	"""Yield pages changed within the last four weeks that *account* may read.

	NOTE(review): the loop scaffolding, the yield and the limit handling are
	missing from this mangled dump and have been reconstructed — verify
	against the upstream file before relying on them.
	"""
	pages = self._get_pages("SELECT * FROM wiki \
		WHERE timestamp >= NOW() - INTERVAL '4 weeks' \
		ORDER BY timestamp DESC")

	for page in pages:
		# Skip any pages the caller has no permission for
		if not page.check_acl(account):
			continue

		yield page

		# Stop once the requested number of pages has been returned
		if limit is not None:
			limit -= 1
			if not limit:
				break
def create_page(self, page, author, content, changes=None, address=None):
	"""Insert a new revision for *page* and return it as a Page object.

	Empty content is stored as NULL (``content or None``), which marks the
	page as deleted — see delete_page().
	"""
	page = Page.sanitise_page_name(page)

	return self._get_page("INSERT INTO wiki(page, author_uid, markdown, changes, address) \
		VALUES(%s, %s, %s, %s, %s) RETURNING *", page, author.uid, content or None, changes, address)
def delete_page(self, page, author, **kwargs):
	"""Mark *page* as deleted by writing a blank final revision."""
	# Do nothing if the page does not exist
	if not self.get_page(page):
		return

	# Just creates a blank last version of the page
	self.create_page(page, author=author, content=None, **kwargs)
def make_breadcrumbs(self, url):
	"""Return (link, title) tuples for every ancestor of *url*, excluding url itself."""
	# Split and strip all empty elements (double slashes)
	parts = list(e for e in url.split("/") if e)

	# NOTE(review): the accumulator initialisation and the final return are
	# missing from this mangled dump; reconstructed from ret.append — verify.
	ret = []
	for part in ("/".join(parts[:i]) for i in range(1, len(parts))):
		ret.append(("/%s" % part, self.get_page_title(part, os.path.basename(part))))

	return ret
def search(self, query, account=None, limit=None):
	"""Full-text search over the wiki, filtered through the ACLs.

	NOTE(review): the query argument list and the collection of the result
	pages are missing from this mangled dump and have been reconstructed —
	verify against the upstream file.
	"""
	query = util.parse_search_query(query)

	res = self._get_pages("SELECT wiki.* FROM wiki_search_index search_index \
		LEFT JOIN wiki ON search_index.wiki_id = wiki.id \
		WHERE search_index.document @@ to_tsquery('english', %s) \
		ORDER BY ts_rank(search_index.document, to_tsquery('english', %s)) DESC",
		query, query)

	pages = []
	for page in res:
		# Skip any pages the user doesn't have permission for
		if not page.check_acl(account):
			continue

		# Return any other pages
		pages.append(page)

		# Stop once enough results have been collected
		if limit and len(pages) >= limit:
			break

	return pages
107 Needs to be called after a page has been changed
109 self
.db
.execute("REFRESH MATERIALIZED VIEW wiki_search_index")
def check_acl(self, page, account):
	"""Check whether *account* may access *page* according to wiki_acls.

	The longest matching ACL path wins; pages with no matching ACL are
	readable by everyone.

	NOTE(review): the control flow between the surviving comments is missing
	from this mangled dump and has been reconstructed — verify upstream.
	"""
	res = self.db.query("SELECT * FROM wiki_acls \
		WHERE %s ILIKE (path || '%%') ORDER BY LENGTH(path) DESC LIMIT 1", page)

	for row in res:
		# Access not permitted when user is not logged in
		if not account:
			return False

		# If user is in a matching group, we grant permission
		for group in row.groups:
			if group in account.groups:
				return True

		# Otherwise access is not permitted
		return False

	# If no ACLs are found, we permit access
	return True
def _get_files(self, query, *args):
	"""Run *query* and yield each row wrapped as a File object."""
	res = self.db.query(query, *args)

	# NOTE(review): iteration line missing from this mangled dump; reconstructed.
	for row in res:
		yield File(self.backend, row.id, data=row)
def _get_file(self, query, *args):
	"""Run *query* and return a single row wrapped as a File, or None."""
	res = self.db.get(query, *args)

	# NOTE(review): guard line missing from this mangled dump; reconstructed.
	if res:
		return File(self.backend, res.id, data=res)
def get_files(self, path):
	"""Return all non-deleted files stored under *path*, sorted by filename."""
	files = self._get_files("SELECT * FROM wiki_files \
		WHERE path = %s AND deleted_at IS NULL ORDER BY filename", path)

	# NOTE(review): the original return is missing from this mangled dump;
	# materialising the generator is assumed — verify upstream.
	return list(files)
def get_file_by_path(self, path):
	"""Split *path* into directory and filename and return the matching file."""
	path, filename = os.path.dirname(path), os.path.basename(path)

	return self._get_file("SELECT * FROM wiki_files \
		WHERE path = %s AND filename = %s AND deleted_at IS NULL", path, filename)
def upload(self, path, filename, data, mimetype, author, address):
	"""Store *data* as a blob and register its file metadata; return the File."""
	# Upload the blob first
	blob = self.db.get("INSERT INTO wiki_blobs(data) VALUES(%s) RETURNING id", data)

	# Create entry for file
	return self._get_file("INSERT INTO wiki_files(path, filename, author_uid, address, \
		mimetype, blob_id, size) VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING *", path,
		filename, author.uid, address, mimetype, blob.id, len(data))
def find_image(self, path, filename):
	"""Look for *filename* as an image in *path* or in its parent directory."""
	for p in (path, os.path.dirname(path)):
		file = self.get_file_by_path(os.path.join(p, filename))

		# NOTE(review): returning the first image match is reconstructed from
		# missing lines of this mangled dump — verify upstream.
		if file and file.is_image():
			return file
176 class Page(misc
.Object
):
def init(self, id, data=None):
	"""Initialisation hook (invoked by misc.Object); stores the row id and data.

	NOTE(review): the body is missing from this mangled dump; assigning
	id/data is inferred from their use throughout the class — verify upstream.
	"""
	self.id = id
	self.data = data
def __repr__(self):
	"""Debug representation: class name, page path and revision timestamp."""
	# NOTE(review): the "def __repr__" line is missing from this mangled dump;
	# the name is inferred from the "<%s %s %s>" format string — verify upstream.
	return "<%s %s %s>" % (self.__class__.__name__, self.page, self.timestamp)
def __eq__(self, other):
	"""Two revisions are equal when their database IDs match."""
	if isinstance(other, self.__class__):
		return self.id == other.id
	# Falls through (returns None, i.e. falsy) for foreign types,
	# matching the visible original.
def __lt__(self, other):
	"""Order revisions by page path, then by timestamp within the same page."""
	if isinstance(other, self.__class__):
		if self.page == other.page:
			return self.timestamp < other.timestamp

		return self.page < other.page
def sanitise_page_name(page):
	"""Normalise a wiki page name to the canonical "/segment/segment" form.

	Used as a static helper (called as Page.sanitise_page_name elsewhere).
	NOTE(review): the empty-name guard and the final return are missing from
	this mangled dump and were reconstructed — verify upstream.
	"""
	if not page:
		return "/"

	# Make sure that the page name does NOT end with a /
	if page.endswith("/"):
		page = page[:-1]

	# Make sure the page name starts with a /
	if not page.startswith("/"):
		page = "/%s" % page

	# Remove any double slashes
	page = page.replace("//", "/")

	return page
@property
def page(self):
	# Page path as stored in the database row.
	# NOTE(review): the "@property def page" header is missing from this
	# mangled dump; reconstructed from the accessed column name.
	return self.data.page
@property
def title(self):
	"""Human-readable title: the first H1 headline when present, else the page basename."""
	# NOTE(review): property header reconstructed; body matches the dump.
	return self._title or os.path.basename(self.page[1:])
@property
def _title(self):
	"""Extract the first H1 headline ("# ...") from the markdown, if any."""
	# NOTE(review): property header and the return of the match group are
	# missing from this mangled dump and were reconstructed — verify upstream.
	if not self.markdown:
		return

	# Find first H1 headline in markdown
	markdown = self.markdown.splitlines()

	m = re.match(r"^# (.*)( #)?$", markdown[0])
	if m:
		return m.group(1)
@property
def author(self):
	"""Account object of this revision's author, or None when author_uid is NULL."""
	# NOTE(review): property header reconstructed from this mangled dump.
	if self.data.author_uid:
		return self.backend.accounts.get_by_uid(self.data.author_uid)
def _render(self, text):
	"""Render wiki markdown *text* to HTML.

	Rewrites relative image references and [[wiki links]] into regular
	markdown links, then delegates to the blog's markdown renderer.

	NOTE(review): large parts of this method are missing from this mangled
	dump — the loop scaffolding, the image-scaling branch and the pattern
	loop initialisation are reconstructed and MUST be verified upstream.
	"""
	logging.debug("Rendering %s" % self)

	# Rewrite all relative inline image references
	replacements = []
	for match in re.finditer(r"!\[(.*)\]\((.*)\)", text):
		alt_text, url = match.groups()

		# Skip any absolute and external URLs
		if url.startswith("/") or url.startswith("https://") or url.startswith("http://"):
			continue

		# Try to split query string
		url, delimiter, qs = url.partition("?")

		# Parse query arguments
		args = urllib.parse.parse_qs(qs)

		# Resolve the referenced image file
		file = self.backend.wiki.find_image(self.page, url)
		if not file:
			continue

		# Scale down the image if not already done
		# NOTE(review): the default size is reconstructed — confirm upstream.
		if "s" not in args:
			args["s"] = "768"

		url = "%s?%s" % (file.url, urllib.parse.urlencode(args))

		replacements.append((match.span(), file, alt_text, url))

	# Apply all replacements (back to front so spans stay valid)
	for (start, end), file, alt_text, url in reversed(replacements):
		text = text[:start] + "[![%s](%s)](%s?action=detail)" % (alt_text, url, file.url) + text[end:]

	# Rewrite [[target|title]] and [[target]] wiki links
	patterns = (
		(r"\[\[([\w\d\/]+)(?:\|([\w\d\s]+))\]\]", r"/\1", r"\2", None, None),
		(r"\[\[([\w\d\/\-]+)\]\]", r"/\1", r"\1", self.backend.wiki.get_page_title, r"\1"),
	)

	for pattern, link, title, repl, args in patterns:
		replacements = []

		for match in re.finditer(pattern, text):
			l = match.expand(link)
			t = match.expand(title)

			# Optionally replace the title via a lookup callback
			if repl:
				t = repl(match.expand(args)) or t

			replacements.append((match.span(), t or l, l))

		# Apply all replacements (back to front so spans stay valid)
		for (start, end), t, l in reversed(replacements):
			text = text[:start] + "[%s](%s)" % (t, l) + text[end:]

	# Borrow this from the blog
	return self.backend.blog._render_text(text, lang="markdown")
@property
def markdown(self):
	"""Raw markdown source of this revision; empty string when stored as NULL."""
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.markdown or ""
@property
def html(self):
	"""Pre-rendered HTML from the database, or render the markdown on the fly."""
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.html or self._render(self.markdown)
@property
def timestamp(self):
	# Creation time of this revision, straight from the database row.
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.timestamp
def was_deleted(self):
	"""Whether this revision marks the page as deleted (no content stored).

	NOTE(review): the markdown property visible in this file coerces NULL to
	"", so this check could never be true through that property — one of the
	two looks stale; confirm against upstream.
	"""
	return self.markdown is None
def breadcrumbs(self):
	"""Breadcrumb trail for this page, delegated to the wiki backend."""
	return self.backend.wiki.make_breadcrumbs(self.page)
def get_latest_revision(self):
	"""Return the newest revision of this page, or None when there is none."""
	revisions = self.get_revisions()

	# Return first object (revisions are ordered newest-first)
	for rev in revisions:
		return rev
def get_revisions(self):
	"""Return all revisions of this page, newest first."""
	return self.backend.wiki._get_pages("SELECT * FROM wiki \
		WHERE page = %s ORDER BY timestamp DESC", self.page)
def previous_revision(self):
	"""Return the revision immediately preceding this one, or None."""
	return self.backend.wiki._get_page("SELECT * FROM wiki \
		WHERE page = %s AND timestamp < %s ORDER BY timestamp DESC \
		LIMIT 1", self.page, self.timestamp)
@property
def changes(self):
	# Free-text change summary stored with the revision.
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.changes
def check_acl(self, account):
	"""Whether *account* may read this page (delegates to Wiki.check_acl)."""
	return self.backend.wiki.check_acl(self.page, account)
352 parts
= self
.page
.split("/")
355 sidebar
= self
.backend
.wiki
.get_page("%s/sidebar" % os
.path
.join(*parts
))
def is_watched_by(self, account):
	"""Whether *account* has this page on their watchlist."""
	res = self.db.get("SELECT 1 FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)

	# NOTE(review): the boolean return lines are missing from this mangled
	# dump and were reconstructed — verify upstream.
	if res:
		return True

	return False
def add_watcher(self, account):
	"""Subscribe *account* to this page; no-op when already watching."""
	if self.is_watched_by(account):
		return

	self.db.execute("INSERT INTO wiki_watchlist(page, uid) \
		VALUES(%s, %s)", self.page, account.uid)
def remove_watcher(self, account):
	"""Unsubscribe *account* from changes of this page."""
	self.db.execute("DELETE FROM wiki_watchlist \
		WHERE page = %s AND uid = %s", self.page, account.uid)
384 class File(misc
.Object
):
def init(self, id, data):
	"""Initialisation hook (invoked by misc.Object); stores the row id and data.

	NOTE(review): the body is missing from this mangled dump; assigning
	id/data is inferred from their use throughout the class — verify upstream.
	"""
	self.id = id
	self.data = data
@property
def url(self):
	"""Full wiki path of this file (directory + filename)."""
	# NOTE(review): property header reconstructed from this mangled dump.
	return os.path.join(self.path, self.filename)
@property
def path(self):
	# Directory this file lives in, straight from the database row.
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.path
@property
def filename(self):
	# Filename component, straight from the database row.
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.filename
@property
def mimetype(self):
	# MIME type recorded at upload time.
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.mimetype
@property
def size(self):
	# Size in bytes recorded at upload time (see upload(): len(data)).
	# NOTE(review): property header reconstructed from this mangled dump.
	return self.data.size
@property
def author(self):
	"""Account of the uploader, or None when author_uid is NULL."""
	# NOTE(review): property header reconstructed from this mangled dump.
	if self.data.author_uid:
		return self.backend.accounts.get_by_uid(self.data.author_uid)
@property
def created_at(self):
	# Upload time of this file. Declared as a property: get_thumbnail()
	# accesses self.created_at.isoformat() without calling it, which grounds
	# the (missing) @property decorator reconstruction.
	return self.data.created_at
def is_pdf(self):
	"""Whether this file is a PDF document (by MIME type)."""
	# NOTE(review): the "def is_pdf" header is missing from this mangled dump
	# and was reconstructed — verify upstream.
	return self.mimetype in ("application/pdf", "application/x-pdf")
def is_image(self):
	"""Whether this file is an image (by MIME type prefix)."""
	# Header reconstructed; grounded by the file.is_image() call in find_image().
	return self.mimetype.startswith("image/")
@property
def blob(self):
	"""Raw file content from wiki_blobs as bytes, or None when the blob is missing."""
	# NOTE(review): property header and the guard are partially missing from
	# this mangled dump; grounded by io.BytesIO(self.blob) in _generate_thumbnail.
	res = self.db.get("SELECT data FROM wiki_blobs \
		WHERE id = %s", self.data.blob_id)

	if res:
		return bytes(res.data)
def get_thumbnail(self, size):
	"""Return thumbnail bytes for this image at *size* px, via memcache when possible.

	The cache key includes the creation time, so re-uploads naturally
	invalidate old thumbnails.
	"""
	cache_key = "-".join((self.path, util.normalize(self.filename), self.created_at.isoformat(), "%spx" % size))

	# Try to fetch the data from the cache
	thumbnail = self.memcache.get(cache_key)
	# NOTE(review): the early return on a cache hit and the final return are
	# missing from this mangled dump and were reconstructed — verify upstream.
	if thumbnail:
		return thumbnail

	# Generate the thumbnail
	thumbnail = self._generate_thumbnail(size)

	# Put it into the cache for forever
	self.memcache.set(cache_key, thumbnail)

	return thumbnail
def _generate_thumbnail(self, size):
	"""Scale the stored image blob to fit a size x size box and return the encoded bytes."""
	image = PIL.Image.open(io.BytesIO(self.blob))

	# Resize the image to the desired resolution
	# NOTE(review): PIL.Image.ANTIALIAS is deprecated in Pillow >= 9.1 and
	# removed in 10 (use Image.LANCZOS) — kept as-is to match the original.
	image.thumbnail((size, size), PIL.Image.ANTIALIAS)

	with io.BytesIO() as f:
		# If writing out the image does not work with optimization,
		# we try to write it out without any optimization.
		# NOTE(review): the try/except scaffolding and the final return are
		# missing from this mangled dump; reconstructed from the comment and
		# the duplicated save calls — verify upstream (PIL raises ValueError
		# for unsupported save options).
		try:
			image.save(f, image.format, optimize=True, quality=98)
		except ValueError:
			image.save(f, image.format, quality=98)

		return f.getvalue()