--- /dev/null
+import re
+
+# Sphinx 'autodoc-skip-member' hook.  Autodoc skips dunder methods by
+# default; this un-skips a chosen set of them on classes when they carry
+# their own docstring, otherwise defers to Sphinx's decision (``skip``).
+def autodoc_skip_member(app, what, name, obj, skip, options):
+ if what == 'class' and skip and \
+ name in ('__init__', '__eq__', '__ne__', '__lt__',
+ '__le__', '__call__') and \
+ obj.__doc__:
+ return False
+ else:
+ return skip
+
+# I'm sure this state is tracked in the Sphinx app somewhere, but I don't
+# really know where, so we're doing it here.
+# maps autodoc'ed class dotted-name -> class object
+_track_autodoced = {}
+# dotted names of superclasses/attributes rendered as "inherited from"
+# notes; consumed by missing_reference() to suppress broken xrefs
+_inherited_names = set()
+# Sphinx 'autodoc-process-docstring' hook: when an attribute/method is
+# documented via :inherited-members:, find the class in the MRO that
+# actually defines it and prepend an "*inherited from* ..." note to the
+# docstring lines (mutates ``lines`` in place).
+def autodoc_process_docstring(app, what, name, obj, options, lines):
+ if what == "class":
+ _track_autodoced[name] = obj
+ elif what in ("attribute", "method") and \
+ options.get("inherited-members"):
+ m = re.match(r'(.*?)\.([\w_]+)$', name)
+ if m:
+ clsname, attrname = m.group(1, 2)
+ if clsname in _track_autodoced:
+ cls = _track_autodoced[clsname]
+ # locate the defining class; MRO guarantees supercls
+ # is bound when attrname exists on the class at all
+ for supercls in cls.__mro__:
+ if attrname in supercls.__dict__:
+ break
+ if supercls is not cls:
+ _inherited_names.add("%s.%s" % (supercls.__module__, supercls.__name__))
+ _inherited_names.add("%s.%s.%s" % (supercls.__module__, supercls.__name__, attrname))
+ # prepend the note as reST so the xref renders inline
+ lines[:0] = [
+ ".. container:: inherited_member",
+ "",
+ " *inherited from the* :%s:`~%s.%s.%s` *%s of* :class:`~%s.%s`" % (
+ "attr" if what == "attribute"
+ else "meth",
+ supercls.__module__, supercls.__name__,
+ attrname,
+ what,
+ supercls.__module__, supercls.__name__
+ ),
+ ""
+ ]
+
+from docutils import nodes
+# Sphinx 'missing-reference' hook: for targets we generated ourselves in
+# the "inherited from" notes, swallow the unresolved xref by returning the
+# bare text child; anything else falls through to Sphinx (return None).
+def missing_reference(app, env, node, contnode):
+ if node.attributes['reftarget'] in _inherited_names:
+ return node.children[0]
+ else:
+ return None
+
+
+
+# Sphinx extension entry point: wire the three hooks above.
+def setup(app):
+ app.connect('autodoc-skip-member', autodoc_skip_member)
+ app.connect('autodoc-process-docstring', autodoc_process_docstring)
+
+ app.connect('missing-reference', missing_reference)
+++ /dev/null
-from sphinx.application import TemplateBridge
-from sphinx.builders.html import StandaloneHTMLBuilder
-from sphinx.highlighting import PygmentsBridge
-from sphinx.jinja2glue import BuiltinTemplateLoader
-from pygments import highlight
-from pygments.lexer import RegexLexer, bygroups, using
-from pygments.token import *
-from pygments.filter import Filter, apply_filters
-from pygments.lexers import PythonLexer, PythonConsoleLexer
-from pygments.formatters import HtmlFormatter, LatexFormatter
-import re
-from mako.lookup import TemplateLookup
-from mako.template import Template
-from mako import __version__
-import os
-
-rtd = os.environ.get('READTHEDOCS', None) == 'True'
-
-class MakoBridge(TemplateBridge):
- def init(self, builder, *args, **kw):
- self.jinja2_fallback = BuiltinTemplateLoader()
- self.jinja2_fallback.init(builder, *args, **kw)
-
- builder.config.html_context['release_date'] = builder.config['release_date']
- builder.config.html_context['site_base'] = builder.config['site_base']
-
- self.lookup = TemplateLookup(directories=builder.config.templates_path,
- #format_exceptions=True,
- imports=[
- "from builder import util"
- ]
- )
-
- if rtd:
- import urllib2
- template_url = builder.config['site_base'] + "/docs_base.mako"
- template = urllib2.urlopen(template_url).read()
- self.lookup.put_string("/rtd_base.mako", template)
-
- def render(self, template, context):
- template = template.replace(".html", ".mako")
- context['prevtopic'] = context.pop('prev', None)
- context['nexttopic'] = context.pop('next', None)
- version = context['version']
- pathto = context['pathto']
-
- # RTD layout
- if rtd:
- # add variables if not present, such
- # as if local test of READTHEDOCS variable
- if 'MEDIA_URL' not in context:
- context['MEDIA_URL'] = "http://media.readthedocs.org/"
- if 'slug' not in context:
- context['slug'] = context['project'].lower()
- if 'url' not in context:
- context['url'] = "/some/test/url"
- if 'current_version' not in context:
- context['current_version'] = "latest"
-
- if 'name' not in context:
- context['name'] = context['project'].lower()
-
- context['rtd'] = True
- context['toolbar'] = True
- context['layout'] = "rtd_layout.mako"
- context['base'] = "rtd_base.mako"
- context['pdf_url'] = "%spdf/%s/%s/%s.pdf" % (
- context['MEDIA_URL'],
- context['slug'],
- context['current_version'],
- context['slug']
- )
- # local docs layout
- else:
- context['rtd'] = False
- context['toolbar'] = False
- context['layout'] = "layout.mako"
- context['base'] = "static_base.mako"
-
- context.setdefault('_', lambda x:x)
- return self.lookup.get_template(template).render_unicode(**context)
-
- def render_string(self, template, context):
- # this is used for .js, .css etc. and we don't have
- # local copies of that stuff here so use the jinja render.
- return self.jinja2_fallback.render_string(template, context)
-
-class StripDocTestFilter(Filter):
- def filter(self, lexer, stream):
- for ttype, value in stream:
- if ttype is Token.Comment and re.match(r'#\s*doctest:', value):
- continue
- yield ttype, value
-
-class PyConWithSQLLexer(RegexLexer):
- name = 'PyCon+SQL'
- aliases = ['pycon+sql']
-
- flags = re.IGNORECASE | re.DOTALL
-
- tokens = {
- 'root': [
- (r'{sql}', Token.Sql.Link, 'sqlpopup'),
- (r'{opensql}', Token.Sql.Open, 'opensqlpopup'),
- (r'.*?\n', using(PythonConsoleLexer))
- ],
- 'sqlpopup':[
- (
- r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK|COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))',
- bygroups(using(PythonConsoleLexer), Token.Sql.Popup),
- "#pop"
- )
- ],
- 'opensqlpopup':[
- (
- r'.*?(?:{stop}\n*|$)',
- Token.Sql,
- "#pop"
- )
- ]
- }
-
-
-class PythonWithSQLLexer(RegexLexer):
- name = 'Python+SQL'
- aliases = ['pycon+sql']
-
- flags = re.IGNORECASE | re.DOTALL
-
- tokens = {
- 'root': [
- (r'{sql}', Token.Sql.Link, 'sqlpopup'),
- (r'{opensql}', Token.Sql.Open, 'opensqlpopup'),
- (r'.*?\n', using(PythonLexer))
- ],
- 'sqlpopup':[
- (
- r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK|COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))',
- bygroups(using(PythonLexer), Token.Sql.Popup),
- "#pop"
- )
- ],
- 'opensqlpopup':[
- (
- r'.*?(?:{stop}\n*|$)',
- Token.Sql,
- "#pop"
- )
- ]
- }
-
-
-def _strip_trailing_whitespace(iter_):
- buf = list(iter_)
- if buf:
- buf[-1] = (buf[-1][0], buf[-1][1].rstrip())
- for t, v in buf:
- yield t, v
-
-class PopupSQLFormatter(HtmlFormatter):
- def _format_lines(self, tokensource):
- buf = []
- for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]):
- if ttype in Token.Sql:
- for t, v in HtmlFormatter._format_lines(self, iter(buf)):
- yield t, v
- buf = []
-
- if ttype is Token.Sql:
- yield 1, "<div class='show_sql'>%s</div>" % re.sub(r'(?:[{stop}|\n]*)$', '', value)
- elif ttype is Token.Sql.Link:
- yield 1, "<a href='#' class='sql_link'>sql</a>"
- elif ttype is Token.Sql.Popup:
- yield 1, "<div class='popup_sql'>%s</div>" % re.sub(r'(?:[{stop}|\n]*)$', '', value)
- else:
- buf.append((ttype, value))
-
- for t, v in _strip_trailing_whitespace(HtmlFormatter._format_lines(self, iter(buf))):
- yield t, v
-
-class PopupLatexFormatter(LatexFormatter):
- def _filter_tokens(self, tokensource):
- for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]):
- if ttype in Token.Sql:
- if ttype is not Token.Sql.Link and ttype is not Token.Sql.Open:
- yield Token.Literal, re.sub(r'{stop}', '', value)
- else:
- continue
- else:
- yield ttype, value
-
- def format(self, tokensource, outfile):
- LatexFormatter.format(self, self._filter_tokens(tokensource), outfile)
-
-def autodoc_skip_member(app, what, name, obj, skip, options):
- if what == 'class' and skip and name in ('__init__', '__eq__', '__ne__', '__lt__', '__le__') and obj.__doc__:
- return False
- else:
- return skip
-
-def setup(app):
- app.add_lexer('pycon+sql', PyConWithSQLLexer())
- app.add_lexer('python+sql', PythonWithSQLLexer())
- app.add_config_value('release_date', "", True)
- app.add_config_value('site_base', "", True)
- app.add_config_value('build_number', "", 1)
- app.connect('autodoc-skip-member', autodoc_skip_member)
- PygmentsBridge.html_formatter = PopupSQLFormatter
- PygmentsBridge.latex_formatter = PopupLatexFormatter
-
--- /dev/null
+import re
+from sphinx.util.compat import Directive
+from docutils.statemachine import StringList
+from docutils import nodes, utils
+import textwrap
+import itertools
+import collections
+import md5
+
+# Split a comma-separated option value into a list, tolerating whitespace
+# around commas.  An empty/whitespace-only input yields [''].
+def _comma_list(text):
+ return re.split(r"\s*,\s*", text.strip())
+
+# Parse a directive body: leading ':name: value' field lines become dict
+# entries (value '' when omitted); the remainder, after the first
+# non-field line, is stored under 'text'.  Note the first non-field line
+# itself is consumed (content[idx:] starts just past it) — presumably a
+# blank separator line; verify against the directive sources.
+def _parse_content(content):
+ d = {}
+ d['text'] = []
+ idx = 0
+ for line in content:
+ idx += 1
+ m = re.match(r' *\:(.+?)\:(?: +(.+))?', line)
+ if m:
+ attrname, value = m.group(1, 2)
+ d[attrname] = value or ''
+ else:
+ break
+ d["text"] = content[idx:]
+ return d
+
+
+# Mixin for docutils directives: convenience accessor for the Sphinx
+# build environment hung off the current document's settings.
+class EnvDirective(object):
+ @property
+ def env(self):
+ return self.state.document.settings.env
+
+# ``.. changelog::`` directive: renders the accumulated ``.. change::``
+# records (collected into env.temp_data by ChangeDirective) as a
+# versioned section tree, grouped by configured sections and inner tags.
+class ChangeLogDirective(EnvDirective, Directive):
+ has_content = True
+
+ # key suffix for the per-type record list in env.temp_data
+ type_ = "change"
+
+ # bucket for records matching no configured section
+ default_section = 'misc'
+
+ # Bucket records by (section, inner_tag).  Compound sections
+ # ("orm declarative") match when a record's tags contain every word;
+ # otherwise the first matching simple section in the record's own tag
+ # order wins; otherwise the record falls into default_section.
+ # Returns (buckets, set of sections actually used).
+ def _organize_by_section(self, changes):
+ compound_sections = [(s, s.split(" ")) for s in
+ self.sections if " " in s]
+
+ bysection = collections.defaultdict(list)
+ all_sections = set()
+ for rec in changes:
+ inner_tag = rec['tags'].intersection(self.inner_tag_sort)
+ if inner_tag:
+ # set.pop(): arbitrary pick if several inner tags match
+ inner_tag = inner_tag.pop()
+ else:
+ inner_tag = ""
+
+ for compound, comp_words in compound_sections:
+ if rec['tags'].issuperset(comp_words):
+ bysection[(compound, inner_tag)].append(rec)
+ all_sections.add(compound)
+ break
+ else:
+ intersect = rec['tags'].intersection(self.sections)
+ if intersect:
+ # honor the author's tag ordering from the directive
+ for sec in rec['sorted_tags']:
+ if sec in intersect:
+ bysection[(sec, inner_tag)].append(rec)
+ all_sections.add(sec)
+ break
+ else:
+ bysection[(self.default_section, inner_tag)].append(rec)
+ return bysection, all_sections
+
+ # Accessor for the record list ChangeDirective appends into; keyed by
+ # cls.type_ so subclasses get independent lists.
+ @classmethod
+ def changes(cls, env):
+ return env.temp_data['ChangeLogDirective_%s_changes' % cls.type_]
+
+ # Read config, reset the record list for this run, parse our own
+ # field-list header, then nested-parse the body (which executes the
+ # contained ``.. change::`` directives, filling the record list).
+ def _setup_run(self):
+ self.sections = self.env.config.changelog_sections
+ self.inner_tag_sort = self.env.config.changelog_inner_tag_sort + [""]
+ self.env.temp_data['ChangeLogDirective_%s_changes' % self.type_] = []
+ self._parsed_content = _parse_content(self.content)
+
+ # throwaway parent node; we only want the parse side effects
+ p = nodes.paragraph('', '',)
+ self.state.nested_parse(self.content[1:], 0, p)
+
+ def run(self):
+ self._setup_run()
+ changes = self.changes(self.env)
+ output = []
+
+ self.version = version = self._parsed_content.get('version', '')
+ id_prefix = "%s-%s" % (self.type_, version)
+ topsection = self._run_top(id_prefix)
+ output.append(topsection)
+
+ bysection, all_sections = self._organize_by_section(changes)
+
+ # monotonically numbers every rendered record for unique ids
+ counter = itertools.count()
+
+ sections_to_render = [s for s in self.sections if s in all_sections]
+ if not sections_to_render:
+ # flat layout: no named sections, just inner-tag groups
+ for cat in self.inner_tag_sort:
+ append_sec = self._append_node()
+
+ for rec in bysection[(self.default_section, cat)]:
+ rec["id"] = "%s-%s" % (id_prefix, next(counter))
+
+ self._render_rec(rec, None, cat, append_sec)
+
+ if append_sec.children:
+ topsection.append(append_sec)
+ else:
+ # sectioned layout; default_section always rendered last
+ for section in sections_to_render + [self.default_section]:
+ sec = nodes.section('',
+ nodes.title(section, section),
+ ids=["%s-%s" % (id_prefix, section.replace(" ", "-"))]
+ )
+
+ append_sec = self._append_node()
+ sec.append(append_sec)
+
+ for cat in self.inner_tag_sort:
+ for rec in bysection[(section, cat)]:
+ rec["id"] = "%s-%s" % (id_prefix, next(counter))
+ self._render_rec(rec, section, cat, append_sec)
+
+ # skip sections that ended up empty
+ if append_sec.children:
+ topsection.append(sec)
+
+ return output
+
+ # container that holds rendered records; subclass hook
+ def _append_node(self):
+ return nodes.bullet_list()
+
+ # Build the top-level section titled with the version, plus the
+ # release-date line.
+ def _run_top(self, id_prefix):
+ version = self._parsed_content.get('version', '')
+ topsection = nodes.section('',
+ nodes.title(version, version),
+ ids=[id_prefix]
+ )
+
+ if self._parsed_content.get("released"):
+ topsection.append(nodes.Text("Released: %s" %
+ self._parsed_content['released']))
+ else:
+ topsection.append(nodes.Text("no release date"))
+ return topsection
+
+
+ # Render one change record into append_sec: a deep copy of its parsed
+ # paragraph plus a stable permalink target, ticket/pullreq/changeset
+ # references, and a leading "[tag]" strong node.
+ def _render_rec(self, rec, section, cat, append_sec):
+ para = rec['node'].deepcopy()
+
+ text = _text_rawsource_from_node(para)
+
+ # hash version + text prefix so the anchor survives re-numbering
+ # NOTE(review): `md5` is the Python-2-only module (removed in py3;
+ # hashlib.md5 is the modern equivalent) — confirm target Python.
+ to_hash = "%s %s" % (self.version, text[0:100])
+ targetid = "%s-%s" % (self.type_,
+ md5.md5(to_hash.encode('ascii', 'ignore')
+ ).hexdigest())
+ targetnode = nodes.target('', '', ids=[targetid])
+ para.insert(0, targetnode)
+ permalink = nodes.reference('', '',
+ nodes.Text("(link)", "(link)"),
+ refid=targetid,
+ classes=['changeset-link']
+ )
+ para.append(permalink)
+
+ insert_ticket = nodes.paragraph('')
+ para.append(insert_ticket)
+
+ # emit "#123, pull request 45, r678" style refs; each renders as a
+ # hyperlink when the corresponding changelog_render_* config gives
+ # a URL template, plain text otherwise
+ i = 0
+ for collection, render, prefix in (
+ (rec['tickets'], self.env.config.changelog_render_ticket, "#%s"),
+ (rec['pullreq'], self.env.config.changelog_render_pullreq,
+ "pull request %s"),
+ (rec['changeset'], self.env.config.changelog_render_changeset, "r%s"),
+ ):
+ for refname in collection:
+ if i > 0:
+ insert_ticket.append(nodes.Text(", ", ", "))
+ else:
+ insert_ticket.append(nodes.Text(" ", " "))
+ i += 1
+ if render is not None:
+ refuri = render % refname
+ node = nodes.reference('', '',
+ nodes.Text(prefix % refname, prefix % refname),
+ refuri=refuri
+ )
+ else:
+ node = nodes.Text(prefix % refname, prefix % refname)
+ insert_ticket.append(node)
+
+ if rec['tags']:
+ # current section/category tags first, then the rest
+ tag_node = nodes.strong('',
+ " ".join("[%s]" % t for t
+ in
+ [t1 for t1 in [section, cat]
+ if t1 in rec['tags']] +
+
+ list(rec['tags'].difference([section, cat]))
+ ) + " "
+ )
+ para.children[0].insert(0, tag_node)
+
+ append_sec.append(
+ nodes.list_item('',
+ nodes.target('', '', ids=[rec['id']]),
+ para
+ )
+ )
+
+
+# ``.. change::`` directive: parses one changelog entry and appends its
+# record dict to the enclosing ChangeLogDirective's list; produces no
+# document nodes of its own (the parent renders the stored 'node').
+class ChangeDirective(EnvDirective, Directive):
+ has_content = True
+
+ type_ = "change"
+ parent_cls = ChangeLogDirective
+
+ def run(self):
+ content = _parse_content(self.content)
+ # paragraph that will hold this entry's parsed body text
+ p = nodes.paragraph('', '',)
+ sorted_tags = _comma_list(content.get('tags', ''))
+ rec = {
+ 'tags': set(sorted_tags).difference(['']),
+ 'tickets': set(_comma_list(content.get('tickets', ''))).difference(['']),
+ 'pullreq': set(_comma_list(content.get('pullreq', ''))).difference(['']),
+ 'changeset': set(_comma_list(content.get('changeset', ''))).difference(['']),
+ 'node': p,
+ 'type': self.type_,
+ "title": content.get("title", None),
+ 'sorted_tags': sorted_tags
+ }
+
+ # project-specific aliasing: 'declarative' changes are ORM changes
+ if "declarative" in rec['tags']:
+ rec['tags'].add("orm")
+
+ self.state.nested_parse(content['text'], 0, p)
+ self.parent_cls.changes(self.env).append(rec)
+
+ return []
+
+# Concatenate the rawsource of every Text node under ``node`` via a
+# breadth-first walk (pop(0) on the work list).
+def _text_rawsource_from_node(node):
+ src = []
+ stack = [node]
+ while stack:
+ n = stack.pop(0)
+ if isinstance(n, nodes.Text):
+ src.append(n.rawsource)
+ stack.extend(n.children)
+ return "".join(src)
+
+# Dedent a triple-quoted reST snippet and wrap it as a docutils
+# StringList so it can be fed to nested_parse.
+def _rst2sphinx(text):
+ return StringList(
+ [line.strip() for line in textwrap.dedent(text).split("\n")]
+ )
+
+
+# docutils role handler for :ticket:`123` — renders "#123" hyperlinked
+# via the changelog_render_ticket URL template.  The mutable defaults
+# follow the docutils role-function signature convention.
+def make_ticket_link(name, rawtext, text, lineno, inliner,
+ options={}, content=[]):
+ env = inliner.document.settings.env
+ # NOTE(review): the `or "%s"` fallback makes render_ticket always
+ # truthy, so the else branch below is unreachable as written —
+ # confirm whether a plain-text fallback was intended.
+ render_ticket = env.config.changelog_render_ticket or "%s"
+ prefix = "#%s"
+ if render_ticket:
+ ref = render_ticket % text
+ node = nodes.reference(rawtext, prefix % text, refuri=ref, **options)
+ else:
+ node = nodes.Text(prefix % text, prefix % text)
+ return [node], []
+
+# Sphinx extension entry point: register the changelog directives, their
+# config values (all rebuild-on-change, 'env'), and the :ticket: role.
+def setup(app):
+ app.add_directive('changelog', ChangeLogDirective)
+ app.add_directive('change', ChangeDirective)
+ app.add_config_value("changelog_sections", [], 'env')
+ app.add_config_value("changelog_inner_tag_sort", [], 'env')
+ app.add_config_value("changelog_render_ticket",
+ None,
+ 'env'
+ )
+ app.add_config_value("changelog_render_pullreq",
+ None,
+ 'env'
+ )
+ app.add_config_value("changelog_render_changeset",
+ None,
+ 'env'
+ )
+ app.add_role('ticket', make_ticket_link)
--- /dev/null
+import re
+from sphinx.util.compat import Directive
+from docutils import nodes
+
+# ``.. dialect::`` directive.  Used two ways, keyed by the first content
+# line: "name" documents a database dialect; "name+driver" documents one
+# DBAPI for a previously-declared dialect and appends a bullet to that
+# dialect's DBAPI list.
+class DialectDirective(Directive):
+ has_content = True
+
+ # class-level registry: dialect name -> directive instance that
+ # declared it; shared across all instances within a build, and
+ # relied upon by _dbapi_node/_append_dbapi_bullet
+ _dialects = {}
+
+ # Parse content: line 0 is the dialect/driver name ('default');
+ # following ':name: value' field lines become dict entries; the rest
+ # (after the first non-field line, which is consumed) is 'text'.
+ def _parse_content(self):
+ d = {}
+ d['default'] = self.content[0]
+ d['text'] = []
+ idx = 0
+ for line in self.content[1:]:
+ idx += 1
+ m = re.match(r'\:(.+?)\: +(.+)', line)
+ if m:
+ attrname, value = m.group(1, 2)
+ d[attrname] = value
+ else:
+ break
+ d["text"] = self.content[idx + 1:]
+ return d
+
+ # Build nodes for the "dialect+driver" form: intro paragraph, optional
+ # DBAPI url/driverurl section, optional connect-string section.
+ # Requires the plain dialect form to have run earlier in the build.
+ def _dbapi_node(self):
+
+ dialect_name, dbapi_name = self.dialect_name.split("+")
+
+ try:
+ dialect_directive = self._dialects[dialect_name]
+ except KeyError:
+ raise Exception("No .. dialect:: %s directive has been established"
+ % dialect_name)
+
+ output = []
+
+ content = self._parse_content()
+
+ # anchor of the enclosing document section, used as the bullet's
+ # link target back in the dialect's DBAPI list
+ parent_section_ref = self.state.parent.children[0]['ids'][0]
+ self._append_dbapi_bullet(dialect_name, dbapi_name,
+ content['name'], parent_section_ref)
+
+ p = nodes.paragraph('', '',
+ nodes.Text(
+ "Support for the %s database via the %s driver." % (
+ dialect_directive.database_name,
+ content['name']
+ ),
+ "Support for the %s database via the %s driver." % (
+ dialect_directive.database_name,
+ content['name']
+ )
+ ),
+ )
+
+ self.state.nested_parse(content['text'], 0, p)
+ output.append(p)
+
+ if "url" in content or "driverurl" in content:
+ sec = nodes.section(
+ '',
+ nodes.title("DBAPI", "DBAPI"),
+ ids=["dialect-%s-%s-url" % (dialect_name, dbapi_name)]
+ )
+ if "url" in content:
+ text = "Documentation and download information (if applicable) "\
+ "for %s is available at:\n" % content["name"]
+ uri = content['url']
+ sec.append(
+ nodes.paragraph('', '',
+ nodes.Text(text, text),
+ nodes.reference('', '',
+ nodes.Text(uri, uri),
+ refuri=uri,
+ )
+ )
+ )
+ if "driverurl" in content:
+ text = "Drivers for this database are available at:\n"
+ sec.append(
+ nodes.paragraph('', '',
+ nodes.Text(text, text),
+ nodes.reference('', '',
+ nodes.Text(content['driverurl'], content['driverurl']),
+ refuri=content['driverurl']
+ )
+ )
+ )
+ output.append(sec)
+
+
+ if "connectstring" in content:
+ sec = nodes.section(
+ '',
+ nodes.title("Connecting", "Connecting"),
+ nodes.paragraph('', '',
+ nodes.Text("Connect String:", "Connect String:"),
+ nodes.literal_block(content['connectstring'],
+ content['connectstring'])
+ ),
+ ids=["dialect-%s-%s-connect" % (dialect_name, dbapi_name)]
+ )
+ output.append(sec)
+
+ return output
+
+ # Build nodes for the plain "dialect" form: registers self in
+ # _dialects and emits the intro section with an (initially empty)
+ # bullet list that later dialect+driver directives append to.
+ def _dialect_node(self):
+ self._dialects[self.dialect_name] = self
+
+ content = self._parse_content()
+ self.database_name = content['name']
+
+ self.bullets = nodes.bullet_list()
+ text = "The following dialect/DBAPI options are available. "\
+ "Please refer to individual DBAPI sections for connect information."
+ sec = nodes.section('',
+ nodes.paragraph('', '',
+ nodes.Text(
+ "Support for the %s database." % content['name'],
+ "Support for the %s database." % content['name']
+ ),
+ ),
+ nodes.title("DBAPI Support", "DBAPI Support"),
+ nodes.paragraph('', '',
+ nodes.Text(text, text),
+ self.bullets
+ ),
+ ids=["dialect-%s" % self.dialect_name]
+ )
+
+ return [sec]
+
+ # Append one "driver name" bullet, cross-document-linked to idname in
+ # this directive's document, onto the owning dialect's bullet list.
+ def _append_dbapi_bullet(self, dialect_name, dbapi_name, name, idname):
+ env = self.state.document.settings.env
+ dialect_directive = self._dialects[dialect_name]
+
+ list_node = nodes.list_item('',
+ nodes.paragraph('', '',
+ nodes.reference('', '',
+ nodes.Text(name, name),
+ refdocname=self.docname,
+ refuri=env.app.builder.get_relative_uri(
+ dialect_directive.docname, self.docname) +
+ "#" + idname
+ ),
+ #nodes.Text(" ", " "),
+ #nodes.reference('', '',
+ # nodes.Text("(connectstring)", "(connectstring)"),
+ # refdocname=self.docname,
+ # refuri=env.app.builder.get_relative_uri(
+ # dialect_directive.docname, self.docname) +
+ ## "#" + ("dialect-%s-%s-connect" %
+ # (dialect_name, dbapi_name))
+ # )
+ )
+ )
+ dialect_directive.bullets.append(list_node)
+
+ # Dispatch on the presence of "+" in the name.
+ def run(self):
+ env = self.state.document.settings.env
+ self.docname = env.docname
+
+ self.dialect_name = dialect_name = self.content[0]
+
+ has_dbapi = "+" in dialect_name
+ if has_dbapi:
+ return self._dbapi_node()
+ else:
+ return self._dialect_node()
+
+# Sphinx extension entry point.
+def setup(app):
+ app.add_directive('dialect', DialectDirective)
+
--- /dev/null
+from __future__ import absolute_import
+
+from sphinx.application import TemplateBridge
+from sphinx.jinja2glue import BuiltinTemplateLoader
+from mako.lookup import TemplateLookup
+import os
+
+rtd = os.environ.get('READTHEDOCS', None) == 'True'
+
+# TemplateBridge that renders HTML pages through Mako instead of Jinja2,
+# delegating non-page assets (.js/.css) to the stock Jinja2 loader.
+class MakoBridge(TemplateBridge):
+ def init(self, builder, *args, **kw):
+ # keep a fully-initialized Jinja2 loader for render_string()
+ self.jinja2_fallback = BuiltinTemplateLoader()
+ self.jinja2_fallback.init(builder, *args, **kw)
+
+ # expose our custom config values to templates
+ builder.config.html_context['release_date'] = builder.config['release_date']
+ builder.config.html_context['site_base'] = builder.config['site_base']
+
+ self.lookup = TemplateLookup(directories=builder.config.templates_path,
+ #format_exceptions=True,
+ imports=[
+ "from builder import util"
+ ]
+ )
+
+ if rtd:
+ # on readthedocs, fetch the shared base template from the
+ # project site at build time.
+ # NOTE(review): urllib2 is Python-2-only; urllib.request is
+ # the py3 equivalent — confirm target Python.
+ import urllib2
+ template_url = builder.config['site_base'] + "/docs_base.mako"
+ template = urllib2.urlopen(template_url).read()
+ self.lookup.put_string("/rtd_base.mako", template)
+
+ # Render a page template; Sphinx passes .html names, our templates
+ # are .mako.  Mutates context to carry layout selection and RTD vars.
+ def render(self, template, context):
+ template = template.replace(".html", ".mako")
+ context['prevtopic'] = context.pop('prev', None)
+ context['nexttopic'] = context.pop('next', None)
+
+ # RTD layout
+ if rtd:
+ # add variables if not present, such
+ # as if local test of READTHEDOCS variable
+ if 'MEDIA_URL' not in context:
+ context['MEDIA_URL'] = "http://media.readthedocs.org/"
+ if 'slug' not in context:
+ context['slug'] = context['project'].lower()
+ if 'url' not in context:
+ context['url'] = "/some/test/url"
+ if 'current_version' not in context:
+ context['current_version'] = "latest"
+
+ if 'name' not in context:
+ context['name'] = context['project'].lower()
+
+ context['rtd'] = True
+ context['toolbar'] = True
+ context['layout'] = "rtd_layout.mako"
+ context['base'] = "rtd_base.mako"
+
+ # pdf gen is just broken on RTD
+ #context['pdf_url'] = "%spdf/%s/%s/%s.pdf" % (
+ # context['MEDIA_URL'],
+ # context['slug'],
+ # context['current_version'],
+ # context['slug']
+ #)
+ # local docs layout
+ else:
+ context['rtd'] = False
+ context['toolbar'] = False
+ context['layout'] = "layout.mako"
+ context['base'] = "static_base.mako"
+
+ # no-op translation fallback expected by the templates
+ context.setdefault('_', lambda x: x)
+ return self.lookup.get_template(template).render_unicode(**context)
+
+ def render_string(self, template, context):
+ # this is used for .js, .css etc. and we don't have
+ # local copies of that stuff here so use the jinja render.
+ return self.jinja2_fallback.render_string(template, context)
+
+# Sphinx extension entry point: install the Mako bridge and the config
+# values it exposes to templates.
+def setup(app):
+ # NOTE(review): item assignment on app.config — confirm the Sphinx
+ # version in use supports this (attribute assignment is the usual
+ # spelling).
+ app.config['template_bridge'] = "builder.mako.MakoBridge"
+ app.add_config_value('release_date', "", 'env')
+ app.add_config_value('site_base', "", 'env')
+ app.add_config_value('build_number', "", 'env')
+
--- /dev/null
+from pygments.lexer import RegexLexer, bygroups, using
+from pygments.token import Token
+from pygments.filter import Filter
+from pygments.filter import apply_filters
+from pygments.lexers import PythonLexer, PythonConsoleLexer
+from sphinx.highlighting import PygmentsBridge
+from pygments.formatters import HtmlFormatter, LatexFormatter
+
+import re
+
+
+# Re-yield a (token, value) stream with trailing whitespace stripped from
+# the final value only; materializes the stream to find the last item.
+def _strip_trailing_whitespace(iter_):
+ buf = list(iter_)
+ if buf:
+ buf[-1] = (buf[-1][0], buf[-1][1].rstrip())
+ for t, v in buf:
+ yield t, v
+
+
+# Pygments filter dropping '# doctest:' directive comments from the
+# token stream so they don't appear in rendered output.
+class StripDocTestFilter(Filter):
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if ttype is Token.Comment and re.match(r'#\s*doctest:', value):
+ continue
+ yield ttype, value
+
+# Python-console lexer extended with {sql}/{opensql} markers: marked SQL
+# runs (terminated by {stop}) are emitted under custom Token.Sql.*
+# subtypes for the popup formatters below; everything else is delegated
+# to PythonConsoleLexer.
+class PyConWithSQLLexer(RegexLexer):
+ name = 'PyCon+SQL'
+ aliases = ['pycon+sql']
+
+ flags = re.IGNORECASE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ (r'{sql}', Token.Sql.Link, 'sqlpopup'),
+ (r'{opensql}', Token.Sql.Open, 'opensqlpopup'),
+ (r'.*?\n', using(PythonConsoleLexer))
+ ],
+ 'sqlpopup': [
+ (
+ r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK|'
+ 'COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA'
+ '|DESCRIBE).*?(?:{stop}\n?|$))',
+ bygroups(using(PythonConsoleLexer), Token.Sql.Popup),
+ "#pop"
+ )
+ ],
+ 'opensqlpopup': [
+ (
+ r'.*?(?:{stop}\n*|$)',
+ Token.Sql,
+ "#pop"
+ )
+ ]
+ }
+
+
+# Same {sql}/{opensql} marker handling as PyConWithSQLLexer, but
+# delegating ordinary lines to the plain PythonLexer (source files
+# rather than console transcripts).
+class PythonWithSQLLexer(RegexLexer):
+ name = 'Python+SQL'
+ # fix: alias was 'pycon+sql', a copy-paste of PyConWithSQLLexer's
+ # alias; this lexer's alias must be distinct or alias-based lookup
+ # would resolve both names to the console lexer.  setup() registers
+ # by explicit name, so this was latent.
+ aliases = ['python+sql']
+
+ flags = re.IGNORECASE | re.DOTALL
+
+ tokens = {
+ 'root': [
+ (r'{sql}', Token.Sql.Link, 'sqlpopup'),
+ (r'{opensql}', Token.Sql.Open, 'opensqlpopup'),
+ (r'.*?\n', using(PythonLexer))
+ ],
+ 'sqlpopup': [
+ (
+ r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK'
+ '|COMMIT|ALTER|UPDATE|CREATE|DROP'
+ '|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))',
+ bygroups(using(PythonLexer), Token.Sql.Popup),
+ "#pop"
+ )
+ ],
+ 'opensqlpopup': [
+ (
+ r'.*?(?:{stop}\n*|$)',
+ Token.Sql,
+ "#pop"
+ )
+ ]
+ }
+
+# HtmlFormatter that renders Token.Sql.* runs from the lexers above as
+# show/popup <div>s and a toggle link, formatting buffered non-SQL
+# tokens through the stock HtmlFormatter in between.
+class PopupSQLFormatter(HtmlFormatter):
+ def _format_lines(self, tokensource):
+ buf = []
+ for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]):
+ if ttype in Token.Sql:
+ # flush the pending non-SQL run first
+ for t, v in HtmlFormatter._format_lines(self, iter(buf)):
+ yield t, v
+ buf = []
+
+ if ttype is Token.Sql:
+ yield 1, "<div class='show_sql'>%s</div>" % \
+ re.sub(r'(?:[{stop}|\n]*)$', '', value)
+ elif ttype is Token.Sql.Link:
+ yield 1, "<a href='#' class='sql_link'>sql</a>"
+ elif ttype is Token.Sql.Popup:
+ yield 1, "<div class='popup_sql'>%s</div>" % \
+ re.sub(r'(?:[{stop}|\n]*)$', '', value)
+ else:
+ buf.append((ttype, value))
+
+ # flush the trailing run, trimming trailing whitespace
+ for t, v in _strip_trailing_whitespace(
+ HtmlFormatter._format_lines(self, iter(buf))):
+ yield t, v
+
+# LatexFormatter variant: popups make no sense in PDF output, so SQL
+# marker tokens are dropped and SQL text is emitted as a plain literal
+# with the {stop} sentinels removed.
+class PopupLatexFormatter(LatexFormatter):
+ def _filter_tokens(self, tokensource):
+ for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]):
+ if ttype in Token.Sql:
+ if ttype is not Token.Sql.Link and ttype is not Token.Sql.Open:
+ yield Token.Literal, re.sub(r'{stop}', '', value)
+ else:
+ continue
+ else:
+ yield ttype, value
+
+ def format(self, tokensource, outfile):
+ LatexFormatter.format(self, self._filter_tokens(tokensource), outfile)
+
+# Sphinx extension entry point: register the SQL-aware lexers and swap
+# in the popup formatters globally via PygmentsBridge.
+def setup(app):
+ app.add_lexer('pycon+sql', PyConWithSQLLexer())
+ app.add_lexer('python+sql', PythonWithSQLLexer())
+
+ PygmentsBridge.html_formatter = PopupSQLFormatter
+ PygmentsBridge.latex_formatter = PopupLatexFormatter
+
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
# 'sphinx.ext.doctest', 'builder.builders']
-extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.doctest', 'builder.builders']
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'builder.autodoc_mods',
+ 'builder.changelog',
+ 'builder.dialect_info',
+ 'builder.mako',
+ 'builder.sqlformatter',
+ ]
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# The suffix of source filenames.
source_suffix = '.rst'
-template_bridge = "builder.builders.MakoBridge"
-
# The encoding of source files.
#source_encoding = 'utf-8-sig'