desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Transform a Python signature into RST nodes. Returns (fully qualified name of the thing, classname if any). If inside a class, the current class name is handled intelligently: * it is stripped from the displayed name if present * it is added to the full name (return value) if not present'
def parse_signature(self, sig, signode):
m = py_sig_re.match(sig) if (m is None): raise ValueError (classname, name, arglist, retann) = m.groups() if self.env.currclass: add_module = False if (classname and classname.startswith(self.env.currclass)): fullname = (classname + name) classname = class...
def get_index_text(self, modname, name):
    """Return the text for the index entry of the object.

    Abstract: concrete documenter subclasses must override this.
    """
    raise NotImplementedError('must be implemented in subclasses')
'Transform a C (or C++) signature into RST nodes.'
def parse_signature(self, sig, signode):
m = c_funcptr_sig_re.match(sig) if (m is None): m = c_sig_re.match(sig) if (m is None): raise ValueError('no match') (rettype, name, arglist, const) = m.groups() signode += addnodes.desc_type('', '') self._parse_type(signode[(-1)], rettype) try: (classname, funcnam...
'Transform an option description into RST nodes.'
def parse_signature(self, sig, signode):
count = 0 firstname = '' for m in option_desc_re.finditer(sig): (optname, args) = m.groups() if count: signode += addnodes.desc_addname(', ', ', ') signode += addnodes.desc_name(optname, optname) signode += addnodes.desc_addname(args, args) if (not c...
@staticmethod
def get_attr(obj, name, *defargs):
    """getattr() override for types such as Zope interfaces."""
    # First consult the registered special attribute getters; the first
    # entry whose type matches *obj* handles the lookup.
    for typ, func in AutoDirective._special_attrgetters.iteritems():
        if isinstance(obj, typ):
            return func(obj, name, *defargs)
    # No special getter matched -- fall back to the generic safe getattr.
    return safe_getattr(obj, name, *defargs)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
    """Called to see if a member can be documented by this documenter.

    Abstract: concrete documenter subclasses must override this.
    """
    raise NotImplementedError('must be implemented in subclasses')
def add_line(self, line, source, *lineno):
    """Append one line of generated reST to the output.

    *source* and any *lineno* are passed through to the result list for
    source tracking.
    """
    # Prefix with the current indentation before recording the line.
    indented_line = self.indent + line
    self.directive.result.append(indented_line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
    """Resolve the module and name of the object to document given by the
    arguments and the current module/class.

    Must return a pair of the module name and a chain of attributes; for
    example, it would return ``('zipfile', ['ZipFile', 'open'])`` for the
    ``zipfile.ZipFile.open`` method.
    """
    raise NotImplementedError('must be implemented in subclasses')
'Determine what module to import and what attribute to document. Returns True and sets *self.modname*, *self.objpath*, *self.fullname*, *self.args* and *self.retann* if parsing and resolving was successful.'
def parse_name(self):
try: (explicit_modname, path, base, args, retann) = py_ext_sig_re.match(self.name).groups() except AttributeError: self.directive.warn(('invalid signature for auto%s (%r)' % (self.objtype, self.name))) return False if (explicit_modname is not None): modname = expl...
'Import the object given by *self.modname* and *self.objpath* and sets it as *self.object*. Returns True if successful, False if an error occurred.'
def import_object(self):
try: __import__(self.modname) obj = self.module = sys.modules[self.modname] for part in self.objpath: obj = self.get_attr(obj, part) self.object = obj return True except (SyntaxError, ImportError, AttributeError) as err: self.directive.warn(('autodoc ...
def get_real_modname(self):
    """Get the real module name of an object to document.

    (It can differ from the name of the module through which the object
    was imported.)
    """
    real = self.get_attr(self.object, '__module__', None)
    # Fall back to the module we imported through when the object does
    # not carry a usable __module__.
    return real or self.modname
def check_module(self):
    """Check if *self.object* is really defined in the module given by
    *self.modname*."""
    modname = self.get_attr(self.object, '__module__', None)
    # Only a present-and-different __module__ disqualifies the object.
    if modname and modname != self.modname:
        return False
    return True
def format_args(self):
    """Format the argument signature of *self.object*.

    Should return None if the object does not have a signature; this base
    implementation reports no signature.
    """
    return None
'Format the signature (arguments and return annotation) of the object. Let the user process it via the ``autodoc-process-signature`` event.'
def format_signature(self):
if (self.args is not None): args = ('(%s)' % self.args) else: args = self.format_args() if (args is None): return '' retann = self.retann result = self.env.app.emit_firstresult('autodoc-process-signature', self.objtype, self.fullname, self.object, self.options, args, retann) ...
'Add the directive header and options to the generated content.'
def add_directive_header(self, sig):
directive = getattr(self, 'directivetype', self.objtype) name_in_directive = ('.'.join(self.objpath) or self.modname) self.add_line((u'.. %s:: %s%s' % (directive, name_in_directive, sig)), '<autodoc>') if self.options.noindex: self.add_line(u' :noindex:', '<autodoc>') if sel...
def get_doc(self, encoding=None):
    """Decode and return lines of the docstring(s) for the object."""
    docstring = self.get_attr(self.object, '__doc__', None)
    if not docstring:
        return []
    # Decode to unicode and split into prepared lines.
    return [prepare_docstring(force_decode(docstring, encoding))]
def process_doc(self, docstrings):
    """Let the user process the docstrings before adding them.

    Emits the ``autodoc-process-docstring`` event for each docstring (the
    handlers may modify the line list in place), then yields the lines.
    """
    for docstringlines in docstrings:
        if self.env.app:
            self.env.app.emit('autodoc-process-docstring',
                              self.objtype, self.fullname, self.object,
                              self.options, docstringlines)
        for line in docstringlines:
            yield line
'Add content from docstrings, attribute documentation and user.'
def add_content(self, more_content, no_docstring=False):
if self.analyzer: filename = unicode(self.analyzer.srcname, sys.getfilesystemencoding(), 'replace') sourcename = (u'%s:docstring of %s' % (filename, self.fullname)) attr_docs = self.analyzer.find_attr_docs() if self.objpath: key = ('.'.join(self.objpath[:(-1)]), sel...
'Return `(members_check_module, members)` where `members` is a list of `(membername, member)` pairs of the members of *self.object*. If *want_all* is True, return all members. Else, only return those members given by *self.options.members* (which may also be none).'
def get_object_members(self, want_all):
if (not want_all): if (not self.options.members): return (False, []) ret = [] for mname in self.options.members: try: ret.append((mname, self.get_attr(self.object, mname))) except AttributeError: self.directive.warn(('missin...
'Filter the given member list: members are skipped if - they are private (except if given explicitly) - they are undocumented (except if undoc-members is given) The user can override the skipping decision by connecting to the ``autodoc-skip-member`` event.'
def filter_members(self, members, want_all):
ret = [] namespace = '.'.join(self.objpath) if self.analyzer: attr_docs = self.analyzer.find_attr_docs() else: attr_docs = {} for (membername, member) in members: isattr = False if (want_all and membername.startswith('_')): skip = True elif ((names...
'Generate reST for member documentation. If *all_members* is True, do all members, else those given by *self.options.members*.'
def document_members(self, all_members=False):
self.env.autodoc_current_module = self.modname if self.objpath: self.env.autodoc_current_class = self.objpath[0] want_all = (all_members or self.options.inherited_members or (self.options.members is ALL)) (members_check_module, members) = self.get_object_members(want_all) if self.options.exc...
'Generate reST for the object given by *self.name*, and possibly members. If *more_content* is given, include that content. If *real_modname* is given, use that module name to find attribute docs. If *check_module* is True, only generate if the object is defined in the module name it is imported from. If *all_members* ...
def generate(self, more_content=None, real_modname=None, check_module=False, all_members=False):
if (not self.parse_name()): self.directive.warn(('don\'t know which module to import for autodocumenting %r (try placing a "module" or "currentmodule" directive in the document, or giving an explicit module name)' % self.name)) ...
'*class_names* is a list of child classes to show bases from. If *show_builtins* is True, then Python builtins will be shown in the graph.'
def __init__(self, class_names, currmodule, show_builtins=False):
self.class_names = class_names self.classes = self._import_classes(class_names, currmodule) self.all_classes = self._all_classes(self.classes) if (len(self.all_classes) == 0): raise InheritanceException('No classes found for inheritance diagram') self.show_builtins = show_buil...
'Import a class using its fully-qualified *name*.'
def _import_class_or_module(self, name, currmodule):
try: (path, base) = class_sig_re.match(name).groups() except ValueError: raise InheritanceException(('Invalid class or module %r specified for inheritance diagram' % name)) fullname = ((path or '') + base) path = ((path and path.rstrip('.')) or '') try: ...
def _import_classes(self, class_names, currmodule):
    """Import a list of classes."""
    classes = []
    # Each name may resolve to several classes (e.g. a module expands to
    # all classes it contains), so extend rather than append.
    for name in class_names:
        classes.extend(self._import_class_or_module(name, currmodule))
    return classes
def _all_classes(self, classes):
    """Return a list of all classes that are ancestors of *classes*."""
    # Use a dict as an ordered-ish set of seen classes.
    all_classes = {}

    def recurse(cls):
        all_classes[cls] = None
        for base in cls.__bases__:
            if base not in all_classes:
                recurse(base)

    for cls in classes:
        recurse(cls)
    return all_classes.keys()
def class_name(self, cls, parts=0):
    """Given a class object, return a fully-qualified name.

    This works for things I've tested in matplotlib so far, but may not be
    completely general.  With ``parts > 0`` only the last *parts* dotted
    components are kept.
    """
    module = cls.__module__
    if module == '__builtin__':
        # Builtins are shown without a module prefix.
        fullname = cls.__name__
    else:
        fullname = '%s.%s' % (module, cls.__name__)
    if parts == 0:
        return fullname
    name_parts = fullname.split('.')
    return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
    """Get all of the class names involved in the graph."""
    return [self.class_name(cls) for cls in self.all_classes]
'Generate a graphviz dot graph from the classes that were passed in to __init__. *name* is the name of the graph. *urls* is a dictionary mapping class names to HTTP URLs. *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing key/value pairs to pass on as graphviz properties.'
def generate_dot(self, name, parts=0, urls={}, env=None, graph_attrs={}, node_attrs={}, edge_attrs={}):
g_attrs = self.default_graph_attrs.copy() n_attrs = self.default_node_attrs.copy() e_attrs = self.default_edge_attrs.copy() g_attrs.update(graph_attrs) n_attrs.update(node_attrs) e_attrs.update(edge_attrs) if env: g_attrs.update(env.config.inheritance_graph_attrs) n_attrs.upd...
'Import and setup a Sphinx extension module. No-op if called twice.'
def setup_extension(self, extension):
if (extension in self._extensions): return try: mod = __import__(extension, None, None, ['setup']) except ImportError as err: raise ExtensionError(('Could not import extension %s' % extension), err) if (not hasattr(mod, 'setup')): self.warn(('extension %r ...
'Import an object from a \'module.name\' string.'
def import_object(self, objname, source=None):
try: (module, name) = objname.rsplit('.', 1) except ValueError as err: raise ExtensionError((('Invalid full object name %s' % objname) + ((source and (' (needed for %s)' % source)) or '')), err) try: return getattr(__import__(module, None, None, [name]), name) ...
def init(self, builder, theme=None, dirs=None):
    """Called by the builder to initialize the template system.

    *builder* is the builder object; you'll probably want to look at the
    value of ``builder.config.templates_path``.

    *theme* is a :class:`sphinx.theming.Theme` object or None; in the
    latter case, *dirs* can be a list of fixed directories to look for
    templates.
    """
    raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
    """Called by the builder to determine if output files are outdated
    because of template changes.

    Return the mtime of the newest template file that was changed.  The
    default implementation returns ``0``.
    """
    return 0
def render(self, template, context):
    """Called by the builder to render a template given as a filename with
    a specified context (a Python dictionary)."""
    raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
    """Called by the builder to render a template given as a string with a
    specified context (a Python dictionary)."""
    raise NotImplementedError('must be implemented in subclasses')
def is_inline(self, node):
    """Check whether a node represents an inline element."""
    # A node is inline when its parent is a docutils text element.
    return isinstance(node.parent, nodes.TextElement)
def visit_line_block(self, node):
    """line-block:

    * whitespace (including linebreaks) is significant
    * inline markup is supported.
    * serif typeface
    """
    self.body.append('{\\raggedright{}')
    # Make whitespace significant for the duration of the block.
    self.literal_whitespace = 1
def visit_option_argument(self, node):
    """The delimiter between an option and its argument."""
    # Default to a single space when the node carries no delimiter.
    self.body.append(node.get('delimiter', ' '))
'Remove all traces of a source file in the inventory.'
def clear_doc(self, docname):
if (docname in self.all_docs): self.all_docs.pop(docname, None) self.metadata.pop(docname, None) self.dependencies.pop(docname, None) self.titles.pop(docname, None) self.longtitles.pop(docname, None) self.tocs.pop(docname, None) self.toc_secnumbers.pop(docname...
'Return the filename for the document name. If base is True, return absolute path under self.srcdir. If base is None, return relative path to self.srcdir. If base is a path string, return absolute path under that. If suffix is not None, add it instead of config.source_suffix.'
def doc2path(self, docname, base=True, suffix=None):
suffix = (suffix or self.config.source_suffix) if (base is True): return (path.join(self.srcdir, docname.replace(SEP, path.sep)) + suffix) elif (base is None): return (docname.replace(SEP, path.sep) + suffix) else: return (path.join(base, docname.replace(SEP, path.sep)) + suffix)...
'Find all source files in the source dir and put them in self.found_docs.'
def find_files(self, config):
exclude_dirs = [d.replace(SEP, path.sep) for d in config.exclude_dirs] exclude_trees = [d.replace(SEP, path.sep) for d in config.exclude_trees] self.found_docs = set(get_matching_docs(self.srcdir, config.source_suffix, exclude_docs=set(config.unused_docs), exclude_dirs=exclude_dirs, exclude_trees=exclude_tr...
'Return (added, changed, removed) sets.'
def get_outdated_files(self, config_changed):
removed = (set(self.all_docs) - self.found_docs) added = set() changed = set() if config_changed: added = self.found_docs else: for docname in self.found_docs: if (docname not in self.all_docs): added.add(docname) continue if (n...
'(Re-)read all files new or changed since last update. Returns a summary, the total count of documents to reread and an iterator that yields docnames as it processes them. Store all environment docnames in the canonical format (ie using SEP as a separator in place of os.path.sep).'
def update(self, config, srcdir, doctreedir, app=None):
config_changed = False if (self.config is None): msg = '[new config] ' config_changed = True else: for (key, descr) in config.values.iteritems(): if (descr[1] != 'env'): continue if (self.config[key] != config[key]): msg =...
'Custom decoding error handler that warns and replaces.'
def warn_and_replace(self, error):
linestart = error.object.rfind('\n', 0, error.start) lineend = error.object.find('\n', error.start) if (lineend == (-1)): lineend = len(error.object) lineno = (error.object.count('\n', 0, error.start) + 1) self.warn(self.docname, ('undecodable source characters, replacing with ...
'Parse a file and add/update inventory entries for the doctree. If srcpath is given, read from a different source file.'
def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
if app: app.emit('env-purge-doc', self, docname) self.clear_doc(docname) if (src_path is None): src_path = self.doc2path(docname) if self.config.default_role: (role_fn, messages) = roles.role(self.config.default_role, english, 0, dummy_reporter) if role_fn: ro...
def filter_messages(self, doctree):
    """Filter system messages from a doctree."""
    # keep_warnings lowers the cutoff so that warnings survive filtering.
    filterlevel = (self.config.keep_warnings and 2) or 5
    for node in doctree.traverse(nodes.system_message):
        if node['level'] < filterlevel:
            node.parent.remove(node)
'Process docutils-generated dependency info.'
def process_dependencies(self, docname, doctree):
cwd = os.getcwd() frompath = path.join(path.normpath(self.srcdir), 'dummy') deps = doctree.settings.record_dependencies if (not deps): return for dep in deps.list: relpath = relative_path(frompath, path.normpath(path.join(cwd, dep))) self.dependencies.setdefault(docname, set(...
'Process downloadable file paths.'
def process_downloads(self, docname, doctree):
docdir = path.dirname(self.doc2path(docname, base=None)) for node in doctree.traverse(addnodes.download_reference): targetname = node['reftarget'] if (targetname.startswith('/') or targetname.startswith(os.sep)): filepath = targetname[1:] else: filepath = path.nor...
'Process and rewrite image URIs.'
def process_images(self, docname, doctree):
docdir = path.dirname(self.doc2path(docname, base=None)) for node in doctree.traverse(nodes.image): node['candidates'] = candidates = {} imguri = node['uri'] if (imguri.find('://') != (-1)): self.warn(docname, ('nonlocal image URI found: %s' % imguri), node.line) ...
'Process the docinfo part of the doctree as metadata.'
def process_metadata(self, docname, doctree):
self.metadata[docname] = md = {} try: docinfo = doctree[0] except IndexError: return if (docinfo.__class__ is not nodes.docinfo): return for node in docinfo: if (node.__class__ is nodes.author): md['author'] = node.astext() elif (node.__class__ is ...
'Add a title node to the document (just copy the first section title), and store that title in the environment.'
def create_title_from(self, docname, document):
titlenode = nodes.title() longtitlenode = titlenode if document.has_key('title'): longtitlenode = nodes.title() longtitlenode += nodes.Text(document['title']) for node in document.traverse(nodes.section): visitor = SphinxContentsFilter(document) node[0].walkabout(visitor)...
'Note a TOC tree directive in a document and gather information about file relations from it.'
def note_toctree(self, docname, toctreenode):
if toctreenode['glob']: self.glob_toctrees.add(docname) if toctreenode.get('numbered'): self.numbered_toctrees.add(docname) includefiles = toctreenode['includefiles'] for includefile in includefiles: self.files_to_rebuild.setdefault(includefile, set()).add(docname) self.toctr...
'Build a TOC from the doctree and store it in the inventory.'
def build_toc_from(self, docname, document):
numentries = [0] try: maxdepth = int(self.metadata[docname].get('tocdepth', 0)) except ValueError: maxdepth = 0 def traverse_in_section(node, cls): 'Like traverse(), but stay within the same section.' result = [] if isinstance(node, cls): ...
def get_toc_for(self, docname):
    """Return a TOC nodetree -- for use on the same page only!"""
    toc = self.tocs[docname].deepcopy()
    # Same-page links: rewrite each reference to its anchor only.
    for node in toc.traverse(nodes.reference):
        node['refuri'] = node['anchorname']
    return toc
def get_toctree_for(self, docname, builder, collapse):
    """Return the global TOC nodetree.

    Resolves the first toctree of the master document that yields a
    result; returns None implicitly when none does.
    """
    doctree = self.get_doctree(self.config.master_doc)
    for toctreenode in doctree.traverse(addnodes.toctree):
        result = self.resolve_toctree(docname, builder, toctreenode,
                                      prune=True, collapse=collapse)
        if result is not None:
            return result
'Read the doctree for a file from the pickle and return it.'
def get_doctree(self, docname):
doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree') f = open(doctree_filename, 'rb') try: doctree = pickle.load(f) finally: f.close() doctree.settings.env = self doctree.reporter = Reporter(self.doc2path(docname), 2, 4, stream=WarningStream(self._warnfunc)) ...
'Read the doctree from the pickle, resolve cross-references and toctrees and return it.'
def get_and_resolve_doctree(self, docname, builder, doctree=None, prune_toctrees=True):
if (doctree is None): doctree = self.get_doctree(docname) self.resolve_references(doctree, docname, builder) for toctreenode in doctree.traverse(addnodes.toctree): result = self.resolve_toctree(docname, builder, toctreenode, prune=prune_toctrees) if (result is None): toct...
'Resolve a *toctree* node into individual bullet lists with titles as items, returning None (if no containing titles are found) or a new node. If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0, to the value of the *maxdepth* option on the *toctree* node. If *titles_only* is True, only toplevel docum...
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0, titles_only=False, collapse=False):
if toctree.get('hidden', False): return None def _walk_depth(node, depth, maxdepth): 'Utility: Cut a TOC at a specified depth.' for subnode in node.children[:]: if isinstance(subnode, (addnodes.compact_paragraph, nodes.list_item)): subnode...
'Assign a section number to each heading under a numbered toctree.'
def assign_section_numbers(self):
rewrite_needed = [] old_secnumbers = self.toc_secnumbers self.toc_secnumbers = {} def _walk_toc(node, secnums, titlenode=None): for subnode in node.children: if isinstance(subnode, nodes.bullet_list): numstack.append(0) _walk_toc(subnode, secnums, titl...
'Create the real index from the collected index entries.'
def create_index(self, builder, _fixre=re.compile('(.*) ([(][^()]*[)])')):
new = {} def add_entry(word, subword, dic=new): entry = dic.get(word) if (not entry): dic[word] = entry = [[], {}] if subword: add_entry(subword, '', dic=entry[1]) else: try: entry[0].append(((builder.get_relative_uri('genindex'...
def check_consistency(self):
    """Do consistency checks."""
    for docname in sorted(self.all_docs):
        if docname in self.files_to_rebuild:
            continue
        # The master document is never referenced by a toctree.
        if docname == self.config.master_doc:
            continue
        self.warn(docname, "document isn't included in any toctree")
'Find a description node matching "name", perhaps using the given module and/or classname.'
def find_desc(self, modname, classname, name, type, searchorder=0):
if (name[(-2):] == '()'): name = name[:(-2)] if (not name): return (None, None) if ((type[0] == 'c') and (type not in ('class', 'const'))): name = name.rstrip(' *') if ((name in self.descrefs) and (self.descrefs[name][1][0] == 'c')): return (name, self.descrefs...
'Find keyword matches for a keyword. If there\'s an exact match, just return it, else return a list of fuzzy matches if avoid_fuzzy isn\'t True. Keywords searched are: first modules, then descrefs. Returns: None if nothing found (type, docname, anchorname) if exact match found list of (quality, type, docname, anchornam...
def find_keyword(self, keyword, avoid_fuzzy=False, cutoff=0.6, n=20):
if (keyword in self.modules): (docname, title, system, deprecated) = self.modules[keyword] return ('module', docname, ('module-' + keyword)) if (keyword in self.descrefs): (docname, ref_type) = self.descrefs[keyword] return (ref_type, docname, keyword) if ('.' not in keyword)...
def init(self):
    """Load necessary templates and perform initialization.

    The default implementation does nothing.
    """
    pass
def create_template_bridge(self):
    """Return the template bridge configured."""
    if self.config.template_bridge:
        # A user-configured bridge: import the class and instantiate it.
        bridge_cls = self.app.import_object(self.config.template_bridge,
                                            'template_bridge setting')
        self.templates = bridge_cls()
    else:
        from sphinx.jinja2glue import BuiltinTemplateLoader
        self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
    """Return the target URI for a document name.

    *typ* can be used to qualify the link characteristic for individual
    builders.
    """
    raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
    """Return a relative URI between two source filenames.

    May raise environment.NoUri if there's no way to return a sensible
    URI.
    """
    source_uri = self.get_target_uri(from_)
    target_uri = self.get_target_uri(to, typ)
    return relative_uri(source_uri, target_uri)
def get_outdated_docs(self):
    """Return an iterable of output files that are outdated, or a string
    describing what an update build will build.

    If the builder does not output individual files corresponding to
    source files, return a string here.  If it does, return an iterable
    of those files that need to be written.
    """
    raise NotImplementedError
'Pick the best candidate for all image URIs.'
def post_process_images(self, doctree):
for node in doctree.traverse(nodes.image): if ('?' in node['candidates']): continue if ('*' not in node['candidates']): for imgtype in self.supported_image_types: candidate = node['candidates'].get(imgtype, None) if candidate: ...
'Load translated strings from the configured localedirs if enabled in the configuration.'
def load_i18n(self):
self.translator = None if (self.config.language is not None): self.info(bold(('loading translations [%s]... ' % self.config.language)), nonl=True) locale_dirs = ([None, path.join(package_dir, 'locale')] + [path.join(self.srcdir, x) for x in self.config.locale_dirs]) for dir_ in ...
'Set up the build environment.'
def load_env(self):
if self.env: return if (not self.freshenv): try: self.info(bold('loading pickled environment... '), nonl=True) self.env = BuildEnvironment.frompickle(self.config, path.join(self.doctreedir, ENV_PICKLE_FILENAME)) self.info('done') except Except...
def build_all(self):
    """Build all source files."""
    self.build(None, summary='all source files', method='all')
'Only rebuild as much as needed for changes in the *filenames*.'
def build_specific(self, filenames):
dirlen = (len(self.srcdir) + 1) to_write = [] suffix = self.config.source_suffix for filename in filenames: filename = path.normpath(path.abspath(filename)) if (not filename.startswith(self.srcdir)): self.warn(('file %r given on command line is not und...
def build_update(self):
    """Only rebuild what was changed or added since last build."""
    to_build = self.get_outdated_docs()
    if isinstance(to_build, str):
        # A plain string means "build everything", with the string as
        # the summary message.
        self.build(['__all__'], to_build)
        return
    to_build = list(to_build)
    self.build(to_build,
               summary='targets for %d source files that are out of date'
               % len(to_build))
'Main build method. First updates the environment, and then calls :meth:`write`.'
def build(self, docnames, summary=None, method='update'):
if summary: self.info(bold(('building [%s]: ' % self.name)), nonl=1) self.info(summary) updated_docnames = set() warnings = [] self.env.set_warnfunc((lambda *args: warnings.append(args))) self.info(bold('updating environment: '), nonl=1) (msg, length, iterator) = self...
def finish(self):
    """Finish the building process.

    The default implementation does nothing.
    """
    pass
def render_partial(self, node):
    """Utility: Render a lone doctree node."""
    doc = new_document('<partial node>')
    doc.append(node)
    return publish_parts(doc,
                         source_class=DocTreeInput,
                         reader=DoctreeReader(),
                         writer=HTMLWriter(self),
                         settings_overrides={'output_encoding': 'unicode'})
'Collect items for the template context of a page.'
def get_doc_context(self, docname, body, metatags):
prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if (related and related[2]): try: next = {'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[relate...
'Pick the best candidate for an image and link down-scaled images to their high res version.'
def post_process_images(self, doctree):
Builder.post_process_images(self, doctree) for node in doctree.traverse(nodes.image): if ((not node.has_key('scale')) or isinstance(node.parent, nodes.reference)): continue uri = node['uri'] reference = nodes.reference() if (uri in self.images): reference[...
'Search all theme paths for available themes.'
@classmethod def init_themes(cls, builder):
cls.themepath = list(builder.config.html_theme_path) cls.themepath.append(path.join(package_dir, 'themes')) for themedir in cls.themepath[::(-1)]: themedir = path.join(builder.confdir, themedir) if (not path.isdir(themedir)): continue for theme in os.listdir(themedir): ...
'Return the value for a theme configuration setting, searching the base theme chain.'
def get_confstr(self, section, name, default=NODEFAULT):
try: return self.themeconf.get(section, name) except (ConfigParser.NoOptionError, ConfigParser.NoSectionError): if (self.base is not None): return self.base.get_confstr(section, name, default) if (default is NODEFAULT): raise ThemeError(('setting %s.%s occur...
'Return a dictionary of theme options and their values.'
def get_options(self, overrides):
chain = [self.themeconf] base = self.base while (base is not None): chain.append(base.themeconf) base = base.base options = {} for conf in reversed(chain): try: options.update(conf.items('options')) except ConfigParser.NoSectionError: pass ...
def get_dirchain(self):
    """Return a list of theme directories, beginning with this theme's,
    then the base theme's, then that one's base theme's, etc."""
    chain = []
    theme = self
    # Walk the base-theme chain until it ends.
    while theme is not None:
        chain.append(theme.themedir)
        theme = theme.base
    return chain
'Remove temporary directories.'
def cleanup(self):
if self.themedir_created: try: shutil.rmtree(self.themedir) except Exception: pass if self.base: self.base.cleanup()
def __init__(self, grammar, convert=None):
    """Constructor.

    The grammar argument is a grammar.Grammar instance; see the grammar
    module for more information.  The parser is not ready yet for
    parsing; you must call the setup() method to get it started.

    The optional convert argument is a function mapping concrete syntax
    tree nodes to abstract syntax tree nodes.  If not given, no
    conversion is done (the identity default below returns the concrete
    node unchanged).
    """
    self.grammar = grammar
    # Default: identity conversion keeping the concrete syntax tree.
    self.convert = convert or (lambda grammar, node: node)
'Prepare for parsing. This *must* be called before starting to parse. The optional argument is an alternative start symbol; it defaults to the grammar\'s start symbol. You can use a Parser instance to parse any number of programs; each time you call setup() the parser is reset to an initial state determined by the (imp...
def setup(self, start=None):
if (start is None): start = self.grammar.start newnode = (start, None, None, []) stackentry = (self.grammar.dfas[start], 0, newnode) self.stack = [stackentry] self.rootnode = None self.used_names = set()
'Add a token; return True iff this is the end of the program.'
def addtoken(self, type, value, context):
ilabel = self.classify(type, value, context) while True: (dfa, state, node) = self.stack[(-1)] (states, first) = dfa arcs = states[state] for (i, newstate) in arcs: (t, v) = self.grammar.labels[i] if (ilabel == i): assert (t < 256) ...
def classify(self, type, value, context):
    """Turn a token into a label.  (Internal)"""
    if type == token.NAME:
        # Record every name used in the program.
        self.used_names.add(value)
        # Keywords get their own dedicated labels.
        ilabel = self.grammar.keywords.get(value)
        if ilabel is not None:
            return ilabel
    ilabel = self.grammar.tokens.get(type)
    if ilabel is None:
        raise ParseError('bad token', type, value, context)
    return ilabel
def shift(self, type, value, newstate, context):
    """Shift a token.  (Internal)"""
    dfa, state, node = self.stack[-1]
    # Build a leaf node and let the conversion callback transform it;
    # a None result means "drop this node".
    newnode = self.convert(self.grammar, (type, value, context, None))
    if newnode is not None:
        node[-1].append(newnode)
    self.stack[-1] = (dfa, newstate, node)
def push(self, type, newdfa, newstate, context):
    """Push a nonterminal.  (Internal)"""
    dfa, state, node = self.stack[-1]
    # Advance the current entry, then start a fresh entry for the
    # nonterminal with an empty children list.
    self.stack[-1] = (dfa, newstate, node)
    self.stack.append((newdfa, 0, (type, None, context, [])))
'Pop a nonterminal. (Internal)'
def pop(self):
(popdfa, popstate, popnode) = self.stack.pop() newnode = self.convert(self.grammar, popnode) if (newnode is not None): if self.stack: (dfa, state, node) = self.stack[(-1)] node[(-1)].append(newnode) else: self.rootnode = newnode self.rootnode.u...
def dump(self, filename):
    """Dump the grammar tables to a pickle file.

    Uses pickle protocol 2, matching the original behavior.
    """
    # `with` guarantees the handle is closed even when pickling raises;
    # the original open()/close() pair leaked the file on error.
    with open(filename, 'wb') as f:
        pickle.dump(self.__dict__, f, 2)
def load(self, filename):
    """Load the grammar tables from a pickle file."""
    # `with` guarantees the handle is closed even when unpickling raises;
    # the original open()/close() pair leaked the file on error.
    with open(filename, 'rb') as f:
        d = pickle.load(f)
    self.__dict__.update(d)
'Dump the grammar tables to standard output, for debugging.'
def report(self):
from pprint import pprint print 's2n' pprint(self.symbol2number) print 'n2s' pprint(self.number2symbol) print 'states' pprint(self.states) print 'dfas' pprint(self.dfas) print 'labels' pprint(self.labels) print 'start', self.start
'Parse a series of tokens and return the syntax tree.'
def parse_tokens(self, tokens, debug=False):
p = parse.Parser(self.grammar, self.convert) p.setup() lineno = 1 column = 0 type = value = start = end = line_text = None prefix = '' opmap = grammar.opmap for (type, value, start, end, line_text) in tokens: if (start != (lineno, column)): assert ((lineno, column) <=...
def parse_stream_raw(self, stream, debug=False):
    """Parse a stream and return the syntax tree."""
    # Tokenize lazily off the stream's readline and hand the token
    # iterator to the parser.
    token_iter = tokenize.generate_tokens(stream.readline)
    return self.parse_tokens(token_iter, debug)
def parse_stream(self, stream, debug=False):
    """Parse a stream and return the syntax tree.

    Thin wrapper around :meth:`parse_stream_raw`.
    """
    return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, debug=False):
    """Parse a file and return the syntax tree."""
    # `with` closes the stream on all paths, equivalent to the original
    # try/finally close.
    with open(filename) as stream:
        return self.parse_stream(stream, debug)
def parse_string(self, text, debug=False):
    """Parse a string and return the syntax tree."""
    # Feed the tokenizer one line at a time via the generator's (Python 2)
    # .next method.
    lines = generate_lines(text)
    token_iter = tokenize.generate_tokens(lines.next)
    return self.parse_tokens(token_iter, debug)
'Visit an assignment which may have a special comment before it.'
def visit_expr_stmt(self, node):
if (_eq not in node.children): return pnode = node[0] prefix = pnode.get_prefix() while (not prefix): pnode = pnode.get_prev_leaf() if ((not pnode) or (pnode.type not in (token.INDENT, token.DEDENT))): break prefix = pnode.get_prefix() prefix = prefix.deco...