desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
'Special optimized matcher for bare_name.'
def _bare_name_matches(self, nodes):
count = 0 r = {} done = False max = len(nodes) while ((not done) and (count < max)): done = True for leaf in self.content: if leaf[0].match(nodes[count], r): count += 1 done = False break r[self.name] = nodes[:count] ...
'Helper to recursively yield the matches.'
def _recursive_matches(self, nodes, count):
assert (self.content is not None) if (count >= self.min): (yield (0, {})) if (count < self.max): for alt in self.content: for (c0, r0) in generate_matches(alt, nodes): for (c1, r1) in self._recursive_matches(nodes[c0:], (count + 1)): r = {} ...
def __init__(self, content=None):
    """Initializer.

    *content* is either a pattern or None.  With None this matches only
    an empty sequence (like '$' in a regex); otherwise it matches
    whenever *content* has no match.
    """
    if content is None:
        return
    assert isinstance(content, BasePattern), repr(content)
    self.content = content
def __init__(self, options, log):
    """Initializer; subclasses may override.

    options: dict of RefactoringTool options customizing the fixer.
    log: list that warnings and other messages are appended to.
    """
    self.options, self.log = options, log
    self.compile_pattern()
def compile_pattern(self):
    """Compile self.PATTERN into self.pattern (and self.pattern_tree).

    Subclasses may override if they do not use self.{pattern,PATTERN}
    in .match().
    """
    if self.PATTERN is None:
        return
    compiler = PatternCompiler()
    self.pattern, self.pattern_tree = compiler.compile_pattern(
        self.PATTERN, with_tree=True)
def set_filename(self, filename):
    """Record *filename* and derive a logger from it.

    The main refactoring tool should call this.
    """
    log = logging.getLogger(filename)
    self.filename = filename
    self.logger = log
def match(self, node):
    """Return match results for *node*, or a false value.

    On success the returned dict maps symbolic names to matched
    sub-nodes and always holds the root under 'node'.  Subclasses may
    override.
    """
    captured = {'node': node}
    outcome = self.pattern.match(node, captured)
    return outcome and captured
def transform(self, node, results):
    """Return the transformation for a matched parse-tree node.

    Abstract: subclasses must override.  May return None (no change) or
    a modified copy of *node*; *node* may also be modified in place.
    """
    raise NotImplementedError()
def new_name(self, template=u'xxx_todo_changeme'):
    """Return an identifier guaranteed not to collide with used names."""
    candidate = template
    while candidate in self.used_names:
        # Append successive counter values until the name is unique.
        candidate = template + unicode(self.numbers.next())
    self.used_names.add(candidate)
    return candidate
def cannot_convert(self, node, reason=None):
    """Warn that *node* is not valid Python 3 and cannot be auto-converted.

    *reason*, if given, is logged as an extra explanatory message.
    """
    lineno = node.get_lineno()
    for_output = node.clone()
    for_output.prefix = u''
    self.log_message('Line %d: could not convert: %s' % (lineno, for_output))
    if reason:
        self.log_message(reason)
def warning(self, node, reason):
    """Warn about possible uncertainty in translating *node*."""
    self.log_message('Line %d: %s' % (node.get_lineno(), reason))
def start_tree(self, tree, filename):
    """Per-tree setup hook, called once before fix-up begins."""
    self.set_filename(filename)
    self.used_names = tree.used_names
    self.numbers = itertools.count(1)
    self.first_log = True
def finish_tree(self, tree, filename):
    """Per-tree teardown hook; the default implementation does nothing."""
def __init__(self, isjunk=None, a='', b='', autojunk=True):
    """Construct a SequenceMatcher.

    isjunk: None, or a one-argument predicate returning true for junk
    elements.  autojunk: enable automatic junk detection.
    """
    self.isjunk = isjunk
    self.autojunk = autojunk
    self.a = self.b = None
    self.set_seqs(a, b)
def set_seqs(self, a, b):
    """Install both sequences to be compared."""
    self.set_seq1(a)
    self.set_seq2(b)
def set_seq1(self, a):
    """Install the first sequence; cached comparison data is invalidated."""
    if a is self.a:
        return  # same object: keep the cached analysis
    self.a = a
    self.matching_blocks = None
    self.opcodes = None
def set_seq2(self, b):
    """Install the second sequence and rebuild its index."""
    if b is self.b:
        return  # same object: keep the cached analysis
    self.b = b
    self.matching_blocks = None
    self.opcodes = None
    self.fullbcount = None
    self.__chain_b()
'Find longest matching block in a[alo:ahi] and b[blo:bhi]. If isjunk is not defined: Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where alo <= i <= i+k <= ahi blo <= j <= j+k <= bhi and for all (i\',j\',k\') meeting those conditions, k >= k\' i <= i\' and if i == i\', j <= j\' In other words, of all maximal ...
def find_longest_match(self, alo, ahi, blo, bhi):
(a, b, b2j, isbjunk) = (self.a, self.b, self.b2j, self.isbjunk) (besti, bestj, bestsize) = (alo, blo, 0) j2len = {} nothing = [] for i in xrange(alo, ahi): j2lenget = j2len.get newj2len = {} for j in b2j.get(a[i], nothing): if (j < blo): continue ...
'Return list of triples describing matching subsequences. Each triple is of the form (i, j, n), and means that a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in i and in j. New in Python 2.5, it\'s also guaranteed that if (i, j, n) and (i\', j\', n\') are adjacent triples in the list, and the second i...
def get_matching_blocks(self):
if (self.matching_blocks is not None): return self.matching_blocks (la, lb) = (len(self.a), len(self.b)) queue = [(0, la, 0, lb)] matching_blocks = [] while queue: (alo, ahi, blo, bhi) = queue.pop() (i, j, k) = x = self.find_longest_match(alo, ahi, blo, bhi) if k: ...
'Return list of 5-tuples describing how to turn a into b. Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the tuple preceding it, and likewise for j1 == the previous j2. The tags are strings, with these meanings: \'replace\': a[i1:i2] sho...
def get_opcodes(self):
if (self.opcodes is not None): return self.opcodes i = j = 0 self.opcodes = answer = [] for (ai, bj, size) in self.get_matching_blocks(): tag = '' if ((i < ai) and (j < bj)): tag = 'replace' elif (i < ai): tag = 'delete' elif (j < bj): ...
'Isolate change clusters by eliminating ranges with no changes. Return a generator of groups with up to n lines of context. Each group is in the same format as returned by get_opcodes(). >>> from pprint import pprint >>> a = map(str, range(1,40)) >>> b = a[:] >>> b[8:8] = [\'i\'] # Make an insertion >>> b[20] += \'...
def get_grouped_opcodes(self, n=3):
codes = self.get_opcodes() if (not codes): codes = [('equal', 0, 1, 0, 1)] if (codes[0][0] == 'equal'): (tag, i1, i2, j1, j2) = codes[0] codes[0] = (tag, max(i1, (i2 - n)), i2, max(j1, (j2 - n)), j2) if (codes[(-1)][0] == 'equal'): (tag, i1, i2, j1, j2) = codes[(-1)] ...
def ratio(self):
    """Return a similarity measure in [0, 1]: 2.0*M / T.

    T is the total number of elements in both sequences, M the number
    of matches.  Expensive if matching blocks are not already cached.
    """
    matches = sum(triple[-1] for triple in self.get_matching_blocks())
    return _calculate_ratio(matches, len(self.a) + len(self.b))
'Return an upper bound on ratio() relatively quickly. This isn\'t defined beyond that it is an upper bound on .ratio(), and is faster to compute.'
def quick_ratio(self):
if (self.fullbcount is None): self.fullbcount = fullbcount = {} for elt in self.b: fullbcount[elt] = (fullbcount.get(elt, 0) + 1) fullbcount = self.fullbcount avail = {} (availhas, matches) = (avail.__contains__, 0) for elt in self.a: if availhas(elt): ...
def real_quick_ratio(self):
    """Return a very cheap upper bound on ratio()."""
    la = len(self.a)
    lb = len(self.b)
    return _calculate_ratio(min(la, lb), la + lb)
def __init__(self, linejunk=None, charjunk=None):
    """Construct a Differ with optional line-junk and char-junk filters."""
    self.linejunk = linejunk
    self.charjunk = charjunk
'Compare two sequences of lines; generate the resulting delta. Each sequence must contain individual single-line strings ending with newlines. Such sequences can be obtained from the `readlines()` method of file-like objects. The delta generated also consists of newline- terminated strings, ready to be printed as-is v...
def compare(self, a, b):
cruncher = SequenceMatcher(self.linejunk, a, b) for (tag, alo, ahi, blo, bhi) in cruncher.get_opcodes(): if (tag == 'replace'): g = self._fancy_replace(a, alo, ahi, b, blo, bhi) elif (tag == 'delete'): g = self._dump('-', a, alo, ahi) elif (tag == 'insert'): ...
def _dump(self, tag, x, lo, hi):
    """Yield '<tag> <line>' for every line of x[lo:hi]."""
    for idx in xrange(lo, hi):
        yield '%s %s' % (tag, x[idx])
'When replacing one block of lines with another, search the blocks for *similar* lines; the best-matching pair (if any) is used as a synch point, and intraline difference marking is done on the similar pair. Lots of work, but often worth it. Example: >>> d = Differ() >>> results = d._fancy_replace([\'abcDefghiJkl\n\'],...
def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
(best_ratio, cutoff) = (0.74, 0.75) cruncher = SequenceMatcher(self.charjunk) (eqi, eqj) = (None, None) for j in xrange(blo, bhi): bj = b[j] cruncher.set_seq2(bj) for i in xrange(alo, ahi): ai = a[i] if (ai == bj): if (eqi is None): ...
'Format "?" output and deal with leading tabs. Example: >>> d = Differ() >>> results = d._qformat(\'\tabcDefghiJkl\n\', \'\tabcdefGhijkl\n\', >>> for line in results: print repr(line) \'- \tabcDefghiJkl\n\' \'? \t ^ ^ ^\n\' \'+ \tabcdefGhijkl\n\' \'? \t ^ ^ ^\n\''
def _qformat(self, aline, bline, atags, btags):
common = min(_count_leading(aline, ' DCTB '), _count_leading(bline, ' DCTB ')) common = min(common, _count_leading(atags[:common], ' ')) common = min(common, _count_leading(btags[:common], ' ')) atags = atags[common:].rstrip() btags = btags[common:].rstrip() (yield ('- ' + aline)) i...
def __init__(self, tabsize=8, wrapcolumn=None, linejunk=None, charjunk=IS_CHARACTER_JUNK):
    """Store HtmlDiff configuration.

    tabsize: tab stop spacing.  wrapcolumn: wrap column, or None for no
    wrapping.  linejunk/charjunk: junk filters forwarded to ndiff().
    """
    self._tabsize = tabsize
    self._wrapcolumn = wrapcolumn
    self._linejunk = linejunk
    self._charjunk = charjunk
def make_file(self, fromlines, tolines, fromdesc='', todesc='', context=False, numlines=5):
    """Return a complete HTML page containing the side-by-side diff table."""
    table = self.make_table(fromlines, tolines, fromdesc, todesc,
                            context=context, numlines=numlines)
    substitutions = dict(styles=self._styles, legend=self._legend, table=table)
    return self._file_template % substitutions
'Returns from/to line lists with tabs expanded and newlines removed. Instead of tab characters being replaced by the number of spaces needed to fill in to the next tab stop, this function will fill the space with tab characters. This is done so that the difference algorithms can identify changes in a file when tabs ar...
def _tab_newline_replace(self, fromlines, tolines):
def expand_tabs(line): line = line.replace(' ', '\x00') line = line.expandtabs(self._tabsize) line = line.replace(' ', ' DCTB ') return line.replace('\x00', ' ').rstrip('\n') fromlines = [expand_tabs(line) for line in fromlines] tolines = [expand_tabs(line) for line ...
'Builds list of text lines by splitting text lines at wrap point This function will determine if the input text line needs to be wrapped (split) into separate lines. If so, the first wrap point will be determined and the first line appended to the output text line list. This function is used recursively to handle the...
def _split_line(self, data_list, line_num, text):
if (not line_num): data_list.append((line_num, text)) return size = len(text) max = self._wrapcolumn if ((size <= max) or ((size - (text.count('\x00') * 3)) <= max)): data_list.append((line_num, text)) return i = 0 n = 0 mark = '' while ((n < max) and (i <...
'Returns iterator that splits (wraps) mdiff text lines'
def _line_wrapper(self, diffs):
for (fromdata, todata, flag) in diffs: if (flag is None): (yield (fromdata, todata, flag)) continue ((fromline, fromtext), (toline, totext)) = (fromdata, todata) (fromlist, tolist) = ([], []) self._split_line(fromlist, fromline, fromtext) self._split_l...
'Collects mdiff output into separate lists Before storing the mdiff from/to data into a list, it is converted into a single line of text with HTML markup.'
def _collect_lines(self, diffs):
(fromlist, tolist, flaglist) = ([], [], []) for (fromdata, todata, flag) in diffs: try: fromlist.append(self._format_line(0, flag, *fromdata)) tolist.append(self._format_line(1, flag, *todata)) except TypeError: fromlist.append(None) tolist.append(...
'Returns HTML markup of "from" / "to" text lines side -- 0 or 1 indicating "from" or "to" text flag -- indicates if difference on line linenum -- line number (used for line number column) text -- line text to be marked up'
def _format_line(self, side, flag, linenum, text):
try: linenum = ('%d' % linenum) id = (' id="%s%s"' % (self._prefix[side], linenum)) except TypeError: id = '' text = text.replace('&', '&amp;').replace('>', '&gt;').replace('<', '&lt;') text = text.replace(' ', '&nbsp;').rstrip() return ('<td class="diff_header"%s>%s...
def _make_prefix(self):
    """Create unique 'from'/'to' anchor prefixes for this table."""
    serial = HtmlDiff._default_prefix
    HtmlDiff._default_prefix += 1
    self._prefix = ['from%d_' % serial, 'to%d_' % serial]
'Makes list of "next" links'
def _convert_flags(self, fromlist, tolist, flaglist, context, numlines):
toprefix = self._prefix[1] next_id = ([''] * len(flaglist)) next_href = ([''] * len(flaglist)) (num_chg, in_change) = (0, False) last = 0 for (i, flag) in enumerate(flaglist): if flag: if (not in_change): in_change = True last = i ...
'Returns HTML table of side by side comparison with change highlights Arguments: fromlines -- list of "from" lines tolines -- list of "to" lines fromdesc -- "from" file column header string todesc -- "to" file column header string context -- set to True for contextual differences (defaults to False which shows full dif...
def make_table(self, fromlines, tolines, fromdesc='', todesc='', context=False, numlines=5):
self._make_prefix() (fromlines, tolines) = self._tab_newline_replace(fromlines, tolines) if context: context_lines = numlines else: context_lines = None diffs = _mdiff(fromlines, tolines, context_lines, linejunk=self._linejunk, charjunk=self._charjunk) if self._wrapcolumn: ...
def __init__(self):
    """Refuse direct instantiation of the abstract BaseSet class.

    Raises TypeError unless invoked on a concrete subclass.
    """
    if self.__class__ is BaseSet:
        # Call-style raise works on both Python 2 and 3; the original
        # 'raise TypeError, msg' statement form is a Python 3 syntax error.
        raise TypeError('BaseSet is an abstract class. Use Set or ImmutableSet.')
def __len__(self):
    """Return the cardinality of the set."""
    return len(self._data)
def __repr__(self):
    """Return a 'Set([<list of elements>])'-style representation."""
    return self._repr()
def __iter__(self):
    """Iterate over the elements (the keys of the backing dict)."""
    return self._data.iterkeys()
def copy(self):
    """Return a shallow copy of this set."""
    duplicate = self.__class__()
    duplicate._data.update(self._data)
    return duplicate
def __deepcopy__(self, memo):
    """Return a deep copy; registered in *memo* for the copy module."""
    from copy import deepcopy
    duplicate = self.__class__()
    memo[id(self)] = duplicate
    data = duplicate._data
    for elt in self:
        data[deepcopy(elt, memo)] = True
    return duplicate
def __or__(self, other):
    """Set union via the | operator."""
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.union(other)
def union(self, other):
    """Return a new set holding every element found in either operand."""
    merged = self.__class__(self)
    merged._update(other)
    return merged
def __and__(self, other):
    """Set intersection via the & operator."""
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.intersection(other)
def intersection(self, other):
    """Return a new set of the elements common to both operands."""
    if not isinstance(other, BaseSet):
        other = Set(other)
    # Scan the smaller operand, probe the larger one's dict.
    if len(self) <= len(other):
        smaller, larger = self, other
    else:
        smaller, larger = other, self
    shared = ifilter(larger._data.__contains__, smaller)
    return self.__class__(shared)
def __xor__(self, other):
    """Symmetric difference via the ^ operator."""
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.symmetric_difference(other)
'Return the symmetric difference of two sets as a new set. (I.e. all elements that are in exactly one of the sets.)'
def symmetric_difference(self, other):
result = self.__class__() data = result._data value = True selfdata = self._data try: otherdata = other._data except AttributeError: otherdata = Set(other)._data for elt in ifilterfalse(otherdata.__contains__, selfdata): data[elt] = value for elt in ifilterfalse(s...
def __sub__(self, other):
    """Set difference via the - operator."""
    if not isinstance(other, BaseSet):
        return NotImplemented
    return self.difference(other)
def difference(self, other):
    """Return a new Set of elements in this set but not in *other*."""
    result = self.__class__()
    data = result._data
    try:
        otherdata = other._data
    except AttributeError:
        otherdata = Set(other)._data
    for elt in ifilterfalse(otherdata.__contains__, self):
        data[elt] = True
    return result
def __contains__(self, element):
    """Membership test; falls back to the temporarily-immutable protocol."""
    try:
        return element in self._data
    except TypeError:
        transform = getattr(element, '__as_temporarily_immutable__', None)
        if transform is None:
            raise  # genuinely unhashable
        return transform() in self._data
def issubset(self, other):
    """Report whether every element of this set is in *other*."""
    self._binary_sanity_check(other)
    if len(self) > len(other):
        return False  # quick size-based rejection
    for elt in ifilterfalse(other._data.__contains__, self):
        return False  # found an element missing from other
    return True
def issuperset(self, other):
    """Report whether this set contains every element of *other*."""
    self._binary_sanity_check(other)
    if len(self) < len(other):
        return False  # quick size-based rejection
    for elt in ifilterfalse(self._data.__contains__, other):
        return False  # found an element of other missing from self
    return True
def __init__(self, iterable=None):
    """Build an immutable set, optionally populated from *iterable*."""
    self._hashcode = None  # hash computed lazily
    self._data = {}
    if iterable is not None:
        self._update(iterable)
def __init__(self, iterable=None):
    """Build a mutable set, optionally populated from *iterable*."""
    self._data = {}
    if iterable is not None:
        self._update(iterable)
def __ior__(self, other):
    """In-place union via |=."""
    self._binary_sanity_check(other)
    self._data.update(other._data)
    return self
def union_update(self, other):
    """Update this set to also include every element of *other*."""
    self._update(other)
def __iand__(self, other):
    """In-place intersection via &=."""
    self._binary_sanity_check(other)
    self._data = (self & other)._data
    return self
def intersection_update(self, other):
    """Keep only elements that are also in *other*."""
    if not isinstance(other, BaseSet):
        self._data = self.intersection(other)._data
    else:
        self &= other
def __ixor__(self, other):
    """In-place symmetric difference via ^=."""
    self._binary_sanity_check(other)
    self.symmetric_difference_update(other)
    return self
def symmetric_difference_update(self, other):
    """Toggle membership of every element of *other* in this set."""
    data = self._data
    if not isinstance(other, BaseSet):
        other = Set(other)
    if self is other:
        self.clear()  # x ^ x is the empty set
    for elt in other:
        if elt in data:
            del data[elt]
        else:
            data[elt] = True
def __isub__(self, other):
    """In-place difference via -=."""
    self._binary_sanity_check(other)
    self.difference_update(other)
    return self
def difference_update(self, other):
    """Remove from this set every element of *other*."""
    data = self._data
    if not isinstance(other, BaseSet):
        other = Set(other)
    if self is other:
        self.clear()  # x - x is the empty set
    for elt in ifilter(data.__contains__, other):
        del data[elt]
def update(self, iterable):
    """Add every value from *iterable* (such as a list or file)."""
    self._update(iterable)
def clear(self):
    """Empty the set in place."""
    self._data.clear()
def add(self, element):
    """Insert *element*; no effect if it is already present.

    Unhashable elements offering __as_immutable__ are stored in their
    immutable form.
    """
    try:
        self._data[element] = True
    except TypeError:
        transform = getattr(element, '__as_immutable__', None)
        if transform is None:
            raise  # genuinely unhashable
        self._data[transform()] = True
def remove(self, element):
    """Delete *element*; raise KeyError if it is not a member."""
    try:
        del self._data[element]
    except TypeError:
        transform = getattr(element, '__as_temporarily_immutable__', None)
        if transform is None:
            raise  # genuinely unhashable
        del self._data[transform()]
def discard(self, element):
    """Remove *element* if it is a member; otherwise do nothing."""
    try:
        self.remove(element)
    except KeyError:
        pass  # absence is fine
def pop(self):
    """Remove and return an arbitrary element (KeyError if empty)."""
    element, _ = self._data.popitem()
    return element
def server_bind(self):
    """Bind the socket as HTTPServer does, then record the server name."""
    HTTPServer.server_bind(self)
    self.setup_environ()
'Handle a single HTTP request'
def handle(self):
self.raw_requestline = self.rfile.readline(65537) if (len(self.raw_requestline) > 65536): self.requestline = '' self.request_version = '' self.command = '' self.send_error(414) return if (not self.parse_request()): return handler = ServerHandler(self.rfile...
def __len__(self):
    """Return the total number of headers, including duplicates."""
    return len(self._headers)
def __setitem__(self, name, val):
    """Set a header's value, replacing any prior occurrences."""
    del self[name]  # drop existing occurrences first
    self._headers.append((name, val))
def __delitem__(self, name):
    """Delete all occurrences of a header; silent if it is absent."""
    wanted = name.lower()
    self._headers[:] = [kv for kv in self._headers
                        if kv[0].lower() != wanted]
def __getitem__(self, name):
    """Return the first value for *name*, or None if it is missing."""
    return self.get(name)
def has_key(self, name):
    """Return True if the message contains the header."""
    return self.get(name) is not None
def get_all(self, name):
    """Return every value for the named field, in insertion order.

    Case-insensitive on the field name; may contain duplicates; empty
    list if no field matches.
    """
    wanted = name.lower()
    return [kv[1] for kv in self._headers if kv[0].lower() == wanted]
def get(self, name, default=None):
    """Return the first value for *name* (case-insensitive), or *default*."""
    wanted = name.lower()
    for key, value in self._headers:
        if key.lower() == wanted:
            return value
    return default
def keys(self):
    """Return all field names in original order (duplicates kept)."""
    return [name for name, _ in self._headers]
def values(self):
    """Return all header values in original order (duplicates kept)."""
    return [value for _, value in self._headers]
def items(self):
    """Return a copy of all (name, value) pairs in original order."""
    return list(self._headers)
def __str__(self):
    """Return the formatted header block, ready for HTTP transmission."""
    rendered = ['%s: %s' % pair for pair in self._headers]
    rendered.extend(['', ''])  # terminating blank line (CRLF CRLF)
    return '\r\n'.join(rendered)
def setdefault(self, name, value):
    """Return the first value for *name*, adding (name, value) if absent."""
    existing = self.get(name)
    if existing is not None:
        return existing
    self._headers.append((name, value))
    return value
def add_header(self, _name, _value, **_params):
    """Extended header setting.

    Keyword arguments become extra parameters on the field, with
    underscores turned into dashes; a None value adds just the key.
    """
    parts = []
    if _value is not None:
        parts.append(_value)
    for key, val in _params.items():
        dashed = key.replace('_', '-')
        if val is None:
            parts.append(dashed)
        else:
            parts.append(_formatparam(dashed, val))
    self._headers.append((_name, '; '.join(parts)))
def run(self, application):
    """Invoke the WSGI application and send its response."""
    try:
        self.setup_environ()
        self.result = application(self.environ, self.start_response)
        self.finish_response()
    except:
        # Error handling itself may fail; in that case drop the
        # connection and let the original exception propagate.
        try:
            self.handle_error()
        except:
            self.close()
            raise
'Set up the environment for one request'
def setup_environ(self):
env = self.environ = self.os_environ.copy() self.add_cgi_vars() env['wsgi.input'] = self.get_stdin() env['wsgi.errors'] = self.get_stderr() env['wsgi.version'] = self.wsgi_version env['wsgi.run_once'] = self.wsgi_run_once env['wsgi.url_scheme'] = self.get_scheme() env['wsgi.multithread']...
def finish_response(self):
    """Send the iterable result, then close self (and the iterable).

    Asynchronous subclasses typically redefine this to drive the
    iteration from an event loop.
    """
    try:
        if not self.result_is_file() or not self.sendfile():
            for chunk in self.result:
                self.write(chunk)
            self.finish_content()
    finally:
        self.close()
def get_scheme(self):
    """Return the URL scheme being used for this request."""
    return guess_scheme(self.environ)
def set_content_length(self):
    """Set Content-Length when the response is a single known block."""
    try:
        blocks = len(self.result)
    except (TypeError, AttributeError, NotImplementedError):
        pass  # result has no usable length; leave headers alone
    else:
        if blocks == 1:
            self.headers['Content-Length'] = str(self.bytes_sent)
def cleanup_headers(self):
    """Apply header defaults before sending; subclasses may extend."""
    if 'Content-Length' not in self.headers:
        self.set_content_length()
'\'start_response()\' callable as specified by PEP 333'
def start_response(self, status, headers, exc_info=None):
if exc_info: try: if self.headers_sent: raise exc_info[0], exc_info[1], exc_info[2] finally: exc_info = None elif (self.headers is not None): raise AssertionError('Headers already set!') assert (type(status) is StringType), 'Status mus...
'Transmit version/status/date/server, via self._write()'
def send_preamble(self):
if self.origin_server: if self.client_is_modern(): self._write(('HTTP/%s %s\r\n' % (self.http_version, self.status))) if ('Date' not in self.headers): self._write(('Date: %s\r\n' % format_date_time(time.time()))) if (self.server_software and ('Server...
'\'write()\' callable as specified by PEP 333'
def write(self, data):
assert (type(data) is StringType), 'write() argument must be string' if (not self.status): raise AssertionError('write() before start_response()') elif (not self.headers_sent): self.bytes_sent = len(data) self.send_headers() else: self.bytes_sent += len(...
def sendfile(self):
    """Platform-specific file transmission hook.

    The base implementation transmits nothing and returns False, so the
    caller falls back to iterating the result.
    """
    return False
def finish_content(self):
    """Ensure headers and content have both been sent.

    If no body was written, advertise an empty one (Content-Length: 0)
    and flush the headers.  (Original had a dead `else: pass` branch,
    removed here.)
    """
    if not self.headers_sent:
        self.headers.setdefault('Content-Length', '0')
        self.send_headers()
def close(self):
    """Close the result iterable (if closable) and reset all state.

    Subclasses may also want to drop the client connection here.
    """
    try:
        if hasattr(self.result, 'close'):
            self.result.close()
    finally:
        # Reset unconditionally, even if result.close() raised.
        self.result = self.headers = self.status = self.environ = None
        self.bytes_sent = 0
        self.headers_sent = False
def send_headers(self):
    """Transmit the response headers to the client via self._write()."""
    self.cleanup_headers()
    self.headers_sent = True
    if not self.origin_server or self.client_is_modern():
        self.send_preamble()
    self._write(str(self.headers))