desc stringlengths 3 26.7k | decl stringlengths 11 7.89k | bodies stringlengths 8 553k |
|---|---|---|
def assertRaisesRegexp(self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs):
    """Assert that invoking `callable_obj` raises `expected_exception`
    whose message matches `expected_regexp`.

    Args:
        expected_exception: Exception class expected to be raised.
        expected_regexp: Regexp (re pattern object or string) expected
            to be found in the error message.
        callable_obj: Function to be called; when None, a context
            manager is returned instead.
        args/kwargs: Extra arguments forwarded to `callable_obj`.
    """
    pattern = None if expected_regexp is None else re.compile(expected_regexp)
    context = _AssertRaisesContext(expected_exception, self, pattern)
    if callable_obj is None:
        # Used as: with self.assertRaisesRegexp(Exc, 'msg'): ...
        return context
    with context:
        callable_obj(*args, **kwargs)
|
'Fail the test unless the text matches the regular expression.'
| def assertRegexpMatches(self, text, expected_regexp, msg=None):
| if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if (not expected_regexp.search(text)):
msg = (msg or "Regexp didn't match")
msg = ('%s: %r not found in %r' % (msg, expected_regexp.pattern, text))
raise self.failureExc... |
'Fail the test if the text matches the regular expression.'
| def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
| if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = (msg or 'Regexp matched')
msg = ('%s: %r matches %r in %r' % (msg, text[match.start():match.end()], unexpected_rege... |
def startTest(self, test):
    """Prepare bookkeeping for `test`: bump the run counter, reset
    output mirroring, and begin capturing stdout/stderr."""
    self.testsRun = self.testsRun + 1
    self._mirrorOutput = False
    self._setupStdout()
def stopTest(self, test):
    """Finish bookkeeping for `test`: restore stdout/stderr and clear
    the output-mirroring flag."""
    self._restoreStdout()
    self._mirrorOutput = False
@failfast
def addError(self, test, err):
    """Record an error for `test`; `err` is a sys.exc_info() triple."""
    formatted = self._exc_info_to_string(err, test)
    self.errors.append((test, formatted))
    # Mirror captured output so the traceback context is visible.
    self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
    """Record an assertion failure for `test`; `err` is a sys.exc_info() triple."""
    formatted = self._exc_info_to_string(err, test)
    self.failures.append((test, formatted))
    # Mirror captured output so the failure context is visible.
    self._mirrorOutput = True
def addSuccess(self, test):
    """Record a passing test; the base result keeps no per-success state."""
    pass
def addSkip(self, test, reason):
    """Record that `test` was skipped, with the human-readable `reason`."""
    entry = (test, reason)
    self.skipped.append(entry)
def addExpectedFailure(self, test, err):
    """Record an expected failure/error; `err` is a sys.exc_info() triple."""
    entry = (test, self._exc_info_to_string(err, test))
    self.expectedFailures.append(entry)
@failfast
def addUnexpectedSuccess(self, test):
    """Record a test that was expected to fail but succeeded."""
    self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
    """Return True when no failures and no errors were recorded."""
    return not self.failures and not self.errors
def stop(self):
    """Request that the test run be aborted after the current test."""
    self.shouldStop = True
'Converts a sys.exc_info()-style tuple of values into a string.'
| def _exc_info_to_string(self, err, test):
| (exctype, value, tb) = err
while (tb and self._is_relevant_tb_level(tb)):
tb = tb.tb_next
if (exctype is test.failureException):
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format... |
'Run the given test case or test suite.'
| def run(self, test):
| result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if (startTestRun is not None):
startTestRun()
try:
test(result)
finally:
... |
'Return a suite of all tests cases contained in testCaseClass'
| def loadTestsFromTestCase(self, testCaseClass):
| if issubclass(testCaseClass, suite.TestSuite):
raise TypeError('Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?')
testCaseNames = self.getTestCaseNames(testCaseClass)
if ((not testCaseNames) and hasattr(testCaseClass... |
'Return a suite of all tests cases contained in the given module'
| def loadTestsFromModule(self, module, use_load_tests=True):
| tests = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, type) and issubclass(obj, case.TestCase)):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if (use_load_tests ... |
'Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.'
| def loadTestsFromName(self, name, module=None):
| parts = name.split('.')
if (module is None):
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[(-1)]
if (not parts_copy):
... |
def loadTestsFromNames(self, names, module=None):
    """Resolve each specifier in `names` via loadTestsFromName() and
    bundle the resulting suites into a single suite."""
    resolved = [self.loadTestsFromName(name, module) for name in names]
    return self.suiteClass(resolved)
'Return a sorted sequence of method names found within testCaseClass'
| def getTestCaseNames(self, testCaseClass):
| def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
return (attrname.startswith(prefix) and hasattr(getattr(testCaseClass, attrname), '__call__'))
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_Cmp... |
'Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top leve... | def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
| set_implicit_top = False
if ((top_level_dir is None) and (self._top_level_dir is not None)):
top_level_dir = self._top_level_dir
elif (top_level_dir is None):
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if (not (top_level_d... |
'Used by discovery. Yields test suites it loads.'
| def _find_tests(self, start_dir, pattern):
| paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if (not VALID_MODULE_NAME.match(path)):
continue
if (not self._match_path(path, full_path, pattern)):
continue
... |
@unittest.skipIf(sys.flags.optimize >= 2, 'Docstrings are omitted with -O2 and above')
def testShortDescriptionWithOneLineDocstring(self):
    'Tests shortDescription() for a method with a docstring.'
    # NOTE: the docstring above is the fixture — shortDescription()
    # must return exactly its single line.
    expected = 'Tests shortDescription() for a method with a docstring.'
    self.assertEqual(self.shortDescription(), expected)
@unittest.skipIf(sys.flags.optimize >= 2, 'Docstrings are omitted with -O2 and above')
def testShortDescriptionWithMultiLineDocstring(self):
    """Tests shortDescription() for a method with a longer docstring.
    This method ensures that only the first line of a docstring is
    returned used in the short description, no matter how long the
    whole thing is.
    """
    # Only the first docstring line may appear in the description.
    expected = 'Tests shortDescription() for a method with a longer docstring.'
    self.assertEqual(self.shortDescription(), expected)
def testSynonymAssertMethodNames(self):
    """Test undocumented method name synonyms.

    Please do not use these methods names in your own code. This test
    confirms their continued existence and functionality in order to
    avoid breaking existing code.
    """
    self.assertNotEquals(3, 5)
    self.assertEquals(3, 3)
    self.assertAlmostEquals(2.0, 2.0)
    self.assertNotAlmostEquals(3.0, 5.0)
    self.assert_(True)
def testPendingDeprecationMethodNames(self):
    """Test fail* methods pending deprecation, they will warn in 3.2.

    Do not use these methods. They will go away in 3.3.
    """
    # check_warnings() swallows the PendingDeprecationWarnings emitted
    # by the fail* aliases.
    with test_support.check_warnings():
        self.failIfEqual(3, 5)
        self.failUnlessEqual(3, 3)
        self.failUnlessAlmostEqual(2.0, 2.0)
        self.failIfAlmostEqual(3.0, 5.0)
        self.failUnless(True)
        self.failUnlessRaises(TypeError, (lambda _: (3.14 + u'spam')))
        self.failIf(False)
@unittest.skipIf(sys.flags.optimize >= 2, 'Docstrings are omitted with -O2 and above')
def testGetDescriptionWithOneLineDocstring(self):
    'Tests getDescription() for a method with a docstring.'
    # getDescription() at verbosity 1 appends the docstring's first
    # line to the test id.
    result = unittest.TextTestResult(None, True, 1)
    expected = ('testGetDescriptionWithOneLineDocstring (' + __name__
                + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a docstring.')
    self.assertEqual(result.getDescription(self), expected)
@unittest.skipIf(sys.flags.optimize >= 2, 'Docstrings are omitted with -O2 and above')
def testGetDescriptionWithMultiLineDocstring(self):
    """Tests getDescription() for a method with a longer docstring.
    The second line of the docstring.
    """
    # Only the first docstring line may appear in the description.
    result = unittest.TextTestResult(None, True, 1)
    expected = ('testGetDescriptionWithMultiLineDocstring (' + __name__
                + '.Test_TestResult)\n'
                'Tests getDescription() for a method with a longer docstring.')
    self.assertEqual(result.getDescription(self), expected)
def debug(self):
    """Run every test in this suite directly, letting exceptions
    propagate instead of being collected in a TestResult."""
    for member in self:
        member.debug()
def debug(self):
    """Run the tests under a _DebugResult so errors propagate
    immediately instead of being collected."""
    result = _DebugResult()
    self.run(result, True)
def next(self):
    """Return the next input line (file iterator protocol).

    Raises StopIteration at EOF and ValueError if the file is closed.
    """
    _complain_ifclosed(self.closed)
    line = self.readline()
    if not line:
        raise StopIteration
    return line
def close(self):
    """Free the memory buffer; subsequent operations will fail."""
    if self.closed:
        return
    self.closed = True
    del self.buf, self.pos
def isatty(self):
    """Return False: an in-memory buffer is never a tty-like device."""
    _complain_ifclosed(self.closed)
    return False
def seek(self, pos, mode=0):
    """Set the current position.

    mode 0 = absolute (default), 1 = relative to the current position,
    2 = relative to the end of the buffer. No return value.
    """
    _complain_ifclosed(self.closed)
    if self.buflist:
        # Fold pending appended writes into the main buffer first.
        self.buf += ''.join(self.buflist)
        self.buflist = []
    if mode == 1:
        pos += self.pos
    elif mode == 2:
        pos += self.len
    # Never seek before the start of the buffer.
    self.pos = max(0, pos)
def tell(self):
    """Return the current position within the buffer."""
    _complain_ifclosed(self.closed)
    return self.pos
def read(self, n=-1):
    """Read up to `n` bytes; read everything remaining when `n` is
    negative or None. Returns '' once EOF has been reached."""
    _complain_ifclosed(self.closed)
    if self.buflist:
        # Fold pending appended writes into the main buffer first.
        self.buf += ''.join(self.buflist)
        self.buflist = []
    if n is None or n < 0:
        newpos = self.len
    else:
        newpos = min(self.pos + n, self.len)
    chunk = self.buf[self.pos:newpos]
    self.pos = newpos
    return chunk
'Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent
when a file ends with an incomplete line). If the size argument is
present and non-negative, it is a maximum byte count (including the
trailing newline) and an incomplete line may be returned.
An empty string is r... | def readline(self, length=None):
| _complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
i = self.buf.find('\n', self.pos)
if (i < 0):
newpos = self.len
else:
newpos = (i + 1)
if ((length is not None) and (length >= 0)):
if ((self.pos + length)... |
def readlines(self, sizehint=0):
    """Read lines until EOF and return them as a list.

    When `sizehint` is positive, stop after whole lines totalling at
    least that many bytes have been collected.
    """
    lines = []
    total = 0
    while True:
        line = self.readline()
        if not line:
            break
        lines.append(line)
        total += len(line)
        if 0 < sizehint <= total:
            break
    return lines
def truncate(self, size=None):
    """Truncate the buffer to `size` bytes (default: current position).

    Raises IOError for a negative size. The position is only moved
    back when it lies beyond the new end of the buffer.
    """
    _complain_ifclosed(self.closed)
    if size is None:
        size = self.pos
    elif size < 0:
        raise IOError(EINVAL, 'Negative size not allowed')
    elif size < self.pos:
        self.pos = size
    self.buf = self.getvalue()[:size]
    self.len = size
'Write a string to the file.
There is no return value.'
| def write(self, s):
| _complain_ifclosed(self.closed)
if (not s):
return
if (not isinstance(s, basestring)):
s = str(s)
spos = self.pos
slen = self.len
if (spos == slen):
self.buflist.append(s)
self.len = self.pos = (spos + len(s))
return
if (spos > slen):
self.bufl... |
def writelines(self, iterable):
    """Write each string produced by `iterable`; no separators are
    added (mirrors readlines())."""
    for chunk in iterable:
        self.write(chunk)
def flush(self):
    """No-op apart from the closed check — data already lives in memory."""
    _complain_ifclosed(self.closed)
def getvalue(self):
    """Return the buffer's entire contents, folding in pending writes.

    Callable at any time before close().
    """
    _complain_ifclosed(self.closed)
    if self.buflist:
        self.buf += ''.join(self.buflist)
        self.buflist = []
    return self.buf
def value_decode(self, val):
    """Identity decode: return (real_value, coded_value), both `val`.

    Override to change how network cookie values are decoded.
    """
    return (val, val)
def value_encode(self, val):
    """Encode via str(): return (real_value, coded_value), both str(val).

    Override to change how cookie values are encoded for output.
    """
    coded = str(val)
    return (coded, coded)
def __set(self, key, real_value, coded_value):
    """Create or update the Morsel stored under `key`."""
    morsel = self.get(key, Morsel())
    morsel.set(key, real_value, coded_value)
    dict.__setitem__(self, key, morsel)
def __setitem__(self, key, value):
    """Dictionary-style assignment; non-Morsel values are encoded first."""
    if isinstance(value, Morsel):
        dict.__setitem__(self, key, value)
        return
    (rval, cval) = self.value_encode(value)
    self.__set(key, rval, cval)
def output(self, attrs=None, header='Set-Cookie:', sep='\r\n'):
    """Render every morsel as an HTTP header line, sorted by key, and
    join them with `sep`."""
    rendered = [morsel.output(attrs, header)
                for (key, morsel) in sorted(self.items())]
    return sep.join(rendered)
def js_output(self, attrs=None):
    """Render every morsel as JavaScript, sorted by key."""
    rendered = [morsel.js_output(attrs)
                for (key, morsel) in sorted(self.items())]
    return _nulljoin(rendered)
def load(self, rawdata):
    """Load cookies from a string (presumably HTTP_COOKIE) or a mapping.

    A string is parsed as raw cookie-header text; anything else is
    treated as a mapping, equivalent to:
        map(Cookie.__setitem__, d.keys(), d.values())
    """
    # isinstance instead of type(rawdata) == type(''): idiomatic, and
    # str subclasses are now correctly parsed as strings too.
    if isinstance(rawdata, str):
        self.__ParseString(rawdata)
    else:
        for (key, value) in rawdata.items():
            self[key] = value
    return
'Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab owne... | def acquire(self, blocking=1):
| me = _get_ident()
if (self.__owner == me):
self.__count = (self.__count + 1)
if __debug__:
self._note('%s.acquire(%s): recursive success', self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
... |
'Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the... | def release(self):
| if (self.__owner != _get_ident()):
raise RuntimeError('cannot release un-acquired lock')
self.__count = count = (self.__count - 1)
if (not count):
self.__owner = None
self.__block.release()
if __debug__:
self._note('%s.release(): final release', sel... |
'Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notifyAll() call for the same condition
variable in another thread, or unt... | def wait(self, timeout=None):
| if (not self._is_owned()):
raise RuntimeError('cannot wait on un-acquired lock')
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try:
if (timeout is None):
waiter.acquire()
if __debug_... |
'Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.'
| def notify(self, n=1):
| if (not self._is_owned()):
raise RuntimeError('cannot notify on un-acquired lock')
__waiters = self.__waiters
waiters = __waiters[:n]
if (not waiters):
if __debug__:
self._note('%s.notify(): no waiters', self)
return
self._note('%s.notify(): n... |
def notifyAll(self):
    """Wake every thread waiting on this condition.

    Raises RuntimeError (via notify) if the caller does not hold the lock.
    """
    self.notify(len(self.__waiters))
'Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done ... | def acquire(self, blocking=1):
| rc = False
with self.__cond:
while (self.__value == 0):
if (not blocking):
break
if __debug__:
self._note('%s.acquire(%s): blocked waiting, value=%s', self, blocking, self.__value)
self.__cond.wait()
else:
s... |
def release(self):
    """Increment the internal counter by one and wake one waiter, if any."""
    with self.__cond:
        self.__value = self.__value + 1
        if __debug__:
            self._note('%s.release: success, value=%s', self, self.__value)
        self.__cond.notify()
def release(self):
    """Increment the counter by one, waking one waiter if needed.

    Raises ValueError when releases would exceed the number of acquires
    (i.e. the counter would pass its initial value).
    """
    with self._Semaphore__cond:
        if self._Semaphore__value >= self._initial_value:
            raise ValueError('Semaphore released too many times')
        self._Semaphore__value += 1
        self._Semaphore__cond.notify()
def isSet(self):
    """Return the current state of the internal flag."""
    return self.__flag
def set(self):
    """Set the internal flag to true and wake all waiting threads;
    subsequent wait() calls will not block."""
    with self.__cond:
        self.__flag = True
        self.__cond.notify_all()
def clear(self):
    """Reset the internal flag to false; wait() will block again until
    set() is called."""
    with self.__cond:
        self.__flag = False
def wait(self, timeout=None):
    """Block until the internal flag is true or `timeout` (seconds,
    float allowed) elapses; return the flag's value."""
    with self.__cond:
        if not self.__flag:
            self.__cond.wait(timeout)
        return self.__flag
'This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By de... | def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
| assert (group is None), 'group argument must be None for now'
_Verbose.__init__(self, verbose)
if (kwargs is None):
kwargs = {}
self.__target = target
self.__name = str((name or _newname()))
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_d... |
'Start the thread\'s activity.
It must be called at most once per thread object. It arranges for the
object\'s run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.'
| def start(self):
| if (not self.__initialized):
raise RuntimeError('thread.__init__() not called')
if self.__started.is_set():
raise RuntimeError('threads can only be started once')
if __debug__:
self._note('%s.start(): starting thread', self)
with _active_limbo_lock:
... |
def run(self):
    """Invoke the target callable with the stored positional and keyword
    arguments; subclasses may override this."""
    try:
        if self.__target:
            self.__target(*self.__args, **self.__kwargs)
    finally:
        # Drop the references so a target holding a reference back to
        # the thread cannot create a reference cycle.
        del self.__target, self.__args, self.__kwargs
def __delete(self):
    """Remove the current thread from the registry of running threads.

    A KeyError is tolerated only under dummy_threading, where the
    registry may legitimately lack an entry.
    """
    try:
        with _active_limbo_lock:
            del _active[_get_ident()]
    except KeyError:
        if 'dummy_threading' not in _sys.modules:
            raise
'Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a time... | def join(self, timeout=None):
| if (not self.__initialized):
raise RuntimeError('Thread.__init__() not called')
if (not self.__started.is_set()):
raise RuntimeError('cannot join thread before it is started')
if (self is current_thread()):
raise RuntimeError('cannot join current thre... |
@property
def name(self):
    """The thread's name — identification only, no semantics; several
    threads may share a name."""
    assert self.__initialized, 'Thread.__init__() not called'
    return self.__name
@property
def ident(self):
    """Thread identifier (nonzero int), or None before start().

    Identifiers may be recycled after a thread exits; the value stays
    available after the thread has exited.
    """
    assert self.__initialized, 'Thread.__init__() not called'
    return self.__ident
def isAlive(self):
    """Return True from just before run() starts until just after it
    terminates."""
    assert self.__initialized, 'Thread.__init__() not called'
    return self.__started.is_set() and not self.__stopped
@property
def daemon(self):
    """Whether this thread is a daemon thread; inherited from the
    creating thread and only changeable before start()."""
    assert self.__initialized, 'Thread.__init__() not called'
    return self.__daemonic
def cancel(self):
    """Stop the timer if it hasn't fired yet by signalling completion."""
    self.finished.set()
def _munge_whitespace(self, text):
    """Expand tabs and map every remaining whitespace character to a
    single space, e.g. " foo\\tbar\\n\\nbaz" -> " foo    bar  baz"."""
    if self.expand_tabs:
        text = text.expandtabs()
    if self.replace_whitespace:
        # Separate translation tables for byte strings and unicode.
        if isinstance(text, str):
            text = text.translate(self.whitespace_trans)
        elif isinstance(text, _unicode):
            text = text.translate(self.unicode_whitespace_trans)
    return text
'_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
\'Look,\', \' \', \'goof-\', \'ball\', \' \', \'--\', \' \',
\... | def _split(self, text):
| if isinstance(text, _unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
elif self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = fil... |
def _fix_sentence_endings(self, chunks):
    """Widen the single space after a sentence-ending chunk to two
    spaces, in place (split() leaves one too few)."""
    idx = 0
    search = self.sentence_end_re.search
    while idx < len(chunks) - 1:
        if chunks[idx + 1] == ' ' and search(chunks[idx]):
            chunks[idx + 1] = '  '
            idx += 2
        else:
            idx += 1
'_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.'
| def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
| if (width < 1):
space_left = 1
else:
space_left = (width - cur_len)
if self.break_long_words:
cur_line.append(reversed_chunks[(-1)][:space_left])
reversed_chunks[(-1)] = reversed_chunks[(-1)][space_left:]
elif (not cur_line):
cur_line.append(reversed_chunks.pop())... |
'_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length \'self.width\' or less. (If \'break_long_words\' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo \'break... | def _wrap_chunks(self, chunks):
| lines = []
if (self.width <= 0):
raise ValueError(('invalid width %r (must be > 0)' % self.width))
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.init... |
def wrap(self, text):
    """Reformat the single paragraph `text` to fit in self.width
    columns and return the wrapped lines (without final newlines)."""
    munged = self._munge_whitespace(text)
    chunks = self._split(munged)
    if self.fix_sentence_endings:
        self._fix_sentence_endings(chunks)
    return self._wrap_chunks(chunks)
def fill(self, text):
    """Wrap the single paragraph `text` and return it as one
    newline-joined string."""
    return '\n'.join(self.wrap(text))
'Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names.'
| def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
| escape = (escape or self.escape)
results = []
here = 0
pattern = re.compile('\\b((http|ftp)://\\S+[\\w/]|RFC[- ]?(\\d+)|PEP[- ]?(\\d+)|(self\\.)?((?:\\w|\\.)+))\\b')
while 1:
match = pattern.search(text, here)
if (not match):
break
(start, end) = match.span(... |
'Produce HTML documentation for a function or method object.'
| def docroutine(self, object, name, mod=None, funcs={}, classes={}, methods={}, cl=None):
| anchor = ((((cl and cl.__name__) or '') + '-') + name)
note = ''
title = ('<a name="%s"><strong>%s</strong></a>' % (self.escape(anchor), self.escape(name)))
if inspect.ismethod(object):
(args, varargs, varkw, defaults) = inspect.getargspec(object.im_func)
argspec = inspect.formatargsp... |
'Produce HTML documentation for an XML-RPC server.'
| def docserver(self, server_name, package_documentation, methods):
| fdict = {}
for (key, value) in methods.items():
fdict[key] = ('#-' + key)
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = ('<big><big><strong>%s</strong></big></big>' % server_name)
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(packag... |
def set_server_title(self, server_title):
    """Set the HTML title used in the generated server documentation."""
    self.server_title = server_title
def set_server_name(self, server_name):
    """Set the name shown in the generated HTML server documentation."""
    self.server_name = server_name
def set_server_documentation(self, server_documentation):
    """Set the documentation string for the entire server."""
    self.server_documentation = server_documentation
'generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide ... | def generate_html_documentation(self):
| methods = {}
for method_name in self.system_listMethods():
if (method_name in self.funcs):
method = self.funcs[method_name]
elif (self.instance is not None):
method_info = [None, None]
if hasattr(self.instance, '_get_method_argstring'):
method_... |
'Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.'
| def do_GET(self):
| if (not self.is_rpc_path_valid()):
self.report_404()
return
response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Content-length', str(len(response)))
self.end_headers()
self.wfile.write(re... |
def handle_get(self):
    """Serve an HTTP GET in CGI style: write minimal headers plus the
    generated HTML documentation to stdout."""
    response = self.generate_html_documentation()
    # Equivalent to the print-statement form: each header line ends in
    # '\n', followed by a blank line, then the body.
    sys.stdout.write('Content-Type: text/html\n')
    sys.stdout.write('Content-Length: %d\n' % len(response))
    sys.stdout.write('\n')
    sys.stdout.write(response)
'Read at least wtd bytes (or until EOF)'
| def read(self, totalwtd):
| decdata = ''
wtd = totalwtd
while (wtd > 0):
if self.eof:
return decdata
wtd = (((wtd + 2) // 3) * 4)
data = self.ifp.read(wtd)
while 1:
try:
(decdatacur, self.eof) = binascii.a2b_hqx(data)
break
except binas... |
def __init__(self, verbose=0):
    """Store the verbosity level and initialize parser state via reset()."""
    self.verbose = verbose
    self.reset()
def reset(self):
    """Reset parser state; all unprocessed data is lost."""
    self.__starttag_text = None
    self.rawdata = ''
    self.stack = []
    self.lasttag = '???'
    self.nomoretags = 0
    self.literal = 0
    # Reset the shared markupbase machinery as well.
    markupbase.ParserBase.reset(self)
def setnomoretags(self):
    """Enter literal (CDATA) mode until EOF; for derived classes only."""
    self.nomoretags = self.literal = 1
def setliteral(self, *args):
    """Enter literal (CDATA) mode; extra args are ignored. For derived
    classes only."""
    self.literal = 1
def feed(self, data):
    """Append `data` to the pending buffer and process as much of it as
    possible; may be called with arbitrarily small or large chunks."""
    self.rawdata = self.rawdata + data
    self.goahead(0)
def close(self):
    """Flush any remaining buffered data through the parser (EOF)."""
    self.goahead(1)
def convert_charref(self, name):
    """Convert a decimal character reference to text; None for
    non-numeric or non-ASCII (> 127) references. May be overridden."""
    try:
        codepoint = int(name)
    except ValueError:
        return None
    if codepoint < 0 or codepoint > 127:
        return None
    return self.convert_codepoint(codepoint)
def handle_charref(self, name):
    """Dispatch a character reference: converted text goes to
    handle_data(), failures go to unknown_charref(). No need to override."""
    text = self.convert_charref(name)
    if text is None:
        self.unknown_charref(name)
    else:
        self.handle_data(text)
def convert_entityref(self, name):
    """Look up `name` in self.entitydefs; None when unknown. Tailor by
    overriding or by adjusting the entitydefs mapping."""
    table = self.entitydefs
    if name not in table:
        return None
    return table[name]
def handle_entityref(self, name):
    """Dispatch an entity reference: converted text goes to
    handle_data(), failures go to unknown_entityref(). No need to override."""
    text = self.convert_entityref(name)
    if text is None:
        self.unknown_entityref(name)
    else:
        self.handle_data(text)
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
    """Create a message flattener.

    outfp: output file-like object; must have a write() method.
    mangle_from_: when True (default), escape From_ lines in the body
        by prefixing them with '>'.
    maxheaderlen: preferred maximum header line length.
        NOTE(review): exact wrapping semantics not visible here — see
        email.Generator docs.
    """
    self._fp = outfp
    self._mangle_from_ = mangle_from_
    self._maxheaderlen = maxheaderlen
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.