Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _capture_snapshot(a_snapshot: Snapshot, resolved_kwargs: Mapping[str, Any]) -> Any:
"""
Capture the snapshot from the keyword arguments resolved before the function call (including the default values).
:param a_snapshot: snapshot to be captured
:param resolved_kwargs: resolved keyword arguments (in... |
def decorate_with_checker(func: CallableT) -> CallableT:
"""Decorate the function with a checker that verifies the preconditions and postconditions."""
assert not hasattr(func, "__preconditions__"), \
"Expected func to have no list of preconditions (there should be only a single contract checker per fun... |
def _find_self(param_names: List[str], args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any:
"""Find the instance of ``self`` in the arguments."""
instance_i = param_names.index("self")
if instance_i < len(args):
instance = args[instance_i]
else:
instance = kwargs["self"]
return in... |
def _decorate_with_invariants(func: CallableT, is_init: bool) -> CallableT:
"""
Decorate the function ``func`` of the class ``cls`` with invariant checks.
If the function has been already decorated with invariant checks, the function returns immediately.
:param func: function to be wrapped
:param ... |
def _already_decorated_with_invariants(func: CallableT) -> bool:
"""Check if the function has been already decorated with an invariant check by going through its decorator stack."""
already_decorated = False
for a_decorator in _walk_decorator_stack(func=func):
if getattr(a_decorator, "__is_invariant... |
def add_invariant_checks(cls: type) -> None:
"""Decorate each of the class functions with invariant checks if not already decorated."""
# Candidates for the decoration as list of (name, dir() value)
init_name_func = None # type: Optional[Tuple[str, Callable[..., None]]]
names_funcs = [] # type: List[T... |
def main() -> None:
""""Execute the main routine."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--module", help="name of the module to import",
choices=[
"functions_100_with_no_contract",
"functi... |
def main() -> None:
""""Execute the main routine."""
modules = [
"functions_100_with_no_contract",
"functions_100_with_1_contract",
"functions_100_with_5_contracts",
"functions_100_with_10_contracts",
"functions_100_with_1_disabled_contract",
"functions_100_with... |
def main() -> None:
""""Execute the main routine."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--outdir", help="output directory", default=os.path.dirname(__file__))
args = parser.parse_args()
outdir = pathlib.Path(args.outdir)
if not outdir.exists():
ra... |
def visit_Num(self, node: ast.Num) -> Union[int, float]:
    """Return the numeric literal stored at the node, caching it first."""
    value = node.n
    self.recomputed_values[node] = value
    return value
def visit_Str(self, node: ast.Str) -> str:
    """Return the string literal stored at the node, caching it first."""
    text = node.s
    self.recomputed_values[node] = text
    return text
def visit_Bytes(self, node: ast.Bytes) -> bytes:
    """Recompute the value as the bytes at the node.

    :param node: bytes literal node
    :return: the bytes stored at the node
    """
    result = node.s
    self.recomputed_values[node] = result
    # Return the cached result; the previous code returned ``node.s``
    # directly, which was inconsistent with the sibling visit_* methods
    # (same value, but a needless divergence in style).
    return result
def visit_List(self, node: ast.List) -> List[Any]:
"""Visit the elements and assemble the results into a list."""
if isinstance(node.ctx, ast.Store):
raise NotImplementedError("Can not compute the value of a Store on a list")
result = [self.visit(node=elt) for elt in node.elts]
... |
def visit_Tuple(self, node: ast.Tuple) -> Tuple[Any, ...]:
"""Visit the elements and assemble the results into a tuple."""
if isinstance(node.ctx, ast.Store):
raise NotImplementedError("Can not compute the value of a Store on a tuple")
result = tuple(self.visit(node=elt) for elt in ... |
def visit_Set(self, node: ast.Set) -> Set[Any]:
    """Recompute the set literal by visiting each of its elements."""
    recomputed = {self.visit(node=element) for element in node.elts}
    self.recomputed_values[node] = recomputed
    return recomputed
def visit_Dict(self, node: ast.Dict) -> Dict[Any, Any]:
"""Visit keys and values and assemble a dictionary with the results."""
recomputed_dict = dict() # type: Dict[Any, Any]
for key, val in zip(node.keys, node.values):
recomputed_dict[self.visit(node=key)] = self.visit(node=val)
... |
def visit_NameConstant(self, node: ast.NameConstant) -> Any:
    """Cache and return the constant value stored on the node."""
    constant = node.value
    self.recomputed_values[node] = constant
    return constant
def visit_Name(self, node: ast.Name) -> Any:
"""Load the variable by looking it up in the variable look-up and in the built-ins."""
if not isinstance(node.ctx, ast.Load):
raise NotImplementedError("Can only compute a value of Load on a name {}, but got context: {}".format(
no... |
def visit_Expr(self, node: ast.Expr) -> Any:
    """Recompute the expression statement by evaluating its ``value`` child."""
    value = self.visit(node=node.value)
    self.recomputed_values[node] = value
    return value
def visit_UnaryOp(self, node: ast.UnaryOp) -> Any:
"""Visit the node operand and apply the operation on the result."""
if isinstance(node.op, ast.UAdd):
result = +self.visit(node=node.operand)
elif isinstance(node.op, ast.USub):
result = -self.visit(node=node.operand)
... |
def visit_BinOp(self, node: ast.BinOp) -> Any:
"""Recursively visit the left and right operand, respectively, and apply the operation on the results."""
# pylint: disable=too-many-branches
left = self.visit(node=node.left)
right = self.visit(node=node.right)
if isinstance(node.o... |
def visit_BoolOp(self, node: ast.BoolOp) -> Any:
"""Recursively visit the operands and apply the operation on them."""
values = [self.visit(value_node) for value_node in node.values]
if isinstance(node.op, ast.And):
result = functools.reduce(lambda left, right: left and right, value... |
def visit_Compare(self, node: ast.Compare) -> Any:
"""Recursively visit the comparators and apply the operations on them."""
# pylint: disable=too-many-branches
left = self.visit(node=node.left)
comparators = [self.visit(node=comparator) for comparator in node.comparators]
resu... |
def visit_Call(self, node: ast.Call) -> Any:
"""Visit the function and the arguments and finally make the function call with them."""
func = self.visit(node=node.func)
args = [] # type: List[Any]
for arg_node in node.args:
if isinstance(arg_node, ast.Starred):
... |
def visit_IfExp(self, node: ast.IfExp) -> Any:
"""Visit the ``test``, and depending on its outcome, the ``body`` or ``orelse``."""
test = self.visit(node=node.test)
if test:
result = self.visit(node=node.body)
else:
result = self.visit(node=node.orelse)
... |
def visit_Attribute(self, node: ast.Attribute) -> Any:
"""Visit the node's ``value`` and get the attribute from the result."""
value = self.visit(node=node.value)
if not isinstance(node.ctx, ast.Load):
raise NotImplementedError(
"Can only compute a value of Load on th... |
def visit_Index(self, node: ast.Index) -> Any:
    """Recompute a simple subscript index by visiting its ``value`` child."""
    value = self.visit(node=node.value)
    self.recomputed_values[node] = value
    return value
def visit_Slice(self, node: ast.Slice) -> slice:
"""Visit ``lower``, ``upper`` and ``step`` and recompute the node as a ``slice``."""
lower = None # type: Optional[int]
if node.lower is not None:
lower = self.visit(node=node.lower)
upper = None # type: Optional[int]
... |
def visit_ExtSlice(self, node: ast.ExtSlice) -> Tuple[Any, ...]:
    """Recompute an extended (multi-dimensional) slice as a tuple of its dimensions."""
    recomputed_dims = [self.visit(node=dim) for dim in node.dims]
    result = tuple(recomputed_dims)
    self.recomputed_values[node] = result
    return result
def visit_Subscript(self, node: ast.Subscript) -> Any:
    """Recompute a subscript by indexing the recomputed value with the recomputed slice."""
    container = self.visit(node=node.value)
    index = self.visit(node=node.slice)
    result = container[index]
    self.recomputed_values[node] = result
    return result
def _execute_comprehension(self, node: Union[ast.ListComp, ast.SetComp, ast.GeneratorExp, ast.DictComp]) -> Any:
"""Compile the generator or comprehension from the node and execute the compiled code."""
args = [ast.arg(arg=name) for name in sorted(self._name_to_value.keys())]
func_def_node = as... |
def visit_GeneratorExp(self, node: ast.GeneratorExp) -> Any:
"""Compile the generator expression as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
# Do not set the computed value of the... |
def visit_ListComp(self, node: ast.ListComp) -> Any:
"""Compile the list comprehension as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
self.recomputed_values[node] = result
re... |
def visit_SetComp(self, node: ast.SetComp) -> Any:
"""Compile the set comprehension as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
self.recomputed_values[node] = result
retur... |
def visit_DictComp(self, node: ast.DictComp) -> Any:
"""Compile the dictionary comprehension as a function and call it."""
result = self._execute_comprehension(node=node)
for generator in node.generators:
self.visit(generator.iter)
self.recomputed_values[node] = result
... |
def visit_Return(self, node: ast.Return) -> Any:  # pylint: disable=no-self-use
    """Fail loudly: ``return`` nodes must never occur during re-computation."""
    message = "Unexpected return node during the re-computation: {}".format(ast.dump(node))
    raise AssertionError(message)
def generic_visit(self, node: ast.AST) -> None:
    """Fail for any node type that lacks a dedicated visit method."""
    message = "Unhandled recomputation of the node: {} {}".format(type(node), node)
    raise NotImplementedError(message)
def tokenize_words(string):
    """
    Split the input text into word tokens.

    :param string: Text to tokenize
    :type string: str or unicode
    :return: words
    :rtype: list of strings
    """
    # Coerce to text first so byte strings are handled consistently.
    return re.findall(WORD_TOKENIZATION_RULES, six.text_type(string))
def tokenize_sents(string):
"""
Tokenize input text to sentences.
:param string: Text to tokenize
:type string: str or unicode
:return: sentences
:rtype: list of strings
"""
string = six.text_type(string)
spans = []
for match in re.finditer('[^\s]+', string):
spans.appe... |
def tokenize_text(string):
"""
Tokenize input text to paragraphs, sentences and words.
Tokenization to paragraphs is done using simple Newline algorithm
For sentences and words tokenizers above are used
:param string: Text to tokenize
:type string: str or unicode
:return: text, tokenized i... |
def collate(self, graph, collation):
'''
:type graph: VariantGraph
:type collation: Collation
'''
# Build the variant graph for the first witness
# this is easy: generate a vertex for every token
first_witness = collation.witnesses[0]
tokens = first_witnes... |
def align(self):
'''
Every step we have 3 choices:
1) Move pointer witness a --> omission
2) Move pointer witness b --> addition
3) Move pointer of both witness a/b --> match
Note: a replacement is omission followed by an addition or the other way around
Choice ... |
def connect(self, source, target, witnesses):
"""
:type source: integer
:type target: integer
"""
# print("Adding Edge: "+source+":"+target)
if self.graph.has_edge(source, target):
self.graph[source][target]["label"] += ", " + str(witnesses)
else:
... |
def connect_near(self, source, target, weight):
    """
    Add a weighted 'near' edge between two vertices.

    :type source: integer
    :type target: integer
    """
    # Near edges live in self.near_graph rather than self.graph so that
    # they cannot introduce cycles into the main graph.
    self.near_graph.add_edge(source, target, weight=weight, type='near')
def merge(self, graph, witness_sigil, witness_tokens, alignments={}):
"""
:type graph: VariantGraph
"""
# NOTE: token_to_vertex only contains newly generated vertices
token_to_vertex = {}
last = graph.start
for token in witness_tokens:
vertex = alignme... |
def collate(self, graph):
"""
:type graph: VariantGraph
"""
# prepare the token index
self.token_index.prepare()
self.vertex_array = [None] * len(self.token_index.token_array)
# Build the variant graph for the first witness
# this is easy: generate a vert... |
def _parse(self, pattern):
"""Parse string of comma-separated x-y/step -like ranges"""
# Comma separated ranges
if pattern.find(',') < 0:
subranges = [pattern]
else:
subranges = pattern.split(',')
for subrange in subranges:
if subrange.find('/... |
def fromlist(cls, rnglist, autostep=None):
    """Class method: build a new RangeSet as the union of the provided
    range strings."""
    new_set = RangeSet(autostep=autostep)
    new_set.updaten(rnglist)
    return new_set
def fromone(cls, index, pad=0, autostep=None):
"""Class method that returns a new RangeSet of one single item or
a single range (from integer or slice object)."""
inst = RangeSet(autostep=autostep)
# support slice object with duck-typing
try:
inst.add(index, pad)
... |
def set_autostep(self, val):
"""Set autostep value (property)"""
if val is None:
# disabled by default for pdsh compat (+inf is 1E400, but a bug in
# python 2.4 makes it impossible to be pickled, so we use less)
# NOTE: Later, we could consider sys.maxint here
... |
def striter(self):
    """Yield each element of the RangeSet as a (possibly zero-padded) string."""
    width = self.padding or 0
    for element in self._sorted():
        yield "%0*d" % (width, element)
def contiguous(self):
    """Yield one RangeSet per contiguous run of elements."""
    pad = self.padding or 0
    for run in self._contiguous_slices():
        yield RangeSet.fromone(slice(run.start, run.stop, run.step), pad)
def _strslices(self):
"""Stringify slices list (x-y/step format)"""
pad = self.padding or 0
for sli in self.slices():
if sli.start + 1 == sli.stop:
yield "%0*d" % (pad, sli.start)
else:
assert sli.step >= 0, "Internal error: sli.step < 0"
... |
def _contiguous_slices(self):
"""Internal iterator over contiguous slices in RangeSet."""
k = j = None
for i in self._sorted():
if k is None:
k = j = i
if i - j > 1:
yield slice(k, j + 1, 1)
k = i
j = i
i... |
def _folded_slices(self):
"""Internal generator that is able to retrieve ranges organized by step.
Complexity: O(n) with n = number of ranges in tree."""
if len(self) == 0:
return
prng = None # pending range
istart = None # processing starting indice
... |
def split(self, nbr):
"""
Split the rangeset into nbr sub-rangesets (at most). Each
sub-rangeset will have the same number of elements more or
less 1. Current rangeset remains unmodified. Returns an
iterator.
>>> RangeSet("1-5").split(3)
RangeSet("1-2")
... |
def add_range(self, start, stop, step=1, pad=0):
"""
Add a range (start, stop, step and padding length) to RangeSet.
Like the Python built-in function range(), the last element is
the largest start + i * step less than stop.
"""
assert start < stop, "please provide ordere... |
def copy(self):
    """Create a shallow copy: same class, autostep, padding and elements."""
    clone = self.__class__()
    clone._autostep = self._autostep
    clone.padding = self.padding
    clone.update(self)
    return clone
def _wrap_set_op(self, fun, arg):
"""Wrap built-in set operations for RangeSet to workaround built-in set
base class issues (RangeSet.__new/init__ not called)"""
result = fun(self, arg)
result._autostep = self._autostep
result.padding = self.padding
return result |
def intersection(self, other):
"""Return the intersection of two RangeSets as a new RangeSet.
(I.e. all elements that are in both sets.)
"""
#NOTE: This is a work around
# Python 3 return as the result of set.intersection a new set instance.
# Python 2 however returns a... |
def difference(self, other):
"""Return the difference of two RangeSets as a new RangeSet.
(I.e. all elements that are in this set and not in the other.)
"""
#NOTE: This is a work around
# Python 3 return as the result of set.intersection a new set instance.
# Python 2 ho... |
def issubset(self, other):
    """Return True when every element of this RangeSet is also in ``other``."""
    self._binary_sanity_check(other)
    result = set.issubset(self, other)
    return result
def issuperset(self, other):
    """Return True when this RangeSet contains every element of ``other``."""
    self._binary_sanity_check(other)
    result = set.issuperset(self, other)
    return result
def difference_update(self, other, strict=False):
"""Remove all elements of another set from this RangeSet.
If strict is True, raise KeyError if an element cannot be removed.
(strict is a RangeSet addition)"""
if strict and other not in self:
raise KeyError(other.dif... |
def update(self, iterable):
"""Add all integers from an iterable (such as a list)."""
if isinstance(iterable, RangeSet):
# keep padding unless is has not been defined yet
if self.padding is None and iterable.padding is not None:
self.padding = iterable.padding
... |
def updaten(self, rangesets):
    """
    Union this rangeset, in place, with each of the given rangesets.
    """
    for rangeset in rangesets:
        # Plain sets are merged directly; anything else (e.g. a range
        # string) is parsed into a RangeSet first.
        other = rangeset if isinstance(rangeset, set) else RangeSet(rangeset)
        self.update(other)
def add(self, element, pad=0):
    """Insert ``element`` (coerced to int) into the RangeSet.

    Adding an element already present has no effect.  The first positive
    ``pad`` value seen fixes the set's padding width.
    """
    set.add(self, int(element))
    if self.padding is None and pad > 0:
        self.padding = pad
def discard(self, element):
    """Remove ``element`` from the RangeSet if it is a member.

    Elements that cannot be parsed as integers are silently ignored.
    """
    try:
        set.discard(self, int(element))
    except ValueError:
        # not an integer -- nothing to remove
        pass
def _open(filename, mode="r"):
"""
Universal open file facility.
With normal files, this function behaves as the open builtin.
With gzip-ed files, it decompress or compress according to the specified mode.
In addition, when filename is '-', it opens the standard input or output according to
the ... |
def _radixPass(a, b, r, n, K):
"""
Stable sort of the sequence a according to the keys given in r.
>>> a=range(5)
>>> b=[0]*5
>>> r=[2,1,3,0,4]
>>> _radixPass(a, b, r, 5, 5)
>>> b
[3, 1, 0, 2, 4]
When n is less than the length of a, the end of b must be left unaltered.
>>> b=[... |
def _nbOperations(n):
"""
Exact number of atomic operations in _radixPass.
"""
if n < 2:
return 0
else:
n0 = (n + 2) // 3
n02 = n0 + n // 3
return 3 * (n02) + n0 + _nbOperations(n02) |
def _suffixArrayWithTrace(s, SA, n, K, operations, totalOperations):
"""
This function is a rewrite in Python of the C implementation proposed in Kärkkäinen and Sanders paper.
Find the suffix array SA of s[0..n-1] in {1..K}^n
Require s[n]=s[n+1]=s[n+2]=0, n>=2
"""
if _trace:
_traceSuffi... |
def _longestCommonPrefix(seq1, seq2, start1=0, start2=0):
"""
Returns the length of the longest common prefix of seq1
starting at offset start1 and seq2 starting at offset start2.
>>> _longestCommonPrefix("abcdef", "abcghj")
3
>>> _longestCommonPrefix("abcghj", "abcdef")
3
>>> _longes... |
def LCP(SA):
"""
Compute the longest common prefix for every adjacent suffixes.
The result is a list of same size as SA.
Given two suffixes at positions i and i+1,
their LCP is stored at position i+1.
A zero is stored at position 0 of the output.
>>> SA=SuffixArray("abba", unit=UNIT_BYTE)
... |
def parseArgv():
"""
Command line option parser.
"""
parser = OptionParser()
parser.usage = r""" cat <TEXT> | %prog [--unit <UNIT>] [--output <SA_FILE>]
Create the suffix array of TEXT with the processing UNIT and optionally store it in SA_FILE for subsequent use.
UNIT may be set to 'byte', 'charac... |
def main():
"""
Entry point for the standalone script.
"""
(options, strings) = parseArgv()
global _suffixArray, _trace
#############
# Verbosity #
#############
_trace = options.verbose
###################
# Processing unit #
###################
if options.unit ==... |
def addFeatureSA(self, callback, default=None, name=None):
"""
Add a feature to the suffix array.
The callback must return a sequence such that
the feature at position i is attached to the suffix referenced by
self.SA[i].
It is called with one argument: the instance of S... |
def addFeature(self, callback, default=None, name=None, arguments=None):
"""
Add a feature to the suffix array.
The callback must return the feature corresponding to the suffix at
position self.SA[i].
The callback must be callable (a function or lambda).
The argument nam... |
def tokenize(self, string):
"""
Tokenizer utility.
When processing byte, outputs the string unaltered.
The character unit type is used for unicode data, the string is
decoded according to the encoding provided.
In the case of word unit, EOL characters are detached from th... |
def reprString(self, string, length):
"""
Output a string of length tokens in the original form.
If string is an integer, it is considered as an offset in the text.
Otherwise string is considered as a sequence of ids (see voc and
tokId).
>>> SA=SuffixArray('mississippi',... |
def toFile(self, filename):
"""
Save the suffix array instance including all features attached in
filename. Accept any filename following the _open conventions,
for example if it ends with .gz the file created will be a compressed
GZip file.
"""
start = _time()
... |
def fromFile(cls, filename):
"""
Load a suffix array instance from filename, a file created by
toFile.
Accept any filename following the _open conventions.
"""
self = cls.__new__(cls) # new instance which does not call __init__
start = _time()
savedData... |
def _findOne(self, subString):
"""
>>> SA=SuffixArray("mississippi", unit=UNIT_BYTE)
>>> SA._findOne("ippi")
1
>>> SA._findOne("missi")
4
"""
SA = self.SA
LCPs = self._LCP_values
string = self.string
try:
subString = _... |
def find(self, subString, features=[]):
"""
Dichotomy search of subString in the suffix array.
As soon as a suffix which starts with subString is found,
it uses the LCPs in order to find the other matching suffixes.
The outputs consists in a list of tuple (pos, feature0, feature... |
def escape(s, quote=False):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences. If
the optional flag `quote` is `True`, the quotation mark character (") is
also translated.
There is a special handling for `None` which escapes to an empty string.
:param s: the string to escape.
... |
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(arg... |
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': u'unbekannter Autor'},)
... |
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % e... |
def override_djconfig(**new_cache_values):
"""
Temporarily override config values.
This is similar to :py:func:`django.test.override_settings`,\
use it in testing.
:param new_cache_values: Keyword arguments,\
the key should match one in the config,\
a new one is created otherwise,\
the... |
def serialize(value, field):
"""
Form values serialization
:param object value: A value to be serialized\
for saving it into the database and later\
loading it into the form as initial value
"""
assert isinstance(field, forms.Field)
if isinstance(field, forms.ModelMultipleChoiceField):
... |
def get_version(package):
    """Read ``__version__`` from the package's __init__.py without importing it."""
    init_path = os.path.join(BASE_DIR, package, '__init__.py')
    with io.open(init_path, encoding='utf-8') as fh:
        versions = [
            line.split('=', 1)[1].strip().strip("'").strip('"')
            for line in fh.readlines()
            if '__version__' in line]
    # Raises IndexError if no __version__ line is present, as before.
    return versions[0]
def _check_backend():
"""
Check :py:class:`djconfig.middleware.DjConfigMiddleware`\
is registered into ``settings.MIDDLEWARE_CLASSES``
"""
# Django 1.10 does not allow
# both settings to be set
middleware = set(
getattr(settings, 'MIDDLEWARE', None) or
getattr(settings, 'MIDD... |
def _register(self, form_class, check_middleware=True):
"""
Register a config form into the registry
:param object form_class: The form class to register.\
Must be an instance of :py:class:`djconfig.forms.ConfigForm`
:param bool check_middleware: Check\
:py:class:`djconf... |
def _reload(self):
"""
Gets every registered form's field value.\
If a field name is found in the db, it will load it from there.\
Otherwise, the initial value from the field form is used
"""
ConfigModel = apps.get_model('djconfig.Config')
cache = {}
data ... |
def _reload_maybe(self):
"""
Reload the config if the config\
model has been updated. This is called\
once on every request by the middleware.\
Should not be called directly.
"""
ConfigModel = apps.get_model('djconfig.Config')
data = dict(
Con... |
def save(self):
"""
Save the config with the cleaned data,\
update the last modified date so\
the config is reloaded on other process/nodes.\
Reload the config so it can be called right away.
"""
assert self.__class__ in conf.config._registry,\
'%(clas... |
def register(conf, conf_admin, **options):
"""
Register a new admin section.
:param conf: A subclass of ``djconfig.admin.Config``
:param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin``
:param options: Extra options passed to ``django.contrib.admin.site.register``
"""
assert issubcl... |
def Mixed(*types):
"""Mixed type, used to indicate a field in a schema can be
one of many types. Use as a last resort only.
The Mixed type can be used directly as a class to indicate
any type is permitted for a given field:
`"my_field": {"type": Mixed}`
It can also be instantiated with list of s... |
def one_of(*args):
"""
Validates that a field value matches one of the values
given to this validator.
"""
if len(args) == 1 and isinstance(args[0], list):
items = args[0]
else:
items = list(args)
def validate(value):
if not value in items:
return e("{} i... |
def gte(min_value):
    """
    Build a validator that checks a field value is greater than or equal
    to ``min_value``; the validator returns an error for smaller values
    and None otherwise.
    """
    def validate(value):
        if value < min_value:
            return e("{} is not greater than or equal to {}", value, min_value)
        return None
    return validate
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.