Search is not available for this dataset
text stringlengths 75 104k |
|---|
def _handle_break(self, node, scope, ctxt, stream):
    """Signal a ``break`` statement by raising InterpBreak.

    The exception unwinds the interpreter back to whichever handler
    is running the enclosing loop.

    :param node: the AST break node (unused beyond logging)
    :param scope: current variable scope (unused)
    :param ctxt: current parse context (unused)
    :param stream: input data stream (unused)
    :raises errors.InterpBreak: always
    """
    self._dlog("handling break")
    raise errors.InterpBreak()
def _handle_continue(self, node, scope, ctxt, stream):
    """Signal a ``continue`` statement by raising InterpContinue.

    The exception unwinds the interpreter back to whichever handler
    is running the enclosing loop.

    :param node: the AST continue node (unused beyond logging)
    :param scope: current variable scope (unused)
    :param ctxt: current parse context (unused)
    :param stream: input data stream (unused)
    :raises errors.InterpContinue: always
    """
    self._dlog("handling continue")
    raise errors.InterpContinue()
def _handle_decl_list(self, node, scope, ctxt, stream):
"""Handle For nodes
:node: TODO
:scope: TODO
:ctxt: TODO
:stream: TODO
:returns: TODO
"""
self._dlog("handling decl list")
# just handle each declaration
for decl in node.decls:
... |
def _create_scope(self):
    """Build a fresh root Scope pre-populated with native functions.

    :returns: a new ``Scope`` containing every function registered in
        ``self._natives`` as a local
    """
    scope = Scope(self._log)
    for name, native in six.iteritems(self._natives):
        scope.add_local(name, native)
    return scope
def _get_value(self, node, scope, ctxt, stream):
"""Return the value of the node. It is expected to be
either an AST.ID instance or a constant
:node: TODO
:returns: TODO
"""
res = self._handle_node(node, scope, ctxt, stream)
if isinstance(res, fields.Field):
... |
def _resolve_to_field_class(self, names, scope):
"""Resolve the names to a class in fields.py, resolving past
typedefs, etc
:names: TODO
:scope: TODO
:ctxt: TODO
:returns: TODO
"""
switch = {
"char" : "Char",
"int" : "Int",... |
def bits_to_bytes(bits):
"""Convert the bit list into bytes. (Assumes bits is a list
whose length is a multiple of 8)
"""
if len(bits) % 8 != 0:
raise Exception("num bits must be multiple of 8")
res = ""
for x in six.moves.range(0, len(bits), 8):
byte_bits = bits[x:x+8]
... |
def bytes_to_bits(bytes_):
    """Expand a byte sequence into a flat list of bits.

    Each element may be an int (Python 3 iteration over ``bytes``) or a
    one-character string (Python 2); strings are normalized with
    ``ord`` before being expanded via ``byte_to_bits``.
    """
    bits = []
    for byte in bytes_:
        value = byte if isinstance(byte, int) else ord(byte)
        bits.extend(byte_to_bits(value))
    return bits
def is_eof(self):
    """Check for end-of-stream without discarding unflushed bits.

    Peeks one byte ahead on the underlying stream and restores the
    previous position before returning.

    :returns: True if the underlying stream is at EOF, False otherwise
    """
    saved = self._stream.tell()
    peeked = self._stream.read(1)
    self._stream.seek(saved, 0)
    # an empty read means we were already at EOF
    return utils.binary(peeked) == utils.binary("")
def close(self):
    """Mark the stream closed, flush pending bits, and close it.

    Any bits accumulated in ``self._bits`` are written out via
    ``_flush_bits_to_stream`` before the underlying stream is closed.
    """
    self.closed = True
    self._flush_bits_to_stream()
    self._stream.close()
def read(self, num):
"""Read ``num`` number of bytes from the stream. Note that this will
automatically resets/ends the current bit-reading if it does not
end on an even byte AND ``self.padded`` is True. If ``self.padded`` is
True, then the entire stream is treated as a bitstream.
... |
def read_bits(self, num):
"""Read ``num`` number of bits from the stream
:num: number of bits to read
:returns: a list of ``num`` bits, or an empty list if EOF has been reached
"""
if num > len(self._bits):
needed = num - len(self._bits)
num_bytes = int(m... |
def write(self, data):
"""Write data to the stream
:data: the data to write to the stream
:returns: None
"""
if self.padded:
# flush out any remaining bits first
if len(self._bits) > 0:
self._flush_bits_to_stream()
self._stream... |
def write_bits(self, bits):
"""Write the bits to the stream.
Add the bits to the existing unflushed bits and write
complete bytes to the stream.
"""
for bit in bits:
self._bits.append(bit)
while len(self._bits) >= 8:
byte_bits = [self._bits.pople... |
def tell(self):
    """Report the current byte position, ignoring bit position.

    When unflushed bits are pending, the underlying stream has
    presumably already advanced past the byte they belong to, so the
    reported position is backed up by one byte.

    :returns: int byte offset into the stream
    """
    pos = self._stream.tell()
    if self._bits:
        pos -= 1
    return pos
def seek(self, pos, seek_type=0):
"""Seek to the specified position in the stream with seek_type.
Unflushed bits will be discarded in the case of a seek.
The stream will also keep track of which bytes have and have
not been consumed so that the dom will capture all of the
bytes ... |
def size(self):
    """Return the total size of the underlying stream in bytes.

    Measures by seeking to the end, then restores the original
    position before returning.
    """
    saved = self._stream.tell()
    self._stream.seek(0, 2)  # 2 == SEEK_END
    total = self._stream.tell()
    self._stream.seek(saved, 0)
    return total
def unconsumed_ranges(self):
"""Return an IntervalTree of unconsumed ranges, of the format
(start, end] with the end value not being included
"""
res = IntervalTree()
prev = None
# normal iteration is not in a predictable order
ranges = sorted([x for x in self.r... |
def _update_consumed_ranges(self, start_pos, end_pos):
    """Record that the bytes in [start_pos, end_pos] were consumed.

    The inclusive range is stored in ``self.range_set`` as a half-open
    ``Interval(start_pos, end_pos + 1)``; overlapping intervals are
    merged so consecutive consumption collapses into one range.
    """
    interval = Interval(start_pos, end_pos + 1)
    self.range_set.add(interval)
    self.range_set.merge_overlaps()
def _flush_bits_to_stream(self):
"""Flush the bits to the stream. This is used when
a few bits have been read and ``self._bits`` contains unconsumed/
flushed bits when data is to be written to the stream
"""
if len(self._bits) == 0:
return 0
bits = list(self.... |
def _validate_markdown(self, expfile):
'''ensure that fields are present in markdown file'''
try:
import yaml
except:
bot.error('Python yaml is required for testing yml/markdown files.')
sys.exit(1)
self.metadata = {}
uid = os.path.basename(e... |
def perform_checks(template,
do_redirect=False,
context=None,
next=None,
quiet=False):
'''return all checks for required variables before returning to
desired view
Parameters
==========
template: the html temp... |
def FSeek(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "FSeek accepts only one argument")
pos = PYVAL(params[0])
curr_pos = stream.tell()
... |
def FSkip(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(coord, "{} args".format(len(params)), "FSkip accepts only one argument")
skip_amt = PYVAL(params[0])
pos = skip_amt + stream.... |
def packer_gzip(params, ctxt, scope, stream, coord):
"""``PackerGZip`` - implements both unpacking and packing. Can be used
as the ``packer`` for a field. When packing, concats the build output
of all params and gzip-compresses the result. When unpacking, concats
the build output of all params and gzip-... |
def pack_gzip(params, ctxt, scope, stream, coord):
"""``PackGZip`` - Concats the build output of all params and gzips the
resulting data, returning a char array.
Example: ::
char data[0x100]<pack=PackGZip, ...>;
"""
if len(params) == 0:
raise errors.InvalidArguments(coord, "{} args... |
def watch_length(params, ctxt, scope, stream, coord):
"""WatchLength - Watch the total length of each of the params.
Example:
The code below uses the ``WatchLength`` update function to update
the ``length`` field to the length of the ``data`` field ::
int length<watch=data, upd... |
def watch_crc(params, ctxt, scope, stream, coord):
"""WatchCrc32 - Watch the total crc32 of the params.
Example:
The code below uses the ``WatchCrc32`` update function to update
the ``crc`` field to the crc of the ``length`` and ``data`` fields ::
char length;
char ... |
def _validate_folder(self, folder=None):
''' validate folder takes a cloned github repo, ensures
the existence of the config.json, and validates it.
'''
from expfactory.experiment import load_experiment
if folder is None:
folder=os.path.abspath(os.getcwd())
... |
def validate(self, folder, cleanup=False, validate_folder=True):
''' validate is the entrypoint to all validation, for
a folder, config, or url. If a URL is found, it is
cloned and cleaned up.
:param validate_folder: ensures the folder name (github repo)
... |
def _validate_config(self, folder, validate_folder=True):
''' validate config is the primary validation function that checks
for presence and format of required fields.
Parameters
==========
:folder: full path to folder with config.json
:name: if provided, the folder... |
def get_validation_fields(self):
'''get_validation_fields returns a list of tuples (each a field)
we only require the exp_id to coincide with the folder name, for the sake
of reproducibility (given that all are served from sample image or Github
organization). All other fields a... |
def get_runtime_vars(varset, experiment, token):
'''get_runtime_vars will return the urlparsed string of one or more runtime
variables. If None are present, None is returned.
Parameters
==========
varset: the variable set, a dictionary lookup with exp_id, token, vars
experiment... |
def generate_runtime_vars(variable_file=None, sep=','):
'''generate a lookup data structure from a
delimited file. We typically obtain the file name and delimiter from
the environment by way of EXPFACTORY_RUNTIME_VARS, and
EXPFACTORY_RUNTIME_DELIM, respectively, but the user can also parse
... |
def _read_runtime_vars(variable_file, sep=','):
'''read the entire runtime variable file, and return a list of lists,
each corresponding to a row. We also check the header, and exit
if anything is missing or malformed.
Parameters
==========
variable_file: full path to the tabula... |
def _validate_row(row, sep=',', required_length=None):
'''validate_row will ensure that a row has the proper length, and is
not empty and cleaned of extra spaces.
Parameters
==========
row: a single row, not yet parsed.
Returns a valid row, or None if not valid
'''
if ... |
def _parse_row(row, sep=','):
'''parse row is a helper function to simply clean up a string, and parse
into a row based on a delimiter. If a required length is provided,
we check for this too.
'''
parsed = row.split(sep)
parsed = [x for x in parsed if x.strip()]
return parsed |
def validate_header(header, required_fields=None):
'''validate_header ensures that the first row contains the exp_id,
var_name, var_value, and token. Capitalization isn't important, but
ordering is. This criteria is very strict, but it's reasonable
to require.
Parameters
=======... |
def superuser_required(view_func):
"""
Decorator for views that checks that the user is logged in and is a staff
member, displaying the login page if necessary.
"""
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
if request.user.is_active and request.user.is_superuser:
... |
def from_lines(cls, pattern_factory, lines):
"""
Compiles the pattern lines.
*pattern_factory* can be either the name of a registered pattern
factory (:class:`str`), or a :class:`~collections.abc.Callable` used
to compile patterns. It must accept an uncompiled pattern (:class:`str`)
and return the compiled... |
def match_file(self, file, separators=None):
"""
Matches the file to this path-spec.
*file* (:class:`str`) is the file path to be matched against
:attr:`self.patterns <PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`)
optionally contains the path separators to normal... |
def match_files(self, files, separators=None):
"""
Matches the files to this path-spec.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be matched against :attr:`self.patterns
<PathSpec.patterns>`.
*separators* (:class:`~collections.abc.Collection` of :class:`str`;
... |
def match_tree(self, root, on_error=None, follow_links=None):
"""
Walks the specified root path for all files and matches them to this
path-spec.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler... |
def pattern_to_regex(cls, pattern):
"""
Convert the pattern into a regular expression.
*pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
convert into a regular expression.
Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
or :data:`None`), and whether matched file... |
def _translate_segment_glob(pattern):
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
""... |
def pattern_to_regex(cls, *args, **kw):
    """Emit the deprecation warning, then defer to the parent class."""
    cls._deprecated()
    parent = super(GitIgnorePattern, cls)
    return parent.pattern_to_regex(*args, **kw)
def iter_tree(root, on_error=None, follow_links=None):
"""
Walks the specified directory for all files.
*root* (:class:`str`) is the root directory to search for files.
*on_error* (:class:`~collections.abc.Callable` or :data:`None`)
optionally is the error handler for file-system exceptions. It will be
called w... |
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
"""
Scan the directory for all descendant files.
*root_full* (:class:`str`) the absolute path to the root directory.
*dir_rel* (:class:`str`) the path to the directory to scan relative to
*root_full*.
*memo* (:class:`dict`) keeps track of a... |
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; ... |
def match_files(patterns, files):
"""
Matches the files to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the normalized file paths to be matched against *patt... |
def normalize_file(file, separators=None):
"""
Normalizes the file path to use the POSIX path separator (i.e., ``'/'``).
*file* (:class:`str`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does no... |
def normalize_files(files, separators=None):
"""
Normalizes the file paths to use the POSIX path separator.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
the file paths to be normalized.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally conta... |
def register_pattern(name, pattern_factory, override=None):
"""
Registers the specified pattern factory.
*name* (:class:`str`) is the name to register the pattern factory
under.
*pattern_factory* (:class:`~collections.abc.Callable`) is used to
compile patterns. It must accept an uncompiled pattern (:class:`str`... |
def message(self):
    """Build the error message for a duplicated real path.

    :returns: str naming the real path and the first and second link
        paths at which it was encountered
    """
    template = (
        "Real path {real!r} was encountered at {first!r} and then {second!r}."
    )
    return template.format(
        real=self.real_path,
        first=self.first_path,
        second=self.second_path,
    )
def match(self, files):
"""
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
each file relative to the root directory (e.g., ``"relative/path/to/file"``).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:... |
def match(self, files):
"""
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`)
contains each file relative to the root directory (e.g., "relative/path/to/file").
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:clas... |
def user_default_serializer(self, obj):
"""Convert a User to a cached instance representation."""
if not obj:
return None
self.user_default_add_related_pks(obj)
return dict((
('id', obj.id),
('username', obj.username),
self.field_to_json('D... |
def user_default_loader(self, pk):
    """Fetch a User by primary key, annotated with related pks.

    :param pk: the primary key to look up
    :returns: the User instance with ``_votes_pks`` populated, or
        None when no matching row exists
    """
    try:
        user = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return None
    self.user_default_add_related_pks(user)
    return user
def user_default_add_related_pks(self, obj):
    """Cache the pks of the user's votes on ``obj._votes_pks``.

    Does nothing when the attribute is already present, so repeated
    calls never re-query.
    """
    if hasattr(obj, '_votes_pks'):
        return
    obj._votes_pks = list(obj.votes.values_list('pk', flat=True))
def group_default_invalidator(self, obj):
    """Invalidate every cached User when a Group changes.

    :returns: a list of ('User', pk, False) triples, one per user
    """
    invalid = []
    for pk in User.objects.values_list('pk', flat=True):
        invalid.append(('User', pk, False))
    return invalid
def question_default_serializer(self, obj):
"""Convert a Question to a cached instance representation."""
if not obj:
return None
self.question_default_add_related_pks(obj)
return dict((
('id', obj.id),
('question_text', obj.question_text),
... |
def question_default_loader(self, pk):
    """Fetch a Question by primary key, annotated with related pks.

    :param pk: the primary key to look up
    :returns: the Question instance with ``_choice_pks`` populated, or
        None when no matching row exists
    """
    try:
        question = Question.objects.get(pk=pk)
    except Question.DoesNotExist:
        return None
    self.question_default_add_related_pks(question)
    return question
def question_default_add_related_pks(self, obj):
    """Cache the pks of the question's choices on ``obj._choice_pks``.

    Does nothing when the attribute is already present, so repeated
    calls never re-query.
    """
    if hasattr(obj, '_choice_pks'):
        return
    obj._choice_pks = list(obj.choices.values_list('pk', flat=True))
def choice_default_serializer(self, obj):
"""Convert a Choice to a cached instance representation."""
if not obj:
return None
self.choice_default_add_related_pks(obj)
return dict((
('id', obj.id),
('choice_text', obj.choice_text),
self.fiel... |
def choice_default_loader(self, pk):
    """Fetch a Choice by primary key, annotated with related pks.

    :param pk: the primary key to look up
    :returns: the Choice instance with ``_voter_pks`` populated, or
        None when no matching row exists
    """
    try:
        choice = Choice.objects.get(pk=pk)
    except Choice.DoesNotExist:
        return None
    self.choice_default_add_related_pks(choice)
    return choice
def choice_default_add_related_pks(self, obj):
    """Cache the pks of the choice's voters on ``obj._voter_pks``.

    Does nothing when the attribute is already present, so repeated
    calls never re-query.
    """
    if not hasattr(obj, '_voter_pks'):
        # Materialize into a list, matching user_default_add_related_pks
        # and question_default_add_related_pks; caching the lazy
        # values_list queryset would re-run the query on every access
        # and is not a plain serializable value.
        obj._voter_pks = list(obj.voters.values_list('pk', flat=True))
def choice_default_invalidator(self, obj):
    """Invalidate cached items affected by a change to a Choice.

    :returns: a list of invalidation triples covering the parent
        Question and every User who voted for this choice
    """
    invalid = [('Question', obj.question_id, True)]
    invalid.extend(
        ('User', pk, False)
        for pk in obj.voters.values_list('pk', flat=True)
    )
    return invalid
def cache(self):
"""Get the Django cache interface.
This allows disabling the cache with
settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
Django Debug Toolbar will record cache requests.
"""
if not self._cache:
use_cache = getattr(setting... |
def delete_all_versions(self, model_name, obj_pk):
    """Remove every cached version of one instance.

    No-op when caching is disabled (``self.cache`` is falsy).

    :param model_name: name of the model whose instance is cached
    :param obj_pk: primary key of the instance
    """
    cache = self.cache
    if not cache:
        return
    for version in self.versions:
        cache.delete(self.key_for(version, model_name, obj_pk))
def model_function(self, model_name, version, func_name):
    """Look up the model- and version-specific caching function.

    The functions follow the naming convention
    ``<model>_<version>_<kind>`` (e.g. ``user_default_loader``).

    :param func_name: one of 'serializer', 'loader', 'invalidator'
    :returns: the attribute of ``self`` with the composed name
    :raises AssertionError: if func_name is not a known kind
    """
    assert func_name in ('serializer', 'loader', 'invalidator')
    attr = "{0}_{1}_{2}".format(model_name.lower(), version, func_name)
    return getattr(self, attr)
def field_function(self, type_code, func_name):
    """Look up the per-field-type conversion function.

    The functions follow the naming convention
    ``field_<type>_<direction>`` (e.g. ``field_date_to_json``).

    :param func_name: 'to_json' or 'from_json'
    :returns: the attribute of ``self`` with the composed name
    :raises AssertionError: if func_name is not a known direction
    """
    assert func_name in ('to_json', 'from_json')
    attr = "field_{0}_{1}".format(type_code.lower(), func_name)
    return getattr(self, attr)
def field_to_json(self, type_code, key, *args, **kwargs):
"""Convert a field to a JSON-serializable representation."""
assert ':' not in key
to_json = self.field_function(type_code, 'to_json')
key_and_type = "%s:%s" % (key, type_code)
json_value = to_json(*args, **kwargs)
... |
def field_from_json(self, key_and_type, json_value):
"""Convert a JSON-serializable representation back to a field."""
assert ':' in key_and_type
key, type_code = key_and_type.split(':', 1)
from_json = self.field_function(type_code, 'from_json')
value = from_json(json_value)
... |
def get_instances(self, object_specs, version=None):
"""Get the cached native representation for one or more objects.
Keyword arguments:
object_specs - A sequence of triples (model name, pk, obj):
- model name - the name of the model
- pk - the primary key of the instance
... |
def update_instance(
self, model_name, pk, instance=None, version=None,
update_only=False):
"""Create or update a cached instance.
Keyword arguments are:
model_name - The name of the model
pk - The primary key of the instance
instance - The Django model i... |
def field_date_to_json(self, day):
    """Serialize a date as a ``[year, month, day]`` triple.

    String inputs are first parsed with ``parse_date``; falsy values
    serialize to None.
    """
    if isinstance(day, six.string_types):
        day = parse_date(day)
    if not day:
        return None
    return [day.year, day.month, day.day]
def field_datetime_from_json(self, json_val):
"""Convert a UTC timestamp to a UTC datetime."""
if type(json_val) == int:
seconds = int(json_val)
dt = datetime.fromtimestamp(seconds, utc)
elif json_val is None:
dt = None
else:
seconds, micro... |
def field_datetime_to_json(self, dt):
"""Convert a datetime to a UTC timestamp w/ microsecond resolution.
datetimes w/o timezone will be assumed to be in UTC
"""
if isinstance(dt, six.string_types):
dt = parse_datetime(dt)
if not dt:
return None
t... |
def field_timedelta_from_json(self, json_val):
"""Convert json_val to a timedelta object.
json_val contains total number of seconds in the timedelta.
If json_val is a string it will be converted to a float.
"""
if isinstance(json_val, str):
return timedelta(seconds=f... |
def field_timedelta_to_json(self, td):
"""Convert timedelta to value containing total number of seconds.
If there are fractions of a second the return value will be a
string, otherwise it will be an int.
"""
if isinstance(td, six.string_types):
td = parse_duration(td... |
def field_pklist_from_json(self, data):
    """Rebuild a PkOnlyQueryset from its JSON dict representation.

    Expects the same format as ``cached_queryset_from_json``: a dict
    with 'app', 'model', and 'pks' keys.
    """
    app_label = data['app']
    model_name = data['model']
    model = get_model(app_label, model_name)
    return PkOnlyQueryset(self, model, data['pks'])
def field_pklist_to_json(self, model, pks):
"""Convert a list of primary keys to a JSON dict.
This uses the same format as cached_queryset_to_json
"""
app_label = model._meta.app_label
model_name = model._meta.model_name
return {
'app': app_label,
... |
def field_pk_from_json(self, data):
    """Rebuild a PkOnlyModel from a JSON dict with 'app', 'model', 'pk'."""
    app_label = data['app']
    model_name = data['model']
    model = get_model(app_label, model_name)
    return PkOnlyModel(self, model, data['pk'])
def field_pk_to_json(self, model, pk):
    """Serialize a primary key plus its model identity to a JSON dict.

    :param model: the Django model class (read via ``model._meta``)
    :param pk: the primary key value
    :returns: dict with 'app', 'model', and 'pk' keys
    """
    meta = model._meta
    return dict(app=meta.app_label, model=meta.model_name, pk=pk)
def choice_voters_changed_update_cache(
sender, instance, action, reverse, model, pk_set, **kwargs):
"""Update cache when choice.voters changes."""
if action not in ('post_add', 'post_remove', 'post_clear'):
# post_clear is not handled, because clear is called in
# django.db.models.field... |
def post_delete_update_cache(sender, instance, **kwargs):
    """Signal handler: refresh the cache after an instance is deleted.

    Only acts on models listed in ``cached_model_names``; the task
    import is deferred to avoid import-time cycles.
    """
    model_name = sender.__name__
    if model_name not in cached_model_names:
        return
    from .tasks import update_cache_for_instance
    update_cache_for_instance(model_name, instance.pk, instance)
def post_save_update_cache(sender, instance, created, raw, **kwargs):
"""Update the cache when an instance is created or modified."""
if raw:
return
name = sender.__name__
if name in cached_model_names:
delay_cache = getattr(instance, '_delay_cache', False)
if not delay_cache:
... |
def get_queryset(self):
"""Get the queryset for the action.
If action is read action, return a CachedQueryset
Otherwise, return a Django queryset
"""
queryset = super(CachedViewMixin, self).get_queryset()
if self.action in ('list', 'retrieve'):
return CachedQ... |
def get_object(self, queryset=None):
"""
Return the object the view is displaying.
Same as rest_framework.generics.GenericAPIView, but:
- Failed assertions instead of deprecations
"""
# Determine the base queryset to use.
assert queryset is None, "Passing a query... |
def get_object_or_404(self, queryset, *filter_args, **filter_kwargs):
"""Return an object or raise a 404.
Same as Django's standard shortcut, but make sure to raise 404
if the filter_kwargs don't match the required types.
"""
if isinstance(queryset, CachedQueryset):
... |
def r(self, **kwargs):
"""
Resolve the object.
This returns default (if present) or fails on an Empty.
"""
# by using kwargs we ensure that usage of positional arguments, as if
# this object were another kind of function, will fail-fast and raise
# a TypeError
... |
def r(self, **kwargs):
"""
Resolve the object.
This will always succeed, since, if a lookup fails, an Empty
instance will be returned farther upstream.
"""
# by using kwargs we ensure that usage of positional arguments, as if
# this object were another kind of fu... |
def update_cache_for_instance(
model_name, instance_pk, instance=None, version=None):
"""Update the cache for an instance, with cascading updates."""
cache = SampleCache()
invalid = cache.update_instance(model_name, instance_pk, instance, version)
for invalid_name, invalid_pk, invalid_version in... |
def values_list(self, *args, **kwargs):
"""Return the primary keys as a list.
The only valid call is values_list('pk', flat=True)
"""
flat = kwargs.pop('flat', False)
assert flat is True
assert len(args) == 1
assert args[0] == self.model._meta.pk.name
ret... |
def pks(self):
    """Primary keys of the queryset, loaded lazily and memoized.

    The first access materializes ``values_list('pk', flat=True)``
    into a list; subsequent accesses reuse it.
    """
    if self._primary_keys is None:
        pk_values = self.queryset.values_list('pk', flat=True)
        self._primary_keys = list(pk_values)
    return self._primary_keys
def count(self):
    """Count the instances, avoiding a full pk load when possible.

    Delegates to the queryset's COUNT when the pks have not been
    loaded yet; otherwise counts the cached pk list.
    """
    if self._primary_keys is not None:
        return len(self.pks)
    return self.queryset.count()
def filter(self, **kwargs):
    """Narrow the base queryset in place and return self for chaining.

    Must be called before the pks are loaded (asserted), since a
    loaded pk list would no longer match the filtered queryset.
    """
    assert not self._primary_keys
    filtered = self.queryset.filter(**kwargs)
    self.queryset = filtered
    return self
def get(self, *args, **kwargs):
"""Return the single item from the filtered queryset."""
assert not args
assert list(kwargs.keys()) == ['pk']
pk = kwargs['pk']
model_name = self.model.__name__
object_spec = (model_name, pk, None)
instances = self.cache.get_instanc... |
def collect(cls):
""" Load all constant generators from settings.WEBPACK_CONSTANT_PROCESSORS
and concat their values.
"""
constants = {}
for method_path in WebpackConstants.get_constant_processors():
method = import_string(method_path)
if not callabl... |
def phonenumber_validation(data):
""" Validates phonenumber
Similar to phonenumber_field.validators.validate_international_phonenumber() but uses a different message if the
country prefix is absent.
"""
from phonenumber_field.phonenumber import to_python
phone_number = to_python(data)
if no... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.