repository_name stringlengths 7 55 | func_path_in_repository stringlengths 4 223 | func_name stringlengths 1 134 | whole_func_string stringlengths 75 104k | language stringclasses 1
value | func_code_string stringlengths 75 104k | func_code_tokens listlengths 19 28.4k | func_documentation_string stringlengths 1 46.9k | func_documentation_tokens listlengths 1 1.97k | split_name stringclasses 1
value | func_code_url stringlengths 87 315 |
|---|---|---|---|---|---|---|---|---|---|---|
load-tools/netort | netort/process.py | execute | def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
"""Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true)
"""
# FIXME: move to module level
log = logging.getLogger(__name__)
log.debug("Starting: %s", cmd)
stdout = ""
stderr = ""
if not shell and isinstance(cmd, string_types):
cmd = shlex.split(cmd)
if catch_out:
process = subprocess.Popen(
cmd,
shell=shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
else:
process = subprocess.Popen(cmd, shell=shell, close_fds=True)
stdout, stderr = process.communicate()
if stderr:
log.error("There were errors:\n%s", stderr)
if stdout:
log.debug("Process output:\n%s", stdout)
returncode = process.returncode
log.debug("Process exit code: %s", returncode)
return returncode, stdout, stderr | python | def execute(cmd, shell=False, poll_period=1.0, catch_out=False):
"""Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true)
"""
# FIXME: move to module level
log = logging.getLogger(__name__)
log.debug("Starting: %s", cmd)
stdout = ""
stderr = ""
if not shell and isinstance(cmd, string_types):
cmd = shlex.split(cmd)
if catch_out:
process = subprocess.Popen(
cmd,
shell=shell,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
close_fds=True)
else:
process = subprocess.Popen(cmd, shell=shell, close_fds=True)
stdout, stderr = process.communicate()
if stderr:
log.error("There were errors:\n%s", stderr)
if stdout:
log.debug("Process output:\n%s", stdout)
returncode = process.returncode
log.debug("Process exit code: %s", returncode)
return returncode, stdout, stderr | [
"def",
"execute",
"(",
"cmd",
",",
"shell",
"=",
"False",
",",
"poll_period",
"=",
"1.0",
",",
"catch_out",
"=",
"False",
")",
":",
"# FIXME: move to module level",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"log",
".",
"debug",
"(",
... | Execute UNIX command and wait for its completion
Args:
cmd (str or list): command to execute
shell (bool): invoke inside shell environment
catch_out (bool): collect process' output
Returns:
returncode (int): process return code
stdout (str): collected process stdout (only if catch_out set to true)
stderr (str): collected process stderr (only if catch_out set to true) | [
"Execute",
"UNIX",
"command",
"and",
"wait",
"for",
"its",
"completion"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/process.py#L8-L49 |
sdispater/cachy | cachy/stores/file_store.py | FileStore._get_payload | def _get_payload(self, key):
"""
Retrieve an item and expiry time from the cache by key.
:param key: The cache key
:type key: str
:rtype: dict
"""
path = self._path(key)
# If the file doesn't exists, we obviously can't return the cache so we will
# just return null. Otherwise, we'll get the contents of the file and get
# the expiration UNIX timestamps from the start of the file's contents.
if not os.path.exists(path):
return {'data': None, 'time': None}
with open(path, 'rb') as fh:
contents = fh.read()
expire = int(contents[:10])
# If the current time is greater than expiration timestamps we will delete
# the file and return null. This helps clean up the old files and keeps
# this directory much cleaner for us as old files aren't hanging out.
if round(time.time()) >= expire:
self.forget(key)
return {'data': None, 'time': None}
data = self.unserialize(contents[10:])
# Next, we'll extract the number of minutes that are remaining for a cache
# so that we can properly retain the time for things like the increment
# operation that may be performed on the cache. We'll round this out.
time_ = math.ceil((expire - round(time.time())) / 60.)
return {'data': data, 'time': time_} | python | def _get_payload(self, key):
"""
Retrieve an item and expiry time from the cache by key.
:param key: The cache key
:type key: str
:rtype: dict
"""
path = self._path(key)
# If the file doesn't exists, we obviously can't return the cache so we will
# just return null. Otherwise, we'll get the contents of the file and get
# the expiration UNIX timestamps from the start of the file's contents.
if not os.path.exists(path):
return {'data': None, 'time': None}
with open(path, 'rb') as fh:
contents = fh.read()
expire = int(contents[:10])
# If the current time is greater than expiration timestamps we will delete
# the file and return null. This helps clean up the old files and keeps
# this directory much cleaner for us as old files aren't hanging out.
if round(time.time()) >= expire:
self.forget(key)
return {'data': None, 'time': None}
data = self.unserialize(contents[10:])
# Next, we'll extract the number of minutes that are remaining for a cache
# so that we can properly retain the time for things like the increment
# operation that may be performed on the cache. We'll round this out.
time_ = math.ceil((expire - round(time.time())) / 60.)
return {'data': data, 'time': time_} | [
"def",
"_get_payload",
"(",
"self",
",",
"key",
")",
":",
"path",
"=",
"self",
".",
"_path",
"(",
"key",
")",
"# If the file doesn't exists, we obviously can't return the cache so we will",
"# just return null. Otherwise, we'll get the contents of the file and get",
"# the expira... | Retrieve an item and expiry time from the cache by key.
:param key: The cache key
:type key: str
:rtype: dict | [
"Retrieve",
"an",
"item",
"and",
"expiry",
"time",
"from",
"the",
"cache",
"by",
"key",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L45-L82 |
sdispater/cachy | cachy/stores/file_store.py | FileStore.put | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
value = encode(str(self._expiration(minutes))) + encode(self.serialize(value))
path = self._path(key)
self._create_cache_directory(path)
with open(path, 'wb') as fh:
fh.write(value) | python | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
value = encode(str(self._expiration(minutes))) + encode(self.serialize(value))
path = self._path(key)
self._create_cache_directory(path)
with open(path, 'wb') as fh:
fh.write(value) | [
"def",
"put",
"(",
"self",
",",
"key",
",",
"value",
",",
"minutes",
")",
":",
"value",
"=",
"encode",
"(",
"str",
"(",
"self",
".",
"_expiration",
"(",
"minutes",
")",
")",
")",
"+",
"encode",
"(",
"self",
".",
"serialize",
"(",
"value",
")",
")... | Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"for",
"a",
"given",
"number",
"of",
"minutes",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L84-L103 |
sdispater/cachy | cachy/stores/file_store.py | FileStore.increment | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
raw = self._get_payload(key)
integer = int(raw['data']) + value
self.put(key, integer, int(raw['time']))
return integer | python | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
raw = self._get_payload(key)
integer = int(raw['data']) + value
self.put(key, integer, int(raw['time']))
return integer | [
"def",
"increment",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"raw",
"=",
"self",
".",
"_get_payload",
"(",
"key",
")",
"integer",
"=",
"int",
"(",
"raw",
"[",
"'data'",
"]",
")",
"+",
"value",
"self",
".",
"put",
"(",
"key",
"... | Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool | [
"Increment",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L114-L132 |
sdispater/cachy | cachy/stores/file_store.py | FileStore.forget | def forget(self, key):
"""
Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool
"""
path = self._path(key)
if os.path.exists(path):
os.remove(path)
return True
return False | python | def forget(self, key):
"""
Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool
"""
path = self._path(key)
if os.path.exists(path):
os.remove(path)
return True
return False | [
"def",
"forget",
"(",
"self",
",",
"key",
")",
":",
"path",
"=",
"self",
".",
"_path",
"(",
"key",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"remove",
"(",
"path",
")",
"return",
"True",
"return",
"False"
] | Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool | [
"Remove",
"an",
"item",
"from",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L160-L176 |
sdispater/cachy | cachy/stores/file_store.py | FileStore.flush | def flush(self):
"""
Remove all items from the cache.
"""
if os.path.isdir(self._directory):
for root, dirs, files in os.walk(self._directory, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name)) | python | def flush(self):
"""
Remove all items from the cache.
"""
if os.path.isdir(self._directory):
for root, dirs, files in os.walk(self._directory, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name)) | [
"def",
"flush",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"self",
".",
"_directory",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"self",
".",
"_directory",
",",
"topdown",
"=",
"False... | Remove all items from the cache. | [
"Remove",
"all",
"items",
"from",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L178-L188 |
sdispater/cachy | cachy/stores/file_store.py | FileStore._path | def _path(self, key):
"""
Get the full path for the given cache key.
:param key: The cache key
:type key: str
:rtype: str
"""
hash_type, parts_count = self._HASHES[self._hash_type]
h = hash_type(encode(key)).hexdigest()
parts = [h[i:i+2] for i in range(0, len(h), 2)][:parts_count]
return os.path.join(self._directory, os.path.sep.join(parts), h) | python | def _path(self, key):
"""
Get the full path for the given cache key.
:param key: The cache key
:type key: str
:rtype: str
"""
hash_type, parts_count = self._HASHES[self._hash_type]
h = hash_type(encode(key)).hexdigest()
parts = [h[i:i+2] for i in range(0, len(h), 2)][:parts_count]
return os.path.join(self._directory, os.path.sep.join(parts), h) | [
"def",
"_path",
"(",
"self",
",",
"key",
")",
":",
"hash_type",
",",
"parts_count",
"=",
"self",
".",
"_HASHES",
"[",
"self",
".",
"_hash_type",
"]",
"h",
"=",
"hash_type",
"(",
"encode",
"(",
"key",
")",
")",
".",
"hexdigest",
"(",
")",
"parts",
"... | Get the full path for the given cache key.
:param key: The cache key
:type key: str
:rtype: str | [
"Get",
"the",
"full",
"path",
"for",
"the",
"given",
"cache",
"key",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/file_store.py#L190-L204 |
load-tools/netort | netort/resource.py | ResourceManager.resource_string | def resource_string(self, path):
"""
Args:
path: str, resource file url or resource file absolute/relative path.
Returns:
string, file content
"""
opener = self.get_opener(path)
filename = opener.get_filename
try:
size = os.path.getsize(filename)
if size > 50 * 1024 * 1024:
logger.warning(
'Reading large resource to memory: %s. Size: %s bytes',
filename, size)
except Exception as exc:
logger.debug('Unable to check resource size %s. %s', filename, exc)
with opener(filename, 'r') as resource:
content = resource.read()
return content | python | def resource_string(self, path):
"""
Args:
path: str, resource file url or resource file absolute/relative path.
Returns:
string, file content
"""
opener = self.get_opener(path)
filename = opener.get_filename
try:
size = os.path.getsize(filename)
if size > 50 * 1024 * 1024:
logger.warning(
'Reading large resource to memory: %s. Size: %s bytes',
filename, size)
except Exception as exc:
logger.debug('Unable to check resource size %s. %s', filename, exc)
with opener(filename, 'r') as resource:
content = resource.read()
return content | [
"def",
"resource_string",
"(",
"self",
",",
"path",
")",
":",
"opener",
"=",
"self",
".",
"get_opener",
"(",
"path",
")",
"filename",
"=",
"opener",
".",
"get_filename",
"try",
":",
"size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"filename",
")",
... | Args:
path: str, resource file url or resource file absolute/relative path.
Returns:
string, file content | [
"Args",
":",
"path",
":",
"str",
"resource",
"file",
"url",
"or",
"resource",
"file",
"absolute",
"/",
"relative",
"path",
"."
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/resource.py#L63-L83 |
load-tools/netort | netort/resource.py | ResourceManager.get_opener | def get_opener(self, path):
"""
Args:
path: str, resource file url or resource file absolute/relative path.
Returns:
file object
"""
self.path = path
opener = None
# FIXME this parser/matcher should use `urlparse` stdlib
for opener_name, signature in self.openers.items():
if self.path.startswith(signature[0]):
opener = signature[1](self.path)
break
if not opener:
opener = FileOpener(self.path)
return opener | python | def get_opener(self, path):
"""
Args:
path: str, resource file url or resource file absolute/relative path.
Returns:
file object
"""
self.path = path
opener = None
# FIXME this parser/matcher should use `urlparse` stdlib
for opener_name, signature in self.openers.items():
if self.path.startswith(signature[0]):
opener = signature[1](self.path)
break
if not opener:
opener = FileOpener(self.path)
return opener | [
"def",
"get_opener",
"(",
"self",
",",
"path",
")",
":",
"self",
".",
"path",
"=",
"path",
"opener",
"=",
"None",
"# FIXME this parser/matcher should use `urlparse` stdlib",
"for",
"opener_name",
",",
"signature",
"in",
"self",
".",
"openers",
".",
"items",
"(",... | Args:
path: str, resource file url or resource file absolute/relative path.
Returns:
file object | [
"Args",
":",
"path",
":",
"str",
"resource",
"file",
"url",
"or",
"resource",
"file",
"absolute",
"/",
"relative",
"path",
"."
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/resource.py#L85-L102 |
sdispater/cachy | cachy/contracts/taggable_store.py | TaggableStore.tags | def tags(self, *names):
"""
Begin executing a new tags operation.
:param names: The tags
:type names: tuple
:rtype: cachy.tagged_cache.TaggedCache
"""
if len(names) == 1 and isinstance(names[0], list):
names = names[0]
return TaggedCache(self, TagSet(self, names)) | python | def tags(self, *names):
"""
Begin executing a new tags operation.
:param names: The tags
:type names: tuple
:rtype: cachy.tagged_cache.TaggedCache
"""
if len(names) == 1 and isinstance(names[0], list):
names = names[0]
return TaggedCache(self, TagSet(self, names)) | [
"def",
"tags",
"(",
"self",
",",
"*",
"names",
")",
":",
"if",
"len",
"(",
"names",
")",
"==",
"1",
"and",
"isinstance",
"(",
"names",
"[",
"0",
"]",
",",
"list",
")",
":",
"names",
"=",
"names",
"[",
"0",
"]",
"return",
"TaggedCache",
"(",
"se... | Begin executing a new tags operation.
:param names: The tags
:type names: tuple
:rtype: cachy.tagged_cache.TaggedCache | [
"Begin",
"executing",
"a",
"new",
"tags",
"operation",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/contracts/taggable_store.py#L10-L22 |
inveniosoftware/invenio-logging | invenio_logging/sentry6.py | Sentry6.get_user_info | def get_user_info(self, request):
"""Implement custom getter."""
if not current_user.is_authenticated:
return {}
user_info = {
'id': current_user.get_id(),
}
if 'SENTRY_USER_ATTRS' in current_app.config:
for attr in current_app.config['SENTRY_USER_ATTRS']:
if hasattr(current_user, attr):
user_info[attr] = getattr(current_user, attr)
return user_info | python | def get_user_info(self, request):
"""Implement custom getter."""
if not current_user.is_authenticated:
return {}
user_info = {
'id': current_user.get_id(),
}
if 'SENTRY_USER_ATTRS' in current_app.config:
for attr in current_app.config['SENTRY_USER_ATTRS']:
if hasattr(current_user, attr):
user_info[attr] = getattr(current_user, attr)
return user_info | [
"def",
"get_user_info",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"current_user",
".",
"is_authenticated",
":",
"return",
"{",
"}",
"user_info",
"=",
"{",
"'id'",
":",
"current_user",
".",
"get_id",
"(",
")",
",",
"}",
"if",
"'SENTRY_USER_ATTRS'"... | Implement custom getter. | [
"Implement",
"custom",
"getter",
"."
] | train | https://github.com/inveniosoftware/invenio-logging/blob/59ee171ad4f9809f62a822964b5c68e5be672dd8/invenio_logging/sentry6.py#L21-L35 |
load-tools/netort | netort/data_manager/clients/luna.py | LunaClient.create_job | def create_job(self):
""" Create public Luna job
Returns:
job_id (basestring): Luna job id
"""
my_user_agent = None
try:
my_user_agent = pkg_resources.require('netort')[0].version
except pkg_resources.DistributionNotFound:
my_user_agent = 'DistributionNotFound'
finally:
headers = {
"User-Agent": "Uploader/{uploader_ua}, {upward_ua}".format(
upward_ua=self.meta.get('user_agent', ''),
uploader_ua=my_user_agent
)
}
req = requests.Request(
'POST',
"{api_address}{path}".format(
api_address=self.api_address,
path=self.create_job_path
),
headers=headers
)
req.data = {
'test_start': self.job.test_start
}
prepared_req = req.prepare()
logger.debug('Prepared create_job request:\n%s', pretty_print(prepared_req))
response = send_chunk(self.session, prepared_req)
logger.debug('Luna create job status: %s', response.status_code)
logger.debug('Answ data: %s', response.content)
job_id = response.content.decode('utf-8') if isinstance(response.content, bytes) else response.content
if not job_id:
self.failed.set()
raise ValueError('Luna returned answer without jobid: %s', response.content)
else:
logger.info('Luna job created: %s', job_id)
return job_id | python | def create_job(self):
""" Create public Luna job
Returns:
job_id (basestring): Luna job id
"""
my_user_agent = None
try:
my_user_agent = pkg_resources.require('netort')[0].version
except pkg_resources.DistributionNotFound:
my_user_agent = 'DistributionNotFound'
finally:
headers = {
"User-Agent": "Uploader/{uploader_ua}, {upward_ua}".format(
upward_ua=self.meta.get('user_agent', ''),
uploader_ua=my_user_agent
)
}
req = requests.Request(
'POST',
"{api_address}{path}".format(
api_address=self.api_address,
path=self.create_job_path
),
headers=headers
)
req.data = {
'test_start': self.job.test_start
}
prepared_req = req.prepare()
logger.debug('Prepared create_job request:\n%s', pretty_print(prepared_req))
response = send_chunk(self.session, prepared_req)
logger.debug('Luna create job status: %s', response.status_code)
logger.debug('Answ data: %s', response.content)
job_id = response.content.decode('utf-8') if isinstance(response.content, bytes) else response.content
if not job_id:
self.failed.set()
raise ValueError('Luna returned answer without jobid: %s', response.content)
else:
logger.info('Luna job created: %s', job_id)
return job_id | [
"def",
"create_job",
"(",
"self",
")",
":",
"my_user_agent",
"=",
"None",
"try",
":",
"my_user_agent",
"=",
"pkg_resources",
".",
"require",
"(",
"'netort'",
")",
"[",
"0",
"]",
".",
"version",
"except",
"pkg_resources",
".",
"DistributionNotFound",
":",
"my... | Create public Luna job
Returns:
job_id (basestring): Luna job id | [
"Create",
"public",
"Luna",
"job"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_manager/clients/luna.py#L89-L130 |
load-tools/netort | netort/data_manager/manager.py | DataManager.new_metric | def new_metric(self, meta):
"""
Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric
"""
type_ = meta.get('type')
if not type_:
raise ValueError('Metric type should be defined.')
if type_ in available_metrics:
metric_obj = available_metrics[type_](meta, self.routing_queue) # create metric object
metric_meta = pd.DataFrame({metric_obj.local_id: meta}).T # create metric meta
self.metrics_meta = self.metrics_meta.append(metric_meta) # register metric meta
self.metrics[metric_obj.local_id] = metric_obj # register metric object
# find subscribers for this metric
this_metric_subscribers = self.__reversed_filter(self.subscribers, meta)
if this_metric_subscribers.empty:
logger.debug('subscriber for metric %s not found', metric_obj.local_id)
else:
logger.debug('Found subscribers for this metric, subscribing...: %s', this_metric_subscribers)
# attach this metric id to discovered subscribers and select id <-> callbacks
this_metric_subscribers['id'] = metric_obj.local_id
found_callbacks = this_metric_subscribers[['id', 'callback']].set_index('id')
# add this metric callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(found_callbacks)
return metric_obj
else:
raise NotImplementedError('Unknown metric type: %s' % type_) | python | def new_metric(self, meta):
"""
Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric
"""
type_ = meta.get('type')
if not type_:
raise ValueError('Metric type should be defined.')
if type_ in available_metrics:
metric_obj = available_metrics[type_](meta, self.routing_queue) # create metric object
metric_meta = pd.DataFrame({metric_obj.local_id: meta}).T # create metric meta
self.metrics_meta = self.metrics_meta.append(metric_meta) # register metric meta
self.metrics[metric_obj.local_id] = metric_obj # register metric object
# find subscribers for this metric
this_metric_subscribers = self.__reversed_filter(self.subscribers, meta)
if this_metric_subscribers.empty:
logger.debug('subscriber for metric %s not found', metric_obj.local_id)
else:
logger.debug('Found subscribers for this metric, subscribing...: %s', this_metric_subscribers)
# attach this metric id to discovered subscribers and select id <-> callbacks
this_metric_subscribers['id'] = metric_obj.local_id
found_callbacks = this_metric_subscribers[['id', 'callback']].set_index('id')
# add this metric callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(found_callbacks)
return metric_obj
else:
raise NotImplementedError('Unknown metric type: %s' % type_) | [
"def",
"new_metric",
"(",
"self",
",",
"meta",
")",
":",
"type_",
"=",
"meta",
".",
"get",
"(",
"'type'",
")",
"if",
"not",
"type_",
":",
"raise",
"ValueError",
"(",
"'Metric type should be defined.'",
")",
"if",
"type_",
"in",
"available_metrics",
":",
"m... | Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric | [
"Create",
"and",
"register",
"metric",
"find",
"subscribers",
"for",
"this",
"metric",
"(",
"using",
"meta",
"as",
"filter",
")",
"and",
"subscribe"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_manager/manager.py#L172-L203 |
load-tools/netort | netort/data_manager/manager.py | DataManager.new_tank_metric | def new_tank_metric(self, _type, name, hostname=None, group=None, source=None, *kw):
"""
Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric
"""
if not _type:
raise ValueError('Metric type should be defined.')
if _type in available_metrics:
metric_obj = available_metrics[_type](None, self.routing_queue) # create metric object
metric_info = {'type': _type,
'source': source,
'name': name,
'hostname': hostname,
'group': group}
if kw is not None:
metric_info.update(kw)
metric_meta = pd.DataFrame({metric_obj.local_id: metric_info}).T # create metric meta
self.metrics_meta = self.metrics_meta.append(metric_meta) # register metric meta
self.metrics[metric_obj.local_id] = metric_obj # register metric object
# find subscribers for this metric
this_metric_subscribers = self.__reversed_filter(self.subscribers, metric_info)
if this_metric_subscribers.empty:
logger.debug('subscriber for metric %s not found', metric_obj.local_id)
else:
logger.debug('Found subscribers for this metric, subscribing...: %s', this_metric_subscribers)
# attach this metric id to discovered subscribers and select id <-> callbacks
this_metric_subscribers['id'] = metric_obj.local_id
found_callbacks = this_metric_subscribers[['id', 'callback']].set_index('id')
# add this metric callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(found_callbacks)
return metric_obj
else:
raise NotImplementedError('Unknown metric type: %s' % _type) | python | def new_tank_metric(self, _type, name, hostname=None, group=None, source=None, *kw):
"""
Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric
"""
if not _type:
raise ValueError('Metric type should be defined.')
if _type in available_metrics:
metric_obj = available_metrics[_type](None, self.routing_queue) # create metric object
metric_info = {'type': _type,
'source': source,
'name': name,
'hostname': hostname,
'group': group}
if kw is not None:
metric_info.update(kw)
metric_meta = pd.DataFrame({metric_obj.local_id: metric_info}).T # create metric meta
self.metrics_meta = self.metrics_meta.append(metric_meta) # register metric meta
self.metrics[metric_obj.local_id] = metric_obj # register metric object
# find subscribers for this metric
this_metric_subscribers = self.__reversed_filter(self.subscribers, metric_info)
if this_metric_subscribers.empty:
logger.debug('subscriber for metric %s not found', metric_obj.local_id)
else:
logger.debug('Found subscribers for this metric, subscribing...: %s', this_metric_subscribers)
# attach this metric id to discovered subscribers and select id <-> callbacks
this_metric_subscribers['id'] = metric_obj.local_id
found_callbacks = this_metric_subscribers[['id', 'callback']].set_index('id')
# add this metric callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(found_callbacks)
return metric_obj
else:
raise NotImplementedError('Unknown metric type: %s' % _type) | [
"def",
"new_tank_metric",
"(",
"self",
",",
"_type",
",",
"name",
",",
"hostname",
"=",
"None",
",",
"group",
"=",
"None",
",",
"source",
"=",
"None",
",",
"*",
"kw",
")",
":",
"if",
"not",
"_type",
":",
"raise",
"ValueError",
"(",
"'Metric type should... | Create and register metric,
find subscribers for this metric (using meta as filter) and subscribe
Return:
metric (available_metrics[0]): one of Metric | [
"Create",
"and",
"register",
"metric",
"find",
"subscribers",
"for",
"this",
"metric",
"(",
"using",
"meta",
"as",
"filter",
")",
"and",
"subscribe"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_manager/manager.py#L206-L243 |
load-tools/netort | netort/data_manager/manager.py | DataManager.subscribe | def subscribe(self, callback, filter_):
"""
Create and register metric subscriber,
find metrics for this subscriber (using filter_) and subscribe
Args:
callback (object method): subscriber's callback
filter_ (dict): filter dict
filter sample:
{'type': 'metrics', 'source': 'gun'}
"""
sub_id = "subscriber_{uuid}".format(uuid=uuid.uuid4())
# register subscriber in manager
sub = pd.DataFrame({sub_id: filter_}).T
sub['callback'] = callback
self.subscribers = self.subscribers.append(sub)
# find metrics for subscriber using `filter`
this_subscriber_metrics = self.__filter(self.metrics_meta, filter_)
if this_subscriber_metrics.empty:
logger.debug('Metrics for subscriber %s not found', sub_id)
else:
logger.debug('Found metrics for this subscriber, subscribing...: %s', this_subscriber_metrics)
# attach this sub callback to discovered metrics and select id <-> callbacks
this_subscriber_metrics['callback'] = callback
prepared_callbacks = this_subscriber_metrics[['callback']]
# add this subscriber callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(prepared_callbacks) | python | def subscribe(self, callback, filter_):
"""
Create and register metric subscriber,
find metrics for this subscriber (using filter_) and subscribe
Args:
callback (object method): subscriber's callback
filter_ (dict): filter dict
filter sample:
{'type': 'metrics', 'source': 'gun'}
"""
sub_id = "subscriber_{uuid}".format(uuid=uuid.uuid4())
# register subscriber in manager
sub = pd.DataFrame({sub_id: filter_}).T
sub['callback'] = callback
self.subscribers = self.subscribers.append(sub)
# find metrics for subscriber using `filter`
this_subscriber_metrics = self.__filter(self.metrics_meta, filter_)
if this_subscriber_metrics.empty:
logger.debug('Metrics for subscriber %s not found', sub_id)
else:
logger.debug('Found metrics for this subscriber, subscribing...: %s', this_subscriber_metrics)
# attach this sub callback to discovered metrics and select id <-> callbacks
this_subscriber_metrics['callback'] = callback
prepared_callbacks = this_subscriber_metrics[['callback']]
# add this subscriber callbacks to DataManager's callbacks
self.callbacks = self.callbacks.append(prepared_callbacks) | [
"def",
"subscribe",
"(",
"self",
",",
"callback",
",",
"filter_",
")",
":",
"sub_id",
"=",
"\"subscriber_{uuid}\"",
".",
"format",
"(",
"uuid",
"=",
"uuid",
".",
"uuid4",
"(",
")",
")",
"# register subscriber in manager",
"sub",
"=",
"pd",
".",
"DataFrame",
... | Create and register metric subscriber,
find metrics for this subscriber (using filter_) and subscribe
Args:
callback (object method): subscriber's callback
filter_ (dict): filter dict
filter sample:
{'type': 'metrics', 'source': 'gun'} | [
"Create",
"and",
"register",
"metric",
"subscriber",
"find",
"metrics",
"for",
"this",
"subscriber",
"(",
"using",
"filter_",
")",
"and",
"subscribe"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_manager/manager.py#L245-L273 |
load-tools/netort | netort/data_manager/manager.py | DataManager.__filter | def __filter(filterable, filter_, logic_operation='and'):
""" filtering DataFrame using filter_ key-value conditions applying logic_operation
only find rows strictly fitting the filter_ criterion"""
condition = []
if not filter_:
return filterable
elif filter_.get('type') == '__ANY__':
return filterable
else:
for key, value in filter_.items():
condition.append('{key} == "{value}"'.format(key=key, value=value))
try:
res = filterable.query(" {operation} ".format(operation=logic_operation).join(condition))
except pd.core.computation.ops.UndefinedVariableError:
return pd.DataFrame()
else:
return res | python | def __filter(filterable, filter_, logic_operation='and'):
""" filtering DataFrame using filter_ key-value conditions applying logic_operation
only find rows strictly fitting the filter_ criterion"""
condition = []
if not filter_:
return filterable
elif filter_.get('type') == '__ANY__':
return filterable
else:
for key, value in filter_.items():
condition.append('{key} == "{value}"'.format(key=key, value=value))
try:
res = filterable.query(" {operation} ".format(operation=logic_operation).join(condition))
except pd.core.computation.ops.UndefinedVariableError:
return pd.DataFrame()
else:
return res | [
"def",
"__filter",
"(",
"filterable",
",",
"filter_",
",",
"logic_operation",
"=",
"'and'",
")",
":",
"condition",
"=",
"[",
"]",
"if",
"not",
"filter_",
":",
"return",
"filterable",
"elif",
"filter_",
".",
"get",
"(",
"'type'",
")",
"==",
"'__ANY__'",
"... | filtering DataFrame using filter_ key-value conditions applying logic_operation
only find rows strictly fitting the filter_ criterion | [
"filtering",
"DataFrame",
"using",
"filter_",
"key",
"-",
"value",
"conditions",
"applying",
"logic_operation",
"only",
"find",
"rows",
"strictly",
"fitting",
"the",
"filter_",
"criterion"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_manager/manager.py#L279-L295 |
load-tools/netort | netort/data_manager/manager.py | DataManager.__reversed_filter | def __reversed_filter(filterable, filter_, logic_operation='and'):
""" reverse filtering DataFrame using filter_ key-value conditions applying logic_operation
find rows where existing filterable columns (and its values) fitting the filter_ criterion"""
condition = []
try:
subscribers_for_any = filterable.query('type == "__ANY__"')
except pd.core.computation.ops.UndefinedVariableError:
subscribers_for_any = pd.DataFrame()
if not filter_:
return filterable
else:
for existing_col in filterable:
for meta_tag, meta_value in filter_.items():
if meta_tag == existing_col:
condition.append('{key} == "{value}"'.format(key=meta_tag, value=meta_value))
try:
res = filterable.query(" {operation} ".format(operation=logic_operation).join(condition))
except pd.core.computation.ops.UndefinedVariableError:
return pd.DataFrame().append(subscribers_for_any)
else:
return res.append(subscribers_for_any) | python | def __reversed_filter(filterable, filter_, logic_operation='and'):
""" reverse filtering DataFrame using filter_ key-value conditions applying logic_operation
find rows where existing filterable columns (and its values) fitting the filter_ criterion"""
condition = []
try:
subscribers_for_any = filterable.query('type == "__ANY__"')
except pd.core.computation.ops.UndefinedVariableError:
subscribers_for_any = pd.DataFrame()
if not filter_:
return filterable
else:
for existing_col in filterable:
for meta_tag, meta_value in filter_.items():
if meta_tag == existing_col:
condition.append('{key} == "{value}"'.format(key=meta_tag, value=meta_value))
try:
res = filterable.query(" {operation} ".format(operation=logic_operation).join(condition))
except pd.core.computation.ops.UndefinedVariableError:
return pd.DataFrame().append(subscribers_for_any)
else:
return res.append(subscribers_for_any) | [
"def",
"__reversed_filter",
"(",
"filterable",
",",
"filter_",
",",
"logic_operation",
"=",
"'and'",
")",
":",
"condition",
"=",
"[",
"]",
"try",
":",
"subscribers_for_any",
"=",
"filterable",
".",
"query",
"(",
"'type == \"__ANY__\"'",
")",
"except",
"pd",
".... | reverse filtering DataFrame using filter_ key-value conditions applying logic_operation
find rows where existing filterable columns (and its values) fitting the filter_ criterion | [
"reverse",
"filtering",
"DataFrame",
"using",
"filter_",
"key",
"-",
"value",
"conditions",
"applying",
"logic_operation",
"find",
"rows",
"where",
"existing",
"filterable",
"columns",
"(",
"and",
"its",
"values",
")",
"fitting",
"the",
"filter_",
"criterion"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_manager/manager.py#L298-L318 |
inveniosoftware/invenio-logging | invenio_logging/sentry.py | InvenioLoggingSentry.install_handler | def install_handler(self, app):
"""Install log handler."""
from raven.contrib.celery import register_logger_signal, \
register_signal
from raven.contrib.flask import Sentry, make_client
from raven.handlers.logging import SentryHandler
# Installs sentry in app.extensions['sentry']
level = getattr(logging, app.config['LOGGING_SENTRY_LEVEL'])
# Get the Sentry class.
cls = app.config['LOGGING_SENTRY_CLASS']
if cls:
if isinstance(cls, six.string_types):
cls = import_string(cls)
else:
cls = Sentry
sentry = cls(
app,
logging=True,
level=level
)
app.logger.addHandler(SentryHandler(client=sentry.client, level=level))
# Capture warnings from warnings module
if app.config['LOGGING_SENTRY_PYWARNINGS']:
self.capture_pywarnings(
SentryHandler(sentry.client))
# Setup Celery logging to Sentry
if app.config['LOGGING_SENTRY_CELERY']:
try:
register_logger_signal(sentry.client, loglevel=level)
except TypeError:
# Compatibility mode for Raven<=5.1.0
register_logger_signal(sentry.client)
register_signal(sentry.client)
# Werkzeug only adds a stream handler if there's no other handlers
# defined, so when Sentry adds a log handler no output is
# received from Werkzeug unless we install a console handler
# here on the werkzeug logger.
if app.debug:
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler()) | python | def install_handler(self, app):
"""Install log handler."""
from raven.contrib.celery import register_logger_signal, \
register_signal
from raven.contrib.flask import Sentry, make_client
from raven.handlers.logging import SentryHandler
# Installs sentry in app.extensions['sentry']
level = getattr(logging, app.config['LOGGING_SENTRY_LEVEL'])
# Get the Sentry class.
cls = app.config['LOGGING_SENTRY_CLASS']
if cls:
if isinstance(cls, six.string_types):
cls = import_string(cls)
else:
cls = Sentry
sentry = cls(
app,
logging=True,
level=level
)
app.logger.addHandler(SentryHandler(client=sentry.client, level=level))
# Capture warnings from warnings module
if app.config['LOGGING_SENTRY_PYWARNINGS']:
self.capture_pywarnings(
SentryHandler(sentry.client))
# Setup Celery logging to Sentry
if app.config['LOGGING_SENTRY_CELERY']:
try:
register_logger_signal(sentry.client, loglevel=level)
except TypeError:
# Compatibility mode for Raven<=5.1.0
register_logger_signal(sentry.client)
register_signal(sentry.client)
# Werkzeug only adds a stream handler if there's no other handlers
# defined, so when Sentry adds a log handler no output is
# received from Werkzeug unless we install a console handler
# here on the werkzeug logger.
if app.debug:
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler()) | [
"def",
"install_handler",
"(",
"self",
",",
"app",
")",
":",
"from",
"raven",
".",
"contrib",
".",
"celery",
"import",
"register_logger_signal",
",",
"register_signal",
"from",
"raven",
".",
"contrib",
".",
"flask",
"import",
"Sentry",
",",
"make_client",
"fro... | Install log handler. | [
"Install",
"log",
"handler",
"."
] | train | https://github.com/inveniosoftware/invenio-logging/blob/59ee171ad4f9809f62a822964b5c68e5be672dd8/invenio_logging/sentry.py#L56-L103 |
inveniosoftware/invenio-logging | invenio_logging/sentry.py | RequestIdProcessor.process | def process(self, data, **kwargs):
"""Process event data."""
data = super(RequestIdProcessor, self).process(data, **kwargs)
if g and hasattr(g, 'request_id'):
tags = data.get('tags', {})
tags['request_id'] = g.request_id
data['tags'] = tags
return data | python | def process(self, data, **kwargs):
"""Process event data."""
data = super(RequestIdProcessor, self).process(data, **kwargs)
if g and hasattr(g, 'request_id'):
tags = data.get('tags', {})
tags['request_id'] = g.request_id
data['tags'] = tags
return data | [
"def",
"process",
"(",
"self",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"super",
"(",
"RequestIdProcessor",
",",
"self",
")",
".",
"process",
"(",
"data",
",",
"*",
"*",
"kwargs",
")",
"if",
"g",
"and",
"hasattr",
"(",
"g",
",... | Process event data. | [
"Process",
"event",
"data",
"."
] | train | https://github.com/inveniosoftware/invenio-logging/blob/59ee171ad4f9809f62a822964b5c68e5be672dd8/invenio_logging/sentry.py#L109-L116 |
sdispater/cachy | cachy/stores/redis_store.py | RedisStore.get | def get(self, key):
"""
Retrieve an item from the cache by key.
:param key: The cache key
:type key: str
:return: The cache value
"""
value = self._redis.get(self._prefix + key)
if value is not None:
return self.unserialize(value) | python | def get(self, key):
"""
Retrieve an item from the cache by key.
:param key: The cache key
:type key: str
:return: The cache value
"""
value = self._redis.get(self._prefix + key)
if value is not None:
return self.unserialize(value) | [
"def",
"get",
"(",
"self",
",",
"key",
")",
":",
"value",
"=",
"self",
".",
"_redis",
".",
"get",
"(",
"self",
".",
"_prefix",
"+",
"key",
")",
"if",
"value",
"is",
"not",
"None",
":",
"return",
"self",
".",
"unserialize",
"(",
"value",
")"
] | Retrieve an item from the cache by key.
:param key: The cache key
:type key: str
:return: The cache value | [
"Retrieve",
"an",
"item",
"from",
"the",
"cache",
"by",
"key",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L27-L39 |
sdispater/cachy | cachy/stores/redis_store.py | RedisStore.put | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
value = self.serialize(value)
minutes = max(1, minutes)
self._redis.setex(self._prefix + key, minutes * 60, value) | python | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
value = self.serialize(value)
minutes = max(1, minutes)
self._redis.setex(self._prefix + key, minutes * 60, value) | [
"def",
"put",
"(",
"self",
",",
"key",
",",
"value",
",",
"minutes",
")",
":",
"value",
"=",
"self",
".",
"serialize",
"(",
"value",
")",
"minutes",
"=",
"max",
"(",
"1",
",",
"minutes",
")",
"self",
".",
"_redis",
".",
"setex",
"(",
"self",
".",... | Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"for",
"a",
"given",
"number",
"of",
"minutes",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L41-L58 |
sdispater/cachy | cachy/stores/redis_store.py | RedisStore.increment | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
return self._redis.incrby(self._prefix + key, value) | python | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
return self._redis.incrby(self._prefix + key, value) | [
"def",
"increment",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"return",
"self",
".",
"_redis",
".",
"incrby",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool | [
"Increment",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L60-L72 |
sdispater/cachy | cachy/stores/redis_store.py | RedisStore.decrement | def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
return self._redis.decr(self._prefix + key, value) | python | def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
return self._redis.decr(self._prefix + key, value) | [
"def",
"decrement",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"return",
"self",
".",
"_redis",
".",
"decr",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool | [
"Decrement",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L74-L86 |
sdispater/cachy | cachy/stores/redis_store.py | RedisStore.forever | def forever(self, key, value):
"""
Store an item in the cache indefinitely.
:param key: The cache key
:type key: str
:param value: The value to store
:type value: mixed
"""
value = self.serialize(value)
self._redis.set(self._prefix + key, value) | python | def forever(self, key, value):
"""
Store an item in the cache indefinitely.
:param key: The cache key
:type key: str
:param value: The value to store
:type value: mixed
"""
value = self.serialize(value)
self._redis.set(self._prefix + key, value) | [
"def",
"forever",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"value",
"=",
"self",
".",
"serialize",
"(",
"value",
")",
"self",
".",
"_redis",
".",
"set",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | Store an item in the cache indefinitely.
:param key: The cache key
:type key: str
:param value: The value to store
:type value: mixed | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"indefinitely",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L88-L100 |
sdispater/cachy | cachy/stores/redis_store.py | RedisStore.forget | def forget(self, key):
"""
Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool
"""
return bool(self._redis.delete(self._prefix + key)) | python | def forget(self, key):
"""
Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool
"""
return bool(self._redis.delete(self._prefix + key)) | [
"def",
"forget",
"(",
"self",
",",
"key",
")",
":",
"return",
"bool",
"(",
"self",
".",
"_redis",
".",
"delete",
"(",
"self",
".",
"_prefix",
"+",
"key",
")",
")"
] | Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool | [
"Remove",
"an",
"item",
"from",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/redis_store.py#L102-L111 |
sdispater/cachy | cachy/repository.py | Repository.get | def get(self, key, default=None):
"""
Retrieve an item from the cache by key.
:param key: The cache key
:type key: str
:param default: The default value to return
:type default: mixed
:rtype: mixed
"""
val = self._store.get(key)
if val is None:
return value(default)
return val | python | def get(self, key, default=None):
"""
Retrieve an item from the cache by key.
:param key: The cache key
:type key: str
:param default: The default value to return
:type default: mixed
:rtype: mixed
"""
val = self._store.get(key)
if val is None:
return value(default)
return val | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"val",
"=",
"self",
".",
"_store",
".",
"get",
"(",
"key",
")",
"if",
"val",
"is",
"None",
":",
"return",
"value",
"(",
"default",
")",
"return",
"val"
] | Retrieve an item from the cache by key.
:param key: The cache key
:type key: str
:param default: The default value to return
:type default: mixed
:rtype: mixed | [
"Retrieve",
"an",
"item",
"from",
"the",
"cache",
"by",
"key",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/repository.py#L35-L52 |
sdispater/cachy | cachy/repository.py | Repository.put | def put(self, key, val, minutes):
"""
Store an item in the cache.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime
"""
minutes = self._get_minutes(minutes)
if minutes is not None:
self._store.put(key, val, minutes) | python | def put(self, key, val, minutes):
"""
Store an item in the cache.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime
"""
minutes = self._get_minutes(minutes)
if minutes is not None:
self._store.put(key, val, minutes) | [
"def",
"put",
"(",
"self",
",",
"key",
",",
"val",
",",
"minutes",
")",
":",
"minutes",
"=",
"self",
".",
"_get_minutes",
"(",
"minutes",
")",
"if",
"minutes",
"is",
"not",
"None",
":",
"self",
".",
"_store",
".",
"put",
"(",
"key",
",",
"val",
"... | Store an item in the cache.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/repository.py#L72-L88 |
sdispater/cachy | cachy/repository.py | Repository.add | def add(self, key, val, minutes):
"""
Store an item in the cache if it does not exist.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime
:rtype: bool
"""
if hasattr(self._store, 'add'):
return self._store.add(key, val, self._get_minutes(minutes))
if not self.has(key):
self.put(key, val, minutes)
return True
return False | python | def add(self, key, val, minutes):
"""
Store an item in the cache if it does not exist.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime
:rtype: bool
"""
if hasattr(self._store, 'add'):
return self._store.add(key, val, self._get_minutes(minutes))
if not self.has(key):
self.put(key, val, minutes)
return True
return False | [
"def",
"add",
"(",
"self",
",",
"key",
",",
"val",
",",
"minutes",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"_store",
",",
"'add'",
")",
":",
"return",
"self",
".",
"_store",
".",
"add",
"(",
"key",
",",
"val",
",",
"self",
".",
"_get_minutes... | Store an item in the cache if it does not exist.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime
:rtype: bool | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"if",
"it",
"does",
"not",
"exist",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/repository.py#L90-L113 |
sdispater/cachy | cachy/repository.py | Repository._get_key | def _get_key(self, fn, args, kwargs):
"""
Calculate a cache key given a function, args and kwargs.
:param fn: The function
:type fn: callable or str
:param args: The function args
:type args: tuple
:param kwargs: The function kwargs
:type kwargs: dict
:rtype: str
"""
if args:
serialized_arguments = (
self._store.serialize(args[1:])
+ self._store.serialize([(k, kwargs[k]) for k in sorted(kwargs.keys())])
)
else:
serialized_arguments = self._store.serialize([(k, kwargs[k]) for k in sorted(kwargs.keys())])
if isinstance(fn, types.MethodType):
key = self._hash('%s.%s.%s'
% (fn.__self__.__class__.__name__,
args[0].__name__,
serialized_arguments))
elif isinstance(fn, types.FunctionType):
key = self._hash('%s.%s'
% (fn.__name__,
serialized_arguments))
else:
key = '%s:' % fn + self._hash(serialized_arguments)
return key | python | def _get_key(self, fn, args, kwargs):
"""
Calculate a cache key given a function, args and kwargs.
:param fn: The function
:type fn: callable or str
:param args: The function args
:type args: tuple
:param kwargs: The function kwargs
:type kwargs: dict
:rtype: str
"""
if args:
serialized_arguments = (
self._store.serialize(args[1:])
+ self._store.serialize([(k, kwargs[k]) for k in sorted(kwargs.keys())])
)
else:
serialized_arguments = self._store.serialize([(k, kwargs[k]) for k in sorted(kwargs.keys())])
if isinstance(fn, types.MethodType):
key = self._hash('%s.%s.%s'
% (fn.__self__.__class__.__name__,
args[0].__name__,
serialized_arguments))
elif isinstance(fn, types.FunctionType):
key = self._hash('%s.%s'
% (fn.__name__,
serialized_arguments))
else:
key = '%s:' % fn + self._hash(serialized_arguments)
return key | [
"def",
"_get_key",
"(",
"self",
",",
"fn",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"args",
":",
"serialized_arguments",
"=",
"(",
"self",
".",
"_store",
".",
"serialize",
"(",
"args",
"[",
"1",
":",
"]",
")",
"+",
"self",
".",
"_store",
".",
... | Calculate a cache key given a function, args and kwargs.
:param fn: The function
:type fn: callable or str
:param args: The function args
:type args: tuple
:param kwargs: The function kwargs
:type kwargs: dict
:rtype: str | [
"Calculate",
"a",
"cache",
"key",
"given",
"a",
"function",
"args",
"and",
"kwargs",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/repository.py#L262-L297 |
openprocurement/openprocurement.client.python | openprocurement_client/client.py | APIBaseClient.patch | def patch(self, path=None, payload=None, headers=None,
params_dict=None, **params):
""" HTTP PATCH
- payload: string passed to the body of the request
- path: string additionnal path to the uri
- headers: dict, optionnal headers that will
be added to HTTP request.
- params: Optionnal parameterss added to the request
"""
return self.request("PATCH", path=path, payload=payload,
headers=headers, params_dict=params_dict, **params) | python | def patch(self, path=None, payload=None, headers=None,
params_dict=None, **params):
""" HTTP PATCH
- payload: string passed to the body of the request
- path: string additionnal path to the uri
- headers: dict, optionnal headers that will
be added to HTTP request.
- params: Optionnal parameterss added to the request
"""
return self.request("PATCH", path=path, payload=payload,
headers=headers, params_dict=params_dict, **params) | [
"def",
"patch",
"(",
"self",
",",
"path",
"=",
"None",
",",
"payload",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"params_dict",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"request",
"(",
"\"PATCH\"",
",",
"path",
"=... | HTTP PATCH
- payload: string passed to the body of the request
- path: string additionnal path to the uri
- headers: dict, optionnal headers that will
be added to HTTP request.
- params: Optionnal parameterss added to the request | [
"HTTP",
"PATCH"
] | train | https://github.com/openprocurement/openprocurement.client.python/blob/5dcd0bfac53dc802ae8f144361586278e3cee5ac/openprocurement_client/client.py#L96-L108 |
openprocurement/openprocurement.client.python | openprocurement_client/client.py | APIBaseClient.delete | def delete(self, path=None, headers=None):
""" HTTP DELETE
- path: string additionnal path to the uri
- headers: dict, optionnal headers that will
be added to HTTP request.
- params: Optionnal parameterss added to the request
"""
return self.request("DELETE", path=path, headers=headers) | python | def delete(self, path=None, headers=None):
""" HTTP DELETE
- path: string additionnal path to the uri
- headers: dict, optionnal headers that will
be added to HTTP request.
- params: Optionnal parameterss added to the request
"""
return self.request("DELETE", path=path, headers=headers) | [
"def",
"delete",
"(",
"self",
",",
"path",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"return",
"self",
".",
"request",
"(",
"\"DELETE\"",
",",
"path",
"=",
"path",
",",
"headers",
"=",
"headers",
")"
] | HTTP DELETE
- path: string additionnal path to the uri
- headers: dict, optionnal headers that will
be added to HTTP request.
- params: Optionnal parameterss added to the request | [
"HTTP",
"DELETE",
"-",
"path",
":",
"string",
"additionnal",
"path",
"to",
"the",
"uri",
"-",
"headers",
":",
"dict",
"optionnal",
"headers",
"that",
"will",
"be",
"added",
"to",
"HTTP",
"request",
".",
"-",
"params",
":",
"Optionnal",
"parameterss",
"adde... | train | https://github.com/openprocurement/openprocurement.client.python/blob/5dcd0bfac53dc802ae8f144361586278e3cee5ac/openprocurement_client/client.py#L110-L117 |
xlcteam/pynxc | pynxc/second_pass.py | SecondPassVisitor.flush_main | def flush_main(self):
"""Flushes the implicit main function if there is no main
function defined."""
if self.has_main:
return
self.in_main = True
self.write('task main()')
self.INDENT()
if self.debug:
print 'Flushing main:', self.fv.main
for node in self.fv.main:
self.v(node)
self.NEWLINE()
self.DEDENT()
self.in_main = False | python | def flush_main(self):
"""Flushes the implicit main function if there is no main
function defined."""
if self.has_main:
return
self.in_main = True
self.write('task main()')
self.INDENT()
if self.debug:
print 'Flushing main:', self.fv.main
for node in self.fv.main:
self.v(node)
self.NEWLINE()
self.DEDENT()
self.in_main = False | [
"def",
"flush_main",
"(",
"self",
")",
":",
"if",
"self",
".",
"has_main",
":",
"return",
"self",
".",
"in_main",
"=",
"True",
"self",
".",
"write",
"(",
"'task main()'",
")",
"self",
".",
"INDENT",
"(",
")",
"if",
"self",
".",
"debug",
":",
"print",... | Flushes the implicit main function if there is no main
function defined. | [
"Flushes",
"the",
"implicit",
"main",
"function",
"if",
"there",
"is",
"no",
"main",
"function",
"defined",
"."
] | train | https://github.com/xlcteam/pynxc/blob/8932d3a7c0962577c8ead220621f63f800e3b411/pynxc/second_pass.py#L659-L679 |
sdispater/cachy | cachy/stores/memcached_store.py | MemcachedStore.put | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
self._memcache.set(self._prefix + key, value, minutes * 60) | python | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
self._memcache.set(self._prefix + key, value, minutes * 60) | [
"def",
"put",
"(",
"self",
",",
"key",
",",
"value",
",",
"minutes",
")",
":",
"self",
".",
"_memcache",
".",
"set",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
",",
"minutes",
"*",
"60",
")"
] | Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"for",
"a",
"given",
"number",
"of",
"minutes",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/memcached_store.py#L34-L47 |
sdispater/cachy | cachy/stores/memcached_store.py | MemcachedStore.add | def add(self, key, val, minutes):
"""
Store an item in the cache if it does not exist.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
:rtype: bool
"""
return self._memcache.add(self._prefix + key, val, minutes * 60) | python | def add(self, key, val, minutes):
"""
Store an item in the cache if it does not exist.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
:rtype: bool
"""
return self._memcache.add(self._prefix + key, val, minutes * 60) | [
"def",
"add",
"(",
"self",
",",
"key",
",",
"val",
",",
"minutes",
")",
":",
"return",
"self",
".",
"_memcache",
".",
"add",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"val",
",",
"minutes",
"*",
"60",
")"
] | Store an item in the cache if it does not exist.
:param key: The cache key
:type key: str
:param val: The cache value
:type val: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
:rtype: bool | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"if",
"it",
"does",
"not",
"exist",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/memcached_store.py#L49-L64 |
sdispater/cachy | cachy/stores/memcached_store.py | MemcachedStore.increment | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
return self._memcache.incr(self._prefix + key, value) | python | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
return self._memcache.incr(self._prefix + key, value) | [
"def",
"increment",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"return",
"self",
".",
"_memcache",
".",
"incr",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool | [
"Increment",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/memcached_store.py#L66-L78 |
sdispater/cachy | cachy/stores/memcached_store.py | MemcachedStore.decrement | def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
return self._memcache.decr(self._prefix + key, value) | python | def decrement(self, key, value=1):
"""
Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool
"""
return self._memcache.decr(self._prefix + key, value) | [
"def",
"decrement",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"return",
"self",
".",
"_memcache",
".",
"decr",
"(",
"self",
".",
"_prefix",
"+",
"key",
",",
"value",
")"
] | Decrement the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The decrement value
:type value: int
:rtype: int or bool | [
"Decrement",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/memcached_store.py#L80-L92 |
load-tools/netort | netort/data_processing.py | get_nowait_from_queue | def get_nowait_from_queue(queue):
""" Collect all immediately available items from a queue """
data = []
for _ in range(queue.qsize()):
try:
data.append(queue.get_nowait())
except q.Empty:
break
return data | python | def get_nowait_from_queue(queue):
""" Collect all immediately available items from a queue """
data = []
for _ in range(queue.qsize()):
try:
data.append(queue.get_nowait())
except q.Empty:
break
return data | [
"def",
"get_nowait_from_queue",
"(",
"queue",
")",
":",
"data",
"=",
"[",
"]",
"for",
"_",
"in",
"range",
"(",
"queue",
".",
"qsize",
"(",
")",
")",
":",
"try",
":",
"data",
".",
"append",
"(",
"queue",
".",
"get_nowait",
"(",
")",
")",
"except",
... | Collect all immediately available items from a queue | [
"Collect",
"all",
"immediately",
"available",
"items",
"from",
"a",
"queue"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/data_processing.py#L10-L18 |
sdispater/cachy | cachy/redis_tagged_cache.py | RedisTaggedCache.forever | def forever(self, key, value):
"""
Store an item in the cache indefinitely.
:param key: The cache key
:type key: str
:param value: The value
:type value: mixed
"""
namespace = self._tags.get_namespace()
self._push_forever_keys(namespace, key)
self._store.forever(
'%s:%s' % (hashlib.sha1(encode(self._tags.get_namespace())).hexdigest(), key),
value
) | python | def forever(self, key, value):
"""
Store an item in the cache indefinitely.
:param key: The cache key
:type key: str
:param value: The value
:type value: mixed
"""
namespace = self._tags.get_namespace()
self._push_forever_keys(namespace, key)
self._store.forever(
'%s:%s' % (hashlib.sha1(encode(self._tags.get_namespace())).hexdigest(), key),
value
) | [
"def",
"forever",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"namespace",
"=",
"self",
".",
"_tags",
".",
"get_namespace",
"(",
")",
"self",
".",
"_push_forever_keys",
"(",
"namespace",
",",
"key",
")",
"self",
".",
"_store",
".",
"forever",
"(",... | Store an item in the cache indefinitely.
:param key: The cache key
:type key: str
:param value: The value
:type value: mixed | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"indefinitely",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/redis_tagged_cache.py#L10-L27 |
sdispater/cachy | cachy/redis_tagged_cache.py | RedisTaggedCache._push_forever_keys | def _push_forever_keys(self, namespace, key):
"""
Store a copy of the full key for each namespace segment.
:type namespace: str
:type key: str
"""
full_key = '%s%s:%s' % (self.get_prefix(),
hashlib.sha1(encode(self._tags.get_namespace())).hexdigest(),
key)
for segment in namespace.split('|'):
self._store.connection().lpush(self._forever_key(segment), full_key) | python | def _push_forever_keys(self, namespace, key):
"""
Store a copy of the full key for each namespace segment.
:type namespace: str
:type key: str
"""
full_key = '%s%s:%s' % (self.get_prefix(),
hashlib.sha1(encode(self._tags.get_namespace())).hexdigest(),
key)
for segment in namespace.split('|'):
self._store.connection().lpush(self._forever_key(segment), full_key) | [
"def",
"_push_forever_keys",
"(",
"self",
",",
"namespace",
",",
"key",
")",
":",
"full_key",
"=",
"'%s%s:%s'",
"%",
"(",
"self",
".",
"get_prefix",
"(",
")",
",",
"hashlib",
".",
"sha1",
"(",
"encode",
"(",
"self",
".",
"_tags",
".",
"get_namespace",
... | Store a copy of the full key for each namespace segment.
:type namespace: str
:type key: str | [
"Store",
"a",
"copy",
"of",
"the",
"full",
"key",
"for",
"each",
"namespace",
"segment",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/redis_tagged_cache.py#L37-L49 |
sdispater/cachy | cachy/redis_tagged_cache.py | RedisTaggedCache._delete_forever_keys | def _delete_forever_keys(self):
"""
Delete all of the items that were stored forever.
"""
for segment in self._tags.get_namespace().split('|'):
segment = self._forever_key(segment)
self._delete_forever_values(segment)
self._store.connection().delete(segment) | python | def _delete_forever_keys(self):
"""
Delete all of the items that were stored forever.
"""
for segment in self._tags.get_namespace().split('|'):
segment = self._forever_key(segment)
self._delete_forever_values(segment)
self._store.connection().delete(segment) | [
"def",
"_delete_forever_keys",
"(",
"self",
")",
":",
"for",
"segment",
"in",
"self",
".",
"_tags",
".",
"get_namespace",
"(",
")",
".",
"split",
"(",
"'|'",
")",
":",
"segment",
"=",
"self",
".",
"_forever_key",
"(",
"segment",
")",
"self",
".",
"_del... | Delete all of the items that were stored forever. | [
"Delete",
"all",
"of",
"the",
"items",
"that",
"were",
"stored",
"forever",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/redis_tagged_cache.py#L51-L59 |
sdispater/cachy | cachy/redis_tagged_cache.py | RedisTaggedCache._delete_forever_values | def _delete_forever_values(self, forever_key):
"""
Delete all of the keys that have been stored forever.
:type forever_key: str
"""
forever = self._store.connection().lrange(forever_key, 0, -1)
if len(forever) > 0:
self._store.connection().delete(*forever) | python | def _delete_forever_values(self, forever_key):
"""
Delete all of the keys that have been stored forever.
:type forever_key: str
"""
forever = self._store.connection().lrange(forever_key, 0, -1)
if len(forever) > 0:
self._store.connection().delete(*forever) | [
"def",
"_delete_forever_values",
"(",
"self",
",",
"forever_key",
")",
":",
"forever",
"=",
"self",
".",
"_store",
".",
"connection",
"(",
")",
".",
"lrange",
"(",
"forever_key",
",",
"0",
",",
"-",
"1",
")",
"if",
"len",
"(",
"forever",
")",
">",
"0... | Delete all of the keys that have been stored forever.
:type forever_key: str | [
"Delete",
"all",
"of",
"the",
"keys",
"that",
"have",
"been",
"stored",
"forever",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/redis_tagged_cache.py#L61-L70 |
inveniosoftware/invenio-logging | invenio_logging/ext.py | InvenioLoggingBase.capture_pywarnings | def capture_pywarnings(handler):
"""Log python system warnings."""
logger = logging.getLogger('py.warnings')
# Check for previously installed handlers.
for h in logger.handlers:
if isinstance(h, handler.__class__):
return
logger.addHandler(handler)
logger.setLevel(logging.WARNING) | python | def capture_pywarnings(handler):
"""Log python system warnings."""
logger = logging.getLogger('py.warnings')
# Check for previously installed handlers.
for h in logger.handlers:
if isinstance(h, handler.__class__):
return
logger.addHandler(handler)
logger.setLevel(logging.WARNING) | [
"def",
"capture_pywarnings",
"(",
"handler",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'py.warnings'",
")",
"# Check for previously installed handlers.",
"for",
"h",
"in",
"logger",
".",
"handlers",
":",
"if",
"isinstance",
"(",
"h",
",",
"hand... | Log python system warnings. | [
"Log",
"python",
"system",
"warnings",
"."
] | train | https://github.com/inveniosoftware/invenio-logging/blob/59ee171ad4f9809f62a822964b5c68e5be672dd8/invenio_logging/ext.py#L34-L42 |
sdispater/cachy | cachy/stores/dict_store.py | DictStore._get_payload | def _get_payload(self, key):
"""
Retrieve an item and expiry time from the cache by key.
:param key: The cache key
:type key: str
:rtype: dict
"""
payload = self._storage.get(key)
# If the key does not exist, we return nothing
if not payload:
return (None, None)
expire = payload[0]
# If the current time is greater than expiration timestamps we will delete
# the entry
if round(time.time()) >= expire:
self.forget(key)
return (None, None)
data = payload[1]
# Next, we'll extract the number of minutes that are remaining for a cache
# so that we can properly retain the time for things like the increment
# operation that may be performed on the cache. We'll round this out.
time_ = math.ceil((expire - round(time.time())) / 60.)
return (data, time_) | python | def _get_payload(self, key):
"""
Retrieve an item and expiry time from the cache by key.
:param key: The cache key
:type key: str
:rtype: dict
"""
payload = self._storage.get(key)
# If the key does not exist, we return nothing
if not payload:
return (None, None)
expire = payload[0]
# If the current time is greater than expiration timestamps we will delete
# the entry
if round(time.time()) >= expire:
self.forget(key)
return (None, None)
data = payload[1]
# Next, we'll extract the number of minutes that are remaining for a cache
# so that we can properly retain the time for things like the increment
# operation that may be performed on the cache. We'll round this out.
time_ = math.ceil((expire - round(time.time())) / 60.)
return (data, time_) | [
"def",
"_get_payload",
"(",
"self",
",",
"key",
")",
":",
"payload",
"=",
"self",
".",
"_storage",
".",
"get",
"(",
"key",
")",
"# If the key does not exist, we return nothing",
"if",
"not",
"payload",
":",
"return",
"(",
"None",
",",
"None",
")",
"expire",
... | Retrieve an item and expiry time from the cache by key.
:param key: The cache key
:type key: str
:rtype: dict | [
"Retrieve",
"an",
"item",
"and",
"expiry",
"time",
"from",
"the",
"cache",
"by",
"key",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/dict_store.py#L27-L58 |
sdispater/cachy | cachy/stores/dict_store.py | DictStore.put | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
self._storage[key] = (self._expiration(minutes), value) | python | def put(self, key, value, minutes):
"""
Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int
"""
self._storage[key] = (self._expiration(minutes), value) | [
"def",
"put",
"(",
"self",
",",
"key",
",",
"value",
",",
"minutes",
")",
":",
"self",
".",
"_storage",
"[",
"key",
"]",
"=",
"(",
"self",
".",
"_expiration",
"(",
"minutes",
")",
",",
"value",
")"
] | Store an item in the cache for a given number of minutes.
:param key: The cache key
:type key: str
:param value: The cache value
:type value: mixed
:param minutes: The lifetime in minutes of the cached value
:type minutes: int | [
"Store",
"an",
"item",
"in",
"the",
"cache",
"for",
"a",
"given",
"number",
"of",
"minutes",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/dict_store.py#L60-L73 |
sdispater/cachy | cachy/stores/dict_store.py | DictStore.increment | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
data, time_ = self._get_payload(key)
integer = int(data) + value
self.put(key, integer, int(time_))
return integer | python | def increment(self, key, value=1):
"""
Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool
"""
data, time_ = self._get_payload(key)
integer = int(data) + value
self.put(key, integer, int(time_))
return integer | [
"def",
"increment",
"(",
"self",
",",
"key",
",",
"value",
"=",
"1",
")",
":",
"data",
",",
"time_",
"=",
"self",
".",
"_get_payload",
"(",
"key",
")",
"integer",
"=",
"int",
"(",
"data",
")",
"+",
"value",
"self",
".",
"put",
"(",
"key",
",",
... | Increment the value of an item in the cache.
:param key: The cache key
:type key: str
:param value: The increment value
:type value: int
:rtype: int or bool | [
"Increment",
"the",
"value",
"of",
"an",
"item",
"in",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/dict_store.py#L75-L93 |
sdispater/cachy | cachy/stores/dict_store.py | DictStore.forget | def forget(self, key):
"""
Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool
"""
if key in self._storage:
del self._storage[key]
return True
return False | python | def forget(self, key):
"""
Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool
"""
if key in self._storage:
del self._storage[key]
return True
return False | [
"def",
"forget",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"in",
"self",
".",
"_storage",
":",
"del",
"self",
".",
"_storage",
"[",
"key",
"]",
"return",
"True",
"return",
"False"
] | Remove an item from the cache.
:param key: The cache key
:type key: str
:rtype: bool | [
"Remove",
"an",
"item",
"from",
"the",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/stores/dict_store.py#L121-L135 |
fabaff/python-netdata | netdata/__init__.py | Netdata.get_data | async def get_data(self, resource):
"""Get detail for a resource from the data endpoint."""
url = '{}{}'.format(
self.base_url, self.endpoint.format(resource=resource))
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(url)
_LOGGER.info(
"Response from Netdata: %s", response.status)
data = await response.json()
_LOGGER.debug(data)
self.values = {k: v for k, v in zip(
data['labels'], data['data'][0])}
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
_LOGGER.error("Can not load data from Netdata")
raise exceptions.NetdataConnectionError() | python | async def get_data(self, resource):
"""Get detail for a resource from the data endpoint."""
url = '{}{}'.format(
self.base_url, self.endpoint.format(resource=resource))
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(url)
_LOGGER.info(
"Response from Netdata: %s", response.status)
data = await response.json()
_LOGGER.debug(data)
self.values = {k: v for k, v in zip(
data['labels'], data['data'][0])}
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
_LOGGER.error("Can not load data from Netdata")
raise exceptions.NetdataConnectionError() | [
"async",
"def",
"get_data",
"(",
"self",
",",
"resource",
")",
":",
"url",
"=",
"'{}{}'",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"self",
".",
"endpoint",
".",
"format",
"(",
"resource",
"=",
"resource",
")",
")",
"try",
":",
"with",
"async... | Get detail for a resource from the data endpoint. | [
"Get",
"detail",
"for",
"a",
"resource",
"from",
"the",
"data",
"endpoint",
"."
] | train | https://github.com/fabaff/python-netdata/blob/bca5d58f84a0fc849b9bb16a00959a0b33d13a67/netdata/__init__.py#L39-L57 |
fabaff/python-netdata | netdata/__init__.py | Netdata.get_alarms | async def get_alarms(self):
"""Get alarms for a Netdata instance."""
url = '{}{}'.format(self.base_url, self.endpoint)
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(url)
_LOGGER.debug(
"Response from Netdata: %s", response.status)
data = await response.text()
_LOGGER.debug(data)
self.alarms = data
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
_LOGGER.error("Can not load data from Netdata")
raise exceptions.NetdataConnectionError() | python | async def get_alarms(self):
"""Get alarms for a Netdata instance."""
url = '{}{}'.format(self.base_url, self.endpoint)
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(url)
_LOGGER.debug(
"Response from Netdata: %s", response.status)
data = await response.text()
_LOGGER.debug(data)
self.alarms = data
except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
_LOGGER.error("Can not load data from Netdata")
raise exceptions.NetdataConnectionError() | [
"async",
"def",
"get_alarms",
"(",
"self",
")",
":",
"url",
"=",
"'{}{}'",
".",
"format",
"(",
"self",
".",
"base_url",
",",
"self",
".",
"endpoint",
")",
"try",
":",
"with",
"async_timeout",
".",
"timeout",
"(",
"5",
",",
"loop",
"=",
"self",
".",
... | Get alarms for a Netdata instance. | [
"Get",
"alarms",
"for",
"a",
"Netdata",
"instance",
"."
] | train | https://github.com/fabaff/python-netdata/blob/bca5d58f84a0fc849b9bb16a00959a0b33d13a67/netdata/__init__.py#L59-L75 |
inveniosoftware/invenio-logging | invenio_logging/utils.py | AddRequestIdFilter.filter | def filter(self, record):
"""If request_id is set in flask.g, add it to log record."""
if g and hasattr(g, 'request_id'):
record.request_id = g.request_id
return True | python | def filter(self, record):
"""If request_id is set in flask.g, add it to log record."""
if g and hasattr(g, 'request_id'):
record.request_id = g.request_id
return True | [
"def",
"filter",
"(",
"self",
",",
"record",
")",
":",
"if",
"g",
"and",
"hasattr",
"(",
"g",
",",
"'request_id'",
")",
":",
"record",
".",
"request_id",
"=",
"g",
".",
"request_id",
"return",
"True"
] | If request_id is set in flask.g, add it to log record. | [
"If",
"request_id",
"is",
"set",
"in",
"flask",
".",
"g",
"add",
"it",
"to",
"log",
"record",
"."
] | train | https://github.com/inveniosoftware/invenio-logging/blob/59ee171ad4f9809f62a822964b5c68e5be672dd8/invenio_logging/utils.py#L19-L23 |
load-tools/netort | netort/logging_and_signals.py | init_logging | def init_logging(log_filename, verbose, quiet):
"""Set up logging with default parameters:
* default console logging level is INFO
* ERROR, WARNING and CRITICAL are redirected to stderr
Args:
log_filename (str): if set, will write DEBUG log there
verbose (bool): DEBUG level in console, overrides 'quiet'
quiet (bool): WARNING level in console
"""
# TODO: consider making one verbosity parameter instead of two mutually exclusive
# TODO: default values for parameters
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
# add file handler if needed
if log_filename:
file_handler = logging.FileHandler(log_filename)
file_handler.setLevel(logging.DEBUG)
# TODO: initialize all formatters in the beginning of this function
file_handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
))
logger.addHandler(file_handler)
# console stdout and stderr handlers
console_handler = logging.StreamHandler(sys.stdout)
stderr_hdl = logging.StreamHandler(sys.stderr)
# formatters
fmt_verbose = logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s",
datefmt='%Y-%m-%d,%H:%M:%S.%f'
)
fmt_regular = logging.Formatter(
"%(asctime)s [%(levelname).4s] [%(filename).8s] %(message)s", "%H:%M:%S")
# set formatters and log levels
if verbose:
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(fmt_verbose)
stderr_hdl.setFormatter(fmt_verbose)
elif quiet:
console_handler.setLevel(logging.WARNING)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
else:
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
# TODO: do we really need these to be redirected?
# redirect ERROR, WARNING and CRITICAL to sterr
f_err = SingleLevelFilter(logging.ERROR, True)
f_warn = SingleLevelFilter(logging.WARNING, True)
f_crit = SingleLevelFilter(logging.CRITICAL, True)
console_handler.addFilter(f_err)
console_handler.addFilter(f_warn)
console_handler.addFilter(f_crit)
logger.addHandler(console_handler)
f_info = SingleLevelFilter(logging.INFO, True)
f_debug = SingleLevelFilter(logging.DEBUG, True)
stderr_hdl.addFilter(f_info)
stderr_hdl.addFilter(f_debug)
logger.addHandler(stderr_hdl) | python | def init_logging(log_filename, verbose, quiet):
"""Set up logging with default parameters:
* default console logging level is INFO
* ERROR, WARNING and CRITICAL are redirected to stderr
Args:
log_filename (str): if set, will write DEBUG log there
verbose (bool): DEBUG level in console, overrides 'quiet'
quiet (bool): WARNING level in console
"""
# TODO: consider making one verbosity parameter instead of two mutually exclusive
# TODO: default values for parameters
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
# add file handler if needed
if log_filename:
file_handler = logging.FileHandler(log_filename)
file_handler.setLevel(logging.DEBUG)
# TODO: initialize all formatters in the beginning of this function
file_handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
))
logger.addHandler(file_handler)
# console stdout and stderr handlers
console_handler = logging.StreamHandler(sys.stdout)
stderr_hdl = logging.StreamHandler(sys.stderr)
# formatters
fmt_verbose = logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s",
datefmt='%Y-%m-%d,%H:%M:%S.%f'
)
fmt_regular = logging.Formatter(
"%(asctime)s [%(levelname).4s] [%(filename).8s] %(message)s", "%H:%M:%S")
# set formatters and log levels
if verbose:
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(fmt_verbose)
stderr_hdl.setFormatter(fmt_verbose)
elif quiet:
console_handler.setLevel(logging.WARNING)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
else:
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
# TODO: do we really need these to be redirected?
# redirect ERROR, WARNING and CRITICAL to sterr
f_err = SingleLevelFilter(logging.ERROR, True)
f_warn = SingleLevelFilter(logging.WARNING, True)
f_crit = SingleLevelFilter(logging.CRITICAL, True)
console_handler.addFilter(f_err)
console_handler.addFilter(f_warn)
console_handler.addFilter(f_crit)
logger.addHandler(console_handler)
f_info = SingleLevelFilter(logging.INFO, True)
f_debug = SingleLevelFilter(logging.DEBUG, True)
stderr_hdl.addFilter(f_info)
stderr_hdl.addFilter(f_debug)
logger.addHandler(stderr_hdl) | [
"def",
"init_logging",
"(",
"log_filename",
",",
"verbose",
",",
"quiet",
")",
":",
"# TODO: consider making one verbosity parameter instead of two mutually exclusive",
"# TODO: default values for parameters",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"''",
")",
"logge... | Set up logging with default parameters:
* default console logging level is INFO
* ERROR, WARNING and CRITICAL are redirected to stderr
Args:
log_filename (str): if set, will write DEBUG log there
verbose (bool): DEBUG level in console, overrides 'quiet'
quiet (bool): WARNING level in console | [
"Set",
"up",
"logging",
"with",
"default",
"parameters",
":",
"*",
"default",
"console",
"logging",
"level",
"is",
"INFO",
"*",
"ERROR",
"WARNING",
"and",
"CRITICAL",
"are",
"redirected",
"to",
"stderr"
] | train | https://github.com/load-tools/netort/blob/b5233a70cea74108857ea24ba5c37975057ca00f/netort/logging_and_signals.py#L34-L100 |
sdispater/cachy | cachy/cache_manager.py | CacheManager.store | def store(self, name=None):
"""
Get a cache store instance by name.
:param name: The cache store name
:type name: str
:rtype: Repository
"""
if name is None:
name = self.get_default_driver()
self._stores[name] = self._get(name)
return self._stores[name] | python | def store(self, name=None):
"""
Get a cache store instance by name.
:param name: The cache store name
:type name: str
:rtype: Repository
"""
if name is None:
name = self.get_default_driver()
self._stores[name] = self._get(name)
return self._stores[name] | [
"def",
"store",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"self",
".",
"get_default_driver",
"(",
")",
"self",
".",
"_stores",
"[",
"name",
"]",
"=",
"self",
".",
"_get",
"(",
"name",
")",
"ret... | Get a cache store instance by name.
:param name: The cache store name
:type name: str
:rtype: Repository | [
"Get",
"a",
"cache",
"store",
"instance",
"by",
"name",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L43-L57 |
sdispater/cachy | cachy/cache_manager.py | CacheManager._get | def _get(self, name):
"""
Attempt to get the store from the local cache.
:param name: The store name
:type name: str
:rtype: Repository
"""
return self._stores.get(name, self._resolve(name)) | python | def _get(self, name):
"""
Attempt to get the store from the local cache.
:param name: The store name
:type name: str
:rtype: Repository
"""
return self._stores.get(name, self._resolve(name)) | [
"def",
"_get",
"(",
"self",
",",
"name",
")",
":",
"return",
"self",
".",
"_stores",
".",
"get",
"(",
"name",
",",
"self",
".",
"_resolve",
"(",
"name",
")",
")"
] | Attempt to get the store from the local cache.
:param name: The store name
:type name: str
:rtype: Repository | [
"Attempt",
"to",
"get",
"the",
"store",
"from",
"the",
"local",
"cache",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L70-L79 |
sdispater/cachy | cachy/cache_manager.py | CacheManager._resolve | def _resolve(self, name):
"""
Resolve the given store
:param name: The store to resolve
:type name: str
:rtype: Repository
"""
config = self._get_config(name)
if not config:
raise RuntimeError('Cache store [%s] is not defined.' % name)
if config['driver'] in self._custom_creators:
repository = self._call_custom_creator(config)
else:
repository = getattr(self, '_create_%s_driver' % config['driver'])(config)
if 'serializer' in config:
serializer = self._resolve_serializer(config['serializer'])
else:
serializer = self._serializer
repository.get_store().set_serializer(serializer)
return repository | python | def _resolve(self, name):
"""
Resolve the given store
:param name: The store to resolve
:type name: str
:rtype: Repository
"""
config = self._get_config(name)
if not config:
raise RuntimeError('Cache store [%s] is not defined.' % name)
if config['driver'] in self._custom_creators:
repository = self._call_custom_creator(config)
else:
repository = getattr(self, '_create_%s_driver' % config['driver'])(config)
if 'serializer' in config:
serializer = self._resolve_serializer(config['serializer'])
else:
serializer = self._serializer
repository.get_store().set_serializer(serializer)
return repository | [
"def",
"_resolve",
"(",
"self",
",",
"name",
")",
":",
"config",
"=",
"self",
".",
"_get_config",
"(",
"name",
")",
"if",
"not",
"config",
":",
"raise",
"RuntimeError",
"(",
"'Cache store [%s] is not defined.'",
"%",
"name",
")",
"if",
"config",
"[",
"'dri... | Resolve the given store
:param name: The store to resolve
:type name: str
:rtype: Repository | [
"Resolve",
"the",
"given",
"store"
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L81-L107 |
sdispater/cachy | cachy/cache_manager.py | CacheManager._call_custom_creator | def _call_custom_creator(self, config):
"""
Call a custom driver creator.
:param config: The driver configuration
:type config: dict
:rtype: Repository
"""
creator = self._custom_creators[config['driver']](config)
if isinstance(creator, Store):
creator = self.repository(creator)
if not isinstance(creator, Repository):
raise RuntimeError('Custom creator should return a Repository instance.')
return creator | python | def _call_custom_creator(self, config):
"""
Call a custom driver creator.
:param config: The driver configuration
:type config: dict
:rtype: Repository
"""
creator = self._custom_creators[config['driver']](config)
if isinstance(creator, Store):
creator = self.repository(creator)
if not isinstance(creator, Repository):
raise RuntimeError('Custom creator should return a Repository instance.')
return creator | [
"def",
"_call_custom_creator",
"(",
"self",
",",
"config",
")",
":",
"creator",
"=",
"self",
".",
"_custom_creators",
"[",
"config",
"[",
"'driver'",
"]",
"]",
"(",
"config",
")",
"if",
"isinstance",
"(",
"creator",
",",
"Store",
")",
":",
"creator",
"="... | Call a custom driver creator.
:param config: The driver configuration
:type config: dict
:rtype: Repository | [
"Call",
"a",
"custom",
"driver",
"creator",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L109-L126 |
sdispater/cachy | cachy/cache_manager.py | CacheManager._create_file_driver | def _create_file_driver(self, config):
"""
Create an instance of the file cache driver.
:param config: The driver configuration
:type config: dict
:rtype: Repository
"""
kwargs = {
'directory': config['path']
}
if 'hash_type' in config:
kwargs['hash_type'] = config['hash_type']
return self.repository(FileStore(**kwargs)) | python | def _create_file_driver(self, config):
"""
Create an instance of the file cache driver.
:param config: The driver configuration
:type config: dict
:rtype: Repository
"""
kwargs = {
'directory': config['path']
}
if 'hash_type' in config:
kwargs['hash_type'] = config['hash_type']
return self.repository(FileStore(**kwargs)) | [
"def",
"_create_file_driver",
"(",
"self",
",",
"config",
")",
":",
"kwargs",
"=",
"{",
"'directory'",
":",
"config",
"[",
"'path'",
"]",
"}",
"if",
"'hash_type'",
"in",
"config",
":",
"kwargs",
"[",
"'hash_type'",
"]",
"=",
"config",
"[",
"'hash_type'",
... | Create an instance of the file cache driver.
:param config: The driver configuration
:type config: dict
:rtype: Repository | [
"Create",
"an",
"instance",
"of",
"the",
"file",
"cache",
"driver",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L139-L155 |
sdispater/cachy | cachy/cache_manager.py | CacheManager.get_default_driver | def get_default_driver(self):
"""
Get the default cache driver name.
:rtype: str
:raises: RuntimeError
"""
if 'default' in self._config:
return self._config['default']
if len(self._config['stores']) == 1:
return list(self._config['stores'].keys())[0]
raise RuntimeError('Missing "default" cache in configuration.') | python | def get_default_driver(self):
"""
Get the default cache driver name.
:rtype: str
:raises: RuntimeError
"""
if 'default' in self._config:
return self._config['default']
if len(self._config['stores']) == 1:
return list(self._config['stores'].keys())[0]
raise RuntimeError('Missing "default" cache in configuration.') | [
"def",
"get_default_driver",
"(",
"self",
")",
":",
"if",
"'default'",
"in",
"self",
".",
"_config",
":",
"return",
"self",
".",
"_config",
"[",
"'default'",
"]",
"if",
"len",
"(",
"self",
".",
"_config",
"[",
"'stores'",
"]",
")",
"==",
"1",
":",
"r... | Get the default cache driver name.
:rtype: str
:raises: RuntimeError | [
"Get",
"the",
"default",
"cache",
"driver",
"name",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L214-L228 |
sdispater/cachy | cachy/cache_manager.py | CacheManager._resolve_serializer | def _resolve_serializer(self, serializer):
"""
Resolve the given serializer.
:param serializer: The serializer to resolve
:type serializer: str or Serializer
:rtype: Serializer
"""
if isinstance(serializer, Serializer):
return serializer
if serializer in self._serializers:
return self._serializers[serializer]
raise RuntimeError('Unsupported serializer') | python | def _resolve_serializer(self, serializer):
"""
Resolve the given serializer.
:param serializer: The serializer to resolve
:type serializer: str or Serializer
:rtype: Serializer
"""
if isinstance(serializer, Serializer):
return serializer
if serializer in self._serializers:
return self._serializers[serializer]
raise RuntimeError('Unsupported serializer') | [
"def",
"_resolve_serializer",
"(",
"self",
",",
"serializer",
")",
":",
"if",
"isinstance",
"(",
"serializer",
",",
"Serializer",
")",
":",
"return",
"serializer",
"if",
"serializer",
"in",
"self",
".",
"_serializers",
":",
"return",
"self",
".",
"_serializers... | Resolve the given serializer.
:param serializer: The serializer to resolve
:type serializer: str or Serializer
:rtype: Serializer | [
"Resolve",
"the",
"given",
"serializer",
"."
] | train | https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/cache_manager.py#L255-L270 |
SamLau95/nbinteract | nbinteract/questions.py | multiple_choice | def multiple_choice(question, choices, answers):
"""
Generates a multiple choice question that allows the user to select an
answer choice and shows whether choice was correct.
Args:
question (str): Question text displayed above choices.
choices (list str): Answer choices that user can select.
answers (int | iterable int): Either an integer or iterable of
integers. Each integer in answers corresponds to the index of the
correct choice in `choices`.
Returns:
None
>>> multiple_choice(question="What is 10 + 2 * 5?",
... choices=['12', '60', '20'],
... answers=2) #doctest: +SKIP
<What is 10 + 2 * 5?>
<Button> <12>
<Button> <60>
<Button> <20> (Correct)
>>> multiple_choice(question="Select all prime numbers.",
... choices=['12', '3', '31'],
... answers=[1, 2]) #doctest: +SKIP
<Select all prime numbers.>
<Button> <12>
<Button> <3> (Correct)
<Button> <31> (Correct)
"""
if not isinstance(answers, (int, collections.Iterable)):
raise TypeError(
'The `answers` arg is expected to be of type '
'(int | iterable int) but got {} instead.'.format(type(answers))
)
@curry
def check_answer(index, button):
is_correct = (
index == answers if isinstance(answers, int) else index in answers
)
button.style.button_color = GREEN if is_correct else RED
answer_choices = []
for index, choice in enumerate(choices):
button = widgets.Button(
layout=widgets.Layout(width='20px', height='20px', padding='0')
)
button.on_click(check_answer(index))
button_and_question = widgets.HBox(
[button, widgets.HTML(TEXT_STYLE.format(choice))],
layout=widgets.Layout(align_items='center')
)
answer_choices.append(button_and_question)
question_html = [widgets.HTML(TEXT_STYLE.format(question))]
display(widgets.VBox(question_html + answer_choices)) | python | def multiple_choice(question, choices, answers):
"""
Generates a multiple choice question that allows the user to select an
answer choice and shows whether choice was correct.
Args:
question (str): Question text displayed above choices.
choices (list str): Answer choices that user can select.
answers (int | iterable int): Either an integer or iterable of
integers. Each integer in answers corresponds to the index of the
correct choice in `choices`.
Returns:
None
>>> multiple_choice(question="What is 10 + 2 * 5?",
... choices=['12', '60', '20'],
... answers=2) #doctest: +SKIP
<What is 10 + 2 * 5?>
<Button> <12>
<Button> <60>
<Button> <20> (Correct)
>>> multiple_choice(question="Select all prime numbers.",
... choices=['12', '3', '31'],
... answers=[1, 2]) #doctest: +SKIP
<Select all prime numbers.>
<Button> <12>
<Button> <3> (Correct)
<Button> <31> (Correct)
"""
if not isinstance(answers, (int, collections.Iterable)):
raise TypeError(
'The `answers` arg is expected to be of type '
'(int | iterable int) but got {} instead.'.format(type(answers))
)
@curry
def check_answer(index, button):
is_correct = (
index == answers if isinstance(answers, int) else index in answers
)
button.style.button_color = GREEN if is_correct else RED
answer_choices = []
for index, choice in enumerate(choices):
button = widgets.Button(
layout=widgets.Layout(width='20px', height='20px', padding='0')
)
button.on_click(check_answer(index))
button_and_question = widgets.HBox(
[button, widgets.HTML(TEXT_STYLE.format(choice))],
layout=widgets.Layout(align_items='center')
)
answer_choices.append(button_and_question)
question_html = [widgets.HTML(TEXT_STYLE.format(question))]
display(widgets.VBox(question_html + answer_choices)) | [
"def",
"multiple_choice",
"(",
"question",
",",
"choices",
",",
"answers",
")",
":",
"if",
"not",
"isinstance",
"(",
"answers",
",",
"(",
"int",
",",
"collections",
".",
"Iterable",
")",
")",
":",
"raise",
"TypeError",
"(",
"'The `answers` arg is expected to b... | Generates a multiple choice question that allows the user to select an
answer choice and shows whether choice was correct.
Args:
question (str): Question text displayed above choices.
choices (list str): Answer choices that user can select.
answers (int | iterable int): Either an integer or iterable of
integers. Each integer in answers corresponds to the index of the
correct choice in `choices`.
Returns:
None
>>> multiple_choice(question="What is 10 + 2 * 5?",
... choices=['12', '60', '20'],
... answers=2) #doctest: +SKIP
<What is 10 + 2 * 5?>
<Button> <12>
<Button> <60>
<Button> <20> (Correct)
>>> multiple_choice(question="Select all prime numbers.",
... choices=['12', '3', '31'],
... answers=[1, 2]) #doctest: +SKIP
<Select all prime numbers.>
<Button> <12>
<Button> <3> (Correct)
<Button> <31> (Correct) | [
"Generates",
"a",
"multiple",
"choice",
"question",
"that",
"allows",
"the",
"user",
"to",
"select",
"an",
"answer",
"choice",
"and",
"shows",
"whether",
"choice",
"was",
"correct",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/questions.py#L15-L75 |
SamLau95/nbinteract | nbinteract/questions.py | short_answer | def short_answer(question, answers, explanation=None):
"""
Generates a short answer question that allows user to input an answer in
a textbox and a submit button to check the answer.
Args:
question (str): The question being asked.
answers (str | list str | func): If a string, only that string will be
marked correct. If a list of string, any string in the list will be
marked correct. If a function, any input that causes the function
to return True will be marked correct.
explanation (str): The explanation to the question is displayed when
the user inputs the correct answer.
Returns:
None
>>> short_answer('What is 1 + 1?', '2',
... explanation='1+1 is 2') #doctest: +SKIP
<What is 1+1?>
<Input box, Submit button>
>>> short_answer('Enter the first name of a member of the Beatles.',
... ['John', 'Paul', 'George', 'Ringo']) #doctest: +SKIP
<Enter the first name of a member of the Beatles.>
<Input box, Submit button>
>>> short_answer('Enter an even number.',
... lambda x: int(x) % 2 == 0) #doctest: +SKIP
<Enter an even number.>
<Input box, Submit button>
"""
# Input textbox
textbox = widgets.Text(placeholder='Write your answer here')
# Submit button
submit_button = widgets.Button(description='Submit')
# Space right of the submit button to show checkmark/x-mark
visual_correct = widgets.HTML()
# Space below input line to display error if function call errored
error_space = widgets.HTML()
# Space below input line to display explanation if answer is correct
explain_space = widgets.HTML()
# correctness function linked to the submit button
def check_answer(_):
response = textbox.value
if isinstance(answers, collections.Callable):
try:
error_space.value = ''
correct = answers(response)
except Exception as e:
correct = False
error_space.value = 'Error in checking answer: {}'.format(e)
elif isinstance(answers, str):
correct = response == answers
elif isinstance(answers, collections.Iterable):
correct = response in answers
else:
raise TypeError('The `answers` arg is an incorrect type.')
visual_correct.value = CHECK_ICON if correct else X_ICON
if correct and explanation:
explain_space.value = explanation
submit_button.on_click(check_answer)
question_tag = widgets.HTML(TEXT_STYLE.format(question))
user_input_line = widgets.HBox([textbox, submit_button, visual_correct])
display(
widgets.VBox([
question_tag, user_input_line, error_space, explain_space
])
) | python | def short_answer(question, answers, explanation=None):
"""
Generates a short answer question that allows user to input an answer in
a textbox and a submit button to check the answer.
Args:
question (str): The question being asked.
answers (str | list str | func): If a string, only that string will be
marked correct. If a list of string, any string in the list will be
marked correct. If a function, any input that causes the function
to return True will be marked correct.
explanation (str): The explanation to the question is displayed when
the user inputs the correct answer.
Returns:
None
>>> short_answer('What is 1 + 1?', '2',
... explanation='1+1 is 2') #doctest: +SKIP
<What is 1+1?>
<Input box, Submit button>
>>> short_answer('Enter the first name of a member of the Beatles.',
... ['John', 'Paul', 'George', 'Ringo']) #doctest: +SKIP
<Enter the first name of a member of the Beatles.>
<Input box, Submit button>
>>> short_answer('Enter an even number.',
... lambda x: int(x) % 2 == 0) #doctest: +SKIP
<Enter an even number.>
<Input box, Submit button>
"""
# Input textbox
textbox = widgets.Text(placeholder='Write your answer here')
# Submit button
submit_button = widgets.Button(description='Submit')
# Space right of the submit button to show checkmark/x-mark
visual_correct = widgets.HTML()
# Space below input line to display error if function call errored
error_space = widgets.HTML()
# Space below input line to display explanation if answer is correct
explain_space = widgets.HTML()
# correctness function linked to the submit button
def check_answer(_):
response = textbox.value
if isinstance(answers, collections.Callable):
try:
error_space.value = ''
correct = answers(response)
except Exception as e:
correct = False
error_space.value = 'Error in checking answer: {}'.format(e)
elif isinstance(answers, str):
correct = response == answers
elif isinstance(answers, collections.Iterable):
correct = response in answers
else:
raise TypeError('The `answers` arg is an incorrect type.')
visual_correct.value = CHECK_ICON if correct else X_ICON
if correct and explanation:
explain_space.value = explanation
submit_button.on_click(check_answer)
question_tag = widgets.HTML(TEXT_STYLE.format(question))
user_input_line = widgets.HBox([textbox, submit_button, visual_correct])
display(
widgets.VBox([
question_tag, user_input_line, error_space, explain_space
])
) | [
"def",
"short_answer",
"(",
"question",
",",
"answers",
",",
"explanation",
"=",
"None",
")",
":",
"# Input textbox",
"textbox",
"=",
"widgets",
".",
"Text",
"(",
"placeholder",
"=",
"'Write your answer here'",
")",
"# Submit button",
"submit_button",
"=",
"widget... | Generates a short answer question that allows user to input an answer in
a textbox and a submit button to check the answer.
Args:
question (str): The question being asked.
answers (str | list str | func): If a string, only that string will be
marked correct. If a list of string, any string in the list will be
marked correct. If a function, any input that causes the function
to return True will be marked correct.
explanation (str): The explanation to the question is displayed when
the user inputs the correct answer.
Returns:
None
>>> short_answer('What is 1 + 1?', '2',
... explanation='1+1 is 2') #doctest: +SKIP
<What is 1+1?>
<Input box, Submit button>
>>> short_answer('Enter the first name of a member of the Beatles.',
... ['John', 'Paul', 'George', 'Ringo']) #doctest: +SKIP
<Enter the first name of a member of the Beatles.>
<Input box, Submit button>
>>> short_answer('Enter an even number.',
... lambda x: int(x) % 2 == 0) #doctest: +SKIP
<Enter an even number.>
<Input box, Submit button> | [
"Generates",
"a",
"short",
"answer",
"question",
"that",
"allows",
"user",
"to",
"input",
"an",
"answer",
"in",
"a",
"textbox",
"and",
"a",
"submit",
"button",
"to",
"check",
"the",
"answer",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/questions.py#L88-L159 |
SamLau95/nbinteract | nbinteract/exporters.py | publish | def publish(spec, nb_name, template='full', save_first=True):
"""
Converts nb_name to an HTML file. Preserves widget functionality.
Outputs a link to download HTML file after conversion if called in a
notebook environment.
Equivalent to running `nbinteract ${spec} ${nb_name}` on the command line.
Args:
spec (str): BinderHub spec for Jupyter image. Must be in the format:
`${username}/${repo}/${branch}`.
nb_name (str): Complete name of the notebook file to convert. Can be a
relative path (eg. './foo/test.ipynb').
Kwargs:
template (str): Template to use for conversion. Valid templates:
- 'full': Outputs a complete standalone HTML page with default
styling. Automatically loads the nbinteract JS library.
- 'partial': Outputs an HTML partial that can be embedded in
another page. Automatically loads the nbinteract JS library but
has no styling.
- 'plain': Outputs an HTML partial used to embed in an HTML page
where the nbinteract JS library is already loaded. Does not load
JS library or styling
save_first (bool): If True, saves the currently opened notebook before
converting nb_name. Used to ensure notebook is written to
filesystem before starting conversion. Does nothing if not in a
notebook environment.
Returns:
None
"""
if not os.path.isfile(nb_name):
raise ValueError(
"{} isn't a path to a file. Double check your "
"filename and try again.".format(nb_name)
)
if save_first:
_save_nb(nb_name)
print('Converting notebook...')
try:
check_output(
['nbinteract', '--template', template, '--spec', spec, nb_name],
stderr=STDOUT
)
except CalledProcessError as err:
logging.warning(
ERROR_MESSAGE.format(
filename=nb_name, error=str(err.output, 'utf-8')
)
)
return
html_filename = os.path.splitext(nb_name)[0] + '.html'
display(Markdown(CONVERT_SUCCESS_MD.format(url=html_filename))) | python | def publish(spec, nb_name, template='full', save_first=True):
"""
Converts nb_name to an HTML file. Preserves widget functionality.
Outputs a link to download HTML file after conversion if called in a
notebook environment.
Equivalent to running `nbinteract ${spec} ${nb_name}` on the command line.
Args:
spec (str): BinderHub spec for Jupyter image. Must be in the format:
`${username}/${repo}/${branch}`.
nb_name (str): Complete name of the notebook file to convert. Can be a
relative path (eg. './foo/test.ipynb').
Kwargs:
template (str): Template to use for conversion. Valid templates:
- 'full': Outputs a complete standalone HTML page with default
styling. Automatically loads the nbinteract JS library.
- 'partial': Outputs an HTML partial that can be embedded in
another page. Automatically loads the nbinteract JS library but
has no styling.
- 'plain': Outputs an HTML partial used to embed in an HTML page
where the nbinteract JS library is already loaded. Does not load
JS library or styling
save_first (bool): If True, saves the currently opened notebook before
converting nb_name. Used to ensure notebook is written to
filesystem before starting conversion. Does nothing if not in a
notebook environment.
Returns:
None
"""
if not os.path.isfile(nb_name):
raise ValueError(
"{} isn't a path to a file. Double check your "
"filename and try again.".format(nb_name)
)
if save_first:
_save_nb(nb_name)
print('Converting notebook...')
try:
check_output(
['nbinteract', '--template', template, '--spec', spec, nb_name],
stderr=STDOUT
)
except CalledProcessError as err:
logging.warning(
ERROR_MESSAGE.format(
filename=nb_name, error=str(err.output, 'utf-8')
)
)
return
html_filename = os.path.splitext(nb_name)[0] + '.html'
display(Markdown(CONVERT_SUCCESS_MD.format(url=html_filename))) | [
"def",
"publish",
"(",
"spec",
",",
"nb_name",
",",
"template",
"=",
"'full'",
",",
"save_first",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"nb_name",
")",
":",
"raise",
"ValueError",
"(",
"\"{} isn't a path to a file. Doub... | Converts nb_name to an HTML file. Preserves widget functionality.
Outputs a link to download HTML file after conversion if called in a
notebook environment.
Equivalent to running `nbinteract ${spec} ${nb_name}` on the command line.
Args:
spec (str): BinderHub spec for Jupyter image. Must be in the format:
`${username}/${repo}/${branch}`.
nb_name (str): Complete name of the notebook file to convert. Can be a
relative path (eg. './foo/test.ipynb').
Kwargs:
template (str): Template to use for conversion. Valid templates:
- 'full': Outputs a complete standalone HTML page with default
styling. Automatically loads the nbinteract JS library.
- 'partial': Outputs an HTML partial that can be embedded in
another page. Automatically loads the nbinteract JS library but
has no styling.
- 'plain': Outputs an HTML partial used to embed in an HTML page
where the nbinteract JS library is already loaded. Does not load
JS library or styling
save_first (bool): If True, saves the currently opened notebook before
converting nb_name. Used to ensure notebook is written to
filesystem before starting conversion. Does nothing if not in a
notebook environment.
Returns:
None | [
"Converts",
"nb_name",
"to",
"an",
"HTML",
"file",
".",
"Preserves",
"widget",
"functionality",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/exporters.py#L154-L215 |
SamLau95/nbinteract | nbinteract/exporters.py | _save_nb | def _save_nb(nb_name):
"""
Attempts to save notebook. If unsuccessful, shows a warning.
"""
display(Javascript('IPython.notebook.save_checkpoint();'))
display(Javascript('IPython.notebook.save_notebook();'))
print('Saving notebook...', end=' ')
if _wait_for_save(nb_name):
print("Saved '{}'.".format(nb_name))
else:
logging.warning(
"Could not save your notebook (timed out waiting for "
"IPython save). Make sure your notebook is saved "
"and export again."
) | python | def _save_nb(nb_name):
"""
Attempts to save notebook. If unsuccessful, shows a warning.
"""
display(Javascript('IPython.notebook.save_checkpoint();'))
display(Javascript('IPython.notebook.save_notebook();'))
print('Saving notebook...', end=' ')
if _wait_for_save(nb_name):
print("Saved '{}'.".format(nb_name))
else:
logging.warning(
"Could not save your notebook (timed out waiting for "
"IPython save). Make sure your notebook is saved "
"and export again."
) | [
"def",
"_save_nb",
"(",
"nb_name",
")",
":",
"display",
"(",
"Javascript",
"(",
"'IPython.notebook.save_checkpoint();'",
")",
")",
"display",
"(",
"Javascript",
"(",
"'IPython.notebook.save_notebook();'",
")",
")",
"print",
"(",
"'Saving notebook...'",
",",
"end",
"... | Attempts to save notebook. If unsuccessful, shows a warning. | [
"Attempts",
"to",
"save",
"notebook",
".",
"If",
"unsuccessful",
"shows",
"a",
"warning",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/exporters.py#L218-L233 |
SamLau95/nbinteract | nbinteract/exporters.py | _wait_for_save | def _wait_for_save(nb_name, timeout=5):
"""Waits for nb_name to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise.
"""
modification_time = os.path.getmtime(nb_name)
start_time = time.time()
while time.time() < start_time + timeout:
if (
os.path.getmtime(nb_name) > modification_time
and os.path.getsize(nb_name) > 0
):
return True
time.sleep(0.2)
return False | python | def _wait_for_save(nb_name, timeout=5):
"""Waits for nb_name to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise.
"""
modification_time = os.path.getmtime(nb_name)
start_time = time.time()
while time.time() < start_time + timeout:
if (
os.path.getmtime(nb_name) > modification_time
and os.path.getsize(nb_name) > 0
):
return True
time.sleep(0.2)
return False | [
"def",
"_wait_for_save",
"(",
"nb_name",
",",
"timeout",
"=",
"5",
")",
":",
"modification_time",
"=",
"os",
".",
"path",
".",
"getmtime",
"(",
"nb_name",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"while",
"time",
".",
"time",
"(",
")",
... | Waits for nb_name to update, waiting up to TIMEOUT seconds.
Returns True if a save was detected, and False otherwise. | [
"Waits",
"for",
"nb_name",
"to",
"update",
"waiting",
"up",
"to",
"TIMEOUT",
"seconds",
".",
"Returns",
"True",
"if",
"a",
"save",
"was",
"detected",
"and",
"False",
"otherwise",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/exporters.py#L236-L249 |
SamLau95/nbinteract | nbinteract/util.py | maybe_call | def maybe_call(maybe_fn, kwargs: dict, prefix: str = None) -> 'Any':
"""
If maybe_fn is a function, get its arguments from kwargs and call it, also
searching for prefixed kwargs if prefix is specified. Otherwise, return
maybe_fn.
Used to allow both functions and iterables to be passed into plotting
functions.
>>> def square(x): return x * x
>>> maybe_call(square, {'x': 10})
100
>>> data = [1, 2, 3]
>>> maybe_call(data, {'x': 10})
[1, 2, 3]
"""
if not callable(maybe_fn):
return maybe_fn
args = get_fn_args(maybe_fn, kwargs, prefix=prefix)
return maybe_fn(**args) | python | def maybe_call(maybe_fn, kwargs: dict, prefix: str = None) -> 'Any':
"""
If maybe_fn is a function, get its arguments from kwargs and call it, also
searching for prefixed kwargs if prefix is specified. Otherwise, return
maybe_fn.
Used to allow both functions and iterables to be passed into plotting
functions.
>>> def square(x): return x * x
>>> maybe_call(square, {'x': 10})
100
>>> data = [1, 2, 3]
>>> maybe_call(data, {'x': 10})
[1, 2, 3]
"""
if not callable(maybe_fn):
return maybe_fn
args = get_fn_args(maybe_fn, kwargs, prefix=prefix)
return maybe_fn(**args) | [
"def",
"maybe_call",
"(",
"maybe_fn",
",",
"kwargs",
":",
"dict",
",",
"prefix",
":",
"str",
"=",
"None",
")",
"->",
"'Any'",
":",
"if",
"not",
"callable",
"(",
"maybe_fn",
")",
":",
"return",
"maybe_fn",
"args",
"=",
"get_fn_args",
"(",
"maybe_fn",
",... | If maybe_fn is a function, get its arguments from kwargs and call it, also
searching for prefixed kwargs if prefix is specified. Otherwise, return
maybe_fn.
Used to allow both functions and iterables to be passed into plotting
functions.
>>> def square(x): return x * x
>>> maybe_call(square, {'x': 10})
100
>>> data = [1, 2, 3]
>>> maybe_call(data, {'x': 10})
[1, 2, 3] | [
"If",
"maybe_fn",
"is",
"a",
"function",
"get",
"its",
"arguments",
"from",
"kwargs",
"and",
"call",
"it",
"also",
"searching",
"for",
"prefixed",
"kwargs",
"if",
"prefix",
"is",
"specified",
".",
"Otherwise",
"return",
"maybe_fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/util.py#L12-L33 |
SamLau95/nbinteract | nbinteract/util.py | maybe_curry | def maybe_curry(maybe_fn, first_arg) -> 'Function | Any':
"""
If maybe_fn is a function, curries it and passes in first_arg. Otherwise
returns maybe_fn.
"""
if not callable(maybe_fn):
return maybe_fn
return tz.curry(maybe_fn)(first_arg) | python | def maybe_curry(maybe_fn, first_arg) -> 'Function | Any':
"""
If maybe_fn is a function, curries it and passes in first_arg. Otherwise
returns maybe_fn.
"""
if not callable(maybe_fn):
return maybe_fn
return tz.curry(maybe_fn)(first_arg) | [
"def",
"maybe_curry",
"(",
"maybe_fn",
",",
"first_arg",
")",
"->",
"'Function | Any'",
":",
"if",
"not",
"callable",
"(",
"maybe_fn",
")",
":",
"return",
"maybe_fn",
"return",
"tz",
".",
"curry",
"(",
"maybe_fn",
")",
"(",
"first_arg",
")"
] | If maybe_fn is a function, curries it and passes in first_arg. Otherwise
returns maybe_fn. | [
"If",
"maybe_fn",
"is",
"a",
"function",
"curries",
"it",
"and",
"passes",
"in",
"first_arg",
".",
"Otherwise",
"returns",
"maybe_fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/util.py#L36-L43 |
SamLau95/nbinteract | nbinteract/util.py | get_fn_args | def get_fn_args(fn, kwargs: dict, prefix: str = None):
"""
Given function and a dict of kwargs return a dict containing only the args
used by the function.
If prefix is specified, also search for args that begin with '{prefix}__'.
Removes prefix in returned dict.
Raises ValueError if a required arg is missing from the kwargs.
Raises ValueError if both prefixed and unprefixed arg are given in kwargs.
>>> from pprint import pprint as p # Use pprint to sort dict keys
>>> kwargs = {'a': 1, 'b': 2, 'c': 3, 'x__d': 4}
>>> def foo(a, b=10): return a + b
>>> p(get_fn_args(foo, kwargs))
{'a': 1, 'b': 2}
>>> def bar(a, b, d): return a + b + d
>>> p(get_fn_args(bar, kwargs, prefix='x'))
{'a': 1, 'b': 2, 'd': 4}
>>> p(get_fn_args(bar, kwargs))
Traceback (most recent call last):
ValueError: The following args are missing for the function bar: ['d']
"""
all_args = get_all_args(fn)
required_args = get_required_args(fn)
fn_kwargs = pick_kwargs(kwargs, all_args, prefix)
missing_args = [arg for arg in required_args if arg not in fn_kwargs]
if missing_args:
raise ValueError(
'The following args are missing for the function '
'{}: {}.'.format(fn.__name__, missing_args)
)
return fn_kwargs | python | def get_fn_args(fn, kwargs: dict, prefix: str = None):
"""
Given function and a dict of kwargs return a dict containing only the args
used by the function.
If prefix is specified, also search for args that begin with '{prefix}__'.
Removes prefix in returned dict.
Raises ValueError if a required arg is missing from the kwargs.
Raises ValueError if both prefixed and unprefixed arg are given in kwargs.
>>> from pprint import pprint as p # Use pprint to sort dict keys
>>> kwargs = {'a': 1, 'b': 2, 'c': 3, 'x__d': 4}
>>> def foo(a, b=10): return a + b
>>> p(get_fn_args(foo, kwargs))
{'a': 1, 'b': 2}
>>> def bar(a, b, d): return a + b + d
>>> p(get_fn_args(bar, kwargs, prefix='x'))
{'a': 1, 'b': 2, 'd': 4}
>>> p(get_fn_args(bar, kwargs))
Traceback (most recent call last):
ValueError: The following args are missing for the function bar: ['d']
"""
all_args = get_all_args(fn)
required_args = get_required_args(fn)
fn_kwargs = pick_kwargs(kwargs, all_args, prefix)
missing_args = [arg for arg in required_args if arg not in fn_kwargs]
if missing_args:
raise ValueError(
'The following args are missing for the function '
'{}: {}.'.format(fn.__name__, missing_args)
)
return fn_kwargs | [
"def",
"get_fn_args",
"(",
"fn",
",",
"kwargs",
":",
"dict",
",",
"prefix",
":",
"str",
"=",
"None",
")",
":",
"all_args",
"=",
"get_all_args",
"(",
"fn",
")",
"required_args",
"=",
"get_required_args",
"(",
"fn",
")",
"fn_kwargs",
"=",
"pick_kwargs",
"(... | Given function and a dict of kwargs return a dict containing only the args
used by the function.
If prefix is specified, also search for args that begin with '{prefix}__'.
Removes prefix in returned dict.
Raises ValueError if a required arg is missing from the kwargs.
Raises ValueError if both prefixed and unprefixed arg are given in kwargs.
>>> from pprint import pprint as p # Use pprint to sort dict keys
>>> kwargs = {'a': 1, 'b': 2, 'c': 3, 'x__d': 4}
>>> def foo(a, b=10): return a + b
>>> p(get_fn_args(foo, kwargs))
{'a': 1, 'b': 2}
>>> def bar(a, b, d): return a + b + d
>>> p(get_fn_args(bar, kwargs, prefix='x'))
{'a': 1, 'b': 2, 'd': 4}
>>> p(get_fn_args(bar, kwargs))
Traceback (most recent call last):
ValueError: The following args are missing for the function bar: ['d'] | [
"Given",
"function",
"and",
"a",
"dict",
"of",
"kwargs",
"return",
"a",
"dict",
"containing",
"only",
"the",
"args",
"used",
"by",
"the",
"function",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/util.py#L51-L88 |
SamLau95/nbinteract | nbinteract/util.py | get_all_args | def get_all_args(fn) -> list:
"""
Returns a list of all arguments for the function fn.
>>> def foo(x, y, z=100): return x + y + z
>>> get_all_args(foo)
['x', 'y', 'z']
"""
sig = inspect.signature(fn)
return list(sig.parameters) | python | def get_all_args(fn) -> list:
"""
Returns a list of all arguments for the function fn.
>>> def foo(x, y, z=100): return x + y + z
>>> get_all_args(foo)
['x', 'y', 'z']
"""
sig = inspect.signature(fn)
return list(sig.parameters) | [
"def",
"get_all_args",
"(",
"fn",
")",
"->",
"list",
":",
"sig",
"=",
"inspect",
".",
"signature",
"(",
"fn",
")",
"return",
"list",
"(",
"sig",
".",
"parameters",
")"
] | Returns a list of all arguments for the function fn.
>>> def foo(x, y, z=100): return x + y + z
>>> get_all_args(foo)
['x', 'y', 'z'] | [
"Returns",
"a",
"list",
"of",
"all",
"arguments",
"for",
"the",
"function",
"fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/util.py#L91-L100 |
SamLau95/nbinteract | nbinteract/util.py | get_required_args | def get_required_args(fn) -> list:
"""
Returns a list of required arguments for the function fn.
>>> def foo(x, y, z=100): return x + y + z
>>> get_required_args(foo)
['x', 'y']
>>> def bar(x, y=100, *args, **kwargs): return x
>>> get_required_args(bar)
['x']
"""
sig = inspect.signature(fn)
return [
name for name, param in sig.parameters.items()
if param.default == inspect._empty and param.kind not in VAR_ARGS
] | python | def get_required_args(fn) -> list:
"""
Returns a list of required arguments for the function fn.
>>> def foo(x, y, z=100): return x + y + z
>>> get_required_args(foo)
['x', 'y']
>>> def bar(x, y=100, *args, **kwargs): return x
>>> get_required_args(bar)
['x']
"""
sig = inspect.signature(fn)
return [
name for name, param in sig.parameters.items()
if param.default == inspect._empty and param.kind not in VAR_ARGS
] | [
"def",
"get_required_args",
"(",
"fn",
")",
"->",
"list",
":",
"sig",
"=",
"inspect",
".",
"signature",
"(",
"fn",
")",
"return",
"[",
"name",
"for",
"name",
",",
"param",
"in",
"sig",
".",
"parameters",
".",
"items",
"(",
")",
"if",
"param",
".",
... | Returns a list of required arguments for the function fn.
>>> def foo(x, y, z=100): return x + y + z
>>> get_required_args(foo)
['x', 'y']
>>> def bar(x, y=100, *args, **kwargs): return x
>>> get_required_args(bar)
['x'] | [
"Returns",
"a",
"list",
"of",
"required",
"arguments",
"for",
"the",
"function",
"fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/util.py#L103-L119 |
SamLau95/nbinteract | nbinteract/util.py | pick_kwargs | def pick_kwargs(kwargs: dict, required_args: list, prefix: str = None):
"""
Given a dict of kwargs and a list of required_args, return a dict
containing only the args in required_args.
If prefix is specified, also search for args that begin with '{prefix}__'.
Removes prefix in returned dict.
Raises ValueError if both prefixed and unprefixed arg are given in kwargs.
>>> from pprint import pprint as p # Use pprint to sort dict keys
>>> kwargs = {'a': 1, 'b': 2, 'c': 3, 'x__d': 4}
>>> p(pick_kwargs(kwargs, ['a', 'd']))
{'a': 1}
>>> p(pick_kwargs(kwargs, ['a', 'd'], prefix='x'))
{'a': 1, 'd': 4}
>>> pick_kwargs({'a': 1, 'x__a': 2}, ['a'], prefix='x')
Traceback (most recent call last):
ValueError: Both prefixed and unprefixed args were specified for the
following parameters: ['a']
"""
picked = {k: v for k, v in kwargs.items() if k in required_args}
prefixed = {}
if prefix:
prefix = prefix + '__'
prefixed = {
_remove_prefix(k, prefix): v
for k, v in kwargs.items()
if k.startswith(prefix)
and _remove_prefix(k, prefix) in required_args
}
conflicting_args = [k for k in picked if k in prefixed]
if conflicting_args:
raise ValueError(
'Both prefixed and unprefixed args were specified '
'for the following parameters: {}'.format(conflicting_args)
)
return tz.merge(picked, prefixed) | python | def pick_kwargs(kwargs: dict, required_args: list, prefix: str = None):
"""
Given a dict of kwargs and a list of required_args, return a dict
containing only the args in required_args.
If prefix is specified, also search for args that begin with '{prefix}__'.
Removes prefix in returned dict.
Raises ValueError if both prefixed and unprefixed arg are given in kwargs.
>>> from pprint import pprint as p # Use pprint to sort dict keys
>>> kwargs = {'a': 1, 'b': 2, 'c': 3, 'x__d': 4}
>>> p(pick_kwargs(kwargs, ['a', 'd']))
{'a': 1}
>>> p(pick_kwargs(kwargs, ['a', 'd'], prefix='x'))
{'a': 1, 'd': 4}
>>> pick_kwargs({'a': 1, 'x__a': 2}, ['a'], prefix='x')
Traceback (most recent call last):
ValueError: Both prefixed and unprefixed args were specified for the
following parameters: ['a']
"""
picked = {k: v for k, v in kwargs.items() if k in required_args}
prefixed = {}
if prefix:
prefix = prefix + '__'
prefixed = {
_remove_prefix(k, prefix): v
for k, v in kwargs.items()
if k.startswith(prefix)
and _remove_prefix(k, prefix) in required_args
}
conflicting_args = [k for k in picked if k in prefixed]
if conflicting_args:
raise ValueError(
'Both prefixed and unprefixed args were specified '
'for the following parameters: {}'.format(conflicting_args)
)
return tz.merge(picked, prefixed) | [
"def",
"pick_kwargs",
"(",
"kwargs",
":",
"dict",
",",
"required_args",
":",
"list",
",",
"prefix",
":",
"str",
"=",
"None",
")",
":",
"picked",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"k",
"... | Given a dict of kwargs and a list of required_args, return a dict
containing only the args in required_args.
If prefix is specified, also search for args that begin with '{prefix}__'.
Removes prefix in returned dict.
Raises ValueError if both prefixed and unprefixed arg are given in kwargs.
>>> from pprint import pprint as p # Use pprint to sort dict keys
>>> kwargs = {'a': 1, 'b': 2, 'c': 3, 'x__d': 4}
>>> p(pick_kwargs(kwargs, ['a', 'd']))
{'a': 1}
>>> p(pick_kwargs(kwargs, ['a', 'd'], prefix='x'))
{'a': 1, 'd': 4}
>>> pick_kwargs({'a': 1, 'x__a': 2}, ['a'], prefix='x')
Traceback (most recent call last):
ValueError: Both prefixed and unprefixed args were specified for the
following parameters: ['a'] | [
"Given",
"a",
"dict",
"of",
"kwargs",
"and",
"a",
"list",
"of",
"required_args",
"return",
"a",
"dict",
"containing",
"only",
"the",
"args",
"in",
"required_args",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/util.py#L122-L163 |
SamLau95/nbinteract | nbinteract/plotting.py | _update_option_docstring | def _update_option_docstring(func, allowed, indent=' ' * 3):
"""
Updates docstring of func, filling in appearance of {options} with a
description of the options.
>>> def f(): '''{options}'''
>>> f = _update_option_docstring(f, ['title', 'xlim'])
>>> print(f.__doc__)
options (dict): Options for the plot. Available options:
<BLANKLINE>
title: Title of the plot
xlim: Tuple containing (lower, upper) for x-axis
<BLANKLINE>
"""
if not (func.__doc__ and '{options}' in func.__doc__):
return func
descriptions = [option + ': ' + option_doc[option] for option in allowed]
full_desc = ('\n' + indent).join(descriptions)
func.__doc__ = func.__doc__.format(
options=options_docstring.format(desc=full_desc)
)
return func | python | def _update_option_docstring(func, allowed, indent=' ' * 3):
"""
Updates docstring of func, filling in appearance of {options} with a
description of the options.
>>> def f(): '''{options}'''
>>> f = _update_option_docstring(f, ['title', 'xlim'])
>>> print(f.__doc__)
options (dict): Options for the plot. Available options:
<BLANKLINE>
title: Title of the plot
xlim: Tuple containing (lower, upper) for x-axis
<BLANKLINE>
"""
if not (func.__doc__ and '{options}' in func.__doc__):
return func
descriptions = [option + ': ' + option_doc[option] for option in allowed]
full_desc = ('\n' + indent).join(descriptions)
func.__doc__ = func.__doc__.format(
options=options_docstring.format(desc=full_desc)
)
return func | [
"def",
"_update_option_docstring",
"(",
"func",
",",
"allowed",
",",
"indent",
"=",
"' '",
"*",
"3",
")",
":",
"if",
"not",
"(",
"func",
".",
"__doc__",
"and",
"'{options}'",
"in",
"func",
".",
"__doc__",
")",
":",
"return",
"func",
"descriptions",
"=... | Updates docstring of func, filling in appearance of {options} with a
description of the options.
>>> def f(): '''{options}'''
>>> f = _update_option_docstring(f, ['title', 'xlim'])
>>> print(f.__doc__)
options (dict): Options for the plot. Available options:
<BLANKLINE>
title: Title of the plot
xlim: Tuple containing (lower, upper) for x-axis
<BLANKLINE> | [
"Updates",
"docstring",
"of",
"func",
"filling",
"in",
"appearance",
"of",
"{",
"options",
"}",
"with",
"a",
"description",
"of",
"the",
"options",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L98-L120 |
SamLau95/nbinteract | nbinteract/plotting.py | use_options | def use_options(allowed):
"""
Decorator that logs warnings when unpermitted options are passed into its
wrapped function.
Requires that wrapped function has an keyword-only argument named
`options`. If wrapped function has {options} in its docstring, fills in
with the docs for allowed options.
Args:
allowed (list str): list of option keys allowed. If the wrapped
function is called with an option not in allowed, log a warning.
All values in allowed must also be present in `defaults`.
Returns:
Wrapped function with options validation.
>>> @use_options(['title'])
... def test(*, options={}): return options['title']
>>> test(options={'title': 'Hello'})
'Hello'
>>> # test(options={'not_allowed': 123}) # Also logs error message
''
"""
def update_docstring(f):
_update_option_docstring(f, allowed)
@functools.wraps(f)
def check_options(*args, **kwargs):
options = kwargs.get('options', {})
not_allowed = [
option for option in options if option not in allowed
# Don't validate private options
and not option.startswith('_')
]
if not_allowed:
logging.warning(
'The following options are not supported by '
'this function and will likely result in '
'undefined behavior: {}.'.format(not_allowed)
)
return f(*args, **kwargs)
return check_options
return update_docstring | python | def use_options(allowed):
"""
Decorator that logs warnings when unpermitted options are passed into its
wrapped function.
Requires that wrapped function has an keyword-only argument named
`options`. If wrapped function has {options} in its docstring, fills in
with the docs for allowed options.
Args:
allowed (list str): list of option keys allowed. If the wrapped
function is called with an option not in allowed, log a warning.
All values in allowed must also be present in `defaults`.
Returns:
Wrapped function with options validation.
>>> @use_options(['title'])
... def test(*, options={}): return options['title']
>>> test(options={'title': 'Hello'})
'Hello'
>>> # test(options={'not_allowed': 123}) # Also logs error message
''
"""
def update_docstring(f):
_update_option_docstring(f, allowed)
@functools.wraps(f)
def check_options(*args, **kwargs):
options = kwargs.get('options', {})
not_allowed = [
option for option in options if option not in allowed
# Don't validate private options
and not option.startswith('_')
]
if not_allowed:
logging.warning(
'The following options are not supported by '
'this function and will likely result in '
'undefined behavior: {}.'.format(not_allowed)
)
return f(*args, **kwargs)
return check_options
return update_docstring | [
"def",
"use_options",
"(",
"allowed",
")",
":",
"def",
"update_docstring",
"(",
"f",
")",
":",
"_update_option_docstring",
"(",
"f",
",",
"allowed",
")",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"check_options",
"(",
"*",
"args",
",",
"*",
... | Decorator that logs warnings when unpermitted options are passed into its
wrapped function.
Requires that wrapped function has an keyword-only argument named
`options`. If wrapped function has {options} in its docstring, fills in
with the docs for allowed options.
Args:
allowed (list str): list of option keys allowed. If the wrapped
function is called with an option not in allowed, log a warning.
All values in allowed must also be present in `defaults`.
Returns:
Wrapped function with options validation.
>>> @use_options(['title'])
... def test(*, options={}): return options['title']
>>> test(options={'title': 'Hello'})
'Hello'
>>> # test(options={'not_allowed': 123}) # Also logs error message
'' | [
"Decorator",
"that",
"logs",
"warnings",
"when",
"unpermitted",
"options",
"are",
"passed",
"into",
"its",
"wrapped",
"function",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L123-L172 |
SamLau95/nbinteract | nbinteract/plotting.py | hist | def hist(hist_function, *, options={}, **interact_params):
"""
Generates an interactive histogram that allows users to change the
parameters of the input hist_function.
Args:
hist_function (Array | (*args -> Array int | Array float)):
Function that takes in parameters to interact with and returns an
array of numbers. These numbers will be plotted in the resulting
histogram.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of `hist_function`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> def gen_random(n_points):
... return np.random.normal(size=n_points)
>>> hist(gen_random, n_points=(0, 1000, 10))
VBox(...)
"""
params = {
'marks': [{
'sample': _array_or_placeholder(hist_function),
'bins': _get_option('bins'),
'normalized': _get_option('normalized'),
'scales': (
lambda opts: {'sample': opts['x_sc'], 'count': opts['y_sc']}
),
}],
}
fig = options.get('_fig', False) or _create_fig(options=options)
[hist] = _create_marks(
fig=fig, marks=[bq.Hist], options=options, params=params
)
_add_marks(fig, [hist])
def wrapped(**interact_params):
hist.sample = util.maybe_call(hist_function, interact_params)
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | python | def hist(hist_function, *, options={}, **interact_params):
"""
Generates an interactive histogram that allows users to change the
parameters of the input hist_function.
Args:
hist_function (Array | (*args -> Array int | Array float)):
Function that takes in parameters to interact with and returns an
array of numbers. These numbers will be plotted in the resulting
histogram.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of `hist_function`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> def gen_random(n_points):
... return np.random.normal(size=n_points)
>>> hist(gen_random, n_points=(0, 1000, 10))
VBox(...)
"""
params = {
'marks': [{
'sample': _array_or_placeholder(hist_function),
'bins': _get_option('bins'),
'normalized': _get_option('normalized'),
'scales': (
lambda opts: {'sample': opts['x_sc'], 'count': opts['y_sc']}
),
}],
}
fig = options.get('_fig', False) or _create_fig(options=options)
[hist] = _create_marks(
fig=fig, marks=[bq.Hist], options=options, params=params
)
_add_marks(fig, [hist])
def wrapped(**interact_params):
hist.sample = util.maybe_call(hist_function, interact_params)
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | [
"def",
"hist",
"(",
"hist_function",
",",
"*",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"interact_params",
")",
":",
"params",
"=",
"{",
"'marks'",
":",
"[",
"{",
"'sample'",
":",
"_array_or_placeholder",
"(",
"hist_function",
")",
",",
"'bins'",
... | Generates an interactive histogram that allows users to change the
parameters of the input hist_function.
Args:
hist_function (Array | (*args -> Array int | Array float)):
Function that takes in parameters to interact with and returns an
array of numbers. These numbers will be plotted in the resulting
histogram.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of `hist_function`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> def gen_random(n_points):
... return np.random.normal(size=n_points)
>>> hist(gen_random, n_points=(0, 1000, 10))
VBox(...) | [
"Generates",
"an",
"interactive",
"histogram",
"that",
"allows",
"users",
"to",
"change",
"the",
"parameters",
"of",
"the",
"input",
"hist_function",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L182-L229 |
SamLau95/nbinteract | nbinteract/plotting.py | bar | def bar(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive bar chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for categories of bar chart.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the categories on
the x-axis of the bar chart.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for heights of bars.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the heights of the bars on the
y-axis.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> bar(['a', 'b', 'c'], [4, 7, 10])
VBox(...)
>>> def categories(n): return np.arange(n)
>>> def heights(xs, offset):
... return xs + offset
>>> bar(categories, heights, n=(0, 10), offset=(1, 10))
VBox(...)
>>> def multiply(xs, n):
... return xs * n
>>> bar(categories, multiply, x__n=(0, 10), y__n=(1, 10))
VBox(...)
"""
params = {
'marks': [{
'x': _array_or_placeholder(x_fn, PLACEHOLDER_RANGE),
'y': _array_or_placeholder(y_fn)
}]
}
fig = options.get('_fig', False) or _create_fig(
x_sc=bq.OrdinalScale, options=options
)
[bar] = _create_marks(
fig=fig, marks=[bq.Bars], options=options, params=params
)
_add_marks(fig, [bar])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
bar.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
bar.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | python | def bar(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive bar chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for categories of bar chart.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the categories on
the x-axis of the bar chart.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for heights of bars.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the heights of the bars on the
y-axis.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> bar(['a', 'b', 'c'], [4, 7, 10])
VBox(...)
>>> def categories(n): return np.arange(n)
>>> def heights(xs, offset):
... return xs + offset
>>> bar(categories, heights, n=(0, 10), offset=(1, 10))
VBox(...)
>>> def multiply(xs, n):
... return xs * n
>>> bar(categories, multiply, x__n=(0, 10), y__n=(1, 10))
VBox(...)
"""
params = {
'marks': [{
'x': _array_or_placeholder(x_fn, PLACEHOLDER_RANGE),
'y': _array_or_placeholder(y_fn)
}]
}
fig = options.get('_fig', False) or _create_fig(
x_sc=bq.OrdinalScale, options=options
)
[bar] = _create_marks(
fig=fig, marks=[bq.Bars], options=options, params=params
)
_add_marks(fig, [bar])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
bar.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
bar.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | [
"def",
"bar",
"(",
"x_fn",
",",
"y_fn",
",",
"*",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"interact_params",
")",
":",
"params",
"=",
"{",
"'marks'",
":",
"[",
"{",
"'x'",
":",
"_array_or_placeholder",
"(",
"x_fn",
",",
"PLACEHOLDER_RANGE",
")"... | Generates an interactive bar chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for categories of bar chart.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the categories on
the x-axis of the bar chart.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for heights of bars.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the heights of the bars on the
y-axis.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> bar(['a', 'b', 'c'], [4, 7, 10])
VBox(...)
>>> def categories(n): return np.arange(n)
>>> def heights(xs, offset):
... return xs + offset
>>> bar(categories, heights, n=(0, 10), offset=(1, 10))
VBox(...)
>>> def multiply(xs, n):
... return xs * n
>>> bar(categories, multiply, x__n=(0, 10), y__n=(1, 10))
VBox(...) | [
"Generates",
"an",
"interactive",
"bar",
"chart",
"that",
"allows",
"users",
"to",
"change",
"the",
"parameters",
"of",
"the",
"inputs",
"x_fn",
"and",
"y_fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L235-L305 |
SamLau95/nbinteract | nbinteract/plotting.py | scatter_drag | def scatter_drag(
x_points: 'Array',
y_points: 'Array',
*,
fig=None,
show_eqn=True,
options={}
):
"""
Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...)
"""
params = {
'marks': [{
'x': x_points,
'y': y_points,
'enable_move': True,
}, {
'colors': [GOLDENROD],
}]
}
fig = options.get('_fig', False) or _create_fig(options=options)
[scat, lin] = _create_marks(
fig=fig, marks=[bq.Scatter, bq.Lines], options=options, params=params
)
_add_marks(fig, [scat, lin])
equation = widgets.Label()
# create line fit to data and display equation
def update_line(change=None):
x_sc = scat.scales['x']
lin.x = [
x_sc.min if x_sc.min is not None else np.min(scat.x),
x_sc.max if x_sc.max is not None else np.max(scat.x),
]
poly = np.polyfit(scat.x, scat.y, deg=1)
lin.y = np.polyval(poly, lin.x)
if show_eqn:
equation.value = 'y = {:.2f}x + {:.2f}'.format(poly[0], poly[1])
update_line()
scat.observe(update_line, names=['x', 'y'])
return widgets.VBox([equation, fig]) | python | def scatter_drag(
x_points: 'Array',
y_points: 'Array',
*,
fig=None,
show_eqn=True,
options={}
):
"""
Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...)
"""
params = {
'marks': [{
'x': x_points,
'y': y_points,
'enable_move': True,
}, {
'colors': [GOLDENROD],
}]
}
fig = options.get('_fig', False) or _create_fig(options=options)
[scat, lin] = _create_marks(
fig=fig, marks=[bq.Scatter, bq.Lines], options=options, params=params
)
_add_marks(fig, [scat, lin])
equation = widgets.Label()
# create line fit to data and display equation
def update_line(change=None):
x_sc = scat.scales['x']
lin.x = [
x_sc.min if x_sc.min is not None else np.min(scat.x),
x_sc.max if x_sc.max is not None else np.max(scat.x),
]
poly = np.polyfit(scat.x, scat.y, deg=1)
lin.y = np.polyval(poly, lin.x)
if show_eqn:
equation.value = 'y = {:.2f}x + {:.2f}'.format(poly[0], poly[1])
update_line()
scat.observe(update_line, names=['x', 'y'])
return widgets.VBox([equation, fig]) | [
"def",
"scatter_drag",
"(",
"x_points",
":",
"'Array'",
",",
"y_points",
":",
"'Array'",
",",
"*",
",",
"fig",
"=",
"None",
",",
"show_eqn",
"=",
"True",
",",
"options",
"=",
"{",
"}",
")",
":",
"params",
"=",
"{",
"'marks'",
":",
"[",
"{",
"'x'",
... | Generates an interactive scatter plot with the best fit line plotted over
the points. The points can be dragged by the user and the line will
automatically update.
Args:
x_points (Array Number): x-values of points to plot
y_points (Array Number): y-values of points to plot
Kwargs:
show_eqn (bool): If True (default), displays the best fit line's
equation above the scatterplot.
{options}
Returns:
VBox with two children: the equation widget and the figure.
>>> xs = np.arange(10)
>>> ys = np.arange(10) + np.random.rand(10)
>>> scatter_drag(xs, ys)
VBox(...) | [
"Generates",
"an",
"interactive",
"scatter",
"plot",
"with",
"the",
"best",
"fit",
"line",
"plotted",
"over",
"the",
"points",
".",
"The",
"points",
"can",
"be",
"dragged",
"by",
"the",
"user",
"and",
"the",
"line",
"will",
"automatically",
"update",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L312-L377 |
SamLau95/nbinteract | nbinteract/plotting.py | scatter | def scatter(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive scatter chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the scatter plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the
scatter plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> def x_values(n): return np.random.choice(100, n)
>>> def y_values(xs): return np.random.choice(100, len(xs))
>>> scatter(x_values, y_values, n=(0,200))
VBox(...)
"""
params = {
'marks': [{
'x': _array_or_placeholder(x_fn),
'y': _array_or_placeholder(y_fn),
'marker': _get_option('marker'),
}]
}
fig = options.get('_fig', False) or _create_fig(options=options)
[scat] = _create_marks(
fig=fig, marks=[bq.Scatter], options=options, params=params
)
_add_marks(fig, [scat])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
scat.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
scat.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | python | def scatter(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive scatter chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the scatter plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the
scatter plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> def x_values(n): return np.random.choice(100, n)
>>> def y_values(xs): return np.random.choice(100, len(xs))
>>> scatter(x_values, y_values, n=(0,200))
VBox(...)
"""
params = {
'marks': [{
'x': _array_or_placeholder(x_fn),
'y': _array_or_placeholder(y_fn),
'marker': _get_option('marker'),
}]
}
fig = options.get('_fig', False) or _create_fig(options=options)
[scat] = _create_marks(
fig=fig, marks=[bq.Scatter], options=options, params=params
)
_add_marks(fig, [scat])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
scat.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
scat.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | [
"def",
"scatter",
"(",
"x_fn",
",",
"y_fn",
",",
"*",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"interact_params",
")",
":",
"params",
"=",
"{",
"'marks'",
":",
"[",
"{",
"'x'",
":",
"_array_or_placeholder",
"(",
"x_fn",
")",
",",
"'y'",
":",
... | Generates an interactive scatter chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the scatter plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the
scatter plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> def x_values(n): return np.random.choice(100, n)
>>> def y_values(xs): return np.random.choice(100, len(xs))
>>> scatter(x_values, y_values, n=(0,200))
VBox(...) | [
"Generates",
"an",
"interactive",
"scatter",
"chart",
"that",
"allows",
"users",
"to",
"change",
"the",
"parameters",
"of",
"the",
"inputs",
"x_fn",
"and",
"y_fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L384-L444 |
SamLau95/nbinteract | nbinteract/plotting.py | line | def line(x_fn, y_fn, *, options={}, **interact_params):
"""
Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...)
"""
fig = options.get('_fig', False) or _create_fig(options=options)
[line] = (_create_marks(fig=fig, marks=[bq.Lines], options=options))
_add_marks(fig, [line])
def wrapped(**interact_params):
x_data = util.maybe_call(x_fn, interact_params, prefix='x')
line.x = x_data
y_bound = util.maybe_curry(y_fn, x_data)
line.y = util.maybe_call(y_bound, interact_params, prefix='y')
controls = widgets.interactive(wrapped, **interact_params)
return widgets.VBox([controls, fig]) | python | def line(x_fn, y_fn, *, options={}, **interact_params):
    """
    Generates an interactive line chart that allows users to change the
    parameters of the inputs x_fn and y_fn.
    Args:
        x_fn (Array | (*args -> Array str | Array int | Array float)):
            If array, uses array values for x-coordinates.
            If function, must take parameters to interact with and return an
            array of strings or numbers. These will become the x-coordinates
            of the line plot.
        y_fn (Array | (Array, *args -> Array int | Array float)):
            If array, uses array values for y-coordinates.
            If function, must take in the output of x_fn as its first parameter
            and optionally other parameters to interact with. Must return an
            array of numbers. These will become the y-coordinates of the line
            plot.
    Kwargs:
        {options}
        interact_params (dict): Keyword arguments in the same format as
            `ipywidgets.interact`. One argument is required for each argument
            of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
            parameter names, prefix the corresponding kwargs with `x__` and
            `y__`.
    Returns:
        VBox with two children: the interactive controls and the figure.
    >>> line([1, 2, 3], [4, 7, 10])
    VBox(...)
    >>> def x_values(max): return np.arange(0, max)
    >>> def y_values(xs, sd):
    ...     return xs + np.random.normal(len(xs), scale=sd)
    >>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
    VBox(...)
    """
    # NOTE(review): `options={}` is a mutable default argument; it is only
    # read here (never mutated), so sharing is currently harmless, but a
    # None sentinel would be safer.
    # Reuse a caller-supplied figure (private '_fig' option) or create one.
    fig = options.get('_fig', False) or _create_fig(options=options)
    [line] = (_create_marks(fig=fig, marks=[bq.Lines], options=options))
    _add_marks(fig, [line])
    # Closure run by ipywidgets whenever a control changes: recompute the
    # x/y data and mutate the existing mark in place so bqplot re-renders.
    def wrapped(**interact_params):
        # maybe_call: call x_fn with its matching kwargs if it is a
        # function, otherwise use the array as-is (per the docstring above).
        x_data = util.maybe_call(x_fn, interact_params, prefix='x')
        line.x = x_data
        # Bind the freshly computed x data as y_fn's first argument when
        # y_fn is callable, then resolve it the same way.
        y_bound = util.maybe_curry(y_fn, x_data)
        line.y = util.maybe_call(y_bound, interact_params, prefix='y')
    controls = widgets.interactive(wrapped, **interact_params)
    return widgets.VBox([controls, fig]) | [
"def",
"line",
"(",
"x_fn",
",",
"y_fn",
",",
"*",
",",
"options",
"=",
"{",
"}",
",",
"*",
"*",
"interact_params",
")",
":",
"fig",
"=",
"options",
".",
"get",
"(",
"'_fig'",
",",
"False",
")",
"or",
"_create_fig",
"(",
"options",
"=",
"options",
... | Generates an interactive line chart that allows users to change the
parameters of the inputs x_fn and y_fn.
Args:
x_fn (Array | (*args -> Array str | Array int | Array float)):
If array, uses array values for x-coordinates.
If function, must take parameters to interact with and return an
array of strings or numbers. These will become the x-coordinates
of the line plot.
y_fn (Array | (Array, *args -> Array int | Array float)):
If array, uses array values for y-coordinates.
If function, must take in the output of x_fn as its first parameter
and optionally other parameters to interact with. Must return an
array of numbers. These will become the y-coordinates of the line
plot.
Kwargs:
{options}
interact_params (dict): Keyword arguments in the same format as
`ipywidgets.interact`. One argument is required for each argument
of both `x_fn` and `y_fn`. If `x_fn` and `y_fn` have conflicting
parameter names, prefix the corresponding kwargs with `x__` and
`y__`.
Returns:
VBox with two children: the interactive controls and the figure.
>>> line([1, 2, 3], [4, 7, 10])
VBox(...)
>>> def x_values(max): return np.arange(0, max)
>>> def y_values(xs, sd):
... return xs + np.random.normal(len(xs), scale=sd)
>>> line(x_values, y_values, max=(10, 50), sd=(1, 10))
VBox(...) | [
"Generates",
"an",
"interactive",
"line",
"chart",
"that",
"allows",
"users",
"to",
"change",
"the",
"parameters",
"of",
"the",
"inputs",
"x_fn",
"and",
"y_fn",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L451-L506 |
SamLau95/nbinteract | nbinteract/plotting.py | _merge_with_defaults | def _merge_with_defaults(params):
"""
    Performs a 2-level deep merge of params with _default_params with correct
merging of params for each mark.
This is a bit complicated since params['marks'] is a list and we need to
make sure each mark gets the default params.
"""
marks_params = [
tz.merge(default, param) for default, param in
zip(itertools.repeat(_default_params['marks']), params['marks'])
] if 'marks' in params else [_default_params['marks']]
merged_without_marks = tz.merge_with(
tz.merge, tz.dissoc(_default_params, 'marks'),
tz.dissoc(params, 'marks')
)
return tz.merge(merged_without_marks, {'marks': marks_params}) | python | def _merge_with_defaults(params):
    """
    Performs a 2-level deep merge of params with _default_params with correct
    merging of params for each mark.
    This is a bit complicated since params['marks'] is a list and we need to
    make sure each mark gets the default params.
    """
    # Merge each per-mark dict on top of the shared mark defaults; with no
    # 'marks' key, fall back to a single entry of plain defaults.
    marks_params = [
        tz.merge(default, param) for default, param in
        zip(itertools.repeat(_default_params['marks']), params['marks'])
    ] if 'marks' in params else [_default_params['marks']]
    # Merge everything except 'marks' two levels deep: merge_with(tz.merge)
    # merges the inner dicts instead of overwriting them wholesale.
    merged_without_marks = tz.merge_with(
        tz.merge, tz.dissoc(_default_params, 'marks'),
        tz.dissoc(params, 'marks')
    )
    return tz.merge(merged_without_marks, {'marks': marks_params}) | [
"def",
"_merge_with_defaults",
"(",
"params",
")",
":",
"marks_params",
"=",
"[",
"tz",
".",
"merge",
"(",
"default",
",",
"param",
")",
"for",
"default",
",",
"param",
"in",
"zip",
"(",
"itertools",
".",
"repeat",
"(",
"_default_params",
"[",
"'marks'",
    ... | Performs a 2-level deep merge of params with _default_params with correct
merging of params for each mark.
This is a bit complicated since params['marks'] is a list and we need to
make sure each mark gets the default params. | [
"Performs",
"a",
"2",
"-",
"level",
"deep",
"merge",
"of",
"params",
"with",
"_default_params",
"with",
    "correct",
"merging",
"of",
"params",
"for",
"each",
"mark",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L636-L654 |
SamLau95/nbinteract | nbinteract/plotting.py | _create_fig | def _create_fig(
*,
x_sc=bq.LinearScale,
y_sc=bq.LinearScale,
x_ax=bq.Axis,
y_ax=bq.Axis,
fig=bq.Figure,
options={},
params={}
):
"""
Initializes scales and axes for a bqplot figure and returns the resulting
blank figure. Each plot component is passed in as a class. The plot options
should be passed into options.
Any additional parameters to initialize plot components are passed into
params as a dict of { plot_component: { trait: value, ... } }
For example, to change the grid lines of the x-axis:
params={ 'x_ax': {'grid_lines' : 'solid'} }
If the param value is a function, it will be called with the options dict
augmented with all previously created plot elements. This permits
dependencies on plot elements:
params={ 'x_ax': {'scale': lambda opts: opts['x_sc'] } }
"""
params = _merge_with_defaults(params)
x_sc = x_sc(**_call_params(params['x_sc'], options))
y_sc = y_sc(**_call_params(params['y_sc'], options))
options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})
x_ax = x_ax(**_call_params(params['x_ax'], options))
y_ax = y_ax(**_call_params(params['y_ax'], options))
options = tz.merge(options, {'x_ax': x_ax, 'y_ax': y_ax, 'marks': []})
fig = fig(**_call_params(params['fig'], options))
return fig | python | def _create_fig(
    *,
    x_sc=bq.LinearScale,
    y_sc=bq.LinearScale,
    x_ax=bq.Axis,
    y_ax=bq.Axis,
    fig=bq.Figure,
    options={},
    params={}
):
    """
    Initializes scales and axes for a bqplot figure and returns the resulting
    blank figure. Each plot component is passed in as a class. The plot options
    should be passed into options.
    Any additional parameters to initialize plot components are passed into
    params as a dict of { plot_component: { trait: value, ... } }
    For example, to change the grid lines of the x-axis:
    params={ 'x_ax': {'grid_lines' : 'solid'} }
    If the param value is a function, it will be called with the options dict
    augmented with all previously created plot elements. This permits
    dependencies on plot elements:
    params={ 'x_ax': {'scale': lambda opts: opts['x_sc'] } }
    """
    params = _merge_with_defaults(params)
    # Build components innermost-first (scales -> axes -> figure), feeding
    # each newly created element back into `options` so that later params
    # given as functions can reference it (e.g. an axis looking up its scale).
    x_sc = x_sc(**_call_params(params['x_sc'], options))
    y_sc = y_sc(**_call_params(params['y_sc'], options))
    options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})
    x_ax = x_ax(**_call_params(params['x_ax'], options))
    y_ax = y_ax(**_call_params(params['y_ax'], options))
    # The figure starts with no marks; callers attach them later.
    options = tz.merge(options, {'x_ax': x_ax, 'y_ax': y_ax, 'marks': []})
    fig = fig(**_call_params(params['fig'], options))
    return fig | [
"def",
"_create_fig",
"(",
"*",
",",
"x_sc",
"=",
"bq",
".",
"LinearScale",
",",
"y_sc",
"=",
"bq",
".",
"LinearScale",
",",
"x_ax",
"=",
"bq",
".",
"Axis",
",",
"y_ax",
"=",
"bq",
".",
"Axis",
",",
"fig",
"=",
"bq",
".",
"Figure",
",",
"options"... | Initializes scales and axes for a bqplot figure and returns the resulting
blank figure. Each plot component is passed in as a class. The plot options
should be passed into options.
Any additional parameters to initialize plot components are passed into
params as a dict of { plot_component: { trait: value, ... } }
For example, to change the grid lines of the x-axis:
params={ 'x_ax': {'grid_lines' : 'solid'} }
If the param value is a function, it will be called with the options dict
augmented with all previously created plot elements. This permits
dependencies on plot elements:
params={ 'x_ax': {'scale': lambda opts: opts['x_sc'] } } | [
"Initializes",
"scales",
"and",
"axes",
"for",
"a",
"bqplot",
"figure",
"and",
"returns",
"the",
"resulting",
"blank",
"figure",
".",
"Each",
"plot",
"component",
"is",
"passed",
"in",
"as",
"a",
"class",
".",
"The",
"plot",
"options",
"should",
"be",
"pas... | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L657-L694 |
SamLau95/nbinteract | nbinteract/plotting.py | _create_marks | def _create_marks(fig, marks=[bq.Mark], options={}, params={}):
"""
Initializes and returns marks for a figure as a list. Each mark is passed
in as a class. The plot options should be passed into options.
Any additional parameters to initialize plot components are passed into
params as a dict of { 'mark': [{ trait: value, ... }, ...] }
For example, when initializing two marks you can assign different colors to
each one:
params={
'marks': [
{'colors': [DARK_BLUE]},
{'colors': [GOLDENROD]},
]
}
If the param value is a function, it will be called with the options dict
augmented with all previously created plot elements. This permits
dependencies on plot elements:
params={ 'marks': {'scale': lambda opts: opts['x_sc'] } }
"""
params = _merge_with_defaults(params)
# Although fig provides scale_x and scale_y properties, the scales on the
# axes are the only ones that are actually used.
x_ax, y_ax = fig.axes
x_sc, y_sc = x_ax.scale, y_ax.scale
options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})
marks = [
mark_cls(**_call_params(mark_params, options))
for mark_cls, mark_params in zip(marks, params['marks'])
]
return marks | python | def _create_marks(fig, marks=[bq.Mark], options={}, params={}):
    """
    Initializes and returns marks for a figure as a list. Each mark is passed
    in as a class. The plot options should be passed into options.
    Any additional parameters to initialize plot components are passed into
    params as a dict of { 'mark': [{ trait: value, ... }, ...] }
    For example, when initializing two marks you can assign different colors to
    each one:
    params={
        'marks': [
            {'colors': [DARK_BLUE]},
            {'colors': [GOLDENROD]},
        ]
    }
    If the param value is a function, it will be called with the options dict
    augmented with all previously created plot elements. This permits
    dependencies on plot elements:
    params={ 'marks': {'scale': lambda opts: opts['x_sc'] } }
    """
    # NOTE(review): `marks=[bq.Mark]`, `options={}` and `params={}` are
    # mutable default arguments; none are mutated here, so sharing is
    # currently harmless.
    params = _merge_with_defaults(params)
    # Although fig provides scale_x and scale_y properties, the scales on the
    # axes are the only ones that are actually used.
    x_ax, y_ax = fig.axes
    x_sc, y_sc = x_ax.scale, y_ax.scale
    options = tz.merge(options, {'x_sc': x_sc, 'y_sc': y_sc})
    # Instantiate each mark class with its (possibly callable) params; note
    # zip silently drops extras if `marks` and params['marks'] differ in
    # length.
    marks = [
        mark_cls(**_call_params(mark_params, options))
        for mark_cls, mark_params in zip(marks, params['marks'])
    ]
    return marks | [
"def",
"_create_marks",
"(",
"fig",
",",
"marks",
"=",
"[",
"bq",
".",
"Mark",
"]",
",",
"options",
"=",
"{",
"}",
",",
"params",
"=",
"{",
"}",
")",
":",
"params",
"=",
"_merge_with_defaults",
"(",
"params",
")",
"# Although fig provides scale_x and scale... | Initializes and returns marks for a figure as a list. Each mark is passed
in as a class. The plot options should be passed into options.
Any additional parameters to initialize plot components are passed into
params as a dict of { 'mark': [{ trait: value, ... }, ...] }
For example, when initializing two marks you can assign different colors to
each one:
params={
'marks': [
{'colors': [DARK_BLUE]},
{'colors': [GOLDENROD]},
]
}
If the param value is a function, it will be called with the options dict
augmented with all previously created plot elements. This permits
dependencies on plot elements:
params={ 'marks': {'scale': lambda opts: opts['x_sc'] } } | [
"Initializes",
"and",
"returns",
"marks",
"for",
"a",
"figure",
"as",
"a",
"list",
".",
"Each",
"mark",
"is",
"passed",
"in",
"as",
"a",
"class",
".",
"The",
"plot",
"options",
"should",
"be",
"passed",
"into",
"options",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L697-L733 |
SamLau95/nbinteract | nbinteract/plotting.py | _array_or_placeholder | def _array_or_placeholder(
maybe_iterable, placeholder=PLACEHOLDER_ZEROS
) -> np.array:
"""
Return maybe_iterable's contents or a placeholder array.
Used to give bqplot its required initial points to plot even if we're using
a function to generate points.
"""
if isinstance(maybe_iterable, collections.Iterable):
return np.array([i for i in maybe_iterable])
return placeholder | python | def _array_or_placeholder(
    maybe_iterable, placeholder=PLACEHOLDER_ZEROS
) -> np.array:
    """
    Return maybe_iterable's contents or a placeholder array.
    Used to give bqplot its required initial points to plot even if we're using
    a function to generate points.
    """
    # NOTE(review): `collections.Iterable` was removed in Python 3.10;
    # modern interpreters need `collections.abc.Iterable` here.
    if isinstance(maybe_iterable, collections.Iterable):
        # Materialize the iterable (it may be a generator) into an ndarray.
        return np.array([i for i in maybe_iterable])
    return placeholder | [
"def",
"_array_or_placeholder",
"(",
"maybe_iterable",
",",
"placeholder",
"=",
"PLACEHOLDER_ZEROS",
")",
"->",
"np",
".",
"array",
":",
"if",
"isinstance",
"(",
"maybe_iterable",
",",
"collections",
".",
"Iterable",
")",
":",
"return",
"np",
".",
"array",
"("... | Return maybe_iterable's contents or a placeholder array.
Used to give bqplot its required initial points to plot even if we're using
a function to generate points. | [
"Return",
"maybe_iterable",
"s",
"contents",
"or",
"a",
"placeholder",
"array",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/plotting.py#L743-L754 |
SamLau95/nbinteract | nbinteract/cli.py | binder_spec_from_github_url | def binder_spec_from_github_url(github_url):
"""
Converts GitHub origin into a Binder spec.
For example:
[email protected]:SamLau95/nbinteract.git -> SamLau95/nbinteract/master
https://github.com/Calebs97/riemann_book -> Calebs97/riemann_book/master
"""
tokens = re.split(r'/|:', github_url.replace('.git', ''))
# The username and reponame are the last two tokens
def binder_spec_from_github_url(github_url):
    """
    Converts GitHub origin into a Binder spec.
    For example:
    git@github.com:SamLau95/nbinteract.git -> SamLau95/nbinteract/master
    https://github.com/Calebs97/riemann_book -> Calebs97/riemann_book/master
    Works for both SSH and HTTPS remotes; a trailing slash and a trailing
    '.git' suffix are ignored.
    """
    # Strip a trailing slash first so the repo name is the last token, then
    # drop only a trailing '.git' suffix. (The previous str.replace removed
    # '.git' anywhere in the URL, which mangled repos like user.github.io.)
    url = github_url.rstrip('/')
    if url.endswith('.git'):
        url = url[:-len('.git')]
    # Split on both ':' (SSH form) and '/' (HTTPS form).
    tokens = re.split(r'/|:', url)
    # The username and reponame are the last two tokens
    return '{}/{}/master'.format(tokens[-2], tokens[-1])
"def",
"binder_spec_from_github_url",
"(",
"github_url",
")",
":",
"tokens",
"=",
"re",
".",
"split",
"(",
"r'/|:'",
",",
"github_url",
".",
"replace",
"(",
"'.git'",
",",
"''",
")",
")",
"# The username and reponame are the last two tokens",
"return",
"'{}/{}/maste... | Converts GitHub origin into a Binder spec.
For example:
[email protected]:SamLau95/nbinteract.git -> SamLau95/nbinteract/master
https://github.com/Calebs97/riemann_book -> Calebs97/riemann_book/master | [
"Converts",
"GitHub",
"origin",
"into",
"a",
"Binder",
"spec",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L93-L103 |
SamLau95/nbinteract | nbinteract/cli.py | yes_or_no | def yes_or_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(
'{}[nbinteract]{} {}{}'.format(BLUE, NOCOLOR, question, prompt)
)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write(
"Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n"
        ) | python | def yes_or_no(question, default="yes"):
    """Ask a yes/no question via input() and return their answer.
    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).
    The "answer" return value is True for "yes" or False for "no".
    """
    # Accepted spellings mapped to their boolean meaning.
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Re-prompt until the user types a recognized answer (or just presses
    # <Enter> when a default is available).
    while True:
        sys.stdout.write(
            '{}[nbinteract]{} {}{}'.format(BLUE, NOCOLOR, question, prompt)
        )
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write(
                "Please respond with 'yes' or 'no' "
                "(or 'y' or 'n').\n"
            ) | [
"def",
"yes_or_no",
"(",
"question",
",",
"default",
"=",
"\"yes\"",
")",
":",
"valid",
"=",
"{",
"\"yes\"",
":",
"True",
",",
"\"y\"",
":",
"True",
",",
"\"ye\"",
":",
"True",
",",
"\"no\"",
":",
"False",
",",
"\"n\"",
":",
"False",
"}",
"if",
"de... | Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no". | [
"Ask",
"a",
"yes",
"/",
"no",
"question",
"via",
"input",
"()",
"and",
"return",
"their",
"answer",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L126-L159 |
SamLau95/nbinteract | nbinteract/cli.py | main | def main():
"""
Parses command line options and runs nbinteract.
"""
arguments = docopt(__doc__)
if arguments['init']:
return_code = init()
sys.exit(return_code)
    run_converter(arguments) | python | def main():
    """
    Parses command line options and runs nbinteract.
    """
    # docopt parses sys.argv against the usage string in the module
    # docstring (__doc__).
    arguments = docopt(__doc__)
    if arguments['init']:
        # `nbinteract init` sets up the repo, then exit with init's status.
        return_code = init()
        sys.exit(return_code)
    run_converter(arguments) | [
"def",
"main",
"(",
")",
":",
"arguments",
"=",
"docopt",
"(",
"__doc__",
")",
"if",
"arguments",
"[",
"'init'",
"]",
":",
"return_code",
"=",
"init",
"(",
")",
"sys",
".",
"exit",
"(",
"return_code",
")",
"run_converter",
"(",
"arguments",
")"
] | Parses command line options and runs nbinteract. | [
"Parses",
"command",
"line",
"options",
"and",
"runs",
"nbinteract",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L162-L171 |
SamLau95/nbinteract | nbinteract/cli.py | run_converter | def run_converter(arguments):
"""
Converts notebooks to HTML files. Returns list of output file paths
"""
# Get spec from config file
if os.path.isfile(CONFIG_FILE):
with open(CONFIG_FILE, encoding='utf-8') as f:
config = json.load(f)
arguments['--spec'] = arguments['--spec'] or config['spec']
check_arguments(arguments)
notebooks = flatmap(
expand_folder,
arguments['NOTEBOOKS'],
recursive=arguments['--recursive']
)
exporter = init_exporter(
extract_images=arguments['--images'],
spec=arguments['--spec'],
template_file=arguments['--template'],
button_at_top=(not arguments['--no-top-button']),
execute=arguments['--execute'],
)
log('Converting notebooks to HTML...')
output_files = []
for notebook in notebooks:
output_file = convert(
notebook,
exporter=exporter,
output_folder=arguments['--output'],
images_folder=arguments['--images']
)
output_files.append(output_file)
log('Converted {} to {}'.format(notebook, output_file))
log('Done!')
if arguments['--images']:
log('Resulting images located in {}'.format(arguments['--images']))
    return output_files | python | def run_converter(arguments):
    """
    Converts notebooks to HTML files. Returns list of output file paths
    """
    # Get spec from config file
    # (an explicit command-line --spec wins over the stored value).
    if os.path.isfile(CONFIG_FILE):
        with open(CONFIG_FILE, encoding='utf-8') as f:
            config = json.load(f)
        arguments['--spec'] = arguments['--spec'] or config['spec']
    check_arguments(arguments)
    # Expand any folder arguments into the individual .ipynb files inside.
    notebooks = flatmap(
        expand_folder,
        arguments['NOTEBOOKS'],
        recursive=arguments['--recursive']
    )
    exporter = init_exporter(
        extract_images=arguments['--images'],
        spec=arguments['--spec'],
        template_file=arguments['--template'],
        button_at_top=(not arguments['--no-top-button']),
        execute=arguments['--execute'],
    )
    log('Converting notebooks to HTML...')
    output_files = []
    for notebook in notebooks:
        output_file = convert(
            notebook,
            exporter=exporter,
            output_folder=arguments['--output'],
            images_folder=arguments['--images']
        )
        output_files.append(output_file)
        log('Converted {} to {}'.format(notebook, output_file))
    log('Done!')
    if arguments['--images']:
        log('Resulting images located in {}'.format(arguments['--images']))
    return output_files | [
"def",
"run_converter",
"(",
"arguments",
")",
":",
"# Get spec from config file",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"CONFIG_FILE",
")",
":",
"with",
"open",
"(",
"CONFIG_FILE",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"config",
"="... | Converts notebooks to HTML files. Returns list of output file paths | [
"Converts",
"notebooks",
"to",
"HTML",
"files",
".",
"Returns",
"list",
"of",
"output",
"file",
"paths"
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L174-L218 |
SamLau95/nbinteract | nbinteract/cli.py | init | def init():
'''
Initializes git repo for nbinteract.
1. Checks for requirements.txt or Dockerfile, offering to create a
requirements.txt if needed.
2. Sets the Binder spec using the `origin` git remote in .nbinteract.json.
3. Prints a Binder URL so the user can debug their image if needed.
'''
log('Initializing folder for nbinteract.')
log()
log('Checking to see if this folder is the root folder of a git project.')
if os.path.isdir('.git'):
log("Looks like we're in the root of a git project.")
else:
error(
"This folder doesn't look like the root of a git project. "
"Please rerun nbinteract init in the top-level folder of a "
"git project."
)
return ERROR
log()
log('Checking for requirements.txt or Dockerfile.')
if os.path.isfile('Dockerfile'):
log(
'Dockerfile found. Note that Binder will use the Dockerfile '
'instead of the requirements.txt file, so you should make sure '
'your Dockerfile follows the format in {docker_docs}'
.format(docker_docs=DOCKER_DOCS)
)
elif os.path.isfile('requirements.txt'):
log('requirements.txt found.')
else:
log('No requirements.txt file found.')
yes = yes_or_no(
'Would you like to create a sample requirements.txt file?'
)
if yes:
# TODO(sam): Don't hard-code requirements.txt
with open('requirements.txt', 'w', encoding='utf-8') as f:
f.write(DEFAULT_REQUIREMENTS_TXT)
log(
'Created requirements.txt. Edit this file now to include the '
'rest of your dependencies, then rerun nbinteract init.'
)
return SUCCESS
else:
log(
'Please manually create a requirements.txt file, then rerun '
'nbinteract init.'
)
return SUCCESS
log()
log('Generating .nbinteract.json file...')
if os.path.isfile(CONFIG_FILE):
log(
".nbinteract.json already exists, skipping generation. If you'd "
"like to regenerate the file, remove .nbinteract.json and rerun "
"this command."
)
log()
log("Initialization success!")
return SUCCESS
try:
github_origin = str(
subprocess.check_output(
'git remote get-url origin',
stderr=subprocess.STDOUT,
shell=True
), 'utf-8'
).strip()
except subprocess.CalledProcessError as e:
error(
"No git remote called origin found. Please set up your project's"
"origin remote to point to a GitHub URL.\ngit error: {}".format(e)
)
return ERROR
if 'github' not in github_origin:
error(
"Your project's origin remote {} doesn't look like a github "
"URL. This may cause issues with Binder, so please double check "
"your .nbinteract.json file after this script finishes. "
"Continuing as planned..."
)
binder_spec = binder_spec_from_github_url(github_origin)
with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
json.dump({'spec': binder_spec}, f, indent=4)
log('Created .nbinteract.json file successfully')
log()
log(
'Initialization complete! Now, you should make a git commit with the '
'files created by in this process and push your commits to GitHub.'
)
log()
log(
'After you push, you should visit {} and verify that your Binder '
'image successfully starts.'.format(BINDER_BASE_URL + binder_spec)
    ) | python | def init():
    '''
    Initializes git repo for nbinteract.
    1. Checks for requirements.txt or Dockerfile, offering to create a
    requirements.txt if needed.
    2. Sets the Binder spec using the `origin` git remote in .nbinteract.json.
    3. Prints a Binder URL so the user can debug their image if needed.
    Returns SUCCESS or ERROR for main() to pass to sys.exit().
    '''
    log('Initializing folder for nbinteract.')
    log()
    log('Checking to see if this folder is the root folder of a git project.')
    if os.path.isdir('.git'):
        log("Looks like we're in the root of a git project.")
    else:
        error(
            "This folder doesn't look like the root of a git project. "
            "Please rerun nbinteract init in the top-level folder of a "
            "git project."
        )
        return ERROR
    log()
    log('Checking for requirements.txt or Dockerfile.')
    if os.path.isfile('Dockerfile'):
        log(
            'Dockerfile found. Note that Binder will use the Dockerfile '
            'instead of the requirements.txt file, so you should make sure '
            'your Dockerfile follows the format in {docker_docs}'
            .format(docker_docs=DOCKER_DOCS)
        )
    elif os.path.isfile('requirements.txt'):
        log('requirements.txt found.')
    else:
        log('No requirements.txt file found.')
        yes = yes_or_no(
            'Would you like to create a sample requirements.txt file?'
        )
        if yes:
            # TODO(sam): Don't hard-code requirements.txt
            with open('requirements.txt', 'w', encoding='utf-8') as f:
                f.write(DEFAULT_REQUIREMENTS_TXT)
            log(
                'Created requirements.txt. Edit this file now to include the '
                'rest of your dependencies, then rerun nbinteract init.'
            )
            return SUCCESS
        else:
            log(
                'Please manually create a requirements.txt file, then rerun '
                'nbinteract init.'
            )
            return SUCCESS
    log()
    log('Generating .nbinteract.json file...')
    if os.path.isfile(CONFIG_FILE):
        log(
            ".nbinteract.json already exists, skipping generation. If you'd "
            "like to regenerate the file, remove .nbinteract.json and rerun "
            "this command."
        )
        log()
        log("Initialization success!")
        return SUCCESS
    # shell=True is acceptable here because the command is a fixed constant
    # string with no user input.
    try:
        github_origin = str(
            subprocess.check_output(
                'git remote get-url origin',
                stderr=subprocess.STDOUT,
                shell=True
            ), 'utf-8'
        ).strip()
    except subprocess.CalledProcessError as e:
        # NOTE(review): the two adjacent string literals below are missing a
        # separating space -- the message renders as "project'sorigin".
        error(
            "No git remote called origin found. Please set up your project's"
            "origin remote to point to a GitHub URL.\ngit error: {}".format(e)
        )
        return ERROR
    if 'github' not in github_origin:
        # NOTE(review): the '{}' placeholder below is never filled in -- a
        # .format(github_origin) call is missing, so the literal braces are
        # printed to the user.
        error(
            "Your project's origin remote {} doesn't look like a github "
            "URL. This may cause issues with Binder, so please double check "
            "your .nbinteract.json file after this script finishes. "
            "Continuing as planned..."
        )
    binder_spec = binder_spec_from_github_url(github_origin)
    with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
        json.dump({'spec': binder_spec}, f, indent=4)
    log('Created .nbinteract.json file successfully')
    log()
    log(
        'Initialization complete! Now, you should make a git commit with the '
        'files created by in this process and push your commits to GitHub.'
    )
    log()
    # NOTE(review): this success path falls off the end and returns None;
    # main() then calls sys.exit(None), which still exits 0, but an explicit
    # `return SUCCESS` would match the other paths.
    log(
        'After you push, you should visit {} and verify that your Binder '
        'image successfully starts.'.format(BINDER_BASE_URL + binder_spec)
    ) | [
"def",
"init",
"(",
")",
":",
"log",
"(",
"'Initializing folder for nbinteract.'",
")",
"log",
"(",
")",
"log",
"(",
"'Checking to see if this folder is the root folder of a git project.'",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"'.git'",
")",
":",
"log"... | Initializes git repo for nbinteract.
1. Checks for requirements.txt or Dockerfile, offering to create a
requirements.txt if needed.
2. Sets the Binder spec using the `origin` git remote in .nbinteract.json.
3. Prints a Binder URL so the user can debug their image if needed. | [
"Initializes",
"git",
"repo",
"for",
"nbinteract",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L221-L325 |
SamLau95/nbinteract | nbinteract/cli.py | expand_folder | def expand_folder(notebook_or_folder, recursive=False):
"""
If notebook_or_folder is a folder, returns a list containing all notebooks
in the folder. Otherwise, returns a list containing the notebook name.
If recursive is True, recurses into subdirectories.
"""
is_file = os.path.isfile(notebook_or_folder)
is_dir = os.path.isdir(notebook_or_folder)
if not (is_file or is_dir):
raise ValueError(
'{} is neither an existing file nor a folder.'
.format(notebook_or_folder)
)
if is_file:
return [notebook_or_folder]
# Now we know the input is a directory
if not recursive:
return glob('{}/*.ipynb'.format(notebook_or_folder))
# Recursive case
return [
os.path.join(folder, filename)
for folder, _, filenames in os.walk(notebook_or_folder)
# Skip folders that start with .
if not os.path.basename(folder).startswith('.')
for filename in fnmatch.filter(filenames, '*.ipynb')
    ] | python | def expand_folder(notebook_or_folder, recursive=False):
    """
    If notebook_or_folder is a folder, returns a list containing all notebooks
    in the folder. Otherwise, returns a list containing the notebook name.
    If recursive is True, recurses into subdirectories.
    Raises:
        ValueError: if the path is neither an existing file nor a folder.
    """
    is_file = os.path.isfile(notebook_or_folder)
    is_dir = os.path.isdir(notebook_or_folder)
    if not (is_file or is_dir):
        raise ValueError(
            '{} is neither an existing file nor a folder.'
            .format(notebook_or_folder)
        )
    if is_file:
        return [notebook_or_folder]
    # Now we know the input is a directory
    if not recursive:
        return glob('{}/*.ipynb'.format(notebook_or_folder))
    # Recursive case
    return [
        os.path.join(folder, filename)
        for folder, _, filenames in os.walk(notebook_or_folder)
        # Skip folders that start with .
        # NOTE(review): only the basename of each walked folder is checked,
        # so notebooks nested below a hidden folder (e.g. .foo/bar/x.ipynb)
        # are NOT skipped -- confirm whether that is intended.
        if not os.path.basename(folder).startswith('.')
        for filename in fnmatch.filter(filenames, '*.ipynb')
    ] | [
"def",
"expand_folder",
"(",
"notebook_or_folder",
",",
"recursive",
"=",
"False",
")",
":",
"is_file",
"=",
"os",
".",
"path",
".",
"isfile",
"(",
"notebook_or_folder",
")",
"is_dir",
"=",
"os",
".",
"path",
".",
"isdir",
"(",
"notebook_or_folder",
")",
"... | If notebook_or_folder is a folder, returns a list containing all notebooks
in the folder. Otherwise, returns a list containing the notebook name.
If recursive is True, recurses into subdirectories. | [
"If",
"notebook_or_folder",
"is",
"a",
"folder",
"returns",
"a",
"list",
"containing",
"all",
"notebooks",
"in",
"the",
"folder",
".",
"Otherwise",
"returns",
"a",
"list",
"containing",
"the",
"notebook",
"name",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L360-L389 |
SamLau95/nbinteract | nbinteract/cli.py | init_exporter | def init_exporter(extract_images, execute, **exporter_config):
"""
Returns an initialized exporter.
"""
config = Config(InteractExporter=exporter_config)
preprocessors = []
if extract_images:
# Use ExtractOutputPreprocessor to extract the images to separate files
preprocessors.append(
'nbconvert.preprocessors.ExtractOutputPreprocessor'
)
if execute:
# Use the NbiExecutePreprocessor to correctly generate widget output
# for interact() calls.
preprocessors.append('nbinteract.preprocessors.NbiExecutePreprocessor')
config.InteractExporter.preprocessors = preprocessors
exporter = InteractExporter(config=config)
    return exporter | python | def init_exporter(extract_images, execute, **exporter_config):
    """
    Returns an initialized exporter.
    Extra keyword arguments (spec, template_file, ...) are passed through as
    traitlets configuration for InteractExporter.
    """
    config = Config(InteractExporter=exporter_config)
    preprocessors = []
    if extract_images:
        # Use ExtractOutputPreprocessor to extract the images to separate files
        preprocessors.append(
            'nbconvert.preprocessors.ExtractOutputPreprocessor'
        )
    if execute:
        # Use the NbiExecutePreprocessor to correctly generate widget output
        # for interact() calls.
        preprocessors.append('nbinteract.preprocessors.NbiExecutePreprocessor')
    config.InteractExporter.preprocessors = preprocessors
    exporter = InteractExporter(config=config)
    return exporter | [
"def",
"init_exporter",
"(",
"extract_images",
",",
"execute",
",",
"*",
"*",
"exporter_config",
")",
":",
"config",
"=",
"Config",
"(",
"InteractExporter",
"=",
"exporter_config",
")",
"preprocessors",
"=",
"[",
"]",
"if",
"extract_images",
":",
"# Use ExtractO... | Returns an initialized exporter. | [
"Returns",
"an",
"initialized",
"exporter",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L392-L412 |
SamLau95/nbinteract | nbinteract/cli.py | make_exporter_resources | def make_exporter_resources(nb_name, out_folder, images_folder=None):
"""
Creates resources dict for the exporter
"""
resources = defaultdict(str)
resources['metadata'] = defaultdict(str)
resources['metadata']['name'] = nb_name
resources['metadata']['path'] = out_folder
# This results in images like AB_5_1.png for a notebook called AB.ipynb
resources['unique_key'] = nb_name
resources['output_files_dir'] = images_folder
return resources | python | def make_exporter_resources(nb_name, out_folder, images_folder=None):
"""
Creates resources dict for the exporter
"""
resources = defaultdict(str)
resources['metadata'] = defaultdict(str)
resources['metadata']['name'] = nb_name
resources['metadata']['path'] = out_folder
# This results in images like AB_5_1.png for a notebook called AB.ipynb
resources['unique_key'] = nb_name
resources['output_files_dir'] = images_folder
return resources | [
"def",
"make_exporter_resources",
"(",
"nb_name",
",",
"out_folder",
",",
"images_folder",
"=",
"None",
")",
":",
"resources",
"=",
"defaultdict",
"(",
"str",
")",
"resources",
"[",
"'metadata'",
"]",
"=",
"defaultdict",
"(",
"str",
")",
"resources",
"[",
"'... | Creates resources dict for the exporter | [
"Creates",
"resources",
"dict",
"for",
"the",
"exporter"
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L415-L428 |
SamLau95/nbinteract | nbinteract/cli.py | convert | def convert(notebook_path, exporter, output_folder=None, images_folder=None):
"""
Converts notebook into an HTML file, outputting notebooks into
output_folder if set and images into images_folder if set.
Returns the path to the resulting HTML file.
"""
if output_folder:
os.makedirs(output_folder, exist_ok=True)
if images_folder:
os.makedirs(images_folder, exist_ok=True)
# Computes notebooks/ch1 and <name>.ipynb from notebooks/ch1/<name>.ipynb
path, filename = os.path.split(notebook_path)
# Computes <name> from <name>.ipynb
basename, _ = os.path.splitext(filename)
# Computes <name>.html from notebooks/<name>.ipynb
outfile_name = basename + '.html'
# If output_folder is not set, we default to the original folder of the
# notebook.
out_folder = path if not output_folder else output_folder
outfile_path = os.path.join(out_folder, outfile_name)
notebook = nbformat.read(notebook_path, as_version=4)
html, resources = exporter.from_notebook_node(
notebook,
resources=make_exporter_resources(basename, out_folder, images_folder),
)
# Write out HTML
with open(outfile_path, 'w', encoding='utf-8') as outfile:
outfile.write(html)
# Write out images. If images_folder wasn't specified, resources['outputs']
# is None so this loop won't run
for image_path, image_data in resources.get('outputs', {}).items():
with open(image_path, 'wb') as outimage:
outimage.write(image_data)
return outfile_path | python | def convert(notebook_path, exporter, output_folder=None, images_folder=None):
"""
Converts notebook into an HTML file, outputting notebooks into
output_folder if set and images into images_folder if set.
Returns the path to the resulting HTML file.
"""
if output_folder:
os.makedirs(output_folder, exist_ok=True)
if images_folder:
os.makedirs(images_folder, exist_ok=True)
# Computes notebooks/ch1 and <name>.ipynb from notebooks/ch1/<name>.ipynb
path, filename = os.path.split(notebook_path)
# Computes <name> from <name>.ipynb
basename, _ = os.path.splitext(filename)
# Computes <name>.html from notebooks/<name>.ipynb
outfile_name = basename + '.html'
# If output_folder is not set, we default to the original folder of the
# notebook.
out_folder = path if not output_folder else output_folder
outfile_path = os.path.join(out_folder, outfile_name)
notebook = nbformat.read(notebook_path, as_version=4)
html, resources = exporter.from_notebook_node(
notebook,
resources=make_exporter_resources(basename, out_folder, images_folder),
)
# Write out HTML
with open(outfile_path, 'w', encoding='utf-8') as outfile:
outfile.write(html)
# Write out images. If images_folder wasn't specified, resources['outputs']
# is None so this loop won't run
for image_path, image_data in resources.get('outputs', {}).items():
with open(image_path, 'wb') as outimage:
outimage.write(image_data)
return outfile_path | [
"def",
"convert",
"(",
"notebook_path",
",",
"exporter",
",",
"output_folder",
"=",
"None",
",",
"images_folder",
"=",
"None",
")",
":",
"if",
"output_folder",
":",
"os",
".",
"makedirs",
"(",
"output_folder",
",",
"exist_ok",
"=",
"True",
")",
"if",
"imag... | Converts notebook into an HTML file, outputting notebooks into
output_folder if set and images into images_folder if set.
Returns the path to the resulting HTML file. | [
"Converts",
"notebook",
"into",
"an",
"HTML",
"file",
"outputting",
"notebooks",
"into",
"output_folder",
"if",
"set",
"and",
"images",
"into",
"images_folder",
"if",
"set",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/nbinteract/cli.py#L431-L472 |
SamLau95/nbinteract | docs/convert_notebooks_to_html_partial.py | convert_notebooks_to_html_partial | def convert_notebooks_to_html_partial(notebook_paths, url_map):
"""
Converts notebooks in notebook_paths to HTML partials
"""
for notebook_path in notebook_paths:
# Computes <name>.ipynb from notebooks/01/<name>.ipynb
path, filename = os.path.split(notebook_path)
# Computes examples from notebooks/examples
chapter = os.path.split(path)[1] if os.sep in path else ''
# Computes <name> from <name>.ipynb
basename, _ = os.path.splitext(filename)
# Computes <name>.html from notebooks/<name>.ipynb
outfile_name = basename + '.html'
# This results in images like AB_5_1.png for a notebook called AB.ipynb
unique_image_key = basename
# This sets the img tag URL in the rendered HTML.
output_files_dir = '/' + NOTEBOOK_IMAGE_DIR
# Path to output final HTML file
outfile_path = os.path.join(chapter, outfile_name)
if chapter:
os.makedirs(chapter, exist_ok=True)
extract_output_config = {
'unique_key': unique_image_key,
'output_files_dir': output_files_dir,
}
notebook = nbformat.read(notebook_path, 4)
notebook.cells.insert(0, _preamble_cell(path))
html, resources = html_exporter.from_notebook_node(
notebook,
resources=extract_output_config,
)
if outfile_path not in url_map:
print(
'[Warning]: {} not found in _data/toc.yml. This page will '
'not appear in the textbook table of contents.'
.format(outfile_path)
)
prev_page = url_map.get(outfile_path, {}).get('prev', 'false')
next_page = url_map.get(outfile_path, {}).get('next', 'false')
final_output = wrapper.format(
html=html,
prev_page=prev_page,
next_page=next_page,
)
# Write out HTML
with open(outfile_path, 'w', encoding='utf-8') as outfile:
outfile.write(final_output)
# Write out images
for relative_path, image_data in resources['outputs'].items():
image_name = os.path.basename(relative_path)
final_image_path = os.path.join(NOTEBOOK_IMAGE_DIR, image_name)
with open(final_image_path, 'wb') as outimage:
outimage.write(image_data)
print(outfile_path + " written.") | python | def convert_notebooks_to_html_partial(notebook_paths, url_map):
"""
Converts notebooks in notebook_paths to HTML partials
"""
for notebook_path in notebook_paths:
# Computes <name>.ipynb from notebooks/01/<name>.ipynb
path, filename = os.path.split(notebook_path)
# Computes examples from notebooks/examples
chapter = os.path.split(path)[1] if os.sep in path else ''
# Computes <name> from <name>.ipynb
basename, _ = os.path.splitext(filename)
# Computes <name>.html from notebooks/<name>.ipynb
outfile_name = basename + '.html'
# This results in images like AB_5_1.png for a notebook called AB.ipynb
unique_image_key = basename
# This sets the img tag URL in the rendered HTML.
output_files_dir = '/' + NOTEBOOK_IMAGE_DIR
# Path to output final HTML file
outfile_path = os.path.join(chapter, outfile_name)
if chapter:
os.makedirs(chapter, exist_ok=True)
extract_output_config = {
'unique_key': unique_image_key,
'output_files_dir': output_files_dir,
}
notebook = nbformat.read(notebook_path, 4)
notebook.cells.insert(0, _preamble_cell(path))
html, resources = html_exporter.from_notebook_node(
notebook,
resources=extract_output_config,
)
if outfile_path not in url_map:
print(
'[Warning]: {} not found in _data/toc.yml. This page will '
'not appear in the textbook table of contents.'
.format(outfile_path)
)
prev_page = url_map.get(outfile_path, {}).get('prev', 'false')
next_page = url_map.get(outfile_path, {}).get('next', 'false')
final_output = wrapper.format(
html=html,
prev_page=prev_page,
next_page=next_page,
)
# Write out HTML
with open(outfile_path, 'w', encoding='utf-8') as outfile:
outfile.write(final_output)
# Write out images
for relative_path, image_data in resources['outputs'].items():
image_name = os.path.basename(relative_path)
final_image_path = os.path.join(NOTEBOOK_IMAGE_DIR, image_name)
with open(final_image_path, 'wb') as outimage:
outimage.write(image_data)
print(outfile_path + " written.") | [
"def",
"convert_notebooks_to_html_partial",
"(",
"notebook_paths",
",",
"url_map",
")",
":",
"for",
"notebook_path",
"in",
"notebook_paths",
":",
"# Computes <name>.ipynb from notebooks/01/<name>.ipynb",
"path",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"("... | Converts notebooks in notebook_paths to HTML partials | [
"Converts",
"notebooks",
"in",
"notebook_paths",
"to",
"HTML",
"partials"
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/docs/convert_notebooks_to_html_partial.py#L73-L135 |
SamLau95/nbinteract | docs/convert_notebooks_to_html_partial.py | _preamble_cell | def _preamble_cell(path):
"""
This cell is inserted at the start of each notebook to set the working
directory to the correct folder.
"""
code = dedent(
'''
# HIDDEN
# Clear previously defined variables
%reset -f
'''.format(path)
)
return nbformat.v4.new_code_cell(source=code) | python | def _preamble_cell(path):
"""
This cell is inserted at the start of each notebook to set the working
directory to the correct folder.
"""
code = dedent(
'''
# HIDDEN
# Clear previously defined variables
%reset -f
'''.format(path)
)
return nbformat.v4.new_code_cell(source=code) | [
"def",
"_preamble_cell",
"(",
"path",
")",
":",
"code",
"=",
"dedent",
"(",
"'''\n # HIDDEN\n # Clear previously defined variables\n %reset -f\n '''",
".",
"format",
"(",
"path",
")",
")",
"return",
"nbformat",
".",
"v4",
".",
"new_code_cell",
"(",
"source... | This cell is inserted at the start of each notebook to set the working
directory to the correct folder. | [
"This",
"cell",
"is",
"inserted",
"at",
"the",
"start",
"of",
"each",
"notebook",
"to",
"set",
"the",
"working",
"directory",
"to",
"the",
"correct",
"folder",
"."
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/docs/convert_notebooks_to_html_partial.py#L138-L150 |
SamLau95/nbinteract | docs/convert_notebooks_to_html_partial.py | generate_url_map | def generate_url_map(yaml_path=TOC_PATH) -> dict:
"""
Generates mapping from each URL to its previous and next URLs in the
textbook. The dictionary looks like:
{
'ch/10/some_page.html' : {
'prev': 'ch/09/foo.html',
'next': 'ch/10/bar.html',
},
...
}
"""
with open(yaml_path) as f:
data = yaml.load(f)
pipeline = [
t.remove(_not_internal_link),
flatmap(_flatten_sections),
t.map(t.get('url')), list, _sliding_three,
t.map(_adj_pages),
t.merge()
]
return t.pipe(data, *pipeline) | python | def generate_url_map(yaml_path=TOC_PATH) -> dict:
"""
Generates mapping from each URL to its previous and next URLs in the
textbook. The dictionary looks like:
{
'ch/10/some_page.html' : {
'prev': 'ch/09/foo.html',
'next': 'ch/10/bar.html',
},
...
}
"""
with open(yaml_path) as f:
data = yaml.load(f)
pipeline = [
t.remove(_not_internal_link),
flatmap(_flatten_sections),
t.map(t.get('url')), list, _sliding_three,
t.map(_adj_pages),
t.merge()
]
return t.pipe(data, *pipeline) | [
"def",
"generate_url_map",
"(",
"yaml_path",
"=",
"TOC_PATH",
")",
"->",
"dict",
":",
"with",
"open",
"(",
"yaml_path",
")",
"as",
"f",
":",
"data",
"=",
"yaml",
".",
"load",
"(",
"f",
")",
"pipeline",
"=",
"[",
"t",
".",
"remove",
"(",
"_not_interna... | Generates mapping from each URL to its previous and next URLs in the
textbook. The dictionary looks like:
{
'ch/10/some_page.html' : {
'prev': 'ch/09/foo.html',
'next': 'ch/10/bar.html',
},
...
} | [
"Generates",
"mapping",
"from",
"each",
"URL",
"to",
"its",
"previous",
"and",
"next",
"URLs",
"in",
"the",
"textbook",
".",
"The",
"dictionary",
"looks",
"like",
":"
] | train | https://github.com/SamLau95/nbinteract/blob/9f346452283831aad3f4416c04879f1d187ec3b7/docs/convert_notebooks_to_html_partial.py#L197-L220 |
kylef/refract.py | refract/registry.py | Registry.find_element_class | def find_element_class(self, element_name):
"""
Finds an element class for the given element name contained within the
registry.
Returns Element when there is no matching element subclass.
>>> registry.find_element_class('string')
String
>>> registry.find_element_class('unknown')
Element
"""
for element in self.elements:
if element.element == element_name:
return element
return Element | python | def find_element_class(self, element_name):
"""
Finds an element class for the given element name contained within the
registry.
Returns Element when there is no matching element subclass.
>>> registry.find_element_class('string')
String
>>> registry.find_element_class('unknown')
Element
"""
for element in self.elements:
if element.element == element_name:
return element
return Element | [
"def",
"find_element_class",
"(",
"self",
",",
"element_name",
")",
":",
"for",
"element",
"in",
"self",
".",
"elements",
":",
"if",
"element",
".",
"element",
"==",
"element_name",
":",
"return",
"element",
"return",
"Element"
] | Finds an element class for the given element name contained within the
registry.
Returns Element when there is no matching element subclass.
>>> registry.find_element_class('string')
String
>>> registry.find_element_class('unknown')
Element | [
"Finds",
"an",
"element",
"class",
"for",
"the",
"given",
"element",
"name",
"contained",
"within",
"the",
"registry",
"."
] | train | https://github.com/kylef/refract.py/blob/f58ddf619038b580ab50c2e7f867d59d153eabbb/refract/registry.py#L39-L57 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | _transform | def _transform(transformer_chain: Sequence[Tuple[DataTransformer, Type]], data: S, context: PipelineContext = None) -> T:
"""Transform data to a new type.
Args:
transformer_chain: A sequence of (transformer, type) pairs to convert the data.
data: The data to be transformed.
context: The context of the transformations (mutable).
Returns:
The transformed data.
"""
for transformer, target_type in transformer_chain:
# noinspection PyTypeChecker
data = transformer.transform(target_type, data, context)
return data | python | def _transform(transformer_chain: Sequence[Tuple[DataTransformer, Type]], data: S, context: PipelineContext = None) -> T:
"""Transform data to a new type.
Args:
transformer_chain: A sequence of (transformer, type) pairs to convert the data.
data: The data to be transformed.
context: The context of the transformations (mutable).
Returns:
The transformed data.
"""
for transformer, target_type in transformer_chain:
# noinspection PyTypeChecker
data = transformer.transform(target_type, data, context)
return data | [
"def",
"_transform",
"(",
"transformer_chain",
":",
"Sequence",
"[",
"Tuple",
"[",
"DataTransformer",
",",
"Type",
"]",
"]",
",",
"data",
":",
"S",
",",
"context",
":",
"PipelineContext",
"=",
"None",
")",
"->",
"T",
":",
"for",
"transformer",
",",
"targ... | Transform data to a new type.
Args:
transformer_chain: A sequence of (transformer, type) pairs to convert the data.
data: The data to be transformed.
context: The context of the transformations (mutable).
Returns:
The transformed data. | [
"Transform",
"data",
"to",
"a",
"new",
"type",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L100-L114 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | _SinkHandler.put | def put(self, item: T, context: PipelineContext = None) -> None:
"""Puts an objects into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
item: The objects to be inserted into the data sink.
context: The context of the insertion (mutable).
"""
LOGGER.info("Converting item \"{item}\" for sink \"{sink}\"".format(item=item, sink=self._sink))
item = self._transform(data=item, context=context)
LOGGER.info("Puting item \"{item}\" into sink \"{sink}\"".format(item=item, sink=self._sink))
self._sink.put(self._store_type, item, context) | python | def put(self, item: T, context: PipelineContext = None) -> None:
"""Puts an objects into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
item: The objects to be inserted into the data sink.
context: The context of the insertion (mutable).
"""
LOGGER.info("Converting item \"{item}\" for sink \"{sink}\"".format(item=item, sink=self._sink))
item = self._transform(data=item, context=context)
LOGGER.info("Puting item \"{item}\" into sink \"{sink}\"".format(item=item, sink=self._sink))
self._sink.put(self._store_type, item, context) | [
"def",
"put",
"(",
"self",
",",
"item",
":",
"T",
",",
"context",
":",
"PipelineContext",
"=",
"None",
")",
"->",
"None",
":",
"LOGGER",
".",
"info",
"(",
"\"Converting item \\\"{item}\\\" for sink \\\"{sink}\\\"\"",
".",
"format",
"(",
"item",
"=",
"item",
... | Puts an objects into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
item: The objects to be inserted into the data sink.
context: The context of the insertion (mutable). | [
"Puts",
"an",
"objects",
"into",
"the",
"data",
"sink",
".",
"The",
"objects",
"may",
"be",
"transformed",
"into",
"a",
"new",
"type",
"for",
"insertion",
"if",
"necessary",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L130-L140 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | _SinkHandler.put_many | def put_many(self, items: Iterable[T], context: PipelineContext = None) -> None:
"""Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
items: An iterable (e.g. list) of objects to be inserted into the data sink.
context: The context of the insertions (mutable).
"""
LOGGER.info("Creating transform generator for items \"{items}\" for sink \"{sink}\"".format(items=items, sink=self._sink))
transform_generator = (self._transform(data=item, context=context) for item in items)
LOGGER.info("Putting transform generator for items \"{items}\" into sink \"{sink}\"".format(items=items, sink=self._sink))
self._sink.put_many(self._store_type, transform_generator, context) | python | def put_many(self, items: Iterable[T], context: PipelineContext = None) -> None:
"""Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
items: An iterable (e.g. list) of objects to be inserted into the data sink.
context: The context of the insertions (mutable).
"""
LOGGER.info("Creating transform generator for items \"{items}\" for sink \"{sink}\"".format(items=items, sink=self._sink))
transform_generator = (self._transform(data=item, context=context) for item in items)
LOGGER.info("Putting transform generator for items \"{items}\" into sink \"{sink}\"".format(items=items, sink=self._sink))
self._sink.put_many(self._store_type, transform_generator, context) | [
"def",
"put_many",
"(",
"self",
",",
"items",
":",
"Iterable",
"[",
"T",
"]",
",",
"context",
":",
"PipelineContext",
"=",
"None",
")",
"->",
"None",
":",
"LOGGER",
".",
"info",
"(",
"\"Creating transform generator for items \\\"{items}\\\" for sink \\\"{sink}\\\"\"... | Puts multiple objects of the same type into the data sink. The objects may be transformed into a new type for insertion if necessary.
Args:
items: An iterable (e.g. list) of objects to be inserted into the data sink.
context: The context of the insertions (mutable). | [
"Puts",
"multiple",
"objects",
"of",
"the",
"same",
"type",
"into",
"the",
"data",
"sink",
".",
"The",
"objects",
"may",
"be",
"transformed",
"into",
"a",
"new",
"type",
"for",
"insertion",
"if",
"necessary",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L142-L152 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | _SourceHandler.get | def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T:
"""Gets a query from the data source.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
"""
result = self._source.get(self._source_type, deepcopy(query), context)
LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source))
LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result))
for sink in self._before_transform:
sink.put(result, context)
LOGGER.info("Converting result \"{result}\" to request type".format(result=result))
result = self._transform(data=result, context=context)
LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result))
for sink in self._after_transform:
sink.put(result, context)
return result | python | def get(self, query: Mapping[str, Any], context: PipelineContext = None) -> T:
"""Gets a query from the data source.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
"""
result = self._source.get(self._source_type, deepcopy(query), context)
LOGGER.info("Got result \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source))
LOGGER.info("Sending result \"{result}\" to sinks before converting".format(result=result))
for sink in self._before_transform:
sink.put(result, context)
LOGGER.info("Converting result \"{result}\" to request type".format(result=result))
result = self._transform(data=result, context=context)
LOGGER.info("Sending result \"{result}\" to sinks after converting".format(result=result))
for sink in self._after_transform:
sink.put(result, context)
return result | [
"def",
"get",
"(",
"self",
",",
"query",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"context",
":",
"PipelineContext",
"=",
"None",
")",
"->",
"T",
":",
"result",
"=",
"self",
".",
"_source",
".",
"get",
"(",
"self",
".",
"_source_type",
","... | Gets a query from the data source.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object. | [
"Gets",
"a",
"query",
"from",
"the",
"data",
"source",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L170-L199 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | _SourceHandler.get_many | def get_many(self, query: Mapping[str, Any], context: PipelineContext = None, streaming: bool = False) -> Iterable[T]:
"""Gets a query from the data source, where the query contains multiple elements to be extracted.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True.
"""
result = self._source.get_many(self._source_type, deepcopy(query), context)
LOGGER.info("Got results \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source))
if not streaming:
LOGGER.info("Non-streaming get_many request. Ensuring results \"{result}\" are a Iterable".format(result=result))
result = list(result)
LOGGER.info("Sending results \"{result}\" to sinks before converting".format(result=result))
for sink in self._before_transform:
sink.put_many(result, context)
LOGGER.info("Converting results \"{result}\" to request type".format(result=result))
result = [self._transform(data=item, context=context) for item in result]
LOGGER.info("Sending results \"{result}\" to sinks after converting".format(result=result))
for sink in self._after_transform:
sink.put_many(result, context)
return result
else:
LOGGER.info("Streaming get_many request. Returning result generator for results \"{result}\"".format(result=result))
return self._get_many_generator(result) | python | def get_many(self, query: Mapping[str, Any], context: PipelineContext = None, streaming: bool = False) -> Iterable[T]:
"""Gets a query from the data source, where the query contains multiple elements to be extracted.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True.
"""
result = self._source.get_many(self._source_type, deepcopy(query), context)
LOGGER.info("Got results \"{result}\" from query \"{query}\" of source \"{source}\"".format(result=result, query=query, source=self._source))
if not streaming:
LOGGER.info("Non-streaming get_many request. Ensuring results \"{result}\" are a Iterable".format(result=result))
result = list(result)
LOGGER.info("Sending results \"{result}\" to sinks before converting".format(result=result))
for sink in self._before_transform:
sink.put_many(result, context)
LOGGER.info("Converting results \"{result}\" to request type".format(result=result))
result = [self._transform(data=item, context=context) for item in result]
LOGGER.info("Sending results \"{result}\" to sinks after converting".format(result=result))
for sink in self._after_transform:
sink.put_many(result, context)
return result
else:
LOGGER.info("Streaming get_many request. Returning result generator for results \"{result}\"".format(result=result))
return self._get_many_generator(result) | [
"def",
"get_many",
"(",
"self",
",",
"query",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"context",
":",
"PipelineContext",
"=",
"None",
",",
"streaming",
":",
"bool",
"=",
"False",
")",
"->",
"Iterable",
"[",
"T",
"]",
":",
"result",
"=",
"... | Gets a query from the data source, where the query contains multiple elements to be extracted.
1) Extracts the query from the data source.
2) Inserts the result into any data sinks.
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True. | [
"Gets",
"a",
"query",
"from",
"the",
"data",
"source",
"where",
"the",
"query",
"contains",
"multiple",
"elements",
"to",
"be",
"extracted",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L216-L253 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | DataPipeline.get | def get(self, type: Type[T], query: Mapping[str, Any]) -> T:
"""Gets a query from the data pipeline.
1) Extracts the query the sequence of data sources.
2) Inserts the result into the data sinks (if appropriate).
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
"""
LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if handlers is None:
raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
for handler in handlers:
try:
return handler.get(query, context)
except NotFoundError:
pass
raise NotFoundError("No source returned a query result!") | python | def get(self, type: Type[T], query: Mapping[str, Any]) -> T:
"""Gets a query from the data pipeline.
1) Extracts the query the sequence of data sources.
2) Inserts the result into the data sinks (if appropriate).
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object.
"""
LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if handlers is None:
raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
for handler in handlers:
try:
return handler.get(query, context)
except NotFoundError:
pass
raise NotFoundError("No source returned a query result!") | [
"def",
"get",
"(",
"self",
",",
"type",
":",
"Type",
"[",
"T",
"]",
",",
"query",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"T",
":",
"LOGGER",
".",
"info",
"(",
"\"Getting SourceHandlers for \\\"{type}\\\"\"",
".",
"format",
"(",
"type",... | Gets a query from the data pipeline.
1) Extracts the query the sequence of data sources.
2) Inserts the result into the data sinks (if appropriate).
3) Transforms the result into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested.
context: The context for the extraction (mutable).
Returns:
The requested object. | [
"Gets",
"a",
"query",
"from",
"the",
"data",
"pipeline",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L424-L463 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | DataPipeline.get_many | def get_many(self, type: Type[T], query: Mapping[str, Any], streaming: bool = False) -> Iterable[T]:
"""Gets a query from the data pipeline, which contains a request for multiple objects.
1) Extracts the query the sequence of data sources.
2) Inserts the results into the data sinks (if appropriate).
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True.
"""
LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if handlers is None:
raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
for handler in handlers:
try:
return handler.get_many(query, context, streaming)
except NotFoundError:
pass
raise NotFoundError("No source returned a query result!") | python | def get_many(self, type: Type[T], query: Mapping[str, Any], streaming: bool = False) -> Iterable[T]:
"""Gets a query from the data pipeline, which contains a request for multiple objects.
1) Extracts the query the sequence of data sources.
2) Inserts the results into the data sinks (if appropriate).
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True.
"""
LOGGER.info("Getting SourceHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._get_types[type]
except KeyError:
try:
LOGGER.info("Building new SourceHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._get_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
if handlers is None:
raise NoConversionError("No source can provide \"{type}\"".format(type=type.__name__))
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Querying SourceHandlers for \"{type}\"".format(type=type.__name__))
for handler in handlers:
try:
return handler.get_many(query, context, streaming)
except NotFoundError:
pass
raise NotFoundError("No source returned a query result!") | [
"def",
"get_many",
"(",
"self",
",",
"type",
":",
"Type",
"[",
"T",
"]",
",",
"query",
":",
"Mapping",
"[",
"str",
",",
"Any",
"]",
",",
"streaming",
":",
"bool",
"=",
"False",
")",
"->",
"Iterable",
"[",
"T",
"]",
":",
"LOGGER",
".",
"info",
"... | Gets a query from the data pipeline, which contains a request for multiple objects.
1) Extracts the query the sequence of data sources.
2) Inserts the results into the data sinks (if appropriate).
3) Transforms the results into the requested type if it wasn't already.
4) Inserts the transformed result into any data sinks.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
streaming: Specifies whether the results should be returned as a generator (default False).
Returns:
The requested objects or a generator of the objects if streaming is True. | [
"Gets",
"a",
"query",
"from",
"the",
"data",
"pipeline",
"which",
"contains",
"a",
"request",
"for",
"multiple",
"objects",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L465-L505 |
meraki-analytics/datapipelines-python | datapipelines/pipelines.py | DataPipeline.put | def put(self, type: Type[T], item: T) -> None:
"""Puts an objects into the data pipeline. The object may be transformed into a new type for insertion if necessary.
Args:
item: The object to be inserted into the data pipeline.
"""
LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._put_types[type]
except KeyError:
try:
LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._put_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Sending item \"{item}\" to SourceHandlers".format(item=item))
if handlers is not None:
for handler in handlers:
handler.put(item, context) | python | def put(self, type: Type[T], item: T) -> None:
"""Puts an objects into the data pipeline. The object may be transformed into a new type for insertion if necessary.
Args:
item: The object to be inserted into the data pipeline.
"""
LOGGER.info("Getting SinkHandlers for \"{type}\"".format(type=type.__name__))
try:
handlers = self._put_types[type]
except KeyError:
try:
LOGGER.info("Building new SinkHandlers for \"{type}\"".format(type=type.__name__))
handlers = self._put_handlers(type)
except NoConversionError:
handlers = None
self._get_types[type] = handlers
LOGGER.info("Creating new PipelineContext")
context = self._new_context()
LOGGER.info("Sending item \"{item}\" to SourceHandlers".format(item=item))
if handlers is not None:
for handler in handlers:
handler.put(item, context) | [
"def",
"put",
"(",
"self",
",",
"type",
":",
"Type",
"[",
"T",
"]",
",",
"item",
":",
"T",
")",
"->",
"None",
":",
"LOGGER",
".",
"info",
"(",
"\"Getting SinkHandlers for \\\"{type}\\\"\"",
".",
"format",
"(",
"type",
"=",
"type",
".",
"__name__",
")",... | Puts an objects into the data pipeline. The object may be transformed into a new type for insertion if necessary.
Args:
item: The object to be inserted into the data pipeline. | [
"Puts",
"an",
"objects",
"into",
"the",
"data",
"pipeline",
".",
"The",
"object",
"may",
"be",
"transformed",
"into",
"a",
"new",
"type",
"for",
"insertion",
"if",
"necessary",
"."
] | train | https://github.com/meraki-analytics/datapipelines-python/blob/dc38d7976a012039a15d67cd8b07ae77eb1e4a4c/datapipelines/pipelines.py#L507-L530 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.