code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def get_es_requirements(es_version):
'''Get the requirements string for elasticsearch-py library
Returns a suitable requirements string for the elasticsearch-py library
according to the elasticsearch version to be supported (es_version)'''
# accepts version range in the form `2.x`
es_version = es_... | Get the requirements string for elasticsearch-py library
Returns a suitable requirements string for the elasticsearch-py library
according to the elasticsearch version to be supported (es_version) |
def help_center_article_subscriptions(self, article_id, locale=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-article-subscriptions"
api_path = "/api/v2/help_center/articles/{article_id}/subscriptions.json"
api_path = api_path.format(article_id=artic... | https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#list-article-subscriptions |
def on_create_view(self):
    """Build and return the view, enabling the declaration first.

    If the declaration's ``condition`` flag was previously off, it is
    switched on before the view is created, and the ``ready`` future is
    resolved with ``True`` once the view exists.
    """
    declaration = self.declaration
    was_inactive = not declaration.condition
    if was_inactive:
        declaration.condition = True
    view = self.get_view()
    if was_inactive:
        self.ready.set_result(True)
    return view
def required_fields(self):
    """Return only the required normal fields (magic fields such as _id are excluded)."""
    return {name: field
            for name, field in self.normal_fields.items()
            if field.required}
def updateVocalAuto(self, component, files):
"""Updates the auto-parameter with selected *component* to have
*files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected
component (the one given). If length of files < 1, removes the
auto-p... | Updates the auto-parameter with selected *component* to have
*files*. Adds auto-parameter if not already present. The auto-parameter is expected to have only one selected
component (the one given). If length of files < 1, removes the
auto-parameter from the model.
:param component: Comp... |
def parse_annotation(code):
"""Parse an annotation string.
Return an AST Expr node.
code: annotation string (excluding '@')
"""
module = ast.parse(code)
assert type(module) is ast.Module, 'internal error #1'
assert len(module.body) == 1, 'Annotation contains more than one expression'
ass... | Parse an annotation string.
Return an AST Expr node.
code: annotation string (excluding '@') |
def value_validate(self, value):
"""
Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this.
"""
if not isinst... | Converts the input single value into the expected Python data type,
raising django.core.exceptions.ValidationError if the data can't be
converted. Returns the converted value. Subclasses should override
this. |
def _write(
df,
filename=None,
schema='newick',
taxon_col='uid',
taxon_annotations=[],
node_col='uid',
node_annotations=[],
branch_lengths=True,
**kwargs
):
"""Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
Dat... | Write a phylopandas tree DataFrame to various formats.
Parameters
----------
df : DataFrame
DataFrame containing tree data.
filename : str
filepath to write out tree. If None, will return string.
schema : str
tree format to write out.
taxon_col : str (optional)
... |
def remove(self, items, working_tree=False, **kwargs):
"""Remove the given items from the index and optionally from
the working tree as well.
:param items:
Multiple types of items are supported which may be be freely mixed.
- path string
Remove the given... | Remove the given items from the index and optionally from
the working tree as well.
:param items:
Multiple types of items are supported which may be be freely mixed.
- path string
Remove the given path at all stages. If it is a directory, you must
... |
def _compute_total_chunks(self, chunk_size):
# type: (Descriptor, int) -> int
"""Compute total number of chunks for entity
:param Descriptor self: this
:param int chunk_size: chunk size
:rtype: int
:return: num chunks
"""
try:
if self._src_bloc... | Compute total number of chunks for entity
:param Descriptor self: this
:param int chunk_size: chunk size
:rtype: int
:return: num chunks |
def persist(self):
    """Persist every subproject of this project.

    This is a no-op when the application runs in dry mode
    (``self.app.dry`` is truthy), so dry runs never touch storage.
    """
    if self.app.dry:
        return
    # Delegate persistence to each subproject in turn.
    for subproject in self.subprojects.values():
        subproject.persist()
def locate(cls):
"""Locates the active PEX bootstrap.
:rtype: :class:`Bootstrap`
"""
if cls._INSTANCE is None:
bootstrap_path = __file__
module_import_path = __name__.split('.')
# For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import
# path... | Locates the active PEX bootstrap.
:rtype: :class:`Bootstrap` |
def auto_track_url(track):
"""
Automatically sets the bigDataUrl for `track`.
Requirements:
* the track must be fully connected, such that its root is a Hub object
* the root Hub object must have the Hub.url attribute set
* the track must have the `source` attribute set
"""
... | Automatically sets the bigDataUrl for `track`.
Requirements:
* the track must be fully connected, such that its root is a Hub object
* the root Hub object must have the Hub.url attribute set
* the track must have the `source` attribute set |
def btc_tx_witness_strip( tx_serialized ):
"""
Strip the witness information from a serialized transaction
"""
if not btc_tx_is_segwit(tx_serialized):
# already strippped
return tx_serialized
tx = btc_tx_deserialize(tx_serialized)
for inp in tx['ins']:
del inp['witn... | Strip the witness information from a serialized transaction |
def delete(self, *args, **kwargs):
"""
This method implements retries for object deletion.
"""
count = 0
max_retries=3
while True:
try:
return super(BaseModel, self).delete(*args, **kwargs)
except django.db.utils.OperationalError:
... | This method implements retries for object deletion. |
def unquoted(self):
"""
Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword.
"""
key = s... | Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword. |
def mutate(self, p_i, func_set, term_set):
    """Apply a point mutation (addition/removal) to program *p_i*
    using the supplied function and terminal sets.
    """
    self.point_mutate(p_i, func_set, term_set)
def createGroups(self, configFiles, dateTimeFormat=None):
"""Parses a JSON configuration file to create groups.
Args:
configFiles (list): A list of JSON files on disk containing
configuration data for creating groups.
dateTimeFormat (str): A valid date formatting... | Parses a JSON configuration file to create groups.
Args:
configFiles (list): A list of JSON files on disk containing
configuration data for creating groups.
dateTimeFormat (str): A valid date formatting directive, as understood
by :py:meth:`datetime.datet... |
def get_char_weights(doc_weighted_spans, preserve_density=None):
# type: (DocWeightedSpans, Optional[bool]) -> np.ndarray
""" Return character weights for a text document with highlighted features.
If preserve_density is True, then color for longer fragments will be
less intensive than for shorter fragm... | Return character weights for a text document with highlighted features.
If preserve_density is True, then color for longer fragments will be
less intensive than for shorter fragments, so that "sum" of intensities
will correspond to feature weight.
If preserve_density is None, then it's value is taken fr... |
def interactive(plugin):
'''A run mode for the CLI that runs the plugin in a loop based on user
input.
'''
items = [item for item in once(plugin) if not item.get_played()]
parent_stack = [] # Keep track of parents so we can have a '..' option
selected_item = get_user_choice(items)
while se... | A run mode for the CLI that runs the plugin in a loop based on user
input. |
def get_kubernetes_configuration(self, mount_point='kubernetes'):
"""GET /auth/<mount_point>/config
:param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes".
:type mount_point: str.
:return: Parsed JSON response from the config GET req... | GET /auth/<mount_point>/config
:param mount_point: The "path" the k8s auth backend was mounted on. Vault currently defaults to "kubernetes".
:type mount_point: str.
:return: Parsed JSON response from the config GET request
:rtype: dict. |
def setQuickColor(self, color):
    """
    Set the palette's quick color to the given color by building a
    fresh color set around it.

    :param color | <QColor>
    """
    quick_set = XPaletteColorSet()
    quick_set.setPalette(QPalette(color))
    self.setColorSet(quick_set)
def batch_write_input(structures, vasp_input_set=MPRelaxSet, output_dir=".",
make_dir_if_not_present=True, subfolder=None,
sanitize=False, include_cif=False, **kwargs):
"""
Batch write vasp input for a sequence of structures to
output_dir, following the format out... | Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet class that creates
vasp input files from structures. Not... |
def parse_config(self, device=None, profile=None, native=None, attrs=None):
"""
Parse native configuration and load it into the corresponding models. Only models
that have been added to the root object will be parsed.
If ``native`` is passed to the method that's what we will parse, othe... | Parse native configuration and load it into the corresponding models. Only models
that have been added to the root object will be parsed.
If ``native`` is passed to the method that's what we will parse, otherwise, we will use the
``device`` to retrieve it.
Args:
device (Net... |
def read_node(self, name, **kwargs): # noqa: E501
"""read_node # noqa: E501
read the specified Node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_node(name, async_re... | read_node # noqa: E501
read the specified Node # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_node(name, async_req=True)
>>> result = thread.get()
:param asyn... |
def list_database_names(self, session=None):
"""Get a list of the names of all databases on the connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionadded:: 3.6
"""
return [doc["name"]
... | Get a list of the names of all databases on the connected server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionadded:: 3.6 |
def emitRecordMiddleClicked(self, item):
"""
Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem>
"""
# emit that the record has been double clicked
if isinstance(... | Emits the record clicked signal for the given item, provided the
signals are not currently blocked.
:param item | <QTreeWidgetItem> |
def toggle_autojump():
    """Toggle Autojump on/off by creating or removing its marker file."""
    if autojump_enabled():
        # Currently enabled: drop the marker file to disable.
        os.remove(AUTOJUMP_FILE)
    else:
        # Currently disabled: write the marker file to enable.
        with open(AUTOJUMP_FILE, 'w+') as ajfile:
            ajfile.write("enabled")
def all(self, audience=None, page=None, per_page=None, include_totals=False, client_id=None):
"""Retrieves all client grants.
Args:
audience (str, optional): URL encoded audience of a Resource Server
to filter
page (int, optional): The result's page number (zero... | Retrieves all client grants.
Args:
audience (str, optional): URL encoded audience of a Resource Server
to filter
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_tota... |
def visit_attribute(self, node):
"""check that the accessed attribute exists
to avoid too much false positives for now, we'll consider the code as
correct if a single of the inferred nodes has the accessed attribute.
function/method, super call and metaclasses are ignored
"""
... | check that the accessed attribute exists
to avoid too much false positives for now, we'll consider the code as
correct if a single of the inferred nodes has the accessed attribute.
function/method, super call and metaclasses are ignored |
def add_field(self, field_instance_or_string):
"""
Appends a field, can be a :class:`~es_fluent.fields.Field` or string.
"""
if isinstance(field_instance_or_string, basestring):
field_instance = Field(field_instance_or_string)
elif isinstance(field_instance_or_string,... | Appends a field, can be a :class:`~es_fluent.fields.Field` or string. |
def camelize(word):
    """Convert a word from lower_with_underscores to CamelCase.

    Runs of characters other than letters, digits and ':' act as word
    separators and are removed; each remaining fragment has its first
    character upper-cased (the rest of the fragment is left untouched).

    Args:
        word: The string to convert.
    Returns:
        The modified string.
    """
    # NOTE: the original pattern '[^A-Z^a-z^0-9^:]+' accidentally treated
    # '^' as an allowed character (inside a class, '^' past the first
    # position is literal). The intent is clearly "non-alphanumeric,
    # non-colon", which this pattern expresses.
    fragments = re.sub('[^A-Za-z0-9:]+', ' ', word).split(' ')
    # Skip empty fragments produced by leading/trailing separators —
    # the original raised IndexError on inputs such as '_foo'.
    return ''.join(w[0].upper() + w[1:] for w in fragments if w)
def _is_instance(type_to_check, element, condition="any", deep=False):
"""
-----
Brief
-----
Function that verifies when "all" or "any" elements of the list "element" have the type
specified in "type_to_check" input.
-----------
Description
-----------
In some biosignalsnotebook... | -----
Brief
-----
Function that verifies when "all" or "any" elements of the list "element" have the type
specified in "type_to_check" input.
-----------
Description
-----------
In some biosignalsnotebooks functions their implementation is extremely dependent on a specific
criterion... |
def lookup_field_class(self, field, obj=None, default=None):
"""
Looks up any additional class we should include when rendering this field
"""
css = ""
# is there a class specified for this field
if field in self.field_config and 'class' in self.field_config[field]:
... | Looks up any additional class we should include when rendering this field |
def validateOneElement(self, doc, elem):
"""Try to validate a single element and it's attributes,
basically it does the following checks as described by the
XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
Required Attribute ] Then call xmlValidateOneAttribute()
fo... | Try to validate a single element and it's attributes,
basically it does the following checks as described by the
XML-1.0 recommendation: - [ VC: Element Valid ] - [ VC:
Required Attribute ] Then call xmlValidateOneAttribute()
for each attribute present. The ID/IDREF checkings ar... |
def set_tempo(self, bpm):
    """Store *bpm* and append the matching set-tempo MIDI event to the track data."""
    self.bpm = bpm
    tempo_event = self.set_tempo_event(self.bpm)
    self.track_data += tempo_event
def check_pre_approval_notification(self, code):
    """Fetch the pre-approval notification identified by *code* and wrap
    the HTTP response in a response object.
    """
    notification_url = self.config.PRE_APPROVAL_NOTIFICATION_URL % code
    response = self.get(url=notification_url)
    return PagSeguroPreApprovalNotificationResponse(
        response.content, self.config)
def _CheckStorageFile(self, storage_file_path): # pylint: disable=arguments-differ
"""Checks if the storage file path is valid.
Args:
storage_file_path (str): path of the storage file.
Raises:
BadConfigOption: if the storage file path is invalid.
"""
if os.path.exists(storage_file_pat... | Checks if the storage file path is valid.
Args:
storage_file_path (str): path of the storage file.
Raises:
BadConfigOption: if the storage file path is invalid. |
def onPublish(self, topic, payload, qos, dup, retain, msgId):
    '''
    Callback invoked for each message received from a publisher.
    Currently it only logs the payload; topic/qos/dup/retain/msgId
    are accepted but unused.
    '''
    log.debug("msg={payload}", payload=payload)
def parse_stdout(self, filelike):
"""Parse the formulae from the content written by the script to standard out.
:param filelike: filelike object of stdout
:returns: an exit code in case of an error, None otherwise
"""
from aiida.orm import Dict
formulae = {}
con... | Parse the formulae from the content written by the script to standard out.
:param filelike: filelike object of stdout
:returns: an exit code in case of an error, None otherwise |
def audit_1_15(self):
    """1.15 Ensure IAM policies are attached only to groups or roles (Scored)"""
    for policy in resources.iam.policies.all():
        attached_users = list(policy.attached_users.all())
        self.assertEqual(len(attached_users), 0,
                         "{} has users attached to it".format(policy))
def _integrate_plugins():
    """Integrate plugins to the context"""
    import sys
    from airflow.plugins_manager import macros_modules
    for module in macros_modules:
        # Register under the full dotted name so imports resolve.
        sys.modules[module.__name__] = module
        # Expose the module under its short name in this namespace.
        # NOTE(review): relies on the `_name` attribute that
        # airflow.plugins_manager sets on plugin modules — confirm.
        globals()[module._name] = module
def expect_file_line_regex_match_count_to_be_between(self,
regex,
expected_min_count=0,
expected_max_count=None,
... | Expect the number of times a regular expression appears on each line of
a file to be between a maximum and minimum value.
Args:
regex: \
A string that can be compiled as valid regular expression to match
expected_min_count (None or nonnegative integer): \
... |
def addGenotype(
self, genotype_id, genotype_label,
genotype_type=None,
genotype_description=None
):
"""
If a genotype_type is not supplied,
we will default to 'intrinsic_genotype'
:param genotype_id:
:param genotype_label:
:param g... | If a genotype_type is not supplied,
we will default to 'intrinsic_genotype'
:param genotype_id:
:param genotype_label:
:param genotype_type:
:param genotype_description:
:return: |
def viewinfo(self, postinfo):
'''
View the info
'''
out_json = {
'uid': postinfo.uid,
'time_update': postinfo.time_update,
'title': postinfo.title,
'cnt_html': tornado.escape.xhtml_unescape(postinfo.cnt_html),
}
self.write(... | View the info |
def get_random(self):
"""
Returns a random statement from the database.
"""
import random
Statement = self.get_model('statement')
session = self.Session()
count = self.count()
if count < 1:
raise self.EmptyDatabaseException()
random_... | Returns a random statement from the database. |
def calc_el_lz_v1(self):
"""Calculate lake evaporation.
Required control parameters:
|NmbZones|
|ZoneType|
|TTIce|
Required derived parameters:
|RelZoneArea|
Required fluxes sequences:
|TC|
|EPC|
Updated state sequence:
|LZ|
Basic equa... | Calculate lake evaporation.
Required control parameters:
|NmbZones|
|ZoneType|
|TTIce|
Required derived parameters:
|RelZoneArea|
Required fluxes sequences:
|TC|
|EPC|
Updated state sequence:
|LZ|
Basic equations:
:math:`\\frac{dLZ... |
def _grabix_index(data):
"""Create grabix index of bgzip input file.
grabix does not allow specification of output file, so symlink the original
file into a transactional directory.
"""
in_file = data["bgzip_file"]
config = data["config"]
grabix = config_utils.get_program("grabix", config)
... | Create grabix index of bgzip input file.
grabix does not allow specification of output file, so symlink the original
file into a transactional directory. |
def from_proto(cls, repeated_split_infos):
"""Returns a new SplitDict initialized from the `repeated_split_infos`."""
split_dict = cls()
for split_info_proto in repeated_split_infos:
split_info = SplitInfo()
split_info.CopyFrom(split_info_proto)
split_dict.add(split_info)
return split_... | Returns a new SplitDict initialized from the `repeated_split_infos`. |
def mark_clean(self, entity):
"""
Marks the given entity as CLEAN.
This is done when an entity is loaded fresh from the repository or
after a commit.
"""
state = EntityState.get_state(entity)
state.status = ENTITY_STATUS.CLEAN
state.is_persisted = True | Marks the given entity as CLEAN.
This is done when an entity is loaded fresh from the repository or
after a commit. |
def get_smtp_mail(self):
"""
Returns the SMTP formatted email, as it may be passed to sendmail.
:rtype: string
:return: The SMTP formatted mail.
"""
header = self.get_smtp_header()
body = self.get_body().replace('\n', '\r\n')
return header + '\r\n' + bod... | Returns the SMTP formatted email, as it may be passed to sendmail.
:rtype: string
:return: The SMTP formatted mail. |
def _all_tag(self):
"""Return the all tag of the Glances/Docker configuration file.
# By default, Glances only display running containers
# Set the following key to True to display all containers
all=True
"""
all_tag = self.get_conf_value('all')
if len(all_tag) =... | Return the all tag of the Glances/Docker configuration file.
# By default, Glances only display running containers
# Set the following key to True to display all containers
all=True |
def nifti_copy(filename,prefix=None,gzip=True):
''' creates a ``.nii`` copy of the given dataset and returns the filename as a string'''
# I know, my argument ``prefix`` clobbers the global method... but it makes my arguments look nice and clean
if prefix==None:
prefix = filename
nifti_filename ... | creates a ``.nii`` copy of the given dataset and returns the filename as a string |
def memory_write32(self, addr, data, zone=None):
"""Writes words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of words to write
zone (str): optional memory zone to access
... | Writes words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of words to write
zone (str): optional memory zone to access
Returns:
Number of words written to target.
... |
def clean(self):
    """Form clean hook: reject submissions from clients without cookies."""
    request = self.request
    if request and not request.session.test_cookie_worked():
        raise forms.ValidationError("Cookies must be enabled.")
    return self.cleaned_data
def count(self):
"""
Count the number of distinct results of the wrapped query.
@return: an L{int} representing the number of distinct results.
"""
if not self.query.store.autocommit:
self.query.store.checkpoint()
target = ', '.join([
tableClass.s... | Count the number of distinct results of the wrapped query.
@return: an L{int} representing the number of distinct results. |
def GetBatchJobHelper(self, version=sorted(_SERVICE_MAP.keys())[-1],
server=None):
"""Returns a BatchJobHelper to work with the BatchJobService.
This is a convenience method. It is functionally identical to calling
BatchJobHelper(adwords_client, version).
Args:
[optio... | Returns a BatchJobHelper to work with the BatchJobService.
This is a convenience method. It is functionally identical to calling
BatchJobHelper(adwords_client, version).
Args:
[optional]
version: A string identifying the AdWords version to connect to. This
defaults to what is cur... |
def set_palette_name(self, palette_name):
"""If the given palette matches an existing one, shows it in the
combobox
"""
combo = self.get_widget('palette_name')
found = False
log.debug("wanting palette: %r", palette_name)
for i in combo.get_model():
if ... | If the given palette matches an existing one, shows it in the
combobox |
def _domain_differs(self, href):
    """Return True when *href* points to a different domain than self.url.

    Links whose domain cannot be determined are treated as same-domain.
    """
    target_domain = utils.get_domain(href)
    if not target_domain:
        return False
    return target_domain != utils.get_domain(self.url)
def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor,
gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5):
""" Vel factory function """
return TrpoPolicyGradient(
max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy... | Vel factory function |
def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs):
"""A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object cont... | A source that fetches the content of a given website as DOM nodes or a
string. Loopable.
context : pipe2py.Context object
_INPUT : pipeforever pipe or an iterable of items or fields
conf : dict
URL -- url object contain the URL to download
xpath -- xpath to extract
html5 -- use htm... |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'location') and self.location is not None:
_dict['location'] = self.location._to_dict()
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self... | Return a json dictionary representing this model. |
def get_parent_aligned_annotation(self, ref_id):
"""" Give the aligment annotation that a reference annotation belongs to directly, or indirectly through other
reference annotations.
:param str ref_id: Id of a reference annotation.
:raises KeyError: If no annotation exists with the id or... | Give the aligment annotation that a reference annotation belongs to directly, or indirectly through other
reference annotations.
:param str ref_id: Id of a reference annotation.
:raises KeyError: If no annotation exists with the id or if it belongs to an alignment annotation.
:returns: T... |
def add_jump(self, name, min, max, num, warp=None, var_type=float):
""" An integer/float-valued enumerable with `num` items, bounded
between [`min`, `max`]. Note that the right endpoint of the interval
includes `max`. This is a wrapper around the add_enum. `jump` can be
a float or int.
... | An integer/float-valued enumerable with `num` items, bounded
between [`min`, `max`]. Note that the right endpoint of the interval
includes `max`. This is a wrapper around the add_enum. `jump` can be
a float or int. |
def connect(config_dir=None, optional_config_files=None, cron_cfg="cron"):
""" Initialize everything for interactive use.
Returns a ready-to-use RtorrentEngine object.
"""
from pyrocore.scripts.base import ScriptBase
from pyrocore.util import load_config
ScriptBase.setup(cron_cfg=cron_cfg)... | Initialize everything for interactive use.
Returns a ready-to-use RtorrentEngine object. |
def open_fileswitcher_dlg(self):
"""Open file list management dialog box"""
if not self.tabs.count():
return
if self.fileswitcher_dlg is not None and \
self.fileswitcher_dlg.is_visible:
self.fileswitcher_dlg.hide()
self.fileswitcher_dlg.is_vis... | Open file list management dialog box |
def modify_order(self, modify_order_op, order_id, qty, price, adjust_limit=0, trd_env=TrdEnv.REAL, acc_id=0, acc_index=0):
"""
详细说明见基类接口说明,但有以下不同:不支持改单。 可撤单。删除订单是本地操作。
:param modify_order_op:
:param order_id:
:param qty:
:param price:
:param adjust_limit:
... | 详细说明见基类接口说明,但有以下不同:不支持改单。 可撤单。删除订单是本地操作。
:param modify_order_op:
:param order_id:
:param qty:
:param price:
:param adjust_limit:
:param trd_env:
:param acc_id:
:return: |
def my_main(context):
    """Application entry point: optionally dump the context, then return 0."""
    print('starting MyApp...')
    debug_enabled = context['debug']
    if debug_enabled:
        print('Context:')
        for key in context:
            print('Key: {}\nValue: {}'.format(key, context[key]))
    print('Done!')
    return 0
def generate_random_string(size=6, chars=string.ascii_uppercase + string.digits):
"""Generate random string.
:param size: Length of the returned string. Default is 6.
:param chars: List of the usable characters. Default is string.ascii_uppercase + string.digits.
:type size: int
:type chars: str
... | Generate random string.
:param size: Length of the returned string. Default is 6.
:param chars: List of the usable characters. Default is string.ascii_uppercase + string.digits.
:type size: int
:type chars: str
:return: The random string.
:rtype: str |
def pbkdf2(seed: str or bytes, dk_len: int) -> bytes:
"""
Derive one key from a seed.
:param seed: the secret pass phrase to generate the keys from.
:param dk_len: the length in bytes of every derived key.
:return:
"""
key = b''
index = 1
bytes_seed = str_to_bytes(seed)
while le... | Derive one key from a seed.
:param seed: the secret pass phrase to generate the keys from.
:param dk_len: the length in bytes of every derived key.
:return: |
def build_url(base_url, partial_url):
"""
Makes sure the URL is built properly.
>>> urllib.parse.urljoin('https://test.com/1/', '2/3')
https://test.com/1/2/3
>>> urllib.parse.urljoin('https://test.com/1/', '/2/3')
https://test.com/2/3
>>> urllib.parse.urljoin('ht... | Makes sure the URL is built properly.
>>> urllib.parse.urljoin('https://test.com/1/', '2/3')
https://test.com/1/2/3
>>> urllib.parse.urljoin('https://test.com/1/', '/2/3')
https://test.com/2/3
>>> urllib.parse.urljoin('https://test.com/1', '2/3')
https://test.com/2/3' |
def input_validate_yubikey_secret(data, name='data'):
    """ Input validation for YHSM_YubiKeySecret or string. """
    if isinstance(data, pyhsm.aead_cmd.YHSM_YubiKeySecret):
        # Pack the secret into its raw byte representation first.
        return input_validate_str(data.pack(), name)
    return input_validate_str(data, name)
def _clear_surface(self, surface, rect=None):
""" Clear the buffer, taking in account colorkey or alpha
:return:
"""
clear_color = self._rgb_clear_color if self._clear_color is None else self._clear_color
surface.fill(clear_color, rect) | Clear the buffer, taking in account colorkey or alpha
:return: |
def random_tickers(
length, n_tickers, endswith=None, letters=None, slicer=itertools.islice
):
"""Generate a length-n_tickers list of unique random ticker symbols.
Parameters
----------
length : int
The length of each ticker string.
n_tickers : int
Number of tickers to... | Generate a length-n_tickers list of unique random ticker symbols.
Parameters
----------
length : int
The length of each ticker string.
n_tickers : int
Number of tickers to generate.
endswith : str, default None
Specify the ending element(s) of each ticker (for examp... |
def radius_server_host_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElem... | Auto Generated Code |
def _Open(self, path_spec, mode='rb'):
"""Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to op... | Opens the file system defined by path specification.
Args:
path_spec (PathSpec): path specification.
mode (Optional[str]): file access mode. The default is 'rb' which
represents read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the ... |
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
return self._accessor.open(self, flags, mode) | Open the file pointed by this path and return a file descriptor,
as os.open() does. |
def is_cython_function(fn):
"""Checks if a function is compiled w/Cython."""
if hasattr(fn, "__func__"):
fn = fn.__func__ # Class method, static method
name = type(fn).__name__
return (
name == "method_descriptor"
or name == "cython_function_or_method"
or name == "builti... | Checks if a function is compiled w/Cython. |
def duration(self):
"""Get or set the duration of the event.
| Will return a timedelta object.
| May be set to anything that timedelta() understands.
| May be set with a dict ({"days":2, "hours":6}).
| If set to a non null value, removes any already
existing end ... | Get or set the duration of the event.
| Will return a timedelta object.
| May be set to anything that timedelta() understands.
| May be set with a dict ({"days":2, "hours":6}).
| If set to a non null value, removes any already
existing end time. |
def get_annotation_values(graph, annotation: str) -> Set[str]:
"""Get all values for the given annotation.
:param pybel.BELGraph graph: A BEL graph
:param annotation: The annotation to summarize
:return: A set of all annotation values
"""
return set(iter_annotation_values(graph, annotation)) | Get all values for the given annotation.
:param pybel.BELGraph graph: A BEL graph
:param annotation: The annotation to summarize
:return: A set of all annotation values |
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix them where possible.
'''
super(MapNotifyMessage, self).sanitize()
# The first bit after the Type field in a Map-Notify message is
# allocated as the "I" bit. I bit i... | Check if the current settings conform to the LISP specifications and
fix them where possible. |
def _fast_hit_windows(ref, est, window):
'''Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than th... | Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.wher... |
def updateHistory(self, activeCells, forceOutput=False):
"""
Computes one cycle of the Union Pooler algorithm. Return the union SDR
Parameters:
----------------------------
@param activeCells: A list that stores indices of active cells
@param forceOutput: if True, a union will be created withou... | Computes one cycle of the Union Pooler algorithm. Return the union SDR
Parameters:
----------------------------
@param activeCells: A list that stores indices of active cells
@param forceOutput: if True, a union will be created without regard to
minHistory |
def resurrect(self, force=False):
"""
Attempt to resurrect a connection from the dead pool. It will try to
locate one (not all) eligible (it's timeout is over) connection to
return to th live pool.
:arg force: resurrect a connection even if there is none eligible (used
... | Attempt to resurrect a connection from the dead pool. It will try to
locate one (not all) eligible (it's timeout is over) connection to
return to th live pool.
:arg force: resurrect a connection even if there is none eligible (used
when we have no live connections) |
def all(cls):
'''Return all tags that are currently applied to any dataset.
:returns: a list of all tags that are currently applied to any dataset
:rtype: list of ckan.model.tag.Tag objects
'''
# if vocab_id_or_name:
# vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
# if vocab is None:
# # The use... | Return all tags that are currently applied to any dataset.
:returns: a list of all tags that are currently applied to any dataset
:rtype: list of ckan.model.tag.Tag objects |
def set_instrument(self, channel, instr, bank=1):
"""Add a program change and bank select event to the track_data."""
self.track_data += self.select_bank(channel, bank)
self.track_data += self.program_change_event(channel, instr) | Add a program change and bank select event to the track_data. |
def protected_resource_view(scopes=None):
"""
View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7
"""
if scopes is None:
scopes = []
def wrapper(view):
def view_wrapper(r... | View decorator. The client accesses protected resources by presenting the
access token to the resource server.
https://tools.ietf.org/html/rfc6749#section-7 |
def seek(self, offset, whence=SEEK_SET):
"""Seek pointer in lob data buffer to requested position.
Might trigger further loading of data from the database if the pointer is beyond currently read data.
"""
# A nice trick is to (ab)use BytesIO.seek() to go to the desired position for easie... | Seek pointer in lob data buffer to requested position.
Might trigger further loading of data from the database if the pointer is beyond currently read data. |
def run_failure_step_group(pipeline, context):
"""Run the on_failure step group if it exists.
This function will swallow all errors, to prevent obfuscating the error
condition that got it here to begin with.
"""
logger.debug("starting")
try:
assert pipeline
# if no on_failure ex... | Run the on_failure step group if it exists.
This function will swallow all errors, to prevent obfuscating the error
condition that got it here to begin with. |
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session) | Provision vm right after clone/copy |
def run(self, **kwargs):
"""
Drive servo to the position set in the `position_sp` attribute.
"""
for key in kwargs:
setattr(self, key, kwargs[key])
self.command = self.COMMAND_RUN | Drive servo to the position set in the `position_sp` attribute. |
def getAll(self):
'''Return a dictionary with all variables'''
if not bool(len(self.ATTRIBUTES)):
self.load_attributes()
return eval(str(self.ATTRIBUTES)) | Return a dictionary with all variables |
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
... | Tokenizes a text file. |
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._UserLight is not None:
return False
if self._UserPerson is not None:
return False
if self._UserCompany is not None:
return False
if self._UserApiKey is not None:
... | :rtype: bool |
def AgregarTambo(self, nro_tambo_interno, nro_renspa,
fecha_venc_cert_tuberculosis, fecha_venc_cert_brucelosis,
nro_tambo_provincial=None, **kwargs):
"Agrego los datos del productor a la liq."
tambo = {'nroTamboInterno': nro_tambo_interno,
'nroT... | Agrego los datos del productor a la liq. |
def open_zip(path_or_file, *args, **kwargs):
"""A with-context for zip files.
Passes through *args and **kwargs to zipfile.ZipFile.
:API: public
:param path_or_file: Full path to zip file.
:param args: Any extra args accepted by `zipfile.ZipFile`.
:param kwargs: Any extra keyword args accepted by `zipfil... | A with-context for zip files.
Passes through *args and **kwargs to zipfile.ZipFile.
:API: public
:param path_or_file: Full path to zip file.
:param args: Any extra args accepted by `zipfile.ZipFile`.
:param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
:raises: `InvalidZipPath` if path_or... |
def modutf7_encode(data: str) -> bytes:
"""Encode the string using modified UTF-7.
Args:
data: The input string to encode.
"""
ret = bytearray()
is_usascii = True
encode_start = None
for i, symbol in enumerate(data):
charpoint = ord(symbol)
if is_usascii:
... | Encode the string using modified UTF-7.
Args:
data: The input string to encode. |
def GetPixelColorsHorizontally(self, x: int, y: int, count: int) -> ctypes.Array:
"""
x: int.
y: int.
count: int.
Return `ctypes.Array`, an iterable array of int values in argb form point x,y horizontally.
"""
arrayType = ctypes.c_uint32 * count
values = a... | x: int.
y: int.
count: int.
Return `ctypes.Array`, an iterable array of int values in argb form point x,y horizontally. |
def ignore_whitespace_text_nodes(cls, wrapped_node):
"""
Find and delete any text nodes containing nothing but whitespace in
in the given node and its descendents.
This is useful for cleaning up excess low-value text nodes in a
document DOM after parsing a pretty-printed XML doc... | Find and delete any text nodes containing nothing but whitespace in
in the given node and its descendents.
This is useful for cleaning up excess low-value text nodes in a
document DOM after parsing a pretty-printed XML document. |
def fit_class1_pan_allele_models(
self,
n_models,
architecture_hyperparameters,
alleles,
peptides,
affinities,
inequalities,
models_dir_for_save=None,
verbose=1,
progress_preamble="",
prog... | Fit one or more pan-allele predictors using a single neural network
architecture.
The new predictors are saved in the Class1AffinityPredictor instance
and will be used on subsequent calls to `predict`.
Parameters
----------
n_models : int
Num... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.