content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def random_train_test_split(df, train_frac, random_seed=None):
"""
This function randomizes the dta based on the seed and then splits the dataframe into train and test sets which are changed to their list of vector representations.
Args:
df (Dataframe): The dataframe which is to be used to generate... | c3c399792bdc1026d74f1ccc7241bdb327f307d3 | 3,649,700 |
def calc_XY_pixelpositions(calibration_parameters, DATA_Q, nspots, UBmatrix=None,
B0matrix=IDENTITYMATRIX,
offset=0,
pureRotation=0,
... | 243bdb74da8aa429a0748d6d15b6b9d1f20814f3 | 3,649,701 |
def load_csv(path):
"""
Function for importing data from csv. Function uses weka implementation
of CSVLoader.
:param path: input file
:return: weka arff data
"""
args, _sufix = csv_loader_parser()
loader = Loader(classname='weka.core.converters.CSVLoader',
options=ar... | d7555cbe5e54543ca8f66e5f70d4f20b7b72b549 | 3,649,702 |
import json
def from_stream(stream, storage, form):
"""Reverses to_stream, returning data"""
if storage == "pure-plain":
assert isinstance(stream, str)
if isinstance(stream, str):
txt = stream
else:
assert not stream.startswith(MAGIC_SEAMLESS)
assert... | ee8c657162947354b3533b4fe607a11a8a6457ec | 3,649,703 |
import os
def volume100():
    """Set playback volume to 100 and re-render the remote page.

    Shells out to the ``vol`` command, then returns the rendered
    remote-control template.

    NOTE(review): ``render_template``, ``GENRES`` and ``STATION`` are
    presumably Flask imports / module globals defined elsewhere in the
    file -- confirm where this view is registered.
    """
    os.system('vol 100')
    return render_template('fmberryremote.html', GENRES=GENRES, choosenStation=STATION) | ff68129b8564480d5ed604fe59f3bd7350da359d | 3,649,704 |
def UniformExploration(j, state):
    """Fake player that ignores its index and targets every arm.

    Args:
        j: player index (unused; kept for the common player interface).
        state: game state exposing ``K``, the number of arms.

    Returns:
        list: every arm index ``[0, 1, ..., state.K - 1]``.
    """
    all_arms = np.arange(state.K)
    return [arm for arm in all_arms]
    # | 146b84ff0d9e28a8316e871b94e5bb82d67de997 | 3,649,705 |
def deduction_limits(data):
"""
Apply limits on itemized deductions
"""
# Split charitable contributions into cash and non-cash using ratio in PUF
cash = 0.82013
non_cash = 1. - cash
data['e19800'] = data['CHARITABLE'] * cash
data['e20100'] = data['CHARITABLE'] * non_cash
# Apply st... | ef1a6464e4bb0832a9398ad01e878c8aa4e6a620 | 3,649,706 |
import argparse
def get_args() -> argparse.Namespace:
"""Setup the argument parser
Returns:
argparse.Namespace:
"""
parser = argparse.ArgumentParser(
description='A template for python projects.',
add_help=False)
# Required Args
required_args = parser.add_argument_grou... | f5e213bf6e52ca5528262a2d2c285fe41dd0f269 | 3,649,707 |
def select_interacting(num_mtx, bin_mtx, labels):
"""
Auxiliary function for fit_msa_mdels.
Used for fitting the models in hard EM; selects observations with a hidden
variable value of 1.
"""
if labels is None:
# This is the case when initializing the models
return num_mtx, bin_m... | 4c4f6fd7b44d4c388f7ce9ba30e91886548c85ee | 3,649,708 |
def _GenDiscoveryDoc(service_class_names, doc_format,
output_path, hostname=None,
application_path=None):
"""Write discovery documents generated from a cloud service to file.
Args:
service_class_names: A list of fully qualified ProtoRPC service names.
doc_format: T... | a135c3805ee5b81e2f0f505b1710531e3db7d1f1 | 3,649,709 |
import dateutil.parser
def nitestr(nite,sep=''):
"""
Convert an ephem.Date object to a nite string.
Parameters:
-----------
nite : ephem.Date object
sep : Output separator
Returns:
--------
nitestr : String representation of the nite
"""
if isinstance(nite,bases... | 493f34ad484bbd350254c45e38c41a9559a2ce14 | 3,649,710 |
import os
def register():
"""Register user"""
form = RegistrationForm()
if form.validate_on_submit():
# only allow 1 user on locally hosted version
if len(User.query.all()) == 0:
# add user to database
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(us... | 453f4b718801de99bd468a52693350646b246738 | 3,649,711 |
def dump_tuple(tup):
    """Serialize a colour tuple to a comma-separated string.

    The tuple is expected to hold fg, bg and an optional attr value,
    but any iterable of str()-able items works.

    Args:
        tup: tuple of values to serialize.

    Returns:
        str: items joined with ',', e.g. ``"white,black,bold"``.
    """
    parts = [str(item) for item in tup]
    return ','.join(parts)
    # | ffa4838e2794da9d525b60f4606633f8940480bb | 3,649,712 |
def get_single_endpoint(name):
    """Build a ``Resource`` subclass named *name* for single-item GETs.

    Args:
        name: class name to assign to the generated endpoint class.

    Returns:
        A flask-restful ``Resource`` subclass whose ``get`` accepts an
        item id ``pid``.
    """
    class _Endpoint(Resource):
        def get(self, pid):
            # TODO - Add return
            pass
    # TODO - Add `get.__doc__`
    _Endpoint.__name__ = name
    return _Endpoint
    # | 923c470ed9f96478abe62823358f2f62f2506a82 | 3,649,713 |
from operator import ne
def generate_fermi_question(cfg, logratio, filter_single_number_lhs=True):
"""
Generates one Fermi question.
Args:
cfg: Expression config
logratio: Log ratio standard deviation (for RHS)
filter_single_number_lhs: Whether to exclude lhs of a single numerical... | 47ebeaa4389f7371fb56687788a696aa7e03426e | 3,649,714 |
def build_dataset(cfg, default_args=None):
"""Build a dataset from config dict.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
default_args (dict | None, optional): Default initialization arguments.
Default: None.
Returns:
Dataset: The constru... | 12b7d9121b9395668b8d260790b980260e1e4ee5 | 3,649,715 |
def Packet_genReadUserTag(errorDetectionMode, buffer, size):
    """Packet_genReadUserTag(vn::protocol::uart::ErrorDetectionMode errorDetectionMode, char * buffer, size_t size) -> size_t"""
    # SWIG-generated thin wrapper: forwards directly to the native
    # _libvncxx binding; argument and return semantics follow the C++
    # signature quoted in the docstring above.
    return _libvncxx.Packet_genReadUserTag(errorDetectionMode, buffer, size) | 111c391ee3bbef36e1d42666d608c72fb3e6c3cd | 3,649,716 |
def prefetch(tensor_dict, capacity):
"""Creates a prefetch queue for tensors.
Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
dequeue op that evaluates to a tensor_dict. This function is useful in
prefetching preprocessed tensors so that the data is readily available for
consumers.
... | 44320c6ea24d42b9add4bd3a3677b157ffcd24b8 | 3,649,717 |
def get_total_shares():
"""
Returns a list of total shares (all, attending, in person, represented) for all voting principles.
"""
total_shares = {
'heads': [0, 0, 0, 0] # [all, attending, in person, represented]
}
principle_ids = VotingPrinciple.objects.values_list('id', flat=True)
... | c70ca3be6e0b7b9df03257b81f5abec343efa37e | 3,649,718 |
def gen_check_box_idx():
""" Generate a list containing the coordinate of three
finder patterns in QR-code
Args:
None
Returns:
idx_check_box: a list containing the coordinate each pixel
of the three finder patterns
"""
idx_check_box = []
for i in range(7... | e26d9c5a3b093b52f54eb2c65b844215c40ddab8 | 3,649,719 |
import pandas
def preprocess_mc_parameters(n_rv, dict_safir_file_param, index_column='index'):
"""
NAME: preprocess_mc_parameters
AUTHOR: Ian Fu
DATE: 18 Oct 2018
DESCRIPTION:
Takes a dictionary object with each item represents a safir input variable, distributed or static, distributed
inp... | 28d04122234572b57d978fc9e993707cea45a00d | 3,649,720 |
def AdjustColour(color, percent, alpha=wx.ALPHA_OPAQUE):
""" Brighten/Darken input colour by percent and adjust alpha
channel if needed. Returns the modified color.
@param color: color object to adjust
@param percent: percent to adjust +(brighten) or -(darken)
@keyword alpha: amount to adjust alpha ... | 76ca657e632467c5db730161a34f19633add06f4 | 3,649,721 |
def getdates(startdate, utc_to_local, enddate=None):
"""
Generate '~astropy.tot_time.Time' objects corresponding to 16:00:00 local tot_time on evenings of first and last
nights of scheduling period.
Parameters
----------
startdate : str or None
Start date (eg. 'YYYY-MM-DD'). If None, de... | 1bee7b83b2b4ce3f3347544441762287a3ff1c83 | 3,649,722 |
import re
def alerts_matcher(base_name, pattern, alerter, second_order_resolution_hours):
"""
Get a list of all the metrics that would match an ALERTS pattern
:param base_name: The metric name
:param pattern: the ALERT pattern
:param alerter: the alerter name e.g. smtp, syslog, hipchat, pagerdat... | adc880f1bfc65ed189cc834a7be8502bca588b6b | 3,649,723 |
def get_dataframe_tail(n, df=None):
    """Return the last *n* rows of a DataFrame.

    Generalized: callers may now pass the frame explicitly instead of
    relying on the module-level ``dataset`` global; the old call shape
    ``get_dataframe_tail(n)`` keeps working unchanged.

    Args:
        n: number of rows to take from the end.
        df: DataFrame to slice; defaults to the module-level ``dataset``
            (presumably loaded elsewhere in this module -- TODO confirm).

    Returns:
        pandas.DataFrame: the trailing ``n`` rows.
    """
    if df is None:
        df = dataset  # backward-compatible fallback to the module global
    return df.tail(n)
    # | 03a01a9535da25d30c394a8339ebbd5bd0a80b03 | 3,649,724 |
import json
def json_formatter(result, verbose=False, indent=4, offset=0):
    """Format *result* as pretty-printed JSON text.

    Args:
        result: any JSON-serializable object.
        verbose: unused; kept for interface compatibility with callers.
        indent: indentation width handed to ``json.dumps``.
        offset: number of spaces prepended to every continuation line
            (the first line is not offset).

    Returns:
        str: the indented JSON string.
    """
    pad = " " * offset
    text = json.dumps(result, indent=indent)
    return text.replace("\n", "\n" + pad)
    # | 512847722fa36eff408ac28d6e3dc8fde5c52af1 | 3,649,725 |
import types
def simple_http_get(url, port=80, headers=None):
"""Simple interface to make an HTTP GET request
Return the entire request (line,headers,body) as raw bytes
"""
client_socket = create_async_client_socket((url, port))
calling_session = Reactor.get_instance().get_current_session()
... | 87a2115d0134ae70625abe74a4bbd78e56610604 | 3,649,726 |
def _gumbel_softmax_sample(logits, temp=1, eps=1e-20):
"""
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
dims = logits.dim()
gumbel_noise = _sa... | a61eac3861cdca17c1d2856c83bc70e03168bc45 | 3,649,727 |
from re import VERBOSE
def interpolate_points(variable, variable_name, old_r, old_theta, new_r, new_theta):
"""Interpolate the old grid onto the new grid."""
grid = griddata(
(old_r, old_theta), variable, (new_r, new_theta), method=INTERPOLATION_LEVEL, fill_value=-1
)
n_error = 0
for i, ... | 2a830e7cd04d5d0832d35a25bc58041ba192709b | 3,649,728 |
def attitude(request):
"""
View configuration for discussion step, where we will ask the user for her attitude towards a statement.
Route: /discuss/{slug}/attitude/{position_id}
:param request: request of the web server
:return: dictionary
"""
LOG.debug("View attitude: %s", request.matchdic... | 4870e19941c990350991e1540c8df1760953e455 | 3,649,729 |
def currentProgram():
"""currentProgram page."""
return render_template(
"currentProgram-index.j2.html",
title="currentProgram",
subtitle="Demonstration of Flask blueprints in action.",
template="currentProgram-template",
currentProgram=getCurrentProgr(),
timeStar... | f5c914560d3c1791e34749321b78951313d5f058 | 3,649,730 |
def checkIsMember(request):
"""
사업자번호를 조회하여 연동회원 가입여부를 확인합니다.
- https://docs.popbill.com/statement/python/api#CheckIsMember
"""
try:
# 조회할 사업자등록번호, '-' 제외 10자리
targetCorpNum = "1234567890"
response = statementService.checkIsMember(targetCorpNum)
return render(reques... | 861d4cc83102e036bb795bf8eafee2f7593925a4 | 3,649,731 |
from typing import List
from typing import Optional
from typing import Type
def f1_score(y_true: List[List[str]], y_pred: List[List[str]],
*,
average: Optional[str] = 'micro',
suffix: bool = False,
mode: Optional[str] = None,
sample_weight: Optional[Lis... | 1354e306847af1decf59ae638a1cdecd265e569a | 3,649,732 |
from typing import Any
from typing import Counter
def calc_proportion_identical(lst: Any) -> float:
"""
Returns a value between 0 and 1 for the uniformity of the values
in LST, i.e. higher if they're all the same.
"""
def count_most_common(lst):
"""
Find the most common item in LS... | adf467eba11694c5ea4583d7b53029110e59e25a | 3,649,733 |
def _rolling_mad(arr, window):
"""Rolling window MAD outlier detection on 1d array."""
outliers = []
for i in range(window, len(arr)):
cur = arr[(i - window) : i]
med, cur_mad = _mad(cur)
cur_out = cur > (med + cur_mad * 3)
idx = list(np.arange((i - window), i)[cur_out])
... | 3f28dde448b3c567a92fd22e499f812cd748a507 | 3,649,734 |
def compute_mean_and_cov(embeds, labels):
"""Computes class-specific means and shared covariance matrix of given embedding.
The computation follows Eq (1) in [1].
Args:
embeds: An np.array of size [n_train_sample, n_dim], where n_train_sample is
the sample size of training set, n_dim is the dimension ... | b6d5624b0cea9f6162ffad819d4d5917391ac73e | 3,649,735 |
def wcenergy(seq: str, temperature: float, negate: bool = False) -> float:
    """Return the wc energy of seq binding to its complement.

    The energy is accumulated over every adjacent dinucleotide in *seq*
    using the nearest-neighbour loop-energy table for the given
    temperature.

    Args:
        seq: the sequence whose binding energy is wanted.
        temperature: temperature used to build the loop-energy table.
        negate: forwarded to the table builder to flip energy signs.

    Returns:
        float: the summed loop energies (0 for sequences shorter than 2).
    """
    loop_energies = calculate_loop_energies_dict(temperature, negate)
    total = 0
    for left, right in zip(seq, seq[1:]):
        total += loop_energies[left + right]
    return total
    # | 90eae3d85e90019571e4f5d674fb93d58c1d7287 | 3,649,736 |
import os
def getDirectoriesInDir(directory):
    """Map each immediate subdirectory of *directory* to its full path.

    The scan is non-recursive; plain files are skipped.

    Args:
        directory: path of the directory to list.

    Returns:
        dict: ``{entry_name: joined_path}`` for every entry that is a
        directory.
    """
    return {
        entry: os.path.join(directory, entry)
        for entry in os.listdir(directory)
        if os.path.isdir(os.path.join(directory, entry))
    }
    # | 8d78571d0ebc4fba58abf98354a7bd2bea018e60 | 3,649,737 |
def upload_csv():
    """
    Upload csv file

    Flask view: validates ``UploadCSVForm`` on POST, hands the uploaded
    file to ``ClassCheck.process_csv_file`` and redirects home with a
    success flash.

    NOTE(review): when validation fails (or on a plain GET) this view
    falls through and returns None, which Flask treats as an error --
    presumably a template render is missing; confirm against the route
    registration.
    """
    upload_csv_form = UploadCSVForm()
    if upload_csv_form.validate_on_submit():
        # NOTE: `file` shadows the builtin of the same name
        file = upload_csv_form.csv.data
        ClassCheck.process_csv_file(file)
        flash('CSV file uploaded!', 'success')
        return redirect('/') | 620cc3b4e11c71fe9dedd24631f641304313150f | 3,649,738 |
async def clean(request: Request) -> RedirectResponse:
    """Access this view (GET "/clean") to remove all session contents.

    Clears every key stored in the current session, then redirects the
    client to the site root.
    """
    request.session.clear()
    return RedirectResponse("/") | 3ef0d9298fcd7879becc8ae246656a62086f639a | 3,649,739 |
def svd(A):
"""
Singular Value Decomposition
Parameters
----------
A: af.Array
A 2 dimensional arrayfire array.
Returns
-------
(U,S,Vt): tuple of af.Arrays
- U - A unitary matrix
- S - An array containing the elements of diagonal matrix
- Vt - A... | 7b7d48dc1782d1e02eca01b657895372170caf6c | 3,649,740 |
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
The maintype and substype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ct... | 24722c1dd5784896fd6aa8b39cd29eb76fec155a | 3,649,741 |
import numpy
def csr_matrix_multiply(S, x): # noqa
"""Multiplies a :class:`scipy.sparse.csr_matrix` S by an object-array vector x.
"""
h, w = S.shape
result = numpy.empty_like(x)
for i in range(h):
result[i] = sum(S.data[idx]*x[S.indices[idx]] # noqa pylint:disable=unsupported-assignme... | 77e1630cbdd59f53b1b2885b731e73a14fb18b35 | 3,649,742 |
def calculate_sem_IoU(pred_np, seg_np, num_classes):
"""Calculate the Intersection Over Union of the predicted classes and the ground truth
Args:
pred_np (array_like): List of predicted class labels
seg_np (array_like): List of ground truth labels
num_classes (int): Number of classes in... | e8e360cb8aad0f2226aa54c88c01485840017f2d | 3,649,743 |
def _invert(M, eps):
"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
Will generate errors if the matrices are singular: user must handle this
through his own regularization schemes.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
... | 119c16ad816dd37b7e5eb23c121ef5affc8851f5 | 3,649,744 |
from pathlib import Path
def read_densecsv_to_anndata(ds_file: Path):
    """Reads a dense text file in csv format into the AnnData format.

    Thin wrapper around ``read_densemat_to_anndata`` that fixes the
    field separator to ','.

    :param ds_file: path to the dense csv matrix file
    :return: the parsed AnnData object
    """
    return read_densemat_to_anndata(ds_file, sep=",") | cee99509b6744972ad7a9530d66b59c06f7deec5 | 3,649,745 |
def get_original_date_jpeg(filepath):
"""
returns the DateTimeOriginal/DateTimeDigitized exif data from the given jpeg file
"""
try:
with Image.open(filepath) as image:
# NOTE: using old "private" method because new public method
# doesn't include this tag. It does ... | e3c1c8f12bff2506f5c1eed3dbe0cc9c2c45d736 | 3,649,746 |
def _singleton(name):
"""Returns a singleton object which represents itself as `name` when printed,
but is only comparable (via identity) to itself."""
return type(name, (), {'__repr__': lambda self: name})() | b07003e1716115864bf1914d4b523b36d0f0471f | 3,649,747 |
def get_zone(*, zone_name: str):
""" Get zone with given zone name.
Args:
zone_name: zone name, e.g. "haomingyin.com"
Returns:
json: zone details
"""
params = dict(name=zone_name)
zones = _get("zones", params=params)
if not zones:
raise CloudflareAPIError(f"Unable to ... | 5e79ec900af7e5cc4d457d04292e55e2e3abc9ec | 3,649,748 |
from rdkit.Chem import AllChem
import os
def load_bbbp_dataset(data_path, task_names=None, featurizer=None):
"""Load bbbp dataset ,process the classification labels and the input information.
Description:
The data file contains a csv table, in which columns below are used:
N... | 589fb4e9d6796a841264744e2ecc024e60a310f3 | 3,649,749 |
def execute(connection, cmdline, **kwargs):
"""generic function to execute command for device
| Parameters:
| connection (Adaptor): connection of device
| cmdline (str): command line
| kwargs (dict): additional keyword arguments for command line execution
| Returns:
| str: ou... | 7f3424cb8a747fab87a5a67c880ec755d9c9cb96 | 3,649,750 |
def json_to_numpy_mask(shapes, width, height):
"""Converts JSON labels with pixel classifications into NumPy arrays"""
img = Image.new("L", (width, height), 0)
for shape in shapes:
if shape["label"] == "barrel":
barrel_lst = [tuple(i) for i in shape["points"]]
ImageDraw.Draw(... | 33757246478d854d15f71a0737174ac6952514ef | 3,649,751 |
import types
import typing
def _format_call(value: ast3.Call, context: types.Context) -> typing.Text:
    """Format a function call like 'print(a*b, foo=x)'

    Tries the single-line (horizontal) layout first and falls back to
    the multi-line (vertical) layout when the horizontal formatter
    signals it cannot fit by raising ``errors.NotPossible``.
    """
    try:
        return _format_call_horizontal(value, context)
    except errors.NotPossible:
        return _format_call_vertical(value, context) | 2019f50943bb597948248dfda9ce8620d3286377 | 3,649,752 |
def get_tags(repo_dir):
"""
_get_tags_
returns a list of tags for the given repo, ordered as
newest first
"""
repo = git.Repo(repo_dir)
tags_with_date = {
tag.name: tag.commit.committed_date
for tag in repo.tags
}
return sorted(tags_with_date, key=tags_with_date.get... | aa5462ff0b15501cf486a2bf49837f0dd60ecfaf | 3,649,753 |
def readh5(filename, GroupName=None):
"""
Read the HDF5 file 'filename' into a class. Groups within the hdf5 file are
by default loaded as sub classes, unless they include a _read_as attribute
(see sharpy.postproc.savedata). In this case, group can be loaded as classes,
dictionaries, lists or tuples... | 9ab33071916a634da6ddc68df56fe29429ef6313 | 3,649,754 |
def calc_E_E_hs_d_t(W_dash_k_d_t, W_dash_s_d_t, W_dash_w_d_t, W_dash_b1_d_t, W_dash_b2_d_t, W_dash_ba1_d_t,
theta_ex_d_Ave_d,
L_dashdash_ba2_d_t):
"""1時間当たりの給湯機の消費電力量 (kWh/h) (1)
Args:
W_dash_k_d_t(ndarray): 1時間当たりの台所水栓における太陽熱補正給湯負荷 (MJ/h)
W_dash_s_d_t(ndarra... | d76a65d4d30b0c2cf59b837e188473827425d576 | 3,649,755 |
def best_promo(order):
    """
    Select the best available discount for *order*.

    Evaluates every promotion strategy in the module-level ``promos``
    list and returns the largest discount any of them offers.

    NOTE(review): raises ValueError if ``promos`` is empty -- presumably
    it is always populated at import time; confirm.
    """
    return max(promo(order) for promo in promos) | db4001b4e04a167171da02da92e4234489bf13a5 | 3,649,756 |
def random_joint_positions(robot):
"""
Generates random joint positions within joint limits for the given robot.
@type robot: orpy.Robot
@param robot: The OpenRAVE robot
@rtype: np.array
@return:
"""
# Get the limits of the active DOFs
lower, upper = robot.GetActiveDOFLimits()
positions = lower + n... | 49fe770a8cc22945e79c892d54754c50f19974e8 | 3,649,757 |
def test_cancel_examples(example):
"""
We can't specify examples in test_fuzz_cancel (because we use data, see
https://hypothesis.readthedocs.io/en/latest/data.html#interactive-draw),
so we have this here for explicit examples.
"""
stream_req, stream_resp, draws = example
def draw(lst):
... | c3a3a970a77f136c39e86666c0485163d0fbb408 | 3,649,758 |
import pickle
def fetch_pickle(filename):
"""
Fetches any variable saved into a picklefile with the given filename.
Parameters:
filename (str): filename of the pickle file
Returns:
variable (any pickle compatible type): variable that was saved into the picklefile.
"""... | 172c18520619d102b520658949d2464d5ecfb05c | 3,649,759 |
def check_clockwise(poly):
"""Checks if a sequence of (x,y) polygon vertice pairs is ordered clockwise or not.
NOTE: Counter-clockwise (=FALSE) vertice order reserved for inner ring polygons"""
clockwise = False
if (sum(x0*y1 - x1*y0 for ((x0, y0), (x1, y1)) in zip(poly, poly[1:] + [poly[0]]))) < 0:
... | 5e9f8fba6cd11e33dfe60a89e62eeac2ac24c805 | 3,649,760 |
def bookList(request):
"""测试"""
# 查询书籍信息:使用默认的管理器对象 : 在管理器上调用过滤器方法会返回查询集
# book_list = BookInfo.objects.all()
# 查询书籍信息:使用自定义的管理器对象
# book_list = BookInfo.books.all()
# 以下代码演示,自定义管理器的类给模型类新增初始化方法: 类比books.all()
# book1 = BookInfo.books.create_model('zxc')
# book2 = BookInfo.books.creat... | b9b05f259d5cdb9d0570268c0f08eaafc8ba6cc1 | 3,649,761 |
def format_stats(stats):
    """Render a stats mapping as lines suitable for table printing.

    Args:
        stats: mapping of statistic name -> value.

    Returns:
        str: one ``"key - value"`` line per entry, newline-separated,
        with no trailing newline (empty string for an empty mapping).
    """
    return '\n'.join(f'{key} - {value}' for key, value in stats.items())
    # | 2d01b6c48b83f8e8810f4609183b39fad871f942 | 3,649,762 |
def imcrop(img, bboxes, scale=1.0, pad_fill=None):
"""Crop image patches.
3 steps: scale the bboxes -> clip bboxes -> crop and pad.
Args:
img (ndarray): Image to be cropped.
bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
scale (float, optional): Scale ratio of... | 244d6c39410c5d51780a8d3a261810986c17d779 | 3,649,763 |
def timestamp2str(ts):
    """Convert a Timestamp into a ``"YYYY-MM-DD HH:MM:SS"`` string.

    Args:
        ts: object exposing ``date()`` and ``time()`` (e.g. a pandas
            ``Timestamp`` or ``datetime.datetime``).

    Returns:
        str: the date and time parts joined by a single space.
    """
    date_part = ts.date().strftime("%Y-%m-%d")
    time_part = ts.time().strftime("%H:%M:%S")
    return f"{date_part} {time_part}"
    # | 0e847a8af0cbbacf18df911e3070ac7c70e504b7 | 3,649,764 |
from operator import index
def define_class_functions(processes, stages, progress):
"""
Define and return class of unit tests for stand-alone functions
for the given configuration.
"""
class Test_functions(TestCase):
def test_mapreduce(self):
logger = log() if progress else Non... | 0dc8df39e49f1e7591be7a7b8e80dc1266714cc4 | 3,649,765 |
def concept(*reference):
"""Reference to a semantic concept.
Parameters
----------
*reference : :obj:`str`
Keys pointing to the ruleset defining this concept in the rules file of
an ontology.
Returns
-------
:obj:`CubeProxy`
A textual reference to the concept that can be solved by ... | c3e01f48ca962c5312a0cf8d6deb66eecc062078 | 3,649,766 |
import torch
def collate_tensors(batch, stack_tensors=torch.stack):
""" Collate a list of type ``k`` (dict, namedtuple, list, etc.) with tensors.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
batch (list of k): List of rows of type `... | cbd1098188e3d47b705e25edeae636624ebbec47 | 3,649,767 |
def build_boundaries_layers(cyt_coord, nuc_coord, rna_coord):
"""
Parameters
----------
cyt_coord : np.ndarray, np.int64
Array of cytoplasm boundaries coordinates with shape (nb_points, 2).
nuc_coord : np.ndarray, np.int64
Array of nucleus boundaries coordinates with shape (nb_point... | a99efab6ccc3044c04df330ca9c3ce0ebbf0c413 | 3,649,768 |
def predicted_actual_chart(actual, predicted, title="Predicted vs Actual Values"):
"""Predicted vs actual values curve."""
source = pd.DataFrame({"x": actual, "y": predicted})
scatter = scatter_chart(source, "Actual", "Residual", title=title)
vmin = source.min().min()
vmax = source.max().max()
... | 91588a9d79bfa8eaea39067042b7e4b3c6784b7e | 3,649,769 |
def swapSeries(keypoints_array,v,c,pers1,pers2,start,end):
"""helper function for swapping sections of time series. This is useful because openpose isn't
consistent in labelling people so we need to rearrange things.
Args:
keypoints_array: all the data.
v: which video? - specifies first d... | 52cb0a81bfac6706f5c20e04f23867c7850bd2e7 | 3,649,770 |
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""... | 5b4c0c0ae82e92450113da6c2c01e5467d903a28 | 3,649,771 |
from operator import mul
from operator import inv
def interpolate(R1,R2,u):
"""Interpolate linearly between the two rotations R1 and R2. """
R = mul(inv(R1),R2)
m = moment(R)
angle = vectorops.norm(m)
if angle==0: return R1
axis = vectorops.div(m,angle)
return mul(R1,rotation(axis,angle*u)... | d4aaa976e52b6f44f44c4f26eccb59f1b85f9f0b | 3,649,772 |
def plot_spikes(
spikes: dict,
ax: plt.Axes = None,
markersize: int = None,
color: tp.Union[str, tp.Any] = "k",
) -> plt.Axes:
"""Plot Spikes returned by NeuroDriver's OutputRecorder"""
if ax is None:
fig = plt.gcf()
ax = fig.add_subplot()
for n, (name, ss) in enumerate(spik... | d757c9c342e34e45820ee81f45e0bc59b8cbc277 | 3,649,773 |
def boardToString(board):
"""
return a string representation of the current board.
"""
# global board
# b = board
rg = range(board.size())
s = "┌────┬────┬────┬────┐\n|"+"|\n╞════╪════╪════╪════╡\n|".join(
['|'.join([getCellStr(board, x, y) for x in rg]) for y in rg])
s = "\n" + s ... | 2ea53d0ce7448ab0073176195195f1c4fb028a71 | 3,649,774 |
def create_data_ops(batch_size, num_elements_min_max):
"""Returns graphs containg the inputs and targets for classification.
Refer to create_data_dicts_tf and create_linked_list_target for more details.
Args:
batch_size: batch size for the `input_graphs`.
num_elements_min_max: a 2-`tuple` of `int`s whic... | fd38b1a7d0d8e9e4633fa6fcefc5b1c1614c97fc | 3,649,775 |
import os
def compute_file_path(data_path, path, command):
"""Return the computed file path for mocked data
Keyword arguments:
data_path -- the path of the folder that contains the subbed data
path -- the URL path
command -- the HTTP verb
"""
return os.path.realpath(
os.path.join... | 8260a67edb5fca16e4b9004e8596cc080c98ff19 | 3,649,776 |
def location_matches(stmt):
"""Return a matches_key which takes geo-location into account."""
if isinstance(stmt, Event):
context_key = get_location(stmt)
matches_key = str((stmt.concept.matches_key(), context_key))
elif isinstance(stmt, Influence):
subj_context_key = get_location(st... | be261d2dcf7be09330542a4cd2c18b3261ef0eca | 3,649,777 |
def _submit_to_measurement_sets_api(measurement_set, patch_update):
"""Send the submission object to the appropriate API endpoint."""
# TODO: Add a separate method to validate submission without sending it.
# Attempt to find existing measurement sets if any exist.
try:
matching_submission = get_... | 5f0ae3a764f37ad8050b92bfc30abe338b7edd91 | 3,649,778 |
def parse_files(files, options):
"""Build datastructures from lines"""
lines = []
for line in finput(files, openhook=compr):
if (type(line) is bytes): line = line.decode('utf-8')
lines.append(line.rstrip().split("|"))
db = {}
db['rp'], db['users'], db['msgprof'], db['logins'] = {}, ... | 926f805d87ead9af1099f39bfb57be0b4b775e0a | 3,649,779 |
def resize_preserving_order(nparray: np.ndarray, length: int) -> np.ndarray:
"""Extends/truncates nparray so that ``len(result) == length``.
The elements of nparray are duplicated to achieve the desired length
(favours earlier elements).
Constructs a zeroes array of length if nparray is emp... | e074b1135d2192a9b0cf2d9b91f6d99f22408220 | 3,649,780 |
def push(service, key, data):
"""Push
Called to push data to the sync cache
Args:
service (str): The name of the service using the sync
key (mixed): The key to push the data onto
data (mixed): The data to be pushed
Returns:
bool|string
"""
# Make sure the service and key are strings
if not isinstance... | 2be85735b1c4965e5a0cdf35b5f62267ce31cc6e | 3,649,781 |
import argparse
import sys
def get_args():
"""Get all parsed arguments."""
parser = argparse.ArgumentParser(description="CLASP training loop")
# data
parser.add_argument("--id", type=str,
help="run id")
parser.add_argument("--path-data-train", type=str,
... | 7261570226596e6c4dada52dd058ecad33f4ac60 | 3,649,782 |
def get_db_filenames(database_name):
""" This is used to populate the dropdown menu, so users can
only access their data if their name is in the user column"""
con = sql.connect(database_name)
c = con.cursor()
names_list = []
for row in c.execute(
"""SELECT Dataset_Name FROM master_t... | 7ffdd7cfb24d135ddc20353799dd0c7d21504232 | 3,649,783 |
import string
def Calculate(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first.
"""
if bin_function:
values = list(m... | bb3f40eec7733d948e66e00c3bafdd032acb6372 | 3,649,784 |
import time
def getToday(format=3):
"""返回今天的日期字串"""
t = time.time()
date_ary = time.localtime(t)
if format == 1:
x = time.strftime("%Y%m%d", date_ary)
elif format == 2:
x = time.strftime("%H:%M", date_ary)
elif format == 3:
x = time.strftime("%Y/%m/%d", date_ary)
el... | 900c0a0d42dc2220c5e5030eeebd858e3e6a41bf | 3,649,785 |
def _get_referenced(body, start, end, no_header, clean, as_xml, as_list):
"""Retrieve data from body between some start and end."""
if body is None or start is None or end is None:
return None
content_list = body.get_between(
start, end, as_text=False, no_header=no_header, clean=clean
)
... | 2b3e1ce008461711c37e4af6dda7dc7d2e332d9e | 3,649,786 |
import torch
def info(filepath: str) -> AudioMetaData:
"""Get signal information of an audio file.
Args:
filepath (str): Path to audio file
Returns:
AudioMetaData: meta data of the given audio.
"""
sinfo = torch.ops.torchaudio.sox_io_get_info(filepath)
return AudioMetaData(si... | e3ff5929f563977c44f25f8f51f3a7014f43b397 | 3,649,787 |
def _override_regex_to_allow_long_doctest_lines():
"""Allow too-long lines for doctests.
Mostly a copy from `pylint/checkers/format.py`
Parts newly added are marked with comment, "[PYTA added]: ..."
"""
def new_check_lines(self, lines, i):
"""check lines have less than a maximum number of c... | 9b9d1b5eefaa9e61d1e8915aef988fbc25756d1a | 3,649,788 |
import types
def handle(*, artifacts: oa_types.SimplePropertyArtifacts) -> types.TColumn:
    """
    Handle a simple property.

    Thin keyword-only wrapper that delegates column construction to the
    sqlalchemy simple-property facade.

    Args:
        artifacts: The artifacts of the simple property.

    Returns:
        The constructed column.
    """
    return facades.sqlalchemy.simple.construct(artifacts=artifacts) | 2c9d5cd47b2aecb7603430c8eec7b326ce3c249f | 3,649,789 |
def rollout_representation(representation_model, steps, obs_embed, action, prev_states, done):
"""
Roll out the model with actions and observations from data.
:param steps: number of steps to roll out
:param obs_embed: size(time_steps, batch_size, n_agents, embedding_size)
:param act... | 2736609ab54d477c3fad2ab7a4e3270772517a08 | 3,649,790 |
import argparse
def argparser():
"""parse arguments from terminal"""
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--video', dest='video')
parser.add_argument('-c', '--config', dest='config', default=CONFIG_FILE)
parser.add_argument('-o', '--output', dest='output')
return parse... | a24452f61dc5b24633397caca1632ea03e667675 | 3,649,791 |
def generate_random_ast(schema, rng):
"""End-to-end simulator for AST of Core DSL."""
distributions = [schemum[1] for schemum in schema]
partition_alpha = rng.gamma(1,1)
partition = generate_random_partition(partition_alpha, len(distributions), rng)
row_dividers = [generate_random_row_divider(rng) f... | 9547f815ad07af33b182c7edf7ea646ec9fdd49f | 3,649,792 |
def _opcode_to_string(opcode):
"""Return the printable name for a REIL opcode.
Args:
opcode (reil.Opcode): The opcode to provide in printable form.
Returns:
A string representing the opcode.
"""
return _opcode_string_map[opcode] | a1307efe0af8d223360a9ca0f2d9e96913ccb601 | 3,649,793 |
def get_shot(shot):
    """Retrieves shot object from database and returns as dictionary.

    Looks the entity up by the PartitionKey/RowKey pair carried in
    *shot* (presumably an Azure Table Storage lookup -- confirm against
    ``__get_conn``).

    Raises exception if shot is not found.
    """
    return __get_conn().get_entity(__table_name(),
                                   shot['PartitionKey'], shot['RowKey']) | 0e9ad55427bba2074f7a77d94b61e7bae34bcbda | 3,649,794 |
def report_value_count(data_frame: pd.DataFrame, column: str, digits: int = 2) -> str:
"""
Report the number and percentage of non-empty values in the column.
Parameters
----------
data_frame : pandas.DataFrame
A data frame with one or more columns.
column : str
The name of the ... | d31d9e8bae216f7931f96ec08992d6319d4c3645 | 3,649,795 |
def input_fn(is_training, data_dir, batch_size, num_epochs=1,
num_parallel_calls=1, multi_gpu=False):
"""Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
... | 5d27f5a04b409ad4b04ce9885b592b0454ae0b4b | 3,649,796 |
def getWinners(players, game):
"""
Return a list of winners
:param players:
:param game:
:return:
"""
# get score for each player
for i in range(0, len(game.players)):
game.players[i].credits = scoreFor(i, game)
currentPlayer = whoseTurn(game)
# add 1 to players who ha... | a872d4f9ed596e31ae9a129c9054f9bb95a6e765 | 3,649,797 |
def read_xsf(filepath):
"""
:param filepath filepath of the xtd file
:return cell and atoms need to build the pymatflow.structure.crystal object
"""
a = ase.io.read(filepath, format='xsf')
cell = a.cell.tolist()
atoms = []
for i in range(len(a.arrays['numbers'])):
for item in bas... | 97152eb3d18752e78689598bb0c8603c13051623 | 3,649,798 |
def elina_abstract0_bound_linexpr(man, a, linexpr):
"""
Returns the ElinaInterval taken by an ElinaLinexpr0 over an ElinaAbstract0.
Parameters
----------
man : ElinaManagerPtr
Pointer to the ElinaManager.
a : ElinaAbstract0Ptr
Pointer to the ElinaAbstract0.
linexpr : Eli... | 2764507b79f3326741496a92642be75b5afb8ce4 | 3,649,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.