content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def extract_file_type(file_location:str) -> str:
"""
A function to return the type of file
-> file_location: str = location of a file in string... ex : "C:\\abc\\abc\\file.xyz"
----
=> str: string of the file type, ex : "xyz"
"""
if not isinstance(file_location,str):
raise TypeError... | 091930e1cd285822a6be402eb47ce0457e40b0db | 3,650,700 |
def re_subm(pat, repl, string):
"""
Like re.sub, but returns the replacement _and_ the match object.
>>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
>>> t
'foooooolish'
>>> m.groups()
('oooooo',)
"""
class re_subm_proxy:
def __init__(se... | 7ad761906ff43f8e552d9b96cc042490c45830c0 | 3,650,701 |
def search_view(request):
"""Get user's saved keywords from the database if they exist and render search page."""
if request.method == 'GET':
try:
query = request.dbsession.query(Keyword)
user_keywords = query.filter(Association.user_id == request.authenticated_userid... | 5b8208704143d0ce5aaf4c379a5bea8c01d2d4b1 | 3,650,702 |
def upload_object(
self, key, body, metadata=None, acl=None, content_type="application/json"
):
"""Upload an arbitrary object to an S3 bucket.
Parameters
----------
S3 key : `str`
The Object's key identifier.
body : `str` or `bytes`
Object data
metadata : `dict`
Head... | 3f1425544d8f29fa414995a316869480877464df | 3,650,703 |
def min_ui_count(proteins):
"""
Counts the minimum number of unique identifier peptides across all proteins
in a set
input:
proteins: list of protein sequences as strings ['protein_seq', ...]
output:
minimum number of unique identifier peptides across all proteins in
a set
"""
t... | b5bc4300ce79bf680896c6902ea721d8e73316aa | 3,650,704 |
def check_normalize_py(method):
"""A wrapper that wrap a parameter checker to the original function(normalize operation written in Python)."""
@wraps(method)
def new_method(self, *args, **kwargs):
[mean, std], _ = parse_user_args(method, *args, **kwargs)
check_normalize_py_param(mean, std)
... | acee5b58192336499939de722a8ee20778112d14 | 3,650,705 |
def GetOS(build_id, builder_name, step_name, partial_match=False):
# pylint:disable=unused-argument
"""Returns the operating system in the step_metadata.
Args:
build_id (int): Build id of the build.
builder_name (str): Builder name of the build.
step_name (str): The original step name used to get the... | 3bdbbabf551d29b0c5bd868dcc56df8fe77109ba | 3,650,706 |
from niftynet.application.label_driven_registration import SUPPORTED_INPUT
def __add_registration_args(parser):
"""
keywords defined for image registration
:param parser:
:return:
"""
parser.add_argument(
"--label_normalisation",
metavar='',
help="whether to map unique... | a2b27e27d37e90a83769a86cda2dab12b1966724 | 3,650,707 |
from typing import Optional
def get_catalog_item(catalog_id: Optional[str] = None,
catalog_item_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> A... | b52b48b90d774d00dec62eb4aa34259aa5bbaf2e | 3,650,708 |
def get_ext_suffix() -> str:
    """Return the platform-specific suffix for extension modules (EXT_SUFFIX)."""
    config = get_config_vars()
    return config["EXT_SUFFIX"]
from typing import Union
import logging
import subprocess
import shlex
def install_nbextension(extension: str,
*flags,
py=True,
sys_prefix=True,
symlink=False,
overwrite=True,
... | c345fc72746f05c1a6fbfa8e27ff744c24764a63 | 3,650,710 |
def confusion_matrix(y_true, y_pred):
"""
Args:
y_true: pd.Series or array or list, ground truth (correct) labels.
y_pred: pd.Series or array or list, predicted labels, as returned by a classifier.
Returns:
Confusion matrix.
"""
t = pd.DataFrame({'actual':y_true, 'predict':y_... | fdbd1dae9354fc7358595c8a659c80eae19dc812 | 3,650,711 |
from typing import List
import os
def load_currency_abbreviation_file(path: str) -> List[str]:
"""
Return a list of currency abbreviations present as one value per line in a file.
"""
with open(
os.path.join(__location__, path), "r", encoding="utf-8"
) as _file:
file_content = _fil... | 29e94487dcf61d25599d7e537b828c45192cd9dc | 3,650,712 |
import torch
def encode_boxes(boxes, im_shape, encode=True, dim_position=64, wave_length=1000, normalize=False, quantify=-1):
""" modified from PositionalEmbedding in:
Args:
boxes: [bs, num_nodes, 4] or [num_nodes, 4]
im_shape: 2D tensor, [bs, 2] or [2], the size of image is represented as [wi... | 7bc8e2d858391c862538626ea7f3dcc291f807f6 | 3,650,713 |
def get_docstring_and_version_via_import(target):
"""
Return a tuple like (docstring, version) for the given module,
extracted by importing the module and pulling __doc__ & __version__
from it.
"""
log.debug("Loading module %s", target.file)
sl = SourceFileLoader(target.name, str(target.file... | 896fb4668ea50ecd2bcd53396549ac2427b70205 | 3,650,714 |
from typing import List
def plot_t1(times: List[float], contrast: List[float], fname: str = None) -> Figure:
"""
Plot T1 relaxation figure along with laser delay time intervals
:param times: frequencies, unit: ns
:param contrast: contrast, range between 0 and 1
:param fname: if assigned, a '.png' ... | fc5461fd2957b30624156e23693e5cd71788fa92 | 3,650,715 |
def partition_girvan_newman(graph, max_depth):
"""
Use your approximate_betweenness implementation to partition a graph.
Unlike in class, here you will not implement this recursively. Instead,
just remove edges until more than one component is created, then return
those components.
That is, comp... | 0d5d80826bdd513ad0f7e2ca72759376a3a941e0 | 3,650,716 |
from pathlib import Path
import re
def inspect_project(dirpath=None):
"""Fetch various information about an already-initialized project"""
if dirpath is None:
dirpath = Path()
else:
dirpath = Path(dirpath)
def exists(*fname):
return Path(dirpath, *fname).exists()
if not e... | 2b4e503ea801f765997e4dd6b4ad1c6dee28acda | 3,650,717 |
def current_user(request):
    """
    Return the serialized representation of the authenticated user
    carried by the request (token-based auth).
    """
    user_data = UserSerializer(request.user).data
    return Response(user_data)
import numpy
def getPercentileLevels(h, frac=[0.5, 0.65, 0.95, 0.975]):
"""
Return image levels that corresponds to given percentiles values
Uses the cumulative distribution of the sorted image density values
Hence this works also for any nd-arrays
inputs:
h array
outputs:
res array containing level values
... | 126d16ab9358d9ec6e72dc653037d9235baef139 | 3,650,719 |
def _ParseSparse(data):
"""Concat sparse tensors together.
Args:
data: A dict of name -> Tensor.
Returns:
A single sparse tensor and a 1-D input spec Tensor.
Raises:
NotImplementedError: Combining dense and sparse tensors is not
supported.
ValueError: If data contains non-string Tensor... | 9860442770c52b2ee029531cb18e3dbf8429bfb1 | 3,650,720 |
def get_spectra2(X, E_in, p_dict, outputs=None):
"""
Calls get_Efield() to get Electric field, then use Jones matrices
to calculate experimentally useful quantities.
Alias for the get_spectra2 method in libs.spectra.
Inputs:
detuning_range [ numpy 1D array ]
The independent variable and defines the det... | e55c9a428876e6cb41f92775d46f6c9411913e7d | 3,650,721 |
from ovos_utils.configuration import read_mycroft_config
import os
def resolve_resource_file(res_name, root_path=None, config=None):
"""Convert a resource into an absolute filename.
Resource names are in the form: 'filename.ext'
or 'path/filename.ext'
The system wil look for ~/.mycroft/res_name firs... | 798c9070f7e741947054892ebf17602a6c9ef78e | 3,650,722 |
import subprocess
def lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
Input
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-inp... | 4f266c728487601d522234ce24104ab762fe0147 | 3,650,723 |
import operator
def calculate_seat_district(district_deputy_number, parties, votes):
"""
Calculate seats for each party in list of parties for a district
Params:
- district_deputy_number: the number of seats for this district
- parties: list of parties
- votes: list of votes for ... | 035a167c623d14857dcefe01e4304523959857a6 | 3,650,724 |
import unittest
def dispatch():
    """Build and return the test suite covering all dispatch tests."""
    dispatch_suite = ServiceTestSuite()
    dispatch_tests = unittest.makeSuite(TestCase, 'test_dispatch')
    dispatch_suite.addTest(dispatch_tests)
    return dispatch_suite
import torch
def get_mask(height, width, grid_size = 10):
"""
Get the location based on the image size corresponding to relu_4_2
and relu_5_1 layer for a desired grid size.
"""
print(height, width)
x_jump = int(width/grid_size)
y_jump = int(height/grid_size)
x_idx = np.linspace(int(x_j... | a63b1bf2944309f3e5720d06bb5906a059e09ea7 | 3,650,726 |
def arrivalTimes2TimeTraceBH(arrivalTimes, binLength):
"""
Convert a list of arrivalTimes to an intensity time trace I(t)
===========================================================================
Input Meaning
---------------------------------------------------------------------------
... | 0db7d8471bb5b918c66a57ace1b84871012448a4 | 3,650,727 |
from datetime import datetime
import pytz
def datetime_to_ts(dt):
"""
convert naive or aware datetime instance to timestamp in second
Args:
dt(datetime): datetime instance
Returns:
int: timestamp in second.
"""
epoch_dt = datetime.datetime.fromtimestamp(0, tz=pytz.utc)
i... | 4fd3f0e0a4a051980a6bfe3e21a3ceb4ceca6b7e | 3,650,728 |
def load_csv(source_filepath, start_date = None, end_date = None,
timestamp_index = 0,
column_map = {"bidopen": "bidopen",
"bidclose": "bidclose",
"bidhigh": "bidhigh",
"bidlow": "bidlow",
... | c5256dddcea94854c30c6b95d96557cb81003e80 | 3,650,729 |
def create_kernel(dim0, dim1):
"""Create a two-dimensional LPF kernel, with a half-Hamming window along
the first dimension and a Gaussian along the second.
Parameters
----------
dim0 : int
Half-Hamming window length.
dim1 : int
Gaussian window length.
Returns
-------
... | f19cd7a840fd1562b0c95f614415d3471e1cb4d4 | 3,650,730 |
def generate_fractal_noise_3d(
shape, res, octaves=1, persistence=0.5, lacunarity=2,
tileable=(False, False, False), interpolant=interpolant
):
"""Generate a 3D numpy array of fractal noise.
Args:
shape: The shape of the generated array (tuple of three ints).
This must be a m... | 3c20dd9c7de6e53d074926bdf87278f92f5162f4 | 3,650,731 |
def getDeltaBetweenPosition(data_outVTK, wall_outVTK, cord_choice, x_p0, x_p1, Npts, Uinf=None, Rhoinf=None):
"""
Compute the boundary layer thickness for *Npts* equally distributed between the 2 position
defined thanks to *x_p0*, *x_p1* and *cord_choice*. See the documentation of the function
getDelta... | a765dbecf1d34c8693c73ecc5250d22bf04793fa | 3,650,732 |
def get_clean_url(url):
    """ Get a url without the language part, if i18n urls are defined
    :param url: a string with the url to clean
    :return: a string with the cleaned url
    """
    # Drop surrounding slashes; an empty result means the site root.
    trimmed = url.strip('/') or '/'
    # Discard the first path segment (the language code) and re-join the rest.
    segments = trimmed.split('/')
    return '/'.join(segments[1:])
def get_fields_from_url():
    """Returns a list of fields defined in the url as expected by the RESTful standard"""
    raw_fields = request.args.get('fields', '')
    return raw_fields.split(",")
import collections
def get_triples_processed(dcids, limit=_MAX_LIMIT):
"""
Generate the GetTriple query and send the request.
The response is processed into as triples strings. This API is used by the
pv tree tool.
"""
url = API_ROOT + API_ENDPOINTS['get_triples']
payload = send_request(u... | de143106e4f6efad89a223b5e83146bd2de170d0 | 3,650,735 |
from datetime import datetime
def cid_to_date(cid):
"""Converts a cid to date string YYYY-MM-DD
Parameters
----------
cid : int
A cid as it is generated by the function ``utils.create_cid()``
Returns
-------
str
A string formated date (e.g. YYYY-MM-DD, 2018-10-01)
"""... | ab919f9cfd5c56f6fb6b65cbae8731687fc42faf | 3,650,736 |
import requests
import html
def get_listings(max_pages=10):
"""Returns the listings from the first max_pages of craigslist."""
page = requests.get(URL)
tree = html.fromstring(page.content)
listing_xpath = '//li[@class="result-row"]'
listings = tree.xpath(listing_xpath)
# Get total number of l... | a01aadf7e1701735fe3757b92da09c6a12848c73 | 3,650,737 |
import random
def remove_edge_stochastic_function(G, parameter, prob_func, prob_func_kws={}, random_seed=None, copy=True):
"""
Recieves a Graph and p.
p is function of a defined parameter
Returns a degraded Graph
"""
if random_seed is not None:
random.seed(random_seed)
if... | e4f49a6e512f7ad0e86c0138bf76affa330ba7a5 | 3,650,738 |
def get_sh_type(sh_type):
"""Get the section header type."""
if sh_type == 0:
return 'SHT_NULL'
elif sh_type == 1:
return 'SHT_PROGBITS'
elif sh_type == 2:
return 'SHT_SYMTAB'
elif sh_type == 3:
return 'SHT_STRTAB'
elif sh_type == 4:
return 'SHT_RELA'
... | 0d95e651cc817f48178e45373b822be2eb32fbaf | 3,650,739 |
def show_warn_message(text:str, *args:str) -> str:
    """
    Show a warning message.

    :param text: the warning text to display
    :param args: optional extra message arguments forwarded to the backend
    :return: the result of the underlying ``showWarningMessage`` call
    """
    message_args = (text,) + args
    return _base("showWarningMessage", *message_args)
def read_gmpe_file(resid_file, period):
"""
Reads the gmpe residuals file and returns all the data
"""
gmpe_data = []
# Read residuals file and get information we need
input_file = open(resid_file, 'r')
# Look over header and figure out which column contains the period
# we need to plot... | c7cb325f6c40cc23ae8fa017a6ef924fa7df2c4e | 3,650,741 |
import os
def remove_sep(path, new_sep='--'):
    """Convert a real path into pseudo-path.

    :param path: filesystem path to convert
    :param new_sep: replacement for the OS path separator (default ``'--'``)
    :return: *path* with every ``os.sep`` replaced by *new_sep*
    """
    pseudo_path = path.replace(os.sep, new_sep)
    return pseudo_path
def get_types(name=None):
"""Retrieves the list of device types in the system.
Note that we actually use the "GET device-families" endpoint, as this returns a complete list in one request.
"""
all_types = []
all_families = get_families(name=None, includeTypes=True)
for family in all_families:
... | dad6ff4fd938aa63e8c42dbf0e70ede0786bd458 | 3,650,743 |
import json
def read_socket(sock, buf_len, echo=True):
""" Read data from socket and return it in JSON format """
reply = sock.recv(buf_len).decode()
try:
ret = json.loads(reply)
except json.JSONDecodeError:
print("Error in reply: ", reply)
sock.close()
raise
if ech... | 07d7100ed8c1c9d22307ce293e10b2a0cd5849c6 | 3,650,744 |
def temperature_source_function(
rho,
district,
norm,
density: xr.DataArray,
etemp: xr.DataArray,
itemp: xr.DataArray,
density_source: xr.DataArray,
source_strength,
source_centre=0.3,
source_width=0.3,
):
"""
Smooth-step core power injection, mimicking Ohmic power deposi... | ba7836ce3f9d2e523dca4a6bf33999a116f20920 | 3,650,745 |
from pathlib import Path
import requests
def mock_dbt_cloud_response(
monkeypatch: MonkeyPatch,
dbt_manifest_file: Path,
dbt_run_results_file: Path,
) -> None:
"""
Mock the dbt cloud response.
Parameters
----------
monkeypatch : MonkeyPatch
The monkey patch fixture.
dbt_ma... | 1ae3a38c36a2f1468287ed587ee7495e40ceee74 | 3,650,746 |
async def model_copy(request, model_id):
""" route for copy item per row """
request_params = {elem: request.form[elem][0] for elem in request.form}
base_obj_id = utils.extract_obj_id_from_query(request_params["_id"])
try:
new_obj_key = await create_object_copy(
model_id, base_obj_id... | 2fd7ed81d10d40cd58c859e3b60de315404d9f4d | 3,650,747 |
def load_pr(fname):
"""Loads predicted tracks in tabular format."""
try:
data = np.loadtxt(fname, delimiter=',', dtype=np.float64, ndmin=2)
except (ValueError, IndexError):
# Try using whitespace delim (default).
data = np.loadtxt(fname, delimiter=None, dtype=np.float64, ndmin=2)
# If category is no... | 63fd8422adb170ccc2e97d32b9be04efe86fa72d | 3,650,748 |
def build_affine(rotation, scale, origin):
"""
Compute affine matrix given rotation, scaling, and origin.
Parameters
----------
rotation : np.array
rotation
scale : np.array
scale factor
Returns
-------
aff : np.array [4x4]
affine matrix
"""
aff = n... | 927016b37f3c1ea2c7b3720cb7b31f246784258a | 3,650,749 |
from operator import concat
def sample_per_group(data, group_by, ratio = None, n = None):
"""
:type data: DataFrame
:type group_by: list of str
:type ratio: float
:type num_rows: int
:return:
"""
# group the data
data = data.copy()
"""
:type data: DataFrame
"""
data['__order1'] = data.index
grouped = ... | ca92f79263f428aeafb07ac9efb4db65145a3113 | 3,650,750 |
def get_base64_column(metadata_df: pd.DataFrame) -> pd.DataFrame:
"""
Get accession json base64 str
:return:
"""
# Get accession json object as base64 string
metadata_df['accession_json_base64_str'] = metadata_df[METADATA_PAYLOAD_COLUMNS].\
apply(lambda x: b64encode(bytes(x.to_json(), en... | a68f9d735a4ac1612cb4fea2e83cac903e8111c8 | 3,650,751 |
def bible_studies_view(request):
"""Bible studies view."""
auth = False
try:
auth = request.cookies['auth_tkt']
auth_tools = request.dbsession.query(
MyModel
).filter(MyModel.category == 'admin').all()
except KeyError:
auth_tools = []
query = request.dbses... | c69a5291cbcb8267c0606fef2cae620878cca97b | 3,650,752 |
def get_num_channels(inputs):
    """ Get number of channels in one tensor. """
    # Channels are stored along axis 1 (NCHW-style layout).
    shape = inputs.shape
    return shape[1]
def single_face_marker():
    """
    Face marker with a single value.

    :return: a 2x3 integer array of zeros
    """
    marker = np.zeros((2, 3))
    return marker.astype(int)
def skew(arr, angle, dx=None, dy=None, fwd=True, fill_min=True):
"""
Skew the origin of successive lines by a specified angle
A skew with angle of 30 degrees causes the following transformation:
+-----------+ +---------------+
| | |000/ /|
| input ... | e25bb8632c6e86c84ab5e5a5bae56fd6f24e7c0d | 3,650,755 |
def websocket_call(configuration, _method, url, **kwargs):
"""An internal function to be called in api-client when a websocket
connection is required. method, url, and kwargs are the parameters of
apiClient.request method."""
url = get_websocket_url(url, kwargs.get("query_params"))
headers = kwargs... | add109d9caa80b74cb28754792033577e7b70ef3 | 3,650,756 |
def compute_metrics(feats, pids, camids, num_query):
""" Compute CMC and mAP metrics """
# query
qf = feats[:num_query]
q_pids = np.asarray(pids[:num_query])
q_camids = np.asarray(camids[:num_query])
# gallery
gf = feats[num_query:]
g_pids = np.asarray(pids[num_query:])
g_camids = np... | 4bd35b6e4c7ede5a54685822feeb6264e3fd7275 | 3,650,757 |
from datetime import datetime
def parse_date(string_date: str) -> datetime.datetime:
"""
Parses input string of format 'MMM-yyyy' to datetime.
:param str string_date: Date in string format 'MMM-yyyy'
:return: datetime.datetime: parsed datetime
"""
return datetime.datetime.strptime(string_dat... | 2d2b3b5332ca354e1600a25495dadd1dced31479 | 3,650,758 |
import math
def round_vzeros(v,d=10) :
"""Returns input vector with rounded to zero components
which precision less than requested number of digits.
"""
prec = pow(10,-d)
vx = v[0] if math.fabs(v[0]) > prec else 0.0
vy = v[1] if math.fabs(v[1]) > prec else 0.0
vz = v[2] if math.fabs(v[2... | aa16175bf1176383ef255460767502104be2566e | 3,650,759 |
def euclidean_distance_loss(params, params_prev):
"""
Euclidean distance loss
https://en.wikipedia.org/wiki/Euclidean_distance
:param params: the current model parameters
:param params_prev: previous model parameters
:return: float
"""
return K.sqrt(K.sum(K.square(params - params_prev), ... | 5c5366a7c60faa783ab55f85d758b7d40ff5627f | 3,650,760 |
import re
import shlex
def read_cloudflare_api_file(prog, file, state):
"""Read the input file for Cloudflare login details.
Args:
prog (State): modified if errors encountered in opening or reading
the file.
file (str): the file to read.
state (ConfigState): to record conf... | 39d5fe28f348e9e3285f55cff22f025a86f41715 | 3,650,761 |
import hmac
def hash_msg(key, msg):
    """Return SHA1 hash from key and msg (base64-encoded HMAC-SHA1 digest)."""
    digest = hmac.new(key, msg, sha1).digest()
    return b64encode(digest)
def insert_pattern(base, pattern, offset=None): #optional!
"""
Takes a base simulation field and places a given pattern with an offset
onto it. When offset is None, the object is placed into the middle
Parameters
----------
base : numpy.ndarray
The base simulation field. Can already hol... | ba72d5c37f9ba06cd5bd3b3803147b873bc2c742 | 3,650,763 |
def get_specific_label_dfs(raw_df, label_loc):
"""
Purpose: Split the instances of data in raw_df based on specific labels/classes
and load them to a dictionary structured -> label : Pandas Dataframe
Params: 1. raw_df (Pandas Dataframe):
- The df containing data
... | 756f03f845da64f6fd5534fb786966edb8610a13 | 3,650,764 |
def run(_):
    """
    Meant for running/parallelizing training data preparation
    :param _: Not used
    :return: Runs prep() function
    """
    # The argument is only a placeholder required by the parallel map API.
    result = prep()
    return result
def cached_property_named(name, kls=_internal_jit_attr, use_cls_setattr=False):
"""
variation of `cached_property`, just with the ability to explicitly set the attribute name
Primarily of use for when the functor it's wrapping has a generic name (
`functools.partial` instances for example).
Example... | b132cf46f07ac6aa6e6806c81b4e4a4e75ff9b4a | 3,650,766 |
def remix(tracks, n_tracks=50, n_steps=60):
"""Return new tracks generated by remixing given tracks"""
time_step = int(
np.round(next(dt for dt in sorted(tracks["Time"].diff()) if dt > 0) * 60)
)
print(
"Generating {} steps from {} steps {}s apart.".format(
n_tracks * n_steps... | 0546af43785231fbe9c813dd54cdbbd2cbd12874 | 3,650,767 |
from datetime import datetime
import logging
def prohibition_served_recently(**args) -> tuple:
"""
Returns TRUE if the prohibition was served within the previous 3 days;
otherwise returns FALSE
"""
date_served_string = args.get('date_of_service')
config = args.get('config')
delay_days = in... | fbdc32a37fd9c7cc975927309181b1ee46a4b4f2 | 3,650,768 |
import json
def project_api(request):
"""
创建项目接口
"""
if not request.user.has_perm('home_application.can_add_project'):
return render(request, '403.html')
if request.method == 'POST':
groupId=request.POST.get('group-id','')
projectName=request.POST.get('project... | 1850bf158638c7fcd8c9816fd0832818d67e27d7 | 3,650,769 |
def impute_missing_values(model,
observed_time_series,
parameter_samples,
include_observation_noise=False):
"""Runs posterior inference to impute the missing values in a time series.
This method computes the posterior marginals `p(latent... | 622546c31a10527aa0429c300e059aee69f2bb96 | 3,650,770 |
def grad_z(y, z, axis=0):
"""
Compute the vertical gradient
"z" can be an array same size as y, or vector along the first axis of "y"
Takes the derivative along the dimension specified by axis(=0)
"""
Nz = z.shape[0]
# Reshape the y variable
y = y.swapaxes(0, axis)
#assert y.shape... | 8558110580476a509735ebdc8db011806c4266fa | 3,650,771 |
def wang_ryzin_reg(h, Xi, x):
    """
    A version for the Wang-Ryzin kernel for nonparametric regression.
    Suggested by Li and Racine in [1] ch.4
    """
    # Kernel weight decays geometrically in the (integer) distance |Xi - x|.
    distance = abs(Xi - x)
    return h ** distance
def get_all_input_values(corpus_weights):
"""
Returns all relevant input values
"""
azerty = get_azerty()
letters = get_letters()
characters = get_characters()
keyslots = get_keyslots()
similarity_c_c = get_character_similarities()
similarity_c_l = get_character_letter_similarities(... | f7b1054f34a11cde66c0506f1fb38d663ffc3645 | 3,650,773 |
def bulk_rename(doctype, rows=None, via_console = False):
"""Bulk rename documents
:param doctype: DocType to be renamed
:param rows: list of documents as `((oldname, newname), ..)`"""
if not rows:
frappe.throw(_("Please select a valid csv file with data"))
if not via_console:
max_rows = 500
if len(rows) >... | c495a70ff7becf076c42ded40ab6c4497e047c9c | 3,650,774 |
def mag_to_flux_AB(mag, mag_err):
    """Calculate flux in erg s-1 cm-2 Hz-1 from an AB magnitude.

    :param mag: AB magnitude (scalar or numpy array).
    :param mag_err: magnitude uncertainty (scalar or numpy array).
    :return: tuple ``(flux, flux_err)`` with the flux and its propagated error.
    """
    # Local import: the original used sp.log, the scipy top-level alias of
    # numpy's log, which was deprecated and removed from SciPy. ln(10) is a
    # scalar constant, so array-valued inputs still broadcast correctly.
    import math
    flux = 10 ** (-.4 * (mag + 48.6))
    # Error propagation: d(flux)/d(mag) = -0.4 * ln(10) * flux.
    flux_err = abs(-.4 * flux * math.log(10) * mag_err)
    return flux, flux_err
def django_admin_navtree(request, context):
"""show menu"""
if request and request.user.is_staff:
coop_cms_navtrees = context.get('coop_cms_navtrees', None) or []
tree_class = get_navtree_class()
admin_tree_name = "{0}_{1}".format(get_model_app(tree_class), get_model_name(tree_class))
... | 5ba3f27c09146e72cb4893529985e818295f63c0 | 3,650,776 |
from typing import Counter
def get_pos_tags(student_comment: str) -> pd.DataFrame:
"""Get the POS (part of speech) tags for each of the words in the student
comments
Keyword arguments
student_comment -- a spacy.tokens.doc.Doc object
"""
# Count how many of each pos tags are in each c... | f04cc91a7a41d65a69ee6ef8d730b98f2ad2ed6c | 3,650,777 |
import hashlib
import base64
def compute_descriptor_digest(fields, descriptors, entry, flavor):
"""
(details of the parser – private API)
Plugs into our consumer to compute extra "digest" fields that expose
the (micro-)descriptor's (micro-)digest, enabling us to easily fetch
assoc... | 53193406232cf564fe42758e9feac651a5fce7e0 | 3,650,778 |
def get_axis(array, axis, slice_num):
    """Returns a fixed axis: the 2-D slice of *array* at index *slice_num*
    along *axis*, transposed for proper display orientation."""
    indexer = [slice(None)] * array.ndim
    indexer[axis] = slice_num
    # Transpose so the slice is oriented correctly for display.
    return array[tuple(indexer)].T
import os
def test_integrity(param_test):
"""
Test integrity of function
"""
# open result file
f = open(os.path.join(param_test.path_output, 'ernst_angle.txt'), 'r')
angle_result = float(f.read())
f.close()
# compare with GT
if abs(angle_result - param_test.angle_gt) < pa... | 7498c00ec62acb9006cb8ac8f041fc64647140e6 | 3,650,780 |
import pandas as pd
def expand_name_df(df,old_col,new_col):
"""Takes a dataframe df with an API JSON object with nested elements in old_col,
extracts the name, and saves it in a new dataframe column called new_col
Parameters
----------
df : dataframe
old_col : str
new_col : str
Retu... | d39209f71719afa0301e15d95f31d98b7949f6b3 | 3,650,781 |
def image_rpms_remove_if_exists(rpmlist):
"""
`image.rpms_remove_if_exists(["baz"])` removes `baz.rpm` if exists.
Note that removals may only be applied against the parent layer -- if your
current layer includes features both removing and installing the same
package, this will cause a build failure.
"""
re... | b0de424627b5010e8d6d351280418425cde7981a | 3,650,782 |
import itertools
def expand_set(mySet):
""" pass in a set of genome coords, and it will 'expand' the indels
within the set by adding +/- 3 bp copies for each one """
returnSet = []
for entry in mySet:
l0 = []
l1 = []
try:
sub0 = entry.split('-')[0] # split on `-`
sub1 = entry.split('-')[1] # this gu... | 4ccbff705654b5f5b89c59bb13df9fad6cba42db | 3,650,783 |
def trapezoid(t, depth, bigT, littleT):
"""Trapezoid shape for model
INPUT:
t - [float] vector of independent values to evaluate
trapezoid model
depth - [float] depth of trapezoid
bigT - [float] full trapezoid duration
littleT - [float] 'ingress/egress' durati... | 18021c6aff07931d6086d394a77f5ab8ce460f78 | 3,650,784 |
import re
def clean_username(username=''):
    """ Simple helper method to ensure a username is compatible with our system requirements. """
    # Keep only alphanumeric/hyphen runs, join them with underscores,
    # then truncate to the system-wide maximum length.
    tokens = re.findall(r'[a-zA-Z0-9\-]+', username)
    joined = '_'.join(tokens)
    return joined[:USERNAME_MAX_LENGTH]
import ast
def hy_compile(tree, module_name, root=ast.Module, get_expr=False):
"""
Compile a HyObject tree into a Python AST Module.
If `get_expr` is True, return a tuple (module, last_expression), where
`last_expression` is the.
"""
body = []
expr = None
if not isinstance(tree, HyO... | 0aea27067aae9d517ada9a7936b2ad29506346a5 | 3,650,786 |
import argparse
def make_expt_parser():
"""
Parses arguments from the command line for running experiments
returns
args (argparse NameSpace)
"""
parser = argparse.ArgumentParser(
description='energy_py dict expt parser'
)
# required
parser.add_argument('expt_name', ... | 4b9b4b29bdb1507928f6c9a45ed354fd6dac493d | 3,650,787 |
def user_dss_clients(dss_clients, dss_target):
"""
Fixture that narrows down the dss clients to only the ones that are relevant considering the curent DSS target.
Args:
dss_clients (fixture): All the instanciated dss client for each user and dss targets
dss_target (fixture): The considered ... | 7d418b49b68d7349a089046837f3c8351c0dcc67 | 3,650,788 |
def build_log(x: np.ndarray) -> np.ndarray:
    """
    Logarithmic expansion.
    :param x: features
    :return: augmented features
    """
    # Bias column of ones, followed by element-wise log of the features
    # (NaN/inf from non-positive inputs are mapped to finite values).
    bias = np.ones((x.shape[0], 1))
    logged = np.nan_to_num(np.log(x))
    return np.hstack((bias, logged))
def all_multibert_finetune_glue(m:Manager, task_name:str='MRPC')->BertGlue:
""" Finetune milti-lingual base-BERT on GLUE dataset
Ref. https://github.com/google-research/bert/blob/master/multilingual.md
"""
refbert=all_fetch_multibert(m)
refglue=all_fetchglue(m)
vocab=mklens(refbert).bert_vocab.refpath
gl... | 18785c804e9539922cc8fdae2517e2c9221f5d13 | 3,650,790 |
from typing import Union
import hashlib
def hash_eth2(data: Union[bytes, bytearray]) -> Hash32:
"""
Return SHA-256 hashed result.
Note: this API is currently under active research/development so is subject to change
without a major version bump.
Note: it's a placeholder and we aim to migrate to ... | fa43689abac95e54e984f67c623c2242d19bb52c | 3,650,791 |
import zipfile
def read_data(data_dir="../main/datasets/", data_file=DATA_FILE):
"""Returns the data, in order infos, items, orders"""
with zipfile.ZipFile(data_dir+DATA_FILE) as z:
dfs = []
for name in ["infos", "items", "orders"]:
dfs.append(pd.read_csv(z.open(f"1.0v/{name}.csv")... | c85f730bfd10fe2f3830ed9d29443b2f99c8deb6 | 3,650,792 |
def get_sprints(root_project_id, rally_number=None):
"""Get list of sprint projects.
Args:
root_project_id: Synapse Project ID with admin annotations,
including the sprint table ID.
rally_number: An integer rally number. If None, return sprints
... | 530ea32640ccfa44b3493eef470a226631e3dd34 | 3,650,793 |
from cleverhans_tutorials.tutorial_models import make_scaled_binary_rand_cnn
from cleverhans_tutorials.tutorial_models import make_basic_binary_cnn
from cleverhans_tutorials.tutorial_models import make_basic_cnn
from cleverhans.attacks import MadryEtAl
from cleverhans.attacks import FastGradientMethod
import os
def p... | 089ab3b22e00a5c978c4f6b114a9e4e3eb4b402e | 3,650,794 |
def out_of_bounds(maze: Array, x: int, y: int):
    """ Return true if x, y is out of bounds """
    width, height = maze.shape
    # Boolean "or" expressed as integer addition: nonzero means out of bounds.
    x_out = (x < 0) + (x >= width)
    y_out = (y < 0) + (y >= height)
    return x_out + y_out
import json
def cache_set(apollo_client, name, val):
"""
保存数据到redis
:return:
"""
r = redis_handler(apollo_client)
try:
res = r.set(name=name, value=json.dumps(val))
except Exception as e:
logger.error("Storage {} to cache failed!{}".format(name, e.__str__())... | d849ab7d0d65a445530dc61d427571d2bdd97a76 | 3,650,796 |
def guess_pyramid(data):
"""If shape of arrays along first axis is strictly decreasing.
"""
# If the data has ndim and is not one-dimensional then cannot be pyramid
if hasattr(data, 'ndim') and data.ndim > 1:
return False
size = np.array([np.prod(d.shape, dtype=np.uint64) for d in data])
... | f63dffe0c0ac5b36b752a7aeaa8baf4b4cc8480a | 3,650,797 |
def create_menu(menu_items, parent=None):
"""
Create the navigation nodes based on a passed list of dicts
"""
nodes = []
for menu_dict in menu_items:
try:
label = menu_dict['label']
except KeyError:
raise ImproperlyConfigured(
"No label specifi... | 0f54399e2101d6e3e4eff094041c92ea7d8eb069 | 3,650,798 |
def get_tg_ids(db):
    """Obtain a list of recognized Telegram user IDs.

    Args:
        db: Database connector
    Returns:
        Query results for later iteration
    """
    results = db.query(QUERY_TG_IDS)
    return results
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.