content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import random
def ports_info(ptfadapter, duthost, setup, tx_dut_ports):
"""
Return:
dut_iface - DUT interface name expected to receive packtes from PTF
ptf_tx_port_id - Port ID used by PTF for sending packets from expected PTF interface
dst_mac - DUT interface destination MAC address
... | 14aef7e68386872a1d960329f2f8bee452aa9e29 | 3,647,800 |
def test_text_single_line_of_text(region, projection):
    """Check that a single line of text is placed at the given (x, y) point."""
    fig = Figure()
    fig.text(
        region=region,
        projection=projection,
        x=1.2,
        y=2.4,
        text="This is a line of text",
    )
    return fig
def safely_get_form(request, domain, instance_id):
    """Fetch a form by instance id, raising if the user may not access it."""
    fetched_form = get_form_or_404(domain, instance_id)
    if can_edit_form_location(domain, request.couch_user, fetched_form):
        return fetched_form
    raise location_restricted_exception(request)
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
"""
n: the number of instances
d: the number of covariates
lifelines: the observational times
constant: make the coeffients constant (not time dependent)
n_binary: the number of binary... | 9c0da64f5796f57d474822121e1af5ca8ebb25e2 | 3,647,803 |
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = ... | d346fb75f5ff872147a166948af65bb52bab739c | 3,647,804 |
import torch
def calculate_regularization_term(means, n_objects, norm):
"""means: bs, n_instances, n_filters"""
bs, n_instances, n_filters = means.size()
reg_term = 0.0
for i in range(bs):
if n_objects[i]:
_mean_sample = means[i, : n_objects[i], :] # n_objects, n_filters
... | b6eb43a8915449c7e86d01a08b3ea2e77ae51064 | 3,647,805 |
from typing import Union
from typing import Sequence
def plot_timeseries_histograms(
axes: Axes,
data: pd.DataFrame,
bins: Union[str, int, np.ndarray, Sequence[Union[int, float]]] = "auto",
colormap: Colormap = cm.Blues,
**plot_kwargs,
) -> Axes: # pragma: no cover
"""Generate a heat-map-like... | 5f207097478f73d969e1f85f0aa2bbe5f894f038 | 3,647,806 |
def mode(x):
    """Return the most frequent element of ``x``.

    Args:
        x (list or array): Input values.

    Returns:
        The element with the highest count; ties resolve to the first
        (smallest) value in ``np.unique``'s sorted order.
    """
    values, frequencies = np.unique(x, return_counts=True)
    winner = int(np.argmax(frequencies))
    return values[winner]
def has_path(matrix, path: str) -> bool:
"""
Given a matrix, make sure there is a path for a given string or not.
Parameters
----------
path: str
A given path, like "abcd"
Returns
-------
out: bool
Whether the given path can be found in the matrix
"""
if not pa... | bbde72992b762dd73c44c60da675da829255000d | 3,647,808 |
def gensim_processing(data):
"""
Here we use gensim to define bi-grams and tri-grams which enable us to create a create a dictonary and corpus
We then process the data by calling the process_words function from our utils folder
"""
#build the models first
bigram = gensim.models.Phrases(data, mi... | 67a4d9a90c8ea9809980d9871b769288915fe3cc | 3,647,809 |
import os
import re
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\... | 9d89dacffe7ce865f2cb0702ccc37a57365af594 | 3,647,810 |
import os
def image_upload_to(instance, filename):
"""Create the path where to store the files.
If the file instance is a Sponsor, the file has to be the logo so it will be uploaded to
MEDIA_ROOT/sponsors/<sponsor_name>/logo<ext>.
"""
logger.debug("Hello!")
path = None
basename, ext =... | a82dc1bbdcfdb071ce0c578930e00ada64206673 | 3,647,811 |
def _distances(value_domain, distance_metric, n_v):
"""Distances of the different possible values.
Parameters
----------
value_domain : array_like, with shape (V,)
Possible values V the units can take.
If the level of measurement is not nominal, it must be ordered.
distance_metric : ... | 90c362db28497569a50475d7f6040755b1cfffea | 3,647,812 |
import torch
import math
def log_mvn_likelihood(mean: torch.FloatTensor, covariance: torch.FloatTensor, observation: torch.FloatTensor) -> torch.FloatTensor:
"""
all torch primitives
all non-diagonal elements of covariance matrix are assumed to be zero
"""
k = mean.shape[0]
variances = covaria... | 6333ea91ddff9ac685f18954c5b7344846810ec3 | 3,647,813 |
def M_Mobs(H0, M_obs):
"""
Given an observed absolute magnitude, returns absolute magnitude
"""
return M_obs + 5.*np.log10(H0/100.) | e7f817eaf281f2dd64f33ea4af44cd1cf9da31fa | 3,647,814 |
def generate_proctoring_requirements_email_context(user, course_id):
"""
Constructs a dictionary for use in proctoring requirements email context
Arguments:
user: Currently logged-in user
course_id: ID of the proctoring-enabled course the user is enrolled in
"""
course_module = modu... | fc594882b68b7f1f554fa1681943d49b722ae229 | 3,647,815 |
import random
def mutate_strings(s):
    """Return ``s`` with one randomly chosen mutation operator applied."""
    chosen_mutator = random.choice([
        delete_random_character,
        insert_random_character,
        flip_random_character,
    ])
    return chosen_mutator(s)
def get_one_hot(inputs, num_classes):
"""Get one hot tensor.
Parameters
----------
inputs: 3d numpy array (a x b x 1)
Input array.
num_classes: integer
Number of classes.
Returns
-------
One hot tensor.
3d numpy array (a x b x n).
"""
onehots = ... | 2f4a8b3a60a90a8f81579dd5938a1bab91cb5537 | 3,647,817 |
def one_hot_encoder(batch_inds, num_categories):
    """One-hot encode ``batch_inds`` over ``num_categories`` classes via jax.nn."""
    return jax.nn.one_hot(batch_inds, num_classes=num_categories)
def part1(entries: defaultdict) -> int:
    """Solve part 1: run the shared `calculate` simulation for 80 steps."""
    steps = 80
    return calculate(entries, steps)
def ceki_filter(data, bound):
    """Return a boolean mask: True where |data["ceki"]| is strictly below bound."""
    abs_ceki = data["ceki"].abs()
    return abs_ceki < bound
def get_middleware(folder, request_name, middlewares=None):
""" Gets the middleware for the given folder + request """
middlewares = middlewares or MW
if folder:
middleware = middlewares[folder.META.folder_name + "_" + request_name]
else:
middleware = middlewares[request_name]
if mi... | 720aafa5a3d0ef265eeaa8fe40a68c7024b0adc3 | 3,647,821 |
from typing import Dict
import types
from typing import List
def convert_dm_compatible_observations(
observes: Dict,
dones: Dict[str, bool],
observation_spec: Dict[str, types.OLT],
env_done: bool,
possible_agents: List,
) -> Dict[str, types.OLT]:
"""Convert Parallel observation so it's dm_env ... | 8dfe814037144e2da74375b0767f5dcde95ae44f | 3,647,822 |
def tf_repeat_2d(a, repeats):
    """Tile a 2-D tensor along a new leading axis `repeats` times
    (TensorFlow analogue of np.repeat for 2-D input)."""
    assert len(a.get_shape()) == 2
    expanded = tf.expand_dims(a, 0)
    return tf.tile(expanded, [repeats, 1, 1])
import torch
def accuracy(output, target, topk=(1,), output_has_class_ids=False):
"""Computes the accuracy over the k top predictions for the specified values of k"""
if not output_has_class_ids:
output = torch.Tensor(output)
else:
output = torch.LongTensor(output)
target = torch.LongT... | f702000a64db1bb6f53b7686f1143656f9864e8d | 3,647,824 |
def masked_residual_block(c, k, nonlinearity, init, scope):
"""
Residual Block for PixelCNN. See https://arxiv.org/abs/1601.06759
"""
with tf.variable_scope(scope):
n_ch = c.get_shape()[3].value
half_ch = n_ch // 2
c1 = nonlinearity(c)
c1 = conv(c1, k=1, out_ch=half_ch, ... | ffd4bb042affc0250472d50b6b824be66f808878 | 3,647,825 |
def calculate_lookup(src_cdf: np.ndarray, ref_cdf: np.ndarray) -> np.ndarray:
"""
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array
"""
lookup... | f1433e6af001ddcda44c740dabfb1ee643cd2260 | 3,647,826 |
def measureInTransitAndDiffCentroidForOneImg(prfObj, ccdMod, ccdOut, cube, rin, bbox, rollPhase, flags, hdr=None, plot=False):
"""Measure image centroid of in-transit and difference images
Inputs:
-----------
prfObj
An object of the class prf.KeplerPrf()
ccdMod, ccdOut
(int) CCD m... | 655477460e5841736f07106d5e6afd666d95f450 | 3,647,827 |
def readGlobalFileWithoutCache(fileStore, jobStoreID):
    """Copy the contents of ``jobStoreID`` into a fresh local temp file and
    return its path, bypassing the file-store cache.

    Works around toil issue #1532.
    """
    local_path = fileStore.getLocalTempFile()
    fileStore.jobStore.readFile(jobStoreID, local_path)
    return local_path
def get_user_granted_assets_direct(user):
"""Return assets granted of the user directly
:param user: Instance of :class: ``User``
:return: {asset1: {system_user1, system_user2}, asset2: {...}}
"""
assets = {}
asset_permissions_direct = user.asset_permissions.all()
for asset_permission in... | 602bd104835cc85dcf59339c8b4b2e2e2b5f747b | 3,647,829 |
def nullColumns(fileHeaders, allKeys):
    """
    Return the set of keys from allKeys that are absent from fileHeaders.
    """
    return set(allKeys) - set(fileHeaders)
def listable_attachment_tags(obj, joiner=" "):
"""
Return an html string containing links for each of the attachments for
input object. Images will be shown as hover images and other attachments will be
shown as paperclip icons.
"""
items = []
attachments = obj.attachment_set.all()
labe... | b2fa3fd249469334e42616f0e4392ce16d4076d1 | 3,647,831 |
import math
def distance_km(lat1, lon1, lat2, lon2):
""" return distance between two points in km using haversine
http://en.wikipedia.org/wiki/Haversine_formula
http://www.platoscave.net/blog/2009/oct/5/calculate-distance-latitude-longitude-python/
Author: Wayne Dyck
"""
ret_val = ... | f50d444b5769b1d00045429e3d577ec22f922774 | 3,647,832 |
def _flip(r, u):
"""Negate `r` if `u` is negated, else identity."""
return ~ r if u.negated else r | 18ddcf5132867f5646c729bdadcb2c5077df8c03 | 3,647,833 |
def get_arguments():
"""Defines command-line arguments, and parses them."""
parser = ArgumentParser()
# Execution mode
parser.add_argument(
"--mode",
"-m",
choices=['train', 'test', 'full'],
default='train',
help=(
"train: performs training and valida... | 5385c75524460ed4968def0ab98fc29112d72434 | 3,647,834 |
def twoThreeMove(tri, angle, face_num, perform = True, return_edge = False):
"""Apply a 2-3 move to a taut triangulation, if possible.
If perform = False, returns if the move is possible.
If perform = True, modifies tri, returns (tri, angle) for the performed move"""
face = tri.triangle(face_num)
... | 18abe14b2b8446d39e285f1facda82568b808b60 | 3,647,835 |
import csv
def obterUFEstadoPorNome(estado):
"""
Retorna o codigo UF do estado a partir do nome do estado
:param estado: Nome do estado
:return codigoDoEstado: Código UF do estado
"""
try:
with open("./recursos/estados.csv", newline="") as csvfile:
reader = csv.DictReader(c... | 9b136fe8c557e5f75bca235cf66168f92244a4e6 | 3,647,836 |
import random
def get_random_byte_string(byte_length):
""" Use this function to generate random byte string
"""
byte_list = []
i = 0
while i < byte_length:
byte_list.append(chr(random.getrandbits(8)))
i = i + 1
# Make into a string
byte_string = ''.join(byte_list)
retur... | 0ea923a045beb476501dc3d8983f3fe89efef008 | 3,647,837 |
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found.
Complexity Analysis:
Best case: O(t)
Worst Case: O(t)
In the best case the pattern is the empty string(''). In that scenario
thi... | 0101efe77570b5d027928495dc25cb4e02d5c2f5 | 3,647,838 |
def is_igb(request):
    """
    Return True when the request carries the EVE in-game-browser (IGB)
    trust header, i.e. ``HTTP_EVE_TRUSTED`` is present in ``request.META``.
    """
    # Membership test already yields a bool; no need for if/return True/False.
    return 'HTTP_EVE_TRUSTED' in request.META
from typing import OrderedDict
from re import T
def compile_ADAM_train_function(model, gparams, learning_rate=0.001, b1=0.9, b2=0.999, e=1e-8,
gamma=1 - 1e-8):
"""
ADAM update rules
Default values are taken from [Kingma2014]
References:
[Kingma2014] Kingma, Diederi... | a60f27c3b314d3adc2ec2f7bb0f8c92875d7625b | 3,647,840 |
import subprocess
import os
def Runge_Kutta_Fourth_Order(inputs, coordinate_file, temperature, zeta=-1., **keyword_parameters):
"""
This function determines the gradient of thermal expansion of a strucutre between two temperatures using
a forth order Runge-Kutta numerical analysis
:param Method: Gradi... | 72c539637359f9f4eaef06ac13ae8e24c4516f8d | 3,647,841 |
import os
def wooqi_conf():
"""
Wooqi configuration file read from specific project which is using wooqi
Return a dictionary containing all configuration attributes
"""
config_file_path = '{}/wooqi_conf.cfg'.format(os.getcwd())
if os.path.isfile(config_file_path):
config = read_cfg(con... | b24dda5d7c9376102af114d6a692c5be721e7866 | 3,647,842 |
def linear_svr_pred(X_train, Y_train):
    """
    Fit a linear Support Vector Regressor on the training data and return
    its predictions for X_train.
    """
    model = LinearSVR(random_state=RANDOM_STATE)
    model.fit(X_train, Y_train)
    return model.predict(X_train)
import math
def area(rad: float = 1.0) -> float:
    """
    Return ``rad * rad * pi / 4``.

    NOTE(review): despite the name ``rad``, this is the area of a circle
    whose *diameter* is ``rad`` (pi*r**2 would have no division by 4).
    The doctests below pin the current behaviour — confirm the intended
    parameter meaning with callers before changing it.

    >>> area(2.0)
    3.141592653589793
    >>> area(3.0)
    7.0685834705770345
    >>> area(4.0)
    12.566370614359172
    """
    # Expression kept in the original evaluation order so float results
    # match the doctests bit-for-bit.
    return rad * rad * math.pi / 4
import requests
import json
def check_coverage_running(url, coverage_name):
"""
Check if Navitia coverage is up and running
:param url: Navitia server coverage url
:param coverage_name: the name of the coverage to check
:return: Whether a Navitia coverage is up and running
"""
_log.info("c... | 3d3d9b1403c541aa0cdb8867845b21bf387431fb | 3,647,845 |
import random
def make_random_board(row_count, col_count, density=0.5):
"""create a random chess board with given size and density"""
board = {}
for row_num in range(row_count):
for col_num in range(col_count):
factor = random.random() / density
if factor >= 1:
... | ea40883989675c99aa70af0b180957aa677233a5 | 3,647,846 |
def create_roots(batch_data):
"""
Create root nodes for use in MCTS simulation. Takes as a parameter a list of tuples,
containing data for each game. This data consist of: gametype, state, type of player 1
and type of player 2
"""
root_nodes = []
for data in batch_data:
game = data[0... | d07b0781605b01d08c9ef78f30dad9254ade9907 | 3,647,847 |
def _parse_crs(crs):
"""Parse a coordinate reference system from a variety of representations.
Parameters
----------
crs : {str, dict, int, CRS}
Must be either a rasterio CRS object, a proj-string, rasterio supported
dictionary, WKT string, or EPSG integer.
Returns
-------
... | 559692b146ec99a9fe5407c8bca340c72dddf0a5 | 3,647,848 |
def hs_instance_get_all(context):
    """Get a list of hyperstash instances.

    :param context: request context forwarded unchanged to the backend.
    :return: whatever the backend ``IMPL.hs_instance_get_all`` returns
        (a list of hyperstash instances).
    """
    return IMPL.hs_instance_get_all(context)
import importlib
def import_from_file(module_name: str, filepath: str):
"""
Imports a module from file.
Args:
module_name (str): Assigned to the module's __name__ parameter (does not
influence how the module is named outside of this function)
filepath (str): Path to the .py fil... | 89ac082cbc7d3dd5d9158a8cc8eb5ef061c444e6 | 3,647,850 |
def plot_chirp(stim_inten, spike_bins, smooth=True, ax=None):
"""
Plot the response to a chirp stimulus (but could be any repeated stimulus, non-shuffled).
The response is plotted with seaborn's lineplot.
params:
- stim_inten: The whole stimulus intensity
- spike_bins: The cell's respon... | 75fe6defcb23a2c59e2241c9a68bf753dc6828b7 | 3,647,851 |
def _cigar_convert(cigar, chromosome, vci_file, strand='+', position=0):
"""
PHASE 1
Convert each CIGAR element to new mappings and construct an array on NEW cigar elements
For example, depending on the Intervals in the CHAIN file, let's say we have the following
CIGAR string: 35M49N65M
This ... | 5c7a0ea83d4959d87c03d0cbfe28d5bdbe02b97e | 3,647,852 |
import argparse
def get_parser():
"""Creates an ArgumentParser object."""
parser = argparse.ArgumentParser(
"clinker",
description="clinker: Automatic creation of publication-ready"
" gene cluster comparison figures.\n\n"
"clinker generates gene cluster comparison figures from ... | 881c7bc495edd37011c07d5db6ac80c816855f4a | 3,647,853 |
def init_context_processor(app):
"""定义html模板方法"""
@app.context_processor
def pjax_processor():
"""
pjax处理器
"""
def get_template(base, pjax=None):
pjax = pjax or 'pjax.html'
if 'X-PJAX' in request.headers:
return pjax
else:... | 6b5cf03ec48a1b1324a158388098da5e4884286f | 3,647,854 |
def tiered(backup_tier, R):
"""Returns a tier aware checker.
The returned checker ensures that it's possible to construct a set
(of length R) including given set s that will contain exactly one
node from the backup tier.
`backup_tier` is a list of node ids that count as backups.
A typical inv... | ecde647738fad88ea806948a0df7bee22a73abfa | 3,647,855 |
def ls_chebyshev( A, b, s_max, s_min, tol = 1e-8, iter_lim = None ):
"""
Chebyshev iteration for linear least squares problems
"""
A = aslinearoperator(A)
m, n = A.shape
d = (s_max*s_max+s_min*s_min)/2.0
c = (s_max*s_max-s_min*s_min)/2.0
theta = (1.0-s_min/s_max)/(1... | 05e50ac0167d1ed03ae3e9fa6876c94a50db7893 | 3,647,856 |
def compute_confusion_matrix(args, df_inference, strata):
"""From a list of prediction summary (as produced by get_cloud_prediction_summary), compute a confusion matrix."""
y_true = df_inference["vt_" + strata].values
y_predicted = df_inference["pred_" + strata].values
y_true = np.vectorize(get_closes... | 0662638c4db5ee9e1d94b1e582d9b0824eefd3ff | 3,647,857 |
def get_metadata(**kwargs):
"""Metadata
Get account metadata
Reference: https://iexcloud.io/docs/api/#metadata
Data Weighting: ``Free``
.. warning:: This endpoint is only available using IEX Cloud. See
:ref:`Migrating` for more information.
"""
return Metadata(**kwargs).... | 9f4b506bdf978f525e26d7f976a0fdc2f483ae0f | 3,647,858 |
import torch
def get_resnet50_moco_state_dict() -> dict:
"""
Get weight of ResNet50 trained with MoCo.
Returns:
(dict): Parameters and persistent buffers of ResNet50.
"""
model_path = get_model_root() / "resnet50_moco.pth"
if not model_path.exists():
# TODO download this from ... | 953f5e0fb037f9315173345910b8808cb400b9f4 | 3,647,859 |
from keystoneclient import service_catalog
import json
import six
import base64
def _validate_client(redis_client, url, tenant, token, env, blacklist_ttl,
max_cache_life):
"""Update the env with the access information for the user
:param redis_client: redis.Redis object connected to the ... | 9041786ecce14bc0af005d320e8ff9db49a07fc7 | 3,647,860 |
import os
import shutil
def _fetch(data_filename: str) -> str:
"""Fetch a given data file from either the local cache or the repository.
This function provides the path location of the data file given
its name in the histolab repository.
Parameters
----------
data_filename: str
Name o... | 0eadbd2e2c47a9f4ce3b8992c8795d69281a9ee4 | 3,647,861 |
import os
def subset_and_group_svs(input_dataset, sample_subset, sample_remap, sample_type, ignore_missing_samples, write_subsetted_bed=False):
"""
Parses raw SV calls from the input file into the desired SV output format for samples in the given subset
:param input_dataset: file path for the raw SV call... | 9397afcbbfa93155b4ae197f44c27267e6334aca | 3,647,862 |
def load_data():
"""
Carrega os dados do dataset iris
:return: dados carregados em uma matriz
"""
data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
# utiliza somente as duas primeiras classes
data = data[:100]
# transforma as... | fe2a1a999406f23676e58f75f1d5999e9f0697e8 | 3,647,863 |
import select
from datetime import datetime
async def activate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Activate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be activated.
session : Session
... | bdd679d94fc68d4c4c75f410d1ed3eec193f868b | 3,647,864 |
from sys import stdout
def group(spanins, prob_threshold, scope, valid_relation_set, mode):
"""
for each unary instance that is classified as being in a relation, get the other argument which is also classifier as being in the same relation but different role
ner1/2: list of unary instances
... | d76a02d341be1e836cfba560b11acfcc3f1527b9 | 3,647,865 |
def simulate_until_target_substate_or_max_t(
_simulate_until_attractor_or_target_substate_or_max_t, initial_state, perturbed_nodes_by_t,
predecessor_node_lists, truth_tables):
"""
Perform simulation to figure whether it reaches target substate.
Does not return states of simulations that don... | 526ef8085dcbe4bcbc112c3bd4626ec5247e2f97 | 3,647,866 |
import requests
from bs4 import BeautifulSoup
def query_snpedia_online(rsid):
"""
@param soup:
@param rsid:
"""
rsid = rsid.capitalize()
url = "https://bots.snpedia.com/index.php"
rsid_url = f"{url}/{rsid}"
page = requests.get(rsid_url)
soup = BeautifulSoup(page.content, "html.... | 138b252917b027564826212cfe96abafef3071b3 | 3,647,867 |
import os
import json
import aiohttp
async def get_session(client_id: str, client_secret: str) -> AuthToken:
"""
Use the Authorization Code Grant flow to get a token.
This opens a browser tab.
"""
refresh_token_file = os.path.join(config.config_dir(), '.refresh.token')
base_url = 'https://bit... | e92625219a3c48f805c0e18a172d1e7a75fceca1 | 3,647,868 |
def lower(value: str):
    """Template filter: return ``value`` converted to all lowercase."""
    lowered = value.lower()
    return lowered
import pprint
def validate_oidc():
    """Demonstrates how an access token is validated"""
    auth_header = request.headers['Authorization']
    token = auth_header.split(' ')[1]
    result = check_oidc_token(token)
    pprint.pprint(result)
    return jsonify({'success': result['success']})
def merge(a, b, path=None):
"""From https://stackoverflow.com/a/7205107"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
... | 8f7990f28168fe0e3eaca790baddc0088baedf65 | 3,647,871 |
def norm_sq(f,alpha,n,L_mat_long,step):
    """Negative log-likelihood objective with a squared-L2 penalty.

    Computes ``-sum(log(L @ f))/n + alpha * step**2 * sum(f**2)``.

    Parameters
    ----------
    f : 1-D numpy array
        Current estimate of \hat{f_\beta}. WARNING: mutated in place —
        entries <= 0 are clamped to 1e-6 so the log stays finite.
    alpha : float
        Regularization strength.
    n : int
        Number of observations (row count of the likelihood matrix).
    L_mat_long : numpy array
        Flattened likelihood matrix; reshaped here to (n, len(f)).
    step : float
        Grid spacing; ``step**2`` scales the discrete squared-L2 norm.

    Returns
    -------
    float
        Objective value to be minimized.
    """
    # Recover the (n, len(f)) likelihood matrix from its flattened form.
    L_mat=L_mat_long.reshape(n,len(f))
    # Clamp non-positive entries before np.log below.
    # NOTE(review): this writes into the caller's array — confirm intended.
    f[f <=0] = 1e-6
    val=np.log(np.dot(L_mat,f))
    # Mean negative log-likelihood plus the discretized squared-L2 penalty.
    return -sum(val)/n+ alpha*step**2*sum(f**2)
def get_applications(device_id: str = None, rpc_channel: InstrumentServer = None):
"""
获取手机应用列表
:param device_id:
:param rpc_channel:
:return:
"""
if not rpc_channel:
_rpc_channel = init(device_id)
else:
_rpc_channel = rpc_channel
application_list = _rpc_channel.call(... | 150884e18349003e33011477603e2a6462bd8492 | 3,647,873 |
def open_mfdataset(files, use_cftime=True, parallel=True, data_vars='minimal', chunks={'time':1},
coords='minimal', compat='override', drop=None, **kwargs):
"""optimized function for opening large cf datasets.
based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
... | ef31c732919f6b3cda0c6e5d9114fac7c39f40f7 | 3,647,874 |
def wls_sparse(X, y, w=1., calc_cov=False, verbose=False, **kwargs):
"""
Parameters
----------
X
y
w
calc_cov
verbose
kwargs
Returns
-------
"""
# The var returned by ln.lsqr is normalized by the variance of the error. To
# obtain the correct variance, it needs... | ff0bec6d6cdcee85506514348e8a812926427dee | 3,647,875 |
import os
def register_module():
"""Registers this module for use."""
def on_module_disable():
tags.Registry.remove_tag_binding(MathTag.binding_name)
def on_module_enable():
tags.Registry.add_tag_binding(MathTag.binding_name, MathTag)
global_routes = [
(RESOURCES_URI + '/.*'... | 6cdba610a18d4893b13bdf34a30c2fe7b05ff970 | 3,647,876 |
from typing import Tuple
def sobel_gradients(source: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Computes partial derivations to detect angle gradients.
"""
grad_x = generic_filter(source, np.matrix([
[1, 0, -1],
[2, 0, -2],
[1, 0, -1]]
))
grad_y = generic_filter... | 19c3e3eec46bee738b1e80dd73c5477f72dcf73c | 3,647,877 |
from typing import Mapping
def flat_dict(d, prefix=""):
"""
Loop through dictionary d
Append any key, val pairs to the return list ret
Add the prefix to any key param
Recurse if encountered value is a nested dictionary.
"""
if not isinstance(d, Mapping):
return d
ret = {}
... | f0c1f519126dea89c25ee38a9b0dd788c40d2088 | 3,647,878 |
import logging
def _get_filehandler_with_formatter(logname, formatter=None):
""" Return a logging FileHandler for given logname using a given
logging formatter
:param logname: Name of the file where logs will be stored, ".log"
extension will be added
:param formatter: An instance of logging.Format... | 1cc6f83480e691c4c54c359deabd6364da65f320 | 3,647,879 |
import torch
def gen_data_tensors(
df: pd.DataFrame,
lag: int = 6,
batch_size: int = 32,
validation_ratio: float = 0.2
) -> (DataLoader, DataLoader, TensorDataset, TensorDataset):
"""
Primary goal: create dataloader object.
"""
x_train, y_train = generate_supervised(df, lag=lag)
# ... | 1451d38bd695163d84784f5a6b9b791c3987d56b | 3,647,880 |
import json
def staff_for_site():
"""
Used by the Req/Req/Create page
- note that this returns Person IDs
"""
try:
site_id = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Site provided!")
else:
table = s3db.hrm_human_resource
... | d8890f31ae67abf72cdfbd14dd2af08762131e90 | 3,647,881 |
def element_z(sym_or_name):
"""Convert element symbol or name into a valid element atomic number Z.
Args:
sym_or_name: string type representing an element symbol or name.
Returns:
Integer z that is a valid atomic number matching the symbol or name.
Raises:
ElementZError: if the symb... | b79fec9062539f98ad8c96cdc41a52f7e9c67fd9 | 3,647,882 |
from typing import Tuple
def to_int(s: str) -> Tuple[bool, int]:
    """Convert a string ``s`` to an int, if possible.

    Returns:
        ``(True, n)`` when ``s`` parses as an integer, else ``(False, 0)``.
    """
    try:
        return True, int(s)
    except (ValueError, TypeError):
        # int() raises ValueError for malformed strings and TypeError for
        # unsupported argument types; anything else is a real bug and
        # should propagate rather than be silently swallowed.
        return False, 0
def simple_computation(maximum_terms:int=None, configuration_of=None):
"""
Simple 4-operand computations
移除了分数项,因为除法运算会表示为分数
禁用了括号(random_term的expression参数),因为会导致溢出
:return: Problem object
"""
if not configuration_of: configuration_of = 'simple_computation'
func_config = combine_configur... | 0dae4396f74b9a254d0c882c022018c7a69d69cd | 3,647,884 |
def add_torrents_from_folder(path, transmission_url, torrentleech_username, torrentleech_password, torrentleech_rss_key):
    """Console script for media_server_utils."""
    core.add_torrents_from_folder(
        path,
        transmission_url,
        torrentleech_username,
        torrentleech_password,
        torrentleech_rss_key,
    )
    return 0
def cut_graph(G, w):
"""
Cut a graph down to a given depth
Inputs: - G Input graph
- w Depth to cut to
Output: - cut_G Cut graph
"""
# Copy the initial graph and get the number of nodes
cut_G = G.copy()
N = len(G.nodes)
# Check all ... | 314f804d2d42146e1dcfb1f4a5373f92a4fe2f17 | 3,647,886 |
def archive_filter_search(articles_qs):
    """
    Filter the article queryset for the archive view (dropping articles
    that have a published update) and hand it back to the rendering hook.
    """
    filtered_qs = articles_qs.exclude(updates__article__stage=STAGE_PUBLISHED)
    return filtered_qs
from typing import List
def init_plotscript(config, markets: List, startup_candles: int = 0):
"""
Initialize objects needed for plotting
:return: Dict with candle (OHLCV) data, trades and pairs
"""
if "pairs" in config:
pairs = expand_pairlist(config['pairs'], markets)
else:
p... | 220118a5d438227932ba9473471d59ff03b44412 | 3,647,888 |
from typing import List
from typing import Callable
def get_one(data: List[LogEntry], filterfun: Callable) -> LogEntry:
"""Get a single entry and assert that after filtering only a single entry
remains."""
filtered = list(filter(filterfun, data))
if len(filtered) != 1:
raise ValueError(f"Entri... | ece1b0b9c654f85eda89e2fc8736c84b4a2ca9ca | 3,647,889 |
import os
def parse_ignorelist(f):
# type: (IO[Text]) -> Tuple[Ignorelist, Set[Text]]
"""
Parse the ignorelist file given by `f`, and return the parsed structure.
:returns: a tuple of an Ignorelist and a set of files that are completely
skipped by the linter (i.e. have a '*' entry).
... | 17fb3af2a4cd00e93f89023928493cc2cc2fc33e | 3,647,890 |
from typing import Optional
def create(env_name: str,
episode_length: int = 1000,
action_repeat: int = 1,
auto_reset: bool = True,
batch_size: Optional[int] = None,
**kwargs) -> Env:
"""Creates an Env with a specified brax system."""
env = _envs[env_name](**k... | 1d7e8bf147843799f01e7a894b86fce74ca86da7 | 3,647,891 |
def from_software_version(software_version):
"""
Returns the product version dependant limits_constants. This is based on
the running software version on the product and can change based on up when
you ask a cluster if upgrading.
Args:
software_version: (str) software version ex "3.1.2.0" o... | fc978610a6aa55a956bb849cae107bc134934f55 | 3,647,892 |
def _from_parse_feature(parse_feature):
"""Convert a single feature spec to a ColumnSchema."""
# FixedLenFeature
if isinstance(parse_feature, tf.FixedLenFeature):
representation = FixedColumnRepresentation(parse_feature.default_value)
return ColumnSchema(parse_feature.dtype, parse_feature.shape,
... | 9c7034c3b7663a0c49dc69dbf8507f90f4cacf83 | 3,647,893 |
import json
def conditional_patch_resource(
service_account_json, base_url, project_id, cloud_region, dataset_id, fhir_store_id
):
"""
If a resource is found based on the search criteria specified in
the query parameters, updates part of that resource by
applying the operations specified in a JSON... | e04eada0184d38c8d0b1ec4620fd5ef1f0bc90d5 | 3,647,894 |
def cal_big_F(p, f):
    """
    Compute the normalized pressure "big F" of the linearized finite-strain
    form: F = p / (3 f (1 + 2f)^{5/2}).
    not fully tested
    :param p: pressure
    :param f: finite strain "small f" (must be non-zero)
    :return: big F
    """
    denominator = 3. * f * np.power((1. + 2. * f), 2.5)
    return p / denominator
from typing import List
def multiply_aug(data_aug: List[str], factor: int) -> List[str]:
"""
The original idea here was to use to to speed up some vasp calculations for
supercells by initializing the entire CHGCAR file.
The current code does not deal with transformation of the Augemetation charges aft... | 2baef4c98dbb83f1a08f11e58f3c4cf82ad8ea64 | 3,647,896 |
def _parse_instance_chain(chain_str):
    """Resolve a dotted object chain (e.g. ``"foo.bar.baz"``) against the
    session's registered instances and return the final attribute."""
    parts = chain_str.split('.')
    root_name, attr_names = parts[0], parts[1:]
    target = session['instances'][root_name]
    for name in attr_names:
        target = getattr(target, name)
    return target
import math
def epochs_lists(
draw,
start_time=math.inf,
max_epochs=5,
min_deme_size=FLOAT_EPS,
max_deme_size=FLOAT_MAX,
):
"""
A hypothesis strategy for creating lists of Epochs for a deme.
:param float start_time: The start time of the deme.
:param int max_epochs: The maximum nu... | 9eebece7ac1dc2f9ad6d13f7368de62a6db9433c | 3,647,898 |
import torch
def mlp_layers(nch_input, nch_layers, b_shared=True, bn_momentum=0.1, dropout=0.0):
""" [B, Cin, N] -> [B, Cout, N] or
[B, Cin] -> [B, Cout]
"""
layers = []
last = nch_input
for i, outp in enumerate(nch_layers):
if b_shared:
weights = torch.nn.Conv1d(last, ... | 8085b99b828fcbadee191d90737d582f7dd9ce73 | 3,647,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.