content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def createCSV(obj, filename):
    """Write the rows in *obj* to ``<filename>.csv`` using the Excel dialect.

    Args:
        obj: iterable of rows (each row an iterable of cell values), as
            accepted by ``csv.writer.writerows``.
        filename: output path WITHOUT the ``.csv`` extension; it is appended.
    """
    # newline="" stops the text layer from translating the csv module's own
    # \r\n row terminator (which would double newlines on Windows); the
    # context manager guarantees the handle is closed even if writing fails.
    with open(filename + ".csv", "w", newline="") as file:
        writer = csv.writer(file, dialect="excel")
        writer.writerows(obj)
def xirr(cashflows,guess=0.1):
"""
Calculate the Internal Rate of Return of a series of cashflows at irregular intervals.
Arguments
---------
* cashflows: a list object in which each element is a tuple of the form (date, amount), where date is a python datetime.date object and amount is an integer o... | 5,353,001 |
def check_rt_druid_fields(rt_table_columns, druid_columns):
"""
对比rt的字段,和druid物理表字段的区别
:param rt_table_columns: rt的字段转换为druid中字段后的字段信息
:param druid_columns: druid物理表字段
:return: (append_fields, bad_fields),需变更增加的字段 和 有类型修改的字段
"""
append_fields, bad_fields = [], []
for key, value in rt_tab... | 5,353,002 |
def is_stuck(a, b, eta):
    """Report whether the Ricci flow has stalled.

    True when ``a - b < eta/50`` holds element-wise (evaluated via numexpr).
    """
    below_threshold = ne.evaluate("a-b<eta/50")
    return below_threshold.all()
def make_map(mapping):
"""
Takes a config.yml mapping, and returns a dict of mappers.
"""
# TODO: Is this the best place for this? Should it be a @staticmethod,
# or even part of its own class?
fieldmap = {}
for field, config in mapping.items():
if type(config) is str:
... | 5,353,004 |
def task_eeg_to_bids():
"""Step 00: Bring data set into a BIDS compliant directory structure."""
# Run the script for each subject in a sub-task.
for subject in subjects:
yield dict(
# This task should come after `task_check`
task_dep=['check'],
# A name for the ... | 5,353,005 |
def run_pre_mapping_settings_triggers(sender, instance: MappingSetting, **kwargs):
"""
:param sender: Sender Class
:param instance: Row instance of Sender Class
:return: None
"""
default_attributes = ['EMPLOYEE', 'CATEGORY', 'PROJECT', 'COST_CENTER']
instance.source_field = instance.source_... | 5,353,006 |
def stable_seasoal_filter(time_series: Sized, freq: int):
"""
Стабильный сезонный фильтр для ряда.
:param time_series: временной ряд
:param freq: частота расчета среднего значения
:return: значения сезонной составляющей
"""
length = len(time_series)
if length < freq:
raise Value... | 5,353,007 |
def load_dataset(dataset, batch_size=512):
"""Load dataset with given dataset name.
Args:
dataset (str): name of the dataset, it has to be amazoncat-13k, amazoncat-14k,
eurlex-4.3k or rcv1-2k
batch_size (int): batch size of tf dataset
Returns:
(tf.dataset... | 5,353,008 |
def make_str_lst_unc_val(id, luv):
"""
make_str_lst_unc_val(id, luv)
Make a formatted string from an ID string and a list of uncertain values.
Input
-----
id A number or a string that will be output as a string.
luv A list of DTSA-II UncertainValue2 items. These will be printed
a... | 5,353,009 |
def plot_faces(ax, coordinates, meta, st):
"""plot the faces"""
for s in st.faces:
# check that this face isnt in the cut region
def t_param_difference(v1, v2):
return abs(meta["t"][v1] - meta["t"][v2])
if all(all(t_param_difference(v1, v2) < 2 for v2 in s) for v1 in s):
... | 5,353,010 |
def translate_null_strings_to_blanks(d: typing.Dict) -> typing.Dict:
"""Map over a dict and translate any null string values into ' '.
Leave everything else as is. This is needed because you cannot add TableCell
objects with only a null string or the client crashes.
:param Dict d: dict of item values.
... | 5,353,011 |
def test_uvh5_partial_write_ints_irregular_multi2(uv_uvh5, tmp_path):
"""
Test writing a uvh5 file using irregular interval for freq and pol and
integer dtype.
"""
full_uvh5 = uv_uvh5
partial_uvh5 = full_uvh5.copy()
partial_uvh5.data_array = None
partial_uvh5.flag_array = None
partia... | 5,353,012 |
def HttpResponseRest(request, data):
"""
Return an Http response into the correct output format (JSON, XML or HTML),
according of the request.format parameters.
Format is automatically added when using the
:class:`igdectk.rest.restmiddleware.IGdecTkRestMiddleware` and views decorators.
"""
... | 5,353,013 |
def build_server_update_fn(model_fn, server_optimizer_fn, server_state_type,
model_weights_type):
"""Builds a `tff.tf_computation` that updates `ServerState`.
Args:
model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
server_optimizer_fn: A no-arg function th... | 5,353,014 |
def madgraph_tarball_filename(physics):
"""Returns the basename of a MadGraph tarball for the given physics"""
# Madgraph tarball filenames do not have a part number associated with them; overwrite it
return svj_filename("step0_GRIDPACK", Physics(physics, part=None)).replace(
".root", ".tar.xz"
... | 5,353,015 |
def _mk_cmd(verb, code, payload, dest_id, **kwargs) -> Command:
    """A convenience function, to cope with a change to the Command class."""
    # NOTE: argument order intentionally differs from this wrapper's
    # signature — Command.from_attrs expects (verb, dest_id, code, payload).
    return Command.from_attrs(verb, dest_id, code, payload, **kwargs)
def test_config_absent_already_configured():
"""
config_absent method - add config removed
"""
config_data = [
"snmp-server community randomSNMPstringHERE group network-operator",
"snmp-server community AnotherRandomSNMPSTring group network-admin",
]
side_effect = MagicMock(sid... | 5,353,017 |
def getobjname(item):
    """Return *item*'s ``Name`` attribute, or a single blank space.

    Falls back to ``' '`` when the field is absent (``BadEPFieldError``),
    so callers always receive a printable string.
    """
    try:
        return item.Name
    except BadEPFieldError:
        # Field missing on this IDF object — substitute a blank placeholder.
        return ' '
def test_generator(storage):
"""It should support generator values."""
breaker = CircuitBreaker(state_storage=storage)
@breaker
def func_yield_succeed():
"""Docstring"""
yield True
@breaker
def func_yield_exception():
"""Docstring"""
x = yield True
rais... | 5,353,019 |
def process_fire_data(filename=None, fire=None, and_save=False, timezone='Asia/Bangkok', to_drop=True):
""" Add datetime, drop duplicate data and remove uncessary columns.
"""
if filename:
fire = pd.read_csv(filename)
# add datetime
fire = add_datetime_fire(fire, timezone)
# drop dup... | 5,353,020 |
def schedule_slack_tweets(**kwargs):
"""
Schedule a tweet to be sent out once it is user approved in slack
"""
num_tweets = 1
interval = 15
tweet_url = "https://twitter.com/{name}/status/{tweet_id}"
embeded_tweet = tweet_url.format(name=kwargs["screen_name"], tweet_id=kwargs["tweet_id"])
... | 5,353,021 |
def __feed_pets_without_confirmation(plan: FeedPlan):
"""Feed all the pets in the plan. Print the result to the terminal.
Warning: this function does ask for confirmation.
"""
# Feel free to refactor this such that we don't iterate over
# the plan twice.
feed_requests: List[Callable[[], Respons... | 5,353,022 |
def create_controllable_source(source, control, loop, sleep):
"""Makes an observable controllable to handle backpressure
This function takes an observable as input makes it controllable by
executing it in a dedicated worker thread. This allows to regulate
the emission of the items independently of the ... | 5,353,023 |
def getE5():
    """Return the current value of the module-level ``E5`` object.

    Simply delegates to ``E5.get()``; ``E5`` is defined elsewhere in this
    module (presumably a widget/variable with a getter — TODO confirm).
    """
    return E5.get()
def test_translate_command(command, expected):
    """Check the marcel --> docker command translation."""
    # Parametrized test: `command` is the marcel input, `expected` the
    # docker-equivalent command it should translate to.
    assert translate_command(command) == expected
def triangulate(pts_subset):
"""
This function encapsulates the whole triangulation algorithm into four
steps. The function takes as input a list of points. Each point is of the
form [x, y], where x and y are the coordinates of the point.
Step 1) The list of points is split into groups. ... | 5,353,026 |
def get_session(region, default_bucket):
"""Gets the sagemaker session based on the region.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
`sagemaker.session.Session instance
"""
boto_session = boto3.S... | 5,353,027 |
def safe_write_file(file: str, s: str) -> None:
"""
Safely write to a file by acquiring an exclusive lock to prevent other
processes from reading and writing to it while writing.
"""
# Open in read and update mode, so we don't modify the file before we acquire a lock
file_obj = open(file, "r+")
... | 5,353,028 |
def intp_sc(x, points):
"""
SCurve spline based interpolation
args:
x (list) : t coordinate list
points (list) : xyz coordinate input points
returns:
x (relative coordinate point list)
o (xyz coordinate points list, resplined)
"""
sc = vtk.vtkSCurveSpline(... | 5,353,029 |
def backend():
    """Return the name of the backend PyEddl is currently using.

    # Returns
        String, the current backend name (read from the module-level
        ``_BACKEND`` constant).

    # Example
    ```python
    >>> eddl.backend.backend()
    'eddl'
    ```
    """
    return _BACKEND
def query_collection_mycollections():
"""
Query Content API Collection with access token.
"""
access_token = request.args.get("access_token", None)
if access_token is not None and access_token != '':
# Construct an Authorization header with the value of 'Bearer <access token>'
... | 5,353,031 |
def test_merge2_sql_semantics_outerjoin_multi_keep_firstNone():
"""
Test that merge2 matches the following SQL query:
select
f.id as foo_id,
f.col1 as foo_col1,
f.col2 as foo_col2,
f.team_name as foo_teamname,
b.id as bar_id,
b.col1 as bar_col1,
... | 5,353,032 |
def xmlbuildmanual() -> __xml_etree:
"""
Returns a empty xml ElementTree obj to build/work with xml data
Assign the output to var
This is using the native xml library via etree shipped with the python standard library.
For more information on the xml.etree api, visit: https://docs.python.org/3... | 5,353,033 |
def graclus_cluster(row, col, weight=None, num_nodes=None):
"""A greedy clustering algorithm of picking an unmarked vertex and matching
it with one its unmarked neighbors (that maximizes its edge weight).
Args:
row (LongTensor): Source nodes.
col (LongTensor): Target nodes.
weight (... | 5,353,034 |
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature... | 5,353,035 |
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or ... | 5,353,036 |
def load_all_functions(path, tool, factorize=True, agents_quantities=False, rewards_only=False, f_only=False):
""" Loads all results of parameter synthesis from *path* folder into two maps - f list of rational functions for each property, and rewards list of rational functions for each reward
Args:
... | 5,353,037 |
def run_epoch():
    """Run one epoch and return the reward averaged over test episodes.

    First runs ``NUM_EPIS_TRAIN`` training episodes (their rewards are
    discarded), then ``NUM_EPIS_TEST`` evaluation episodes whose rewards
    are averaged.
    """
    for _ in range(NUM_EPIS_TRAIN):
        run_episode(for_training=True)
    # Collect evaluation rewards; np.mean accepts the list directly.
    rewards = [run_episode(for_training=False) for _ in range(NUM_EPIS_TEST)]
    return np.mean(rewards)
def _max_pool(heat, kernel=3):
"""
NCHW
do max pooling operation
"""
# print("heat.shape: ", heat.shape) # default: torch.Size([1, 1, 152, 272])
pad = (kernel - 1) // 2
h_max = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
# print("h_max.shape: ", h_max.shape... | 5,353,039 |
def getBool(string):
    """Stub implementation that always reports True.

    Set the PshellServer.py softlink to PshellServer-full.py for the full
    functionality; the `string` argument is ignored here.
    """
    return True
def multi_dists(
continuous,
categorical,
count_cutoff,
summary_type,
ax=None,
stripplot=False,
order="ascending",
newline_counts=False,
xtick_rotation=45,
xtick_ha="right",
seaborn_kwargs={},
stripplot_kwargs={},
):
"""
Compare the distributions of a continuous v... | 5,353,041 |
def expand_xdg(xdg_var: str, path: str) -> PurePath:
"""Return the value of an XDG variable prepended to path.
This function expands an XDG variable, and then concatenates to it the
given path. The XDG variable name can be passed both uppercase or
lowercase, and either with or without the 'XDG_' prefix... | 5,353,042 |
def follow_index(request):
"""Просмотр подписок"""
users = request.user.follower.all()
paginator = Paginator(users, 3)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(request, 'recipes/follow_index.html',
{'page': page, 'paginator': pa... | 5,353,043 |
def test_sample_multi_sync_1():
"""
Tests the multi-threaded sample submission
:return:
"""
t = ThunderstormAPI(host=THOR_THUNDERSTORM_HOST, port=THOR_THUNDERSTORM_PORT)
status1 = t.get_status()
results = t.scan_multi(SAMPLES_1)
status2 = t.get_status()
assert results
assert len(... | 5,353,044 |
def main():
"""
Main function :)
:return:
"""
config = Config()
config.read()
level = logging.INFO
if config.enforce_type(bool, config.DEFAULT.Debug):
level = logging.DEBUG
set_sql_debug(True)
# our logger
logger = setup_logger('mesh', level)
# meshtastic lo... | 5,353,045 |
def test_drawcounties_cornbelt():
    """draw counties on the map"""
    # Visual regression test: the returned figure is presumably compared
    # against a baseline image by the test harness — TODO confirm.
    mp = MapPlot(sector="cornbelt", title="Counties", nocaption=True)
    mp.drawcounties()
    return mp.fig
def calculate_kde_cli(
ascending=True,
evaluate=False,
input_ts="-",
columns=None,
start_date=None,
end_date=None,
clean=False,
skiprows=None,
index_type="datetime",
source_units=None,
target_units=None,
names=None,
tablefmt="csv",
):
"""Return the kernel density ... | 5,353,047 |
def convert_remoteResources_fields(asset):
"""
"remoteResources" : [ {
"@class": ".XRemoteResource",
"dataSource": null,
"keywords": null,
"label": "testresource",
"lastModifiedTimestamp": 1472138644728,
"remoteResourceId": 8446,
"resourceNumber": "1258.15... | 5,353,048 |
def install(name, dst, capture_error=False):
"""Install the user provided entry point to be executed as follow:
- add the path to sys path
- if the user entry point is a command, gives exec permissions to the script
Args:
name (str): name of the script or module.
dst (str): path... | 5,353,049 |
def atomic_degrees(mol: IndigoObject) -> dict:
"""Get the number of atoms direct neighbors (except implicit hydrogens) in a molecule.
Args:
IndigoObject: molecule object
Returns:
dict: key - feature name, value - torch.tensor of atomic degrees
"""
degrees = []
for atom in mol.i... | 5,353,050 |
def hotspots(raster, kernel, x='x', y='y'):
"""Identify statistically significant hot spots and cold spots in an input
raster. To be a statistically significant hot spot, a feature will have a
high value and be surrounded by other features with high values as well.
Neighborhood of a feature defined by t... | 5,353,051 |
def create_from_image(input_path, output_path=None,
fitimage="FITDEF",
compress="NORMAL",
zoom=0, # %; 0=100%
size=Point(0, 0), # Point (in mm), int or str; 1,2..10=A3R,A3..B5
align=("CENTER", "CENTER"), # LEFT/CENTER/RIGHT, TOP/CENTER/BOTTOM
maxpapersize="DEFAULT",
... | 5,353,052 |
def _read_config(filename):
"""Reads configuration file.
Returns DysonLinkCredentials or None on error.
"""
config = configparser.ConfigParser()
logging.info('Reading "%s"', filename)
try:
config.read(filename)
except configparser.Error as ex:
logging.critical('Could not r... | 5,353,053 |
def view_share_link(request, token):
"""
Translate a given sharelink to a proposal-detailpage.
:param request:
:param token: sharelink token, which includes the pk of the proposal
:return: proposal detail render
"""
try:
pk = signing.loads(token, max_age=settings.MAXAGESHARELINK)
... | 5,353,054 |
def create(pdef):
"""Scikit-learn Pipelines objects creation (deprecated).
This function creates a list of sklearn Pipeline objects starting from the
list of list of tuples given in input that could be created using the
adenine.core.define_pipeline module.
Parameters
-----------
pdef : lis... | 5,353,055 |
def pressure_differentiable(altitude):
    """
    Computes the pressure at a given altitude with a differentiable model.

    Interpolates log-pressure (via ``interpolated_log_pressure``, defined
    elsewhere) and exponentiates, so the result is smooth in altitude.

    Args:
        altitude: Geopotential altitude [m]

    Returns: Pressure [Pa]
    """
    return np.exp(interpolated_log_pressure(altitude))
def write_pmd_field(h5, data, name=None):
"""
Data is a dict with:
attrs: flat dict of attributes.
components: flat dict of components
See inverse routine:
.readers.load_field_data
"""
if name:
g = h5.create_group(name)
else:
g = h5
... | 5,353,057 |
def create_optimizer(hparams, global_step, use_tpu=False):
"""Creates a TensorFlow Optimizer.
Args:
hparams: ConfigDict containing the optimizer configuration.
global_step: The global step Tensor.
use_tpu: If True, the returned optimizer is wrapped in a
CrossShardOptimizer.
Returns:
A Tens... | 5,353,058 |
def compare_dirs_ignore_words(dir1, dir2, ignore_words, ignore_files=None):
"""Same as compare_dirs but ignores lines with words in ignore_words.
"""
return compare_dirs(
dir1,
dir2,
ignore=ignore_files,
function=lambda file1, file2:
compare_text_files_ignore_lines(fi... | 5,353,059 |
def escalon(num, den):
"""
Función escalón, para generar la respuesta escalón en base a una
función de transferencia.
Ejemplo:
escalon(num, den)
num = valores en formato de lista, que contiene lo valores del
númerador de la fución de transferencia.
den = valores en formato de li... | 5,353,060 |
def test_env_dirs_correct():
    """Test that the .envrc values are correct for each directory environment variable"""
    # Pair each expected env-var name with its expected directory and compare
    # against the mapping parsed from .envrc at module import time.
    assert dict_envrc_dir == dict(zip(ENV_DIRS_EXPECTED, DIRS_EXPECTED))
def toggle_ascii_filter():
    """Toggle the global ASCII filter flag (Y) and refresh the display."""
    global ascii_filter
    # Flip the flag in one step instead of a manual if/else.
    ascii_filter = not ascii_filter
    display_textual_content()
def test_robot_depends_on(
    robot_with_mount_and_modules_services: Dict[str, Any]
) -> None:
    """Confirm that modules depend on emulator proxy."""
    # NOTE(review): the docstring says modules depend on the proxy, but the
    # assertion checks the robot service has no depends_on — confirm intent.
    assert robot_with_mount_and_modules_services[OT2_ID].depends_on is None
def is_zero(actual: Union[int, float]):
"""
Checks if an object is equal to zero.
:param actual: object to evaluate
:return: None
:raise AssertionError: if object is not equal to zero
"""
_check_argument_is_number(actual, 'is_zero')
if actual != 0:
raise AssertionError(f"'{short... | 5,353,064 |
def float_to_wazn(value):
    """Converts a float value to an integer in the WAZN notation.

    The float format has a maximum of 6 decimal digits.

    :param value: value to convert from float to WAZN notation
    :returns: converted value in WAZN notation
    """
    # NOTE(review): Decimal(value) preserves the binary-float representation
    # error; Decimal(str(value)) would round-trip decimal literals exactly —
    # confirm whether this matters at MICRO_WAZN precision.
    return int(Decimal(value) / MICRO_WAZN)
def timer(func):
""" Decorator to measure execution time """
import time
def wrapper(*args, **kwargs):
start_time = time.time()
ret = func(*args, **kwargs)
elapsed = time.time() - start_time
print('{:s}: {:4f} sec'.format(func.__name__, elapsed))
return ret
re... | 5,353,066 |
def test_wf_3nd_st_3(plugin):
""" workflow with three tasks, third one connected to two previous tasks,
splitter and partial combiner (from the second task) on the workflow level
"""
wf = Workflow(name="wf_st_9", input_spec=["x", "y"])
wf.add(add2(name="add2x", x=wf.lzin.x))
wf.add(add2(name... | 5,353,067 |
def validate_assessments(url):
"""
Validate tests inside of the file referenced by the URL argument.
:param url: URL to the tests file
"""
valid = True
for test in tests_loader.load_all_tests(url):
assessment = assessment_loader.load_assessment_from_urls(test.questions_url, test.answer... | 5,353,068 |
def _create_keyword_plan_campaign(client, customer_id, keyword_plan):
"""Adds a keyword plan campaign to the given keyword plan.
Args:
client: An initialized instance of GoogleAdsClient
customer_id: A str of the customer_id to use in requests.
keyword_plan: A str of the keyword plan res... | 5,353,069 |
def word2vec(sentences, year):
"""
Creates a word2vec model.
@param sentences: list of list of words in each sentence (title + abstract)
@return word2vec model
"""
print("Creating word2vec model")
model = Word2Vec(sentences, size=500, window=5, min_count=1, workers=4)
model.save(f"models... | 5,353,070 |
def true_rjust(string, width, fillchar=' '):
    """Right-justify *string* to *width*, measuring by printable length.

    Uses ``true_len`` (defined elsewhere) instead of ``len`` so escape
    sequences / wide glyphs don't skew the padding. If the string is already
    wider than *width*, it is returned unpadded.
    """
    padding = width - true_len(string)
    return padding * fillchar + string
def test_execute_list_collection_all(mocker, capsys, mock_collection_objects, tmp_path_factory):
"""Test listing all collections from multiple paths"""
cliargs()
mocker.patch('os.path.exists', return_value=True)
mocker.patch('os.path.isdir', return_value=True)
gc = GalaxyCLI(['ansible-galaxy', 'co... | 5,353,072 |
def threshold_generator_with_values(values, duration, num_classes):
"""
Args:
values: A Tensor with shape (-1,)
Values = strictly positive, float thresholds.
duration: An int.
num_classes: An int.
Returns:
thresh: A Tensor with shape
(len(list_values... | 5,353,073 |
def is_hermitian(mx, tol=1e-9):
"""
Test whether mx is a hermitian matrix.
Parameters
----------
mx : numpy array
Matrix to test.
tol : float, optional
Tolerance on absolute magitude of elements.
Returns
-------
bool
True if mx is hermitian, otherwise False... | 5,353,074 |
def argmax(a, b, axis=1, init_value=-1, name="argmax"):
""" sort in axis with ascending order """
assert axis<len(a.shape) and len(a.shape)<=2, "invalid axis"
assert b.shape[axis] == 2, "shape mismatch"
size = a.shape[axis] # save max arg index
def argmax2d(A, B):
init = hcl.compute((2,... | 5,353,075 |
def morphology(src, operation="open", kernel_shape=(3, 3), kernel_type="ones"):
"""Dynamic calls different morphological operations
("open", "close", "dilate" and "erode") with the given parameters
Arguments:
src (numpy.ndarray) : source image of shape (rows, cols)
operation (str, optional)... | 5,353,076 |
def msd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=None):
"""Compute the mean displacement and mean squared displacement of one
trajectory over a range of time intervals.
Parameters
----------
traj : DataFrame with one trajectory, including columns frame, x, and y
mpp : microns ... | 5,353,077 |
def build_rnd_graph(golden, rel, seed=None):
"""Build a random graph for testing."""
def add_word(word):
if word not in words:
words.add(word)
def add_edge(rel, word1, word2):
data.append((rel, word1, word2))
random.seed(seed)
m, _ = golden.shape
words = set()
... | 5,353,078 |
def iree_build_test(name, targets):
    """Dummy rule to ensure that targets build.

    This is currently undefined in bazel and is preserved for compatibility.

    Args:
        name: rule name (unused by this no-op stub).
        targets: build targets (unused by this no-op stub).
    """
    pass
def main() -> None:
"""Main program.
"""
utils_io.find_or_create_dir(DATA_FOLDER)
utils_io.find_or_create_dir(SPECTROGRAMS_FOLDER)
utils_io.find_or_create_dir(PLOTS_FOLDER)
preprocess_data(method='download')
classify_bands_different_genres()
classify_bands_same_genre()
classify_genre... | 5,353,080 |
def _make_parser_func(sep):
"""
Create a parser function from the given sep.
Parameters
----------
sep: str
The separator default to use for the parser.
Returns
-------
A function object.
"""
def parser_func(
filepath_or_buffer: Union[str, pathlib.Path, IO[AnyS... | 5,353,081 |
def load_and_join(LC_DIR):
"""
load and join quarters together.
Takes a list of fits file names for a given star.
Returns the concatenated arrays of time, flux and flux_err
"""
fnames = sorted(glob.glob(os.path.join(LC_DIR, "*fits")))
hdulist = fits.open(fnames[0])
t = hdulist[1].data
... | 5,353,082 |
def dcm_to_pil_image_gray(file_path):
"""Read a DICOM file and return it as a gray scale PIL image"""
ds = dcmread(file_path)
# Get the image after apply clahe
img_filtered = Image.fromarray(apply_clahe(ds.pixel_array).astype("uint8"))
# Normalize original image to the interval [0, 255]
img = cv... | 5,353,083 |
def get_object_unique_name(obj: Any) -> str:
    """Build an identifier string that is unique for *obj*'s lifetime.

    The string has the form ``<object class name>_<object_hex_id>``.
    """
    class_name = type(obj).__name__
    object_id = hex(id(obj))
    return f"{class_name}_{object_id}"
def create_voting_dict():
"""
Input: a list of strings. Each string represents the voting record of a senator.
The string consists of
- the senator's last name,
- a letter indicating the senator's party,
- a couple of letters indicating the senator's home ... | 5,353,085 |
def areFriends(profile1, profile2):
"""Checks wether profile1 is connected to profile2 and profile2 is connected to profile1"""
def check(p1, p2):
if p1.isServiceIdentity:
fsic = get_friend_serviceidentity_connection(p2.user, p1.user)
return fsic is not None and not fsic.deleted
... | 5,353,086 |
def filter_dict(regex_dict, request_keys):
"""
filter regular expression dictionary by request_keys
:param regex_dict: a dictionary of regular expressions that
follows the following format:
{
"name": "sigma_aldrich",
"regexes": ... | 5,353,087 |
def calculate_partition_movement(prev_assignment, curr_assignment):
"""Calculate the partition movements from initial to current assignment.
Algorithm:
For each partition in initial assignment
# If replica set different in current assignment:
# Get Difference in sets
:rty... | 5,353,088 |
def benchmark(part, methods, p=0.9, n_shots=1000, randseed=None):
"""
Benchmark different count-correction methods with qubits partitioned into groups.
part: list of positive integers, representing partition of qubits
p: probability of any bit having its expected value when generating fake calibration ... | 5,353,089 |
def correct_crop_centers(
centers: List[Union[int, torch.Tensor]],
spatial_size: Union[Sequence[int], int],
label_spatial_shape: Sequence[int],
) -> List[int]:
"""
Utility to correct the crop center if the crop size is bigger than the image size.
Args:
ceters: pre-computed crop centers,... | 5,353,090 |
def contrast_normalize(data, centered=False):
"""Normalizes image data to have variance of 1
Parameters
----------
data : array-like
data to be normalized
centered : boolean
When False (the default), centers the data first
Returns
-------
data : array-like
norm... | 5,353,091 |
def create_gdrive_folders(website_short_id: str) -> bool:
"""Create gdrive folder for website if it doesn't already exist"""
folder_created = False
service = get_drive_service()
base_query = "mimeType = 'application/vnd.google-apps.folder' and not trashed and "
query = f"{base_query}name = '{website... | 5,353,092 |
def filter_for_recognized_pumas(df):
    """Drop rows whose "puma" value is not a recognized NYC PUMA.

    Intended for income-restricted (and similar) indicator tables that are
    keyed by PUMA but include a few non-PUMA summary rows; more flexible than
    capping ``nrows`` when reading the csv/excel source.
    """
    recognized_pumas = get_all_NYC_PUMAs()
    mask = df["puma"].isin(recognized_pumas)
    return df[mask]
def hotspots2006(path):
"""Hawaian island chain hotspot Argon-Argon ages
Ar-Ar Ages (millions of years) and distances (km) from Kilauea along the
trend of the chain of Hawaian volcanic islands and other seamounts that
are believed to have been created by a moving "hot spot".
A data frame with 10 observation... | 5,353,094 |
def school_booking_cancel(request, pk_booking):
"""Render the school booking cancel page for a school representative.
:param request: httprequest received
:type request: HttpRequest
:param pk_booking: Primary Key of a Booking
:type pk_booking: int
:return: Return a HttpResponse whose content is... | 5,353,095 |
def draw_beam_figure():
"""Draw a simple astigmatic beam ellipse with labels."""
theta = np.radians(30)
xc = 0
yc = 0
dx = 50
dy = 25
plt.subplots(1, 1, figsize=(6, 6))
# If the aspect ratio is not `equal` then the major and minor radii
# do not appear to be orthogonal to each othe... | 5,353,096 |
def bandpass_filter(df, spiky_var):
"""Detect outliers according to a passband filter specific to each variable.
Parameters
----------
df: pandas DataFrame that contains the spiky variable
spiky_var: string that designate the spiky variable
Returns
-------
id_outlier: index of outliers... | 5,353,097 |
def load_energy():
"""Loads the energy file, skipping all useluss information and returns it as a dataframe"""
energy = pd.read_excel("Energy Indicators.xls", skiprows=17, header=0,
skip_footer=53-15, na_values="...", usecols=[2,3,4,5])
# Rename columns
energy.columns = ["Coun... | 5,353,098 |
def list_list_to_string(list_lists,data_delimiter=None,row_formatter_string=None,line_begin=None,line_end=None):
"""Repeatedly calls list to string on each element of a list and string adds the result
. ie coverts a list of lists to a string. If line end is None the value defaults to "\n", for no seperator use ... | 5,353,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.