Dataset schema (column name and type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
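
The sample rows below follow this schema. As a minimal sketch of how such rows could be loaded and screened on the quality-signal columns (assuming a local Parquet export named sample.parquet; the file name and threshold values are illustrative assumptions, not part of the dataset), one might write:

```python
# Minimal sketch: load rows with the schema above and apply illustrative
# quality-signal thresholds. The file name and cutoff values are assumptions.
import pandas as pd

df = pd.read_parquet("sample.parquet")  # hypothetical local export of rows like those below

# Keep Python files that parse (cate_ast == 1), are not dominated by duplicated
# 10-grams, and contain a reasonable fraction of alphabetic characters.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
)
filtered = df.loc[mask, ["hexsha", "max_stars_repo_name", "size", "content"]]
print(f"kept {len(filtered)} of {len(df)} rows")
```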

hexsha: 8a6c2e5a6d6baef647e0e3b1e7b605691b398cfe | size: 188 | ext: py | lang: Python
repo (identical across the max_stars / max_issues / max_forks groups): tghira16/Giraphics @ 74265c4c0220c677e0fa3e5e65fd0b7087401106, path res/example1.py, licenses ["MIT"]
max_stars_count: 1 (2021-03-24T10:09:57.000Z to 2021-03-24T10:09:57.000Z) | max_issues_count: null | max_forks_count: null
content:
from giraphics.graphing.graph import Graph
def func(x):
return (x-3)*(x+2)*x*0.2
g = Graph(800,600,8,6, 'example1.svg')
g.bg()
g.grid()
g.axes()
g.graph(func)
g.save()
g.display()

avg_line_length: 12.533333 | max_line_length: 42 | alphanum_fraction: 0.632979
Quality signals (qsc_code_ / qsc_codepython_ prefix and _quality_signal suffix omitted): num_words 38, num_chars 188, mean_word_length 3.131579, frac_words_unique 0.631579, frac_chars_top_2grams 0.10084, frac_chars_digital 0.080247, frac_chars_whitespace 0.138298, size_file_byte 188, num_lines 15, num_chars_line_max 43, num_chars_line_mean 12.533333, frac_chars_alphabet 0.654321, frac_chars_string_length 0.063492, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.1, codepython_cate_var_zero false, codepython_frac_lines_import 0.1, codepython_frac_lines_simplefunc 0.1, codepython_score_lines_no_logic 0.3; all remaining *_quality_signal columns are 0.
Raw qsc_* columns (no _quality_signal suffix): all 0 or null | effective: 1 | hits: 0
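
The per-file statistics stored alongside each row (num_chars, num_lines, word and whitespace fractions, and so on) can be recomputed directly from content. The exact definitions behind the qsc_* signals are not spelled out in this dump, so the following is only a hedged sketch of plausible formulas, not the dataset's reference implementation:

```python
# Illustrative recomputation of simple per-file statistics from a `content`
# string. The formulas are assumptions about what the similarly named
# qsc_* columns measure; they may differ from the dataset's definitions.
def basic_stats(content: str) -> dict:
    lines = content.splitlines()
    words = content.split()
    n_chars = max(len(content), 1)
    n_words = max(len(words), 1)
    n_lines = max(len(lines), 1)
    return {
        "num_chars": len(content),
        "num_lines": len(lines),
        "num_words": len(words),
        "mean_word_length": sum(len(w) for w in words) / n_words,
        "frac_words_unique": len(set(words)) / n_words,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars,
        "frac_chars_digital": sum(c.isdigit() for c in content) / n_chars,
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "num_chars_line_mean": sum(len(line) for line in lines) / n_lines,
    }
```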

hexsha: 8a6c4e202130d51c730ab01bd3f2f21e5ec32862 | size: 758 | ext: py | lang: Python
repo (all three groups): seanys/2D-Irregular-Packing-Algorithm @ cc10edff2bc2631fcbcb47acf7bb3215e5c5023c, path tools/data.py, licenses ["MIT"]
max_stars_count: 29 (2020-02-07T06:41:25.000Z to 2022-03-16T18:04:07.000Z) | max_issues_count: 6 (2020-04-27T01:36:27.000Z to 2022-01-31T11:59:05.000Z) | max_forks_count: 12 (2020-05-05T05:34:06.000Z to 2022-03-26T07:32:46.000Z)
content:
from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
'''报错数据集有(空心):han,jakobs1,jakobs2 '''
'''形状过多暂时未处理:shapes、shirt、swim、trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("开始处理",name[index],"数据集")
'''暂时没有考虑宽度,全部缩放来表示'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("缩放",scale[index],"倍")
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons

avg_line_length: 36.095238 | max_line_length: 141 | alphanum_fraction: 0.60686
Quality signals: num_words 110, num_chars 758, mean_word_length 4.172727, frac_words_unique 0.581818, frac_chars_top_2grams 0.026144, frac_chars_top_3grams 0.074074, frac_chars_top_4grams 0.043573, frac_chars_digital 0.063291, frac_chars_whitespace 0.166227, size_file_byte 758, num_lines 20, num_chars_line_max 142, num_chars_line_mean 37.9, frac_chars_alphabet 0.662975, frac_chars_comments 0.039578, frac_chars_string_length 0.170472, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.0625, codepython_cate_var_zero false, codepython_frac_lines_import 0.1875, codepython_score_lines_no_logic 0.3125, codepython_frac_lines_print 0.125; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a6e9d6c995b4c34ef5a6722c4973c2c7fb333f1 | size: 1,065 | ext: py | lang: Python
repo (all three groups): nirdslab/streaminghub @ a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf, path projects/eyetracking/gen_adhd_sin.py, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2020-01-22T15:35:29.000Z to 2020-01-22T15:35:29.000Z)
content:
#!/usr/bin/env python3
import glob
import os
import pandas as pd
import dfs
SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"
if __name__ == '__main__':
files = glob.glob(f"{SRC_DIR}/*.csv")
file_names = list(map(os.path.basename, files))
for file_name in file_names:
df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
df.columns = ['t', 'x', 'y', 'dl', 'dr']
# fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill
df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
df['x'] = df['x'] / 1920
df['y'] = df['y'] / 1080
df['d'] = (df['dl'] + df['dr']) / 2
# start with t=0, and set unit to ms
df['t'] = (df['t'] - df['t'].min()) / 1000
df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
df.to_csv(f'{OUT_DIR}/{file_name}')
print(f'Processed: {file_name}')

avg_line_length: 35.5 | max_line_length: 107 | alphanum_fraction: 0.613146
Quality signals: num_words 170, num_chars 1,065, mean_word_length 3.652941, frac_words_unique 0.482353, frac_chars_top_2grams 0.05153, frac_chars_top_3grams 0.022544, frac_chars_top_4grams 0.032206, frac_chars_dupe_5grams 0.077295, frac_chars_dupe_6grams 0.077295, frac_chars_dupe_7grams 0.077295, frac_chars_dupe_8grams 0.077295, frac_chars_digital 0.01891, frac_chars_whitespace 0.155869, size_file_byte 1,065, num_lines 29, num_chars_line_max 108, num_chars_line_mean 36.724138, frac_chars_alphabet 0.671858, frac_chars_comments 0.117371, frac_chars_string_length 0.277481, frac_chars_long_word_length 0.11206, codepython_cate_ast 1, codepython_cate_var_zero false, codepython_frac_lines_import 0.190476, codepython_score_lines_no_logic 0.190476, codepython_frac_lines_print 0.047619; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a6f626dba5ce35c66724326d654b9ba19117497 | size: 4,322 | ext: py | lang: Python
repo (all three groups): TauferLab/PENGUIN @ af789163078310f2504b8a0163df4395ccf119f1, path dataProcessing.py, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import CurveFit
import shutil
#find all DIRECTORIES containing non-hidden files ending in FILENAME
def getDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
directories=[]
for directory in os.scandir(DIRECTORY):
for item in os.scandir(directory):
if item.name.endswith(FILENAME) and not item.name.startswith("."):
directories.append(directory.path)
return directories
#get all non-hidden data files in DIRECTORY with extension EXT
def getDataFiles(DIRECTORY, EXT='txt'):
datafiles=[]
for item in os.scandir(DIRECTORY):
if item.name.endswith("."+EXT) and not item.name.startswith("."):
datafiles.append(item.path)
return datafiles
#checking if loss ever doesn't decrease for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
minLoss=np.inf
epochMin=0
for i in range(0,loss.size):
if loss[i] < minLoss:
minLoss=loss[i]
epochMin=epoch[i]
elif (epoch[i]-epochMin) >= numEpochs:
return i, minLoss
return i, minLoss
#dirpath is where the accuracy and loss files are stored. want to move the files into the same format expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
for item in os.scandir(SEARCHDIR):
name=str(item.name)
files=name.split('-')
SAVEFULLDIR=SAVEDIR+str(files[0])
if not os.path.exists(SAVEFULLDIR):
try:
os.makedirs(SAVEFULLDIR)
except FileExistsError:
#directory already exists--must have been created between the if statement & our attempt at making directory
pass
shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))
#a function to read in information (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header='infer', sep=' '):
data = pd.read_csv(FILENAME, sep, header=header)
if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
batch_size=np.array(sortedData['batch_size'])
learning_rate=np.array(sortedData['learning_rate'])
convKers=np.array(sortedData['convKernels'])
return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)
elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
sortedData=data.sort_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
else:
print("Missing a column in NN datafile")
raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')
#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
#we can slice the data to sample less often, but not more often. We verify that we're not being asked for a granularity that is smaller than the frequency of datapoints in the vectors.
if x[0] > xsize:
return x,y,z,w
else:
result=(1.0/x[0])*xsize
#result is how often we should take datapoints if we wish to consider values every xsize
x=x[int(result-1)::int(result)]
y=y[int(result-1)::int(result)]
if z is not None:
z=z[int(result-1)::int(result)]
if w is None:
return x,y,z
else:
return x,y
#if we get to this point in function, it means z and w are both not None.
w=w[int(result-1)::int(result)]
return x,y,z,w

avg_line_length: 38.936937 | max_line_length: 204 | alphanum_fraction: 0.657103
Quality signals: num_words 601, num_chars 4,322, mean_word_length 4.703827, frac_words_unique 0.31614, frac_chars_top_2grams 0.027237, frac_chars_top_3grams 0.066148, frac_chars_top_4grams 0.045278, frac_chars_dupe_5grams 0.303856, frac_chars_dupe_6grams 0.259993, frac_chars_dupe_7grams 0.22179, frac_chars_dupe_8grams 0.22179, frac_chars_dupe_9grams 0.22179, frac_chars_dupe_10grams 0.22179, frac_chars_digital 0.004846, frac_chars_whitespace 0.236002, size_file_byte 4,322, num_lines 110, num_chars_line_max 205, num_chars_line_mean 39.290909, frac_chars_alphabet 0.851302, frac_chars_comments 0.215178, frac_lines_dupe_lines 0.246753, frac_lines_long_string 0.012987, frac_chars_string_length 0.110027, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.077922, codepython_cate_var_zero false, codepython_frac_lines_pass 0.012987, codepython_frac_lines_import 0.090909, codepython_score_lines_no_logic 0.272727, codepython_frac_lines_print 0.012987; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a6fea40902a5d1ec59a6cdd9117e96fcdef70a1 | size: 572 | ext: py | lang: Python
repo (all three groups): Jackthebighead/recruiment-2022 @ a81007908e3c2f65a6be3ff2d62dfb92d0753b0d, path algo_probs/newcoder/classic/nc52.py, licenses ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
# 题意:给出一个仅包含字符'(',')','{','}','['和']',的字符串,判断给出的字符串是否是合法的括号序列。括号必须以正确的顺序关闭,"()"和"()[]{}"都是合法的括号序列,但"(]"和"([)]"不合法。
# @param s string字符串
# @return bool布尔型
#
class Solution:
def isValid(self , s ):
# write code here
if not s: return True
stack = []
dic = {'{':'}','[':']','(':')'}
for char in s:
if not stack or char in dic: stack.append(char)
elif stack and dic.get(stack[-1])!=char: return False
else:
stack.pop()
continue
return True

avg_line_length: 30.105263 | max_line_length: 114 | alphanum_fraction: 0.47028
Quality signals: num_words 62, num_chars 572, mean_word_length 4.33871, frac_words_unique 0.645161, frac_chars_top_2grams 0.037175, frac_chars_digital 0.002632, frac_chars_whitespace 0.335664, size_file_byte 572, num_lines 19, num_chars_line_max 115, num_chars_line_mean 30.105263, frac_chars_alphabet 0.705263, frac_chars_comments 0.286713, frac_chars_string_length 0.014888, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.083333, codepython_cate_var_zero false, codepython_score_lines_no_logic 0.25; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0
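
The Python-specific signals (codepython_cate_ast, frac_lines_import, frac_lines_print, frac_lines_assert) read like parse-level and line-level checks. A hedged sketch of how such checks could be computed, again under assumed definitions rather than the dataset's own:

```python
# Assumed, illustrative versions of the Python parse/line signals: whether the
# file parses with ast, and the fraction of non-empty lines that are imports,
# prints, or asserts. The dataset's real definitions may differ.
import ast

def python_line_signals(content: str) -> dict:
    try:
        ast.parse(content)
        parses = 1
    except (SyntaxError, ValueError):
        parses = 0
    lines = [line.strip() for line in content.splitlines() if line.strip()]
    n = max(len(lines), 1)
    return {
        "cate_ast": parses,
        "frac_lines_import": sum(line.startswith(("import ", "from ")) for line in lines) / n,
        "frac_lines_print": sum(line.startswith("print(") for line in lines) / n,
        "frac_lines_assert": sum(line.startswith("assert") for line in lines) / n,
    }
```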

hexsha: 8a73038a9d54b6fdd609f321f9fbc694a2017b7b | size: 2,385 | ext: py | lang: Python
repo (all three groups): airbornemint/PieCrust2 @ bd8e44a1a3ba646a9ebfbb4d4f1fa01a1daa3beb, path piecrust/processing/util.py, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
import os.path
import time
import logging
import yaml
from piecrust.processing.base import Processor
logger = logging.getLogger(__name__)
class _ConcatInfo(object):
timestamp = 0
files = None
delim = "\n"
class ConcatProcessor(Processor):
PROCESSOR_NAME = 'concat'
def __init__(self):
super(ConcatProcessor, self).__init__()
self._cache = {}
def matches(self, path):
return path.endswith('.concat')
def getDependencies(self, path):
info = self._load(path)
return info.files
def getOutputFilenames(self, filename):
return [filename[:-7]]
def process(self, path, out_dir):
dirname, filename = os.path.split(path)
out_path = os.path.join(out_dir, filename[:-7])
info = self._load(path)
if not info.files:
raise Exception("No files specified in: %s" %
os.path.relpath(path, self.app.root_dir))
logger.debug("Concatenating %d files to: %s" %
(len(info.files), out_path))
encoded_delim = info.delim.encode('utf8')
with open(out_path, 'wb') as ofp:
for p in info.files:
with open(p, 'rb') as ifp:
ofp.write(ifp.read())
if info.delim:
ofp.write(encoded_delim)
return True
def _load(self, path):
cur_time = time.time()
info = self._cache.get(path)
if (info is not None and
(cur_time - info.timestamp <= 1 or
os.path.getmtime(path) < info.timestamp)):
return info
if info is None:
info = _ConcatInfo()
self._cache[path] = info
with open(path, 'r') as fp:
config = yaml.load(fp)
info.files = config.get('files', [])
info.delim = config.get('delim', "\n")
info.timestamp = cur_time
path_mode = config.get('path_mode', 'relative')
if path_mode == 'relative':
dirname, _ = os.path.split(path)
info.files = [os.path.join(dirname, f) for f in info.files]
elif path_mode == 'absolute':
info.files = [os.path.join(self.app.root_dir, f)
for f in info.files]
else:
raise Exception("Unknown path mode: %s" % path_mode)
return info

avg_line_length: 28.392857 | max_line_length: 71 | alphanum_fraction: 0.554717
Quality signals: num_words 288, num_chars 2,385, mean_word_length 4.458333, frac_words_unique 0.322917, frac_chars_top_2grams 0.063084, frac_chars_top_3grams 0.023364, frac_chars_top_4grams 0.024922, frac_chars_dupe_5grams 0.054517, frac_chars_dupe_6grams 0.024922, frac_chars_digital 0.003127, frac_chars_whitespace 0.32956, size_file_byte 2,385, num_lines 83, num_chars_line_max 72, num_chars_line_mean 28.73494, frac_chars_alphabet 0.799875, frac_lines_dupe_lines 0.0625, frac_chars_string_length 0.060403, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.09375, codepython_cate_var_zero false, codepython_frac_lines_import 0.078125, codepython_frac_lines_simplefunc 0.03125, codepython_score_lines_no_logic 0.359375; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a73f2115b3d49a7048eebbbf6a7d009bf2bcb02 | size: 864 | ext: py | lang: Python
repo (all three groups): ckamtsikis/cmssw @ ea19fe642bb7537cbf58451dcf73aa5fd1b66250, path TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py, licenses ["Apache-2.0"]
max_stars_count: 852 (2015-01-11T21:03:51.000Z to 2022-03-25T21:14:00.000Z) | max_issues_count: 30,371 (2015-01-02T00:14:40.000Z to 2022-03-31T23:26:05.000Z) | max_forks_count: 3,240 (2015-01-02T05:53:18.000Z to 2022-03-31T17:24:21.000Z)
content:
import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
## use b-tagging two distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)

avg_line_length: 36 | max_line_length: 83 | alphanum_fraction: 0.706019
Quality signals: num_words 101, num_chars 864, mean_word_length 6.039604, frac_words_unique 0.653465, frac_chars_top_2grams 0.044262, frac_chars_top_3grams 0.062295, frac_chars_digital 0.014472, frac_chars_whitespace 0.200231, size_file_byte 864, num_lines 23, num_chars_line_max 84, num_chars_line_mean 37.565217, frac_chars_alphabet 0.868307, frac_chars_comments 0.392361, frac_chars_string_length 0.174257, frac_chars_long_word_length 0.112871, codepython_cate_ast 1, codepython_cate_var_zero false, codepython_frac_lines_import 0.090909, codepython_score_lines_no_logic 0.090909; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a78745915eb3a4aaf90865a024b4d8bafd46ca7 | size: 5,151 | ext: py | lang: Python
repo (all three groups): leelige/mindspore @ 5199e05ba3888963473f2b07da3f7bca5b9ef6dc, path research/gnn/sgcn/postprocess.py, licenses ["Apache-2.0"]
max_stars_count: 1 (2021-11-18T08:17:44.000Z to 2021-11-18T08:17:44.000Z) | max_issues_count: null | max_forks_count: 2 (2019-09-01T06:17:04.000Z to 2019-10-04T08:39:45.000Z)
content:
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
def score_model(preds, test_pos, test_neg, weight, bias):
"""
Score the model on the test set edges in each epoch.
Args:
epoch (LongTensor): Training epochs.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
# operands could not be broadcast together with shapes (4288,128) (128,3)
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()

avg_line_length: 48.140187 | max_line_length: 117 | alphanum_fraction: 0.644729
Quality signals: num_words 685, num_chars 5,151, mean_word_length 4.674453, frac_words_unique 0.340146, frac_chars_top_2grams 0.033729, frac_chars_top_3grams 0.06371, frac_chars_top_4grams 0.016864, frac_chars_dupe_5grams 0.138663, frac_chars_dupe_6grams 0.094316, frac_chars_dupe_7grams 0.078701, frac_chars_dupe_8grams 0.063086, frac_chars_dupe_9grams 0.063086, frac_chars_dupe_10grams 0.042473, frac_chars_digital 0.021423, frac_chars_whitespace 0.211609, size_file_byte 5,151, num_lines 106, num_chars_line_max 118, num_chars_line_mean 48.59434, frac_chars_alphabet 0.767052, frac_chars_comments 0.230052, frac_lines_dupe_lines 0.03125, frac_chars_string_length 0.214512, frac_chars_long_word_length 0.037995, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.046875, codepython_cate_var_zero false, codepython_frac_lines_import 0.078125, codepython_score_lines_no_logic 0.15625, codepython_frac_lines_print 0.078125; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a78d7cdf72b62f6c5c9341d633e72ed6d4ea01c | size: 4,001 | ext: py | lang: Python
repo (all three groups): dvolgyes/keops @ 58b2c5f7822a7468a6da2ce439939e7dad04d7f3, path pykeops/common/get_options.py, licenses ["MIT"]
max_stars_count: 1 (2020-09-29T13:21:30.000Z to 2020-09-29T13:21:30.000Z) | max_issues_count: null | max_forks_count: null
content:
import re
import numpy as np
from collections import OrderedDict
import pykeops
import pykeops.config
############################################################
# define backend
############################################################
class SetBackend():
"""
This class is used to centralized the options used in PyKeops.
"""
dev = OrderedDict([('CPU',0),('GPU',1)])
grid = OrderedDict([('1D',0),('2D',1)])
memtype = OrderedDict([('host',0), ('device',1)])
possible_options_list = ['auto',
'CPU',
'GPU',
'GPU_1D', 'GPU_1D_device', 'GPU_1D_host',
'GPU_2D', 'GPU_2D_device', 'GPU_2D_host'
]
def define_tag_backend(self, backend, variables):
"""
Try to make a good guess for the backend... available methods are: (host means Cpu, device means Gpu)
CPU : computations performed with the host from host arrays
GPU_1D_device : computations performed on the device from device arrays, using the 1D scheme
GPU_2D_device : computations performed on the device from device arrays, using the 2D scheme
GPU_1D_host : computations performed on the device from host arrays, using the 1D scheme
GPU_2D_host : computations performed on the device from host data, using the 2D scheme
:param backend (str), variables (tuple)
:return (tagCPUGPU, tag1D2D, tagHostDevice)
"""
# check that the option is valid
if (backend not in self.possible_options_list):
raise ValueError('Invalid backend. Should be one of ', self.possible_options_list)
# auto : infer everything
if backend == 'auto':
return int(pykeops.config.gpu_available), self._find_grid(), self._find_mem(variables)
split_backend = re.split('_',backend)
if len(split_backend) == 1: # CPU or GPU
return self.dev[split_backend[0]], self._find_grid(), self._find_mem(variables)
elif len(split_backend) == 2: # GPU_1D or GPU_2D
return self.dev[split_backend[0]], self.grid[split_backend[1]], self._find_mem(variables)
elif len(split_backend) == 3: # the option is known
return self.dev[split_backend[0]], self.grid[split_backend[1]], self.memtype[split_backend[2]]
def define_backend(self, backend, variables):
tagCPUGPU, tag1D2D, tagHostDevice = self.define_tag_backend(backend, variables)
return self.dev[tagCPUGPU], self.grid[tag1D2D], self.memtype[tagHostDevice]
@staticmethod
def _find_dev():
return int(pykeops.config.gpu_available)
@staticmethod
def _find_mem(variables):
if all([type(var) is np.ndarray for var in variables ]): # Infer if we're working with numpy arrays or torch tensors:
MemType = 0
elif pykeops.config.torch_found:
import torch
if all([type(var) in [torch.Tensor, torch.nn.parameter.Parameter] for var in variables]):
from pykeops.torch.utils import is_on_device
VarsAreOnGpu = tuple(map(is_on_device, tuple(variables)))
if all(VarsAreOnGpu):
MemType = 1
elif not any(VarsAreOnGpu):
MemType = 0
else:
raise ValueError('At least two input variables have different memory locations (Cpu/Gpu).')
else:
raise TypeError('All variables should either be numpy arrays or torch tensors.')
return MemType
@staticmethod
def _find_grid():
return 0
def get_tag_backend(backend, variables, str = False):
"""
entry point to get the correct backend
"""
res = SetBackend()
if not str:
return res.define_tag_backend(backend, variables)
else:
return res.define_backend(backend, variables)

avg_line_length: 38.84466 | max_line_length: 125 | alphanum_fraction: 0.59935
Quality signals: num_words 476, num_chars 4,001, mean_word_length 4.890756, frac_words_unique 0.260504, frac_chars_top_2grams 0.056701, frac_chars_top_3grams 0.039519, frac_chars_top_4grams 0.044674, frac_chars_dupe_5grams 0.292096, frac_chars_dupe_6grams 0.243127, frac_chars_dupe_7grams 0.213918, frac_chars_dupe_8grams 0.166667, frac_chars_dupe_9grams 0.097079, frac_chars_dupe_10grams 0.097079, frac_chars_digital 0.014972, frac_chars_whitespace 0.282179, size_file_byte 4,001, num_lines 102, num_chars_line_max 126, num_chars_line_mean 39.22549, frac_chars_alphabet 0.795613, frac_chars_comments 0.227443, frac_lines_dupe_lines 0.133333, frac_chars_string_length 0.091419, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.1, codepython_cate_var_zero false, codepython_frac_lines_import 0.116667, codepython_frac_lines_simplefunc 0.033333, codepython_score_lines_no_logic 0.466667; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a78e9f69beda0a9b40161770e8196cc19774191 | size: 4,306 | ext: py | lang: Python
repo (all three groups): tkm2261/dnn-voice-changer @ 63a4ca0b2d8a33a26fc5aaec168180152df1b429, path prepare_features_vc.py, licenses ["MIT"]
max_stars_count: 13 (2018-03-09T07:56:50.000Z to 2022-03-26T12:23:22.000Z) | max_issues_count: null | max_forks_count: 2 (2018-06-16T03:44:56.000Z to 2021-04-06T17:32:38.000Z)
content:
"""Prepare acoustic features for one-to-one voice conversion.
usage:
prepare_features_vc.py [options] <DATA_ROOT> <source_speaker> <target_speaker>
options:
--max_files=<N> Max num files to be collected. [default: 100]
--dst_dir=<d> Destination directory [default: data/cmu_arctic_vc].
--overwrite Overwrite files.
-h, --help show this help message and exit
"""
from __future__ import division, print_function, absolute_import
from docopt import docopt
import numpy as np
from nnmnkwii.datasets import FileSourceDataset
from nnmnkwii import preprocessing as P
from nnmnkwii.preprocessing.alignment import DTWAligner
from nnmnkwii.datasets import cmu_arctic, voice_statistics, vcc2016
import pysptk
import pyworld
from scipy.io import wavfile
from tqdm import tqdm
from os.path import basename, splitext, exists, expanduser, join, dirname
import os
import sys
from hparams import vc as hp
from hparams import hparams_debug_string
# vcc2016.WavFileDataSource and voice_statistics.WavFileDataSource can be
# drop-in replacement. See below for details:
# https://r9y9.github.io/nnmnkwii/latest/references/datasets.html#builtin-data-sources
class MGCSource(cmu_arctic.WavFileDataSource):
def __init__(self, data_root, speakers, max_files=None):
super(MGCSource, self).__init__(data_root, speakers,
max_files=max_files)
self.alpha = None
def collect_features(self, wav_path):
fs, x = wavfile.read(wav_path)
x = x.astype(np.float64)
f0, timeaxis = pyworld.dio(x, fs, frame_period=hp.frame_period)
f0 = pyworld.stonemask(x, f0, timeaxis, fs)
spectrogram = pyworld.cheaptrick(x, f0, timeaxis, fs)
spectrogram = P.trim_zeros_frames(spectrogram)
if self.alpha is None:
self.alpha = pysptk.util.mcepalpha(fs)
mgc = pysptk.sp2mc(spectrogram, order=hp.order, alpha=self.alpha)
# Drop 0-th coefficient
mgc = mgc[:, 1:]
# 50Hz cut-off MS smoothing
hop_length = int(fs * (hp.frame_period * 0.001))
modfs = fs / hop_length
mgc = P.modspec_smoothing(mgc, modfs, cutoff=50)
# Add delta
mgc = P.delta_features(mgc, hp.windows)
return mgc.astype(np.float32)
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
DATA_ROOT = args["<DATA_ROOT>"]
source_speaker = args["<source_speaker>"]
target_speaker = args["<target_speaker>"]
max_files = int(args["--max_files"])
dst_dir = args["--dst_dir"]
overwrite = args["--overwrite"]
print(hparams_debug_string(hp))
X_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [source_speaker],
max_files=max_files))
Y_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [target_speaker],
max_files=max_files))
skip_feature_extraction = exists(join(dst_dir, "X")) \
and exists(join(dst_dir, "Y"))
if overwrite:
skip_feature_extraction = False
if skip_feature_extraction:
print("Features seems to be prepared, skipping feature extraction.")
sys.exit(0)
# Create dirs
for speaker, name in [(source_speaker, "X"), (target_speaker, "Y")]:
d = join(dst_dir, name)
print("Destination dir for {}: {}".format(speaker, d))
if not exists(d):
os.makedirs(d)
# Convert to arrays
print("Convert datasets to arrays")
X, Y = X_dataset.asarray(verbose=1), Y_dataset.asarray(verbose=1)
# Alignment
print("Perform alignment")
X, Y = DTWAligner().transform((X, Y))
print("Save features to disk")
for idx, (x, y) in tqdm(enumerate(zip(X, Y))):
# paths
src_name = splitext(basename(X_dataset.collected_files[idx][0]))[0]
tgt_name = splitext(basename(Y_dataset.collected_files[idx][0]))[0]
src_path = join(dst_dir, "X", src_name)
tgt_path = join(dst_dir, "Y", tgt_name)
# Trim and ajast frames
x = P.trim_zeros_frames(x)
y = P.trim_zeros_frames(y)
x, y = P.adjust_frame_lengths(x, y, pad=True, divisible_by=2)
# Save
np.save(src_path, x)
np.save(tgt_path, y)

avg_line_length: 35.883333 | max_line_length: 86 | alphanum_fraction: 0.656061
Quality signals: num_words 567, num_chars 4,306, mean_word_length 4.781305, frac_words_unique 0.340388, frac_chars_top_2grams 0.029509, frac_chars_top_3grams 0.018443, frac_chars_top_4grams 0.023239, frac_chars_dupe_5grams 0.101807, frac_chars_dupe_6grams 0.019181, frac_chars_digital 0.012154, frac_chars_whitespace 0.235718, size_file_byte 4,306, num_lines 119, num_chars_line_max 87, num_chars_line_mean 36.184874, frac_chars_alphabet 0.811607, frac_chars_comments 0.171621, frac_lines_dupe_lines 0.025641, frac_chars_string_length 0.072435, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.025641, codepython_cate_var_zero false, codepython_frac_lines_import 0.205128, codepython_score_lines_no_logic 0.25641, codepython_frac_lines_print 0.102564; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a7905cf7b3fc947d0fefe5c680371a050d82807 | size: 1,855 | ext: py | lang: Python
repo (all three groups): zgtz/streamlit @ be797682394955ef2b94a5f7641b6f9d8fd1dbfc, path lib/tests/streamlit/pydeck_test.py, licenses ["Apache-2.0"]
max_stars_count: 1 (2022-01-19T10:48:49.000Z to 2022-01-19T10:48:49.000Z) | max_issues_count: 52 (2021-10-04T21:52:48.000Z to 2021-12-29T02:18:44.000Z) | max_forks_count: null
content:
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pandas as pd
import pydeck as pdk
from tests import testutil
import streamlit as st
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart
df1 = pd.DataFrame({"lat": [1, 2, 3, 4], "lon": [10, 20, 30, 40]})
class PyDeckTest(testutil.DeltaGeneratorTestCase):
def test_basic(self):
"""Test that pydeck object orks."""
st.pydeck_chart(
pdk.Deck(
layers=[
pdk.Layer("ScatterplotLayer", data=df1),
]
)
)
el = self.get_delta_from_queue().new_element
actual = json.loads(el.deck_gl_json_chart.json)
self.assertEqual(actual["layers"][0]["@@type"], "ScatterplotLayer")
self.assertEqual(
actual["layers"][0]["data"],
[
{"lat": 1, "lon": 10},
{"lat": 2, "lon": 20},
{"lat": 3, "lon": 30},
{"lat": 4, "lon": 40},
],
)
def test_no_args(self):
"""Test that it can be called with no args."""
st.pydeck_chart()
el = self.get_delta_from_queue().new_element
actual = json.loads(el.deck_gl_json_chart.json)
self.assertEqual(actual, deck_gl_json_chart.EMPTY_MAP)

avg_line_length: 30.409836 | max_line_length: 75 | alphanum_fraction: 0.618329
Quality signals: num_words 247, num_chars 1,855, mean_word_length 4.526316, frac_words_unique 0.489879, frac_chars_top_2grams 0.053667, frac_chars_top_3grams 0.044723, frac_chars_top_4grams 0.067084, frac_chars_dupe_5grams 0.192308, frac_chars_dupe_6grams 0.161002, frac_chars_dupe_7grams 0.161002, frac_chars_dupe_8grams 0.161002, frac_chars_dupe_9grams 0.161002, frac_chars_dupe_10grams 0.161002, frac_chars_digital 0.029477, frac_chars_whitespace 0.268464, size_file_byte 1,855, num_lines 60, num_chars_line_max 76, num_chars_line_mean 30.916667, frac_chars_alphabet 0.794399, frac_chars_comments 0.338005, frac_lines_dupe_lines 0.121212, frac_chars_string_length 0.06971, frac_lines_assert 0.090909, codepython_cate_ast 1, codepython_frac_lines_func_ratio 0.060606, codepython_cate_var_zero false, codepython_frac_lines_import 0.181818, codepython_score_lines_no_logic 0.272727; all remaining *_quality_signal columns are 0.
Raw qsc_* columns: all 0 or null | effective: 1 | hits: 0

hexsha: 8a790773c525636d7fecb88a7d84df906ba09ba6 | size: 40,698 | ext: py | lang: Python
repo (all three groups): Doctusoft/beam @ 91d59e78ffec3771a1d646c4e320fff571393829, path sdks/python/apache_beam/io/gcp/bigquery_tools.py, licenses ["Apache-2.0"]
max_stars_count: null | max_issues_count: 1 (2022-02-10T06:56:11.000Z to 2022-02-10T06:56:11.000Z) | max_forks_count: null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tools used by BigQuery sources and sinks.
Classes, constants and functions in this file are experimental and have no
backwards compatibility guarantees.
These tools include wrappers and clients to interact with BigQuery APIs.
NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES.
"""
from __future__ import absolute_import
import datetime
import decimal
import json
import logging
import re
import sys
import time
import uuid
from builtins import object
from future.utils import iteritems
from apache_beam import coders
from apache_beam.internal.gcp import auth
from apache_beam.internal.gcp.json_value import from_json_value
from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.gcp.internal.clients import bigquery
from apache_beam.options import value_provider
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.runners.dataflow.native_io import iobase as dataflow_io
from apache_beam.transforms import DoFn
from apache_beam.utils import retry
# Protect against environments where bigquery library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
MAX_RETRIES = 3
JSON_COMPLIANCE_ERROR = 'NAN, INF and -INF values are not JSON compliant.'
def default_encoder(obj):
if isinstance(obj, decimal.Decimal):
return str(obj)
raise TypeError(
"Object of type '%s' is not JSON serializable" % type(obj).__name__)
def get_hashable_destination(destination):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
destination: Either a TableReference object from the bigquery API.
The object has the following attributes: projectId, datasetId, and
tableId. Or a string representing the destination containing
'PROJECT:DATASET.TABLE'.
Returns:
A string representing the destination containing
'PROJECT:DATASET.TABLE'.
"""
if isinstance(destination, bigquery.TableReference):
return '%s:%s.%s' % (
destination.projectId, destination.datasetId, destination.tableId)
else:
return destination
def parse_table_schema_from_json(schema_string):
"""Parse the Table Schema provided as string.
Args:
schema_string: String serialized table schema, should be a valid JSON.
Returns:
A TableSchema of the BigQuery export from either the Query or the Table.
"""
json_schema = json.loads(schema_string)
def _parse_schema_field(field):
"""Parse a single schema field from dictionary.
Args:
field: Dictionary object containing serialized schema.
Returns:
A TableFieldSchema for a single column in BigQuery.
"""
schema = bigquery.TableFieldSchema()
schema.name = field['name']
schema.type = field['type']
if 'mode' in field:
schema.mode = field['mode']
else:
schema.mode = 'NULLABLE'
if 'description' in field:
schema.description = field['description']
if 'fields' in field:
schema.fields = [_parse_schema_field(x) for x in field['fields']]
return schema
fields = [_parse_schema_field(f) for f in json_schema['fields']]
return bigquery.TableSchema(fields=fields)
def parse_table_reference(table, dataset=None, project=None):
"""Parses a table reference into a (project, dataset, table) tuple.
Args:
table: The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is None
then the table argument must contain the entire table reference:
'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
bigquery.TableReference instance in which case dataset and project are
ignored and the reference is returned as a result. Additionally, for date
partitioned tables, appending '$YYYYmmdd' to the table name is supported,
e.g. 'DATASET.TABLE$YYYYmmdd'.
dataset: The ID of the dataset containing this table or null if the table
reference is specified entirely by the table argument.
project: The ID of the project containing this table or null if the table
reference is specified entirely by the table (and possibly dataset)
argument.
Returns:
A TableReference object from the bigquery API. The object has the following
attributes: projectId, datasetId, and tableId.
Raises:
ValueError: if the table reference as a string does not match the expected
format.
"""
if isinstance(table, bigquery.TableReference):
return table
elif callable(table):
return table
elif isinstance(table, value_provider.ValueProvider):
return table
table_reference = bigquery.TableReference()
# If dataset argument is not specified, the expectation is that the
# table argument will contain a full table reference instead of just a
# table name.
if dataset is None:
match = re.match(
r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
if not match:
raise ValueError(
'Expected a table reference (PROJECT:DATASET.TABLE or '
'DATASET.TABLE) instead of %s.' % table)
table_reference.projectId = match.group('project')
table_reference.datasetId = match.group('dataset')
table_reference.tableId = match.group('table')
else:
table_reference.projectId = project
table_reference.datasetId = dataset
table_reference.tableId = table
return table_reference
# -----------------------------------------------------------------------------
# BigQueryWrapper.
class BigQueryWrapper(object):
"""BigQuery client wrapper with utilities for querying.
The wrapper is used to organize all the BigQuery integration points and
offer a common place where retry logic for failures can be controlled.
In addition it offers various functions used both in sources and sinks
(e.g., find and create tables, query a table, etc.).
"""
TEMP_TABLE = 'temp_table_'
TEMP_DATASET = 'temp_dataset_'
def __init__(self, client=None):
self.client = client or bigquery.BigqueryV2(
http=get_new_http(),
credentials=auth.get_service_credentials(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
self._unique_row_id = 0
# For testing scenarios where we pass in a client we do not want a
# randomized prefix for row IDs.
self._row_id_prefix = '' if client else uuid.uuid4()
self._temporary_table_suffix = uuid.uuid4().hex
@property
def unique_row_id(self):
"""Returns a unique row ID (str) used to avoid multiple insertions.
If the row ID is provided, BigQuery will make a best effort to not insert
the same row multiple times for fail and retry scenarios in which the insert
request may be issued several times. This comes into play for sinks executed
in a local runner.
Returns:
a unique row ID string
"""
self._unique_row_id += 1
return '%s_%d' % (self._row_id_prefix, self._unique_row_id)
def _get_temp_table(self, project_id):
return parse_table_reference(
table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
project=project_id)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_query_location(self, project_id, query, use_legacy_sql):
"""
Get the location of tables referenced in a query.
This method returns the location of the first referenced table in the query
and depends on the BigQuery service to provide error handling for
queries that reference tables in multiple locations.
"""
reference = bigquery.JobReference(jobId=uuid.uuid4().hex,
projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=True,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
)),
jobReference=reference))
response = self.client.jobs.Insert(request)
if response.statistics is None:
# This behavior is only expected in tests
logging.warning(
"Unable to get location, missing response.statistics. Query: %s",
query)
return None
referenced_tables = response.statistics.query.referencedTables
if referenced_tables: # Guards against both non-empty and non-None
table = referenced_tables[0]
location = self.get_table_location(
table.projectId,
table.datasetId,
table.tableId)
logging.info("Using location %r from table %r referenced by query %s",
location, table, query)
return location
logging.debug("Query %s does not reference any tables.", query)
return None
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _insert_copy_job(self,
project_id,
job_id,
from_table_reference,
to_table_reference,
create_disposition=None,
write_disposition=None):
reference = bigquery.JobReference()
reference.jobId = job_id
reference.projectId = project_id
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
copy=bigquery.JobConfigurationTableCopy(
destinationTable=to_table_reference,
sourceTable=from_table_reference,
createDisposition=create_disposition,
writeDisposition=write_disposition,
)
),
jobReference=reference,
)
)
logging.info("Inserting job request: %s", request)
response = self.client.jobs.Insert(request)
logging.info("Response was %s", response)
return response.jobReference
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _insert_load_job(self,
project_id,
job_id,
table_reference,
source_uris,
schema=None,
write_disposition=None,
create_disposition=None):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
load=bigquery.JobConfigurationLoad(
sourceUris=source_uris,
destinationTable=table_reference,
schema=schema,
writeDisposition=write_disposition,
createDisposition=create_disposition,
sourceFormat='NEWLINE_DELIMITED_JSON',
autodetect=schema is None,
)
),
jobReference=reference,
)
)
response = self.client.jobs.Insert(request)
return response.jobReference
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results,
job_id, dry_run=False):
reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
request = bigquery.BigqueryJobsInsertRequest(
projectId=project_id,
job=bigquery.Job(
configuration=bigquery.JobConfiguration(
dryRun=dry_run,
query=bigquery.JobConfigurationQuery(
query=query,
useLegacySql=use_legacy_sql,
allowLargeResults=True,
destinationTable=self._get_temp_table(project_id),
flattenResults=flatten_results)),
jobReference=reference))
response = self.client.jobs.Insert(request)
return response.jobReference.jobId
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_query_results(self, project_id, job_id,
page_token=None, max_results=10000):
request = bigquery.BigqueryJobsGetQueryResultsRequest(
jobId=job_id, pageToken=page_token, projectId=project_id,
maxResults=max_results)
response = self.client.jobs.GetQueryResults(request)
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter)
def _insert_all_rows(self, project_id, dataset_id, table_id, rows,
skip_invalid_rows=False):
"""Calls the insertAll BigQuery API endpoint.
Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\
/rest/v2/tabledata/insertAll."""
# The rows argument is a list of
# bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as
# required by the InsertAll() method.
request = bigquery.BigqueryTabledataInsertAllRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
skipInvalidRows=skip_invalid_rows,
# TODO(silviuc): Should have an option for ignoreUnknownValues?
rows=rows))
response = self.client.tabledata.InsertAll(request)
# response.insertErrors is not [] if errors encountered.
return not response.insertErrors, response.insertErrors
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_table(self, project_id, dataset_id, table_id):
"""Lookup a table's metadata object.
Args:
client: bigquery.BigqueryV2 instance
project_id, dataset_id, table_id: table lookup parameters
Returns:
bigquery.Table instance
Raises:
HttpError if lookup failed.
"""
request = bigquery.BigqueryTablesGetRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
response = self.client.tables.Get(request)
return response
def _create_table(self, project_id, dataset_id, table_id, schema):
table = bigquery.Table(
tableReference=bigquery.TableReference(
projectId=project_id, datasetId=dataset_id, tableId=table_id),
schema=schema)
request = bigquery.BigqueryTablesInsertRequest(
projectId=project_id, datasetId=dataset_id, table=table)
response = self.client.tables.Insert(request)
logging.debug("Created the table with id %s", table_id)
# The response is a bigquery.Table instance.
return response
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_dataset(self, project_id, dataset_id, location=None):
# Check if dataset already exists otherwise create it
try:
dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
return dataset
except HttpError as exn:
if exn.status_code == 404:
dataset_reference = bigquery.DatasetReference(
projectId=project_id, datasetId=dataset_id)
dataset = bigquery.Dataset(datasetReference=dataset_reference)
if location is not None:
dataset.location = location
request = bigquery.BigqueryDatasetsInsertRequest(
projectId=project_id, dataset=dataset)
response = self.client.datasets.Insert(request)
# The response is a bigquery.Dataset instance.
return response
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _is_table_empty(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTabledataListRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id,
maxResults=1)
response = self.client.tabledata.List(request)
# The response is a bigquery.TableDataList instance.
return response.totalRows == 0
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_table(self, project_id, dataset_id, table_id):
request = bigquery.BigqueryTablesDeleteRequest(
projectId=project_id, datasetId=dataset_id, tableId=table_id)
try:
self.client.tables.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Table %s:%s.%s does not exist', project_id,
dataset_id, table_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
request = bigquery.BigqueryDatasetsDeleteRequest(
projectId=project_id, datasetId=dataset_id,
deleteContents=delete_contents)
try:
self.client.datasets.Delete(request)
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
dataset_id)
return
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_table_location(self, project_id, dataset_id, table_id):
table = self.get_table(project_id, dataset_id, table_id)
return table.location
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def create_temporary_dataset(self, project_id, location):
dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
# Check if dataset exists to make sure that the temporary id is unique
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=dataset_id))
if project_id is not None:
# Unittests don't pass projectIds so they can be run without error
raise RuntimeError(
'Dataset %s:%s already exists so cannot be used as temporary.'
% (project_id, dataset_id))
except HttpError as exn:
if exn.status_code == 404:
logging.warning(
'Dataset %s:%s does not exist so we will create it as temporary '
'with location=%s',
project_id, dataset_id, location)
self.get_or_create_dataset(project_id, dataset_id, location=location)
else:
raise
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def clean_up_temporary_dataset(self, project_id):
temp_table = self._get_temp_table(project_id)
try:
self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
projectId=project_id, datasetId=temp_table.datasetId))
except HttpError as exn:
if exn.status_code == 404:
logging.warning('Dataset %s:%s does not exist', project_id,
temp_table.datasetId)
return
else:
raise
self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_job(self, project, job_id, location=None):
request = bigquery.BigqueryJobsGetRequest()
request.jobId = job_id
request.projectId = project
request.location = location
return self.client.jobs.Get(request)
def perform_load_job(self,
destination,
files,
job_id,
schema=None,
write_disposition=None,
create_disposition=None):
"""Starts a job to load data into BigQuery.
Returns:
bigquery.JobReference with the information about the job that was started.
"""
return self._insert_load_job(
destination.projectId, job_id, destination, files,
schema=schema,
create_disposition=create_disposition,
write_disposition=write_disposition)
@retry.with_exponential_backoff(
num_retries=MAX_RETRIES,
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def get_or_create_table(
self, project_id, dataset_id, table_id, schema,
create_disposition, write_disposition):
"""Gets or creates a table based on create and write dispositions.
The function mimics the behavior of BigQuery import jobs when using the
same create and write dispositions.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
schema: A bigquery.TableSchema instance or None.
create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.
Returns:
A bigquery.Table instance if table was found or created.
Raises:
RuntimeError: For various mismatches between the state of the table and
the create/write dispositions passed in. For example if the table is not
empty and WRITE_EMPTY was specified then an error will be raised since
the table was expected to be empty.
"""
from apache_beam.io.gcp.bigquery import BigQueryDisposition
found_table = None
try:
found_table = self.get_table(project_id, dataset_id, table_id)
except HttpError as exn:
if exn.status_code == 404:
if create_disposition == BigQueryDisposition.CREATE_NEVER:
raise RuntimeError(
'Table %s:%s.%s not found but create disposition is CREATE_NEVER.'
% (project_id, dataset_id, table_id))
else:
raise
# If table exists already then handle the semantics for WRITE_EMPTY and
# WRITE_TRUNCATE write dispositions.
if found_table:
table_empty = self._is_table_empty(project_id, dataset_id, table_id)
if (not table_empty and
write_disposition == BigQueryDisposition.WRITE_EMPTY):
raise RuntimeError(
'Table %s:%s.%s is not empty but write disposition is WRITE_EMPTY.'
% (project_id, dataset_id, table_id))
# Delete the table and recreate it (later) if WRITE_TRUNCATE was
# specified.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
self._delete_table(project_id, dataset_id, table_id)
# Create a new table potentially reusing the schema from a previously
# found table in case the schema was not specified.
if schema is None and found_table is None:
raise RuntimeError(
'Table %s:%s.%s requires a schema. None can be inferred because the '
'table does not exist.'
% (project_id, dataset_id, table_id))
if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
return found_table
else:
created_table = self._create_table(project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
schema=schema or found_table.schema)
logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
project_id, dataset_id, table_id,
schema or found_table.schema,
created_table)
# if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
# the table before this point.
if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
# BigQuery can route data to the old table for up to 2 minutes, so wait
# that long before recreating the table and writing to it.
logging.warning('Sleeping for 150 seconds before the write as ' +
'BigQuery inserts can be routed to the deleted table ' +
'for 2 mins after the delete and create.')
# TODO(BEAM-2673): Remove this sleep by migrating to load api
time.sleep(150)
return created_table
else:
return created_table
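# Illustrative sketch (not part of the original module): how the dispositions
# above combine for a hypothetical wrapper instance `bq`; the project, dataset,
# table names and `my_schema` are placeholders.
#
#   from apache_beam.io.gcp.bigquery import BigQueryDisposition
#
#   # Reuse the existing table; raise if it is missing or not empty.
#   bq.get_or_create_table(
#       'my-project', 'my_dataset', 'events', schema=None,
#       create_disposition=BigQueryDisposition.CREATE_NEVER,
#       write_disposition=BigQueryDisposition.WRITE_EMPTY)
#
#   # Delete and recreate the table (with the 150 second wait noted above).
#   bq.get_or_create_table(
#       'my-project', 'my_dataset', 'events', schema=my_schema,
#       create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
#       write_disposition=BigQueryDisposition.WRITE_TRUNCATE)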
def run_query(self, project_id, query, use_legacy_sql, flatten_results,
dry_run=False):
job_id = self._start_query_job(project_id, query, use_legacy_sql,
flatten_results, job_id=uuid.uuid4().hex,
dry_run=dry_run)
if dry_run:
# If this was a dry run then the fact that we get here means the
# query has no errors. The start_query_job would raise an error otherwise.
return
page_token = None
while True:
response = self._get_query_results(project_id, job_id, page_token)
if not response.jobComplete:
# The jobComplete field can be False if the query request times out
# (default is 10 seconds). Note that this is a timeout for the query
# request not for the actual execution of the query in the service. If
# the request times out we keep trying. This situation is quite possible
# if the query returns a large number of rows.
logging.info('Waiting on response from query: %s ...', query)
time.sleep(1.0)
continue
# We got some results. The last page is signalled by a missing pageToken.
yield response.rows, response.schema
if not response.pageToken:
break
page_token = response.pageToken
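# Illustrative usage sketch (not part of the original module); `wrapper` and
# the query text are placeholders:
#
#   for rows, schema in wrapper.run_query(
#       project_id='my-project',
#       query='SELECT word, word_count FROM [my-project:my_dataset.my_table]',
#       use_legacy_sql=True, flatten_results=True):
#     for row in rows:
#       print(wrapper.convert_row_to_dict(row, schema))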
def insert_rows(self, project_id, dataset_id, table_id, rows,
skip_invalid_rows=False):
"""Inserts rows into the specified table.
Args:
project_id: The project id owning the table.
dataset_id: The dataset id owning the table.
table_id: The table id.
rows: A list of plain Python dictionaries. Each dictionary is a row and
each key in it is the name of a field.
skip_invalid_rows: If there are rows with insertion errors, whether they
should be skipped, and all others should be inserted successfully.
Returns:
A tuple (bool, errors). If the first element is False then the second
element will be a bigquery.InsertErrorsValueListEntry instance containing
the specific errors.
"""
# Prepare rows for insertion. Of special note is the row ID that we add to
# each row in order to help BigQuery avoid inserting a row multiple times.
# Duplicate inserts can happen during retries on failures; BigQuery will
# de-duplicate them on a best-effort basis if unique insert IDs are provided.
# TODO(silviuc): Must add support to writing TableRow's instead of dicts.
final_rows = []
for row in rows:
json_object = bigquery.JsonObject()
for k, v in iteritems(row):
if isinstance(v, decimal.Decimal):
# Decimal values are converted to strings because JSON does not support
# the precision that Decimal supports. BigQuery can handle inserts into
# NUMERIC columns when the values arrive as JSON strings.
v = str(v)
json_object.additionalProperties.append(
bigquery.JsonObject.AdditionalProperty(
key=k, value=to_json_value(v)))
final_rows.append(
bigquery.TableDataInsertAllRequest.RowsValueListEntry(
insertId=str(self.unique_row_id),
json=json_object))
result, errors = self._insert_all_rows(
project_id, dataset_id, table_id, final_rows, skip_invalid_rows)
return result, errors
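# Illustrative sketch (not part of the original module); `wrapper` and the
# table coordinates are placeholders. Rows are plain dicts keyed by field
# name; the unique insertId added above lets BigQuery de-duplicate retried
# requests on a best-effort basis.
#
#   ok, errors = wrapper.insert_rows(
#       'my-project', 'my_dataset', 'events',
#       rows=[{'user': 'alice', 'score': decimal.Decimal('1.25')}])
#   if not ok:
#     logging.error('Insert failed: %s', errors)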
def _convert_cell_value_to_dict(self, value, field):
if field.type == 'STRING':
# Input: "XYZ" --> Output: "XYZ"
return value
elif field.type == 'BOOLEAN':
# Input: "true" --> Output: True
return value == 'true'
elif field.type == 'INTEGER':
# Input: "123" --> Output: 123
return int(value)
elif field.type == 'FLOAT':
# Input: "1.23" --> Output: 1.23
return float(value)
elif field.type == 'TIMESTAMP':
# The UTC suffix should come from the timezone library, but this is a
# known issue in Python 2.7, so we hardcode it since the value is read
# with utcfromtimestamp.
# Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
dt = datetime.datetime.utcfromtimestamp(float(value))
return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
elif field.type == 'BYTES':
# Input: "YmJi" --> Output: "YmJi"
return value
elif field.type == 'DATE':
# Input: "2016-11-03" --> Output: "2016-11-03"
return value
elif field.type == 'DATETIME':
# Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
return value
elif field.type == 'TIME':
# Input: "00:49:36" --> Output: "00:49:36"
return value
elif field.type == 'RECORD':
# Note that a schema field object also supports a RECORD type. However,
# when querying, repeated and/or record fields are flattened unless the
# flatten_results flag is passed to the source as False.
return self.convert_row_to_dict(value, field)
elif field.type == 'NUMERIC':
return decimal.Decimal(value)
elif field.type == 'GEOGRAPHY':
return value
else:
raise RuntimeError('Unexpected field type: %s' % field.type)
def convert_row_to_dict(self, row, schema):
"""Converts a TableRow instance using the schema to a Python dict."""
result = {}
for index, field in enumerate(schema.fields):
value = None
if isinstance(schema, bigquery.TableSchema):
cell = row.f[index]
value = from_json_value(cell.v) if cell.v is not None else None
elif isinstance(schema, bigquery.TableFieldSchema):
cell = row['f'][index]
value = cell['v'] if 'v' in cell else None
if field.mode == 'REPEATED':
if value is None:
# Ideally this should never happen as repeated fields default to
# returning an empty list
result[field.name] = []
else:
result[field.name] = [self._convert_cell_value_to_dict(x['v'], field)
for x in value]
elif value is None:
if not field.mode == 'NULLABLE':
raise ValueError('Received \'None\' as the value for the field %s '
'but the field is not NULLABLE.' % field.name)
result[field.name] = None
else:
result[field.name] = self._convert_cell_value_to_dict(value, field)
return result
# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.
class BigQueryReader(dataflow_io.NativeSourceReader):
"""A reader for a BigQuery source."""
def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
flatten_results=True, kms_key=None):
self.source = source
self.test_bigquery_client = test_bigquery_client
if auth.is_running_in_gce:
self.executing_project = auth.executing_project
elif hasattr(source, 'pipeline_options'):
self.executing_project = (
source.pipeline_options.view_as(GoogleCloudOptions).project)
else:
self.executing_project = None
# TODO(silviuc): Try to automatically get it from gcloud config info.
if not self.executing_project and test_bigquery_client is None:
raise RuntimeError(
'Missing executing project information. Please use the --project '
'command line option to specify it.')
self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
# Schema for the rows being read by the reader. It is initialized the
# first time something gets read from the table. It is not required
# for reading the field values in each row but could be useful for
# getting additional details.
self.schema = None
self.use_legacy_sql = use_legacy_sql
self.flatten_results = flatten_results
self.kms_key = kms_key
if self.source.table_reference is not None:
# If the table reference did not define a project, we default to the
# executing project.
project_id = self.source.table_reference.projectId
if not project_id:
project_id = self.executing_project
self.query = 'SELECT * FROM [%s:%s.%s];' % (
project_id,
self.source.table_reference.datasetId,
self.source.table_reference.tableId)
elif self.source.query is not None:
self.query = self.source.query
else:
# This branch should be unreachable given the "modes" enforced by
# BigQuerySource.__init__.
# If this exception has been raised, the BigQuerySource "modes" have
# changed and this method will need to be updated as well.
raise ValueError("BigQuerySource must have either a table or query")
def _get_source_location(self):
"""
Get the source location (e.g. ``"EU"`` or ``"US"``) from either
- :data:`source.table_reference`
or
- The first referenced table in :data:`source.query`
See Also:
- :meth:`BigQueryWrapper.get_query_location`
- :meth:`BigQueryWrapper.get_table_location`
Returns:
Optional[str]: The source location, if any.
"""
if self.source.table_reference is not None:
tr = self.source.table_reference
return self.client.get_table_location(
tr.projectId if tr.projectId is not None else self.executing_project,
tr.datasetId, tr.tableId)
else: # It's a query source
return self.client.get_query_location(
self.executing_project,
self.source.query,
self.source.use_legacy_sql)
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.create_temporary_dataset(
self.executing_project, location=self._get_source_location())
return self
def __exit__(self, exception_type, exception_value, traceback):
self.client.clean_up_temporary_dataset(self.executing_project)
def __iter__(self):
for rows, schema in self.client.run_query(
project_id=self.executing_project, query=self.query,
use_legacy_sql=self.use_legacy_sql,
flatten_results=self.flatten_results):
if self.schema is None:
self.schema = schema
for row in rows:
if self.row_as_dict:
yield self.client.convert_row_to_dict(row, schema)
else:
yield row
class BigQueryWriter(dataflow_io.NativeSinkWriter):
"""The sink writer for a BigQuerySink."""
def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
self.sink = sink
self.test_bigquery_client = test_bigquery_client
self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
# Buffer used to batch written rows so we reduce communication with the
# BigQuery service.
self.rows_buffer = []
self.rows_buffer_flush_threshold = buffer_size or 1000
# Figure out the project, dataset, and table used for the sink.
self.project_id = self.sink.table_reference.projectId
# If the table reference did not define a project, we default to the executing project.
if self.project_id is None and hasattr(sink, 'pipeline_options'):
self.project_id = (
sink.pipeline_options.view_as(GoogleCloudOptions).project)
assert self.project_id is not None
self.dataset_id = self.sink.table_reference.datasetId
self.table_id = self.sink.table_reference.tableId
def _flush_rows_buffer(self):
if self.rows_buffer:
logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer),
self.project_id, self.dataset_id, self.table_id)
passed, errors = self.client.insert_rows(
project_id=self.project_id, dataset_id=self.dataset_id,
table_id=self.table_id, rows=self.rows_buffer)
self.rows_buffer = []
if not passed:
raise RuntimeError('Could not successfully insert rows to BigQuery'
' table [%s:%s.%s]. Errors: %s' %
(self.project_id, self.dataset_id,
self.table_id, errors))
def __enter__(self):
self.client = BigQueryWrapper(client=self.test_bigquery_client)
self.client.get_or_create_table(
self.project_id, self.dataset_id, self.table_id, self.sink.table_schema,
self.sink.create_disposition, self.sink.write_disposition)
return self
def __exit__(self, exception_type, exception_value, traceback):
self._flush_rows_buffer()
def Write(self, row):
self.rows_buffer.append(row)
if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
self._flush_rows_buffer()
class RowAsDictJsonCoder(coders.Coder):
"""A coder for a table row (represented as a dict) to/from a JSON string.
This is the default coder for sources and sinks if the coder argument is not
specified.
"""
def encode(self, table_row):
# The normal error when dumping NAN/INF values is:
# ValueError: Out of range float values are not JSON compliant
# We catch that error and re-raise one that explains to the programmer
# that they have used NaN/Inf values.
try:
return json.dumps(
table_row, allow_nan=False, default=default_encoder).encode('utf-8')
except ValueError as e:
raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))
def decode(self, encoded_table_row):
return json.loads(encoded_table_row.decode('utf-8'))
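# Minimal round-trip sketch for the coder above (illustrative only):
#
#   coder = RowAsDictJsonCoder()
#   encoded = coder.encode({'language': 'en', 'count': 3})
#   assert coder.decode(encoded) == {'language': 'en', 'count': 3}
#   # Encoding float('nan') raises ValueError with JSON_COMPLIANCE_ERROR appended.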
class RetryStrategy(object):
RETRY_ALWAYS = 'RETRY_ALWAYS'
RETRY_NEVER = 'RETRY_NEVER'
RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR'
_NON_TRANSIENT_ERRORS = {'invalid', 'invalidQuery', 'notImplemented'}
@staticmethod
def should_retry(strategy, error_message):
if strategy == RetryStrategy.RETRY_ALWAYS:
return True
elif strategy == RetryStrategy.RETRY_NEVER:
return False
elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and
error_message not in RetryStrategy._NON_TRANSIENT_ERRORS):
return True
else:
return False
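# Illustrative behaviour of the strategy above (not part of the module);
# 'timeout' stands in for any error reason not listed in _NON_TRANSIENT_ERRORS:
#
#   RetryStrategy.should_retry(RetryStrategy.RETRY_ON_TRANSIENT_ERROR, 'timeout')  # True
#   RetryStrategy.should_retry(RetryStrategy.RETRY_ON_TRANSIENT_ERROR, 'invalid')  # False
#   RetryStrategy.should_retry(RetryStrategy.RETRY_NEVER, 'timeout')               # False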
class AppendDestinationsFn(DoFn):
"""Adds the destination to an element, making it a KV pair.
Outputs a PCollection of KV-pairs where the key is a TableReference for the
destination, and the value is the record itself.
Experimental; no backwards compatibility guarantees.
"""
def __init__(self, destination):
self.destination = AppendDestinationsFn._get_table_fn(destination)
@staticmethod
def _value_provider_or_static_val(elm):
if isinstance(elm, value_provider.ValueProvider):
return elm
else:
# The value_type argument is the identity function (a no-op), because we
# assume the argument already has the proper formatting.
return value_provider.StaticValueProvider(lambda x: x, value=elm)
@staticmethod
def _get_table_fn(destination):
if callable(destination):
return destination
else:
return lambda x: AppendDestinationsFn._value_provider_or_static_val(
destination).get()
def process(self, element):
yield (self.destination(element), element)
| 39.095101
| 80
| 0.685832
| 5,124
| 40,698
| 5.263466
| 0.149688
| 0.027364
| 0.016611
| 0.01802
| 0.319355
| 0.278865
| 0.238747
| 0.221246
| 0.205451
| 0.17297
| 0
| 0.00557
| 0.236793
| 40,698
| 1,040
| 81
| 39.132692
| 0.862722
| 0.282373
| 0
| 0.331325
| 0
| 0
| 0.066047
| 0.004368
| 0
| 0
| 0
| 0.002885
| 0.001506
| 1
| 0.070783
| false
| 0.004518
| 0.037651
| 0.003012
| 0.21988
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a790aaa3beecccbae1e5fe2d0bb1478dbadd597
| 1,841
|
py
|
Python
|
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 3
|
2018-11-27T06:30:23.000Z
|
2021-05-30T15:56:32.000Z
|
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 1
|
2018-11-15T02:00:31.000Z
|
2021-12-06T02:20:32.000Z
|
VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py
|
workingyifei/display-pattern-generator
|
b27be84c6221fa93833f283109870737b05bfbf6
|
[
"MIT"
] | 1
|
2020-11-06T18:46:35.000Z
|
2020-11-06T18:46:35.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import (
get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute,
collect_data_files)
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows the system PATH has to be extended to point to the PyQt5 directory,
# which contains the Qt dlls. We need to avoid including a different version of
# the Qt libraries when another application (e.g. QtCreator) is installed.
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()])
extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')),
'Qt', 'bin')])
# In the new consolidated mode any PyQt depends on _qt
hiddenimports = ['sip', 'PyQt5.Qt']
# Collect just the qt.conf file.
datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if
x[0].endswith('qt.conf')]
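# Each entry in `datas` is a (source_path, dest_dir) tuple as returned by
# collect_data_files; roughly ('<site-packages>/PyQt5/Qt/bin/qt.conf',
# 'PyQt5/Qt/bin') on a typical install (illustrative only, exact paths vary).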
# For Qt<5.4 to work on Mac OS X it is necessary to include `qt_menu.nib`.
# This directory contains some resource files necessary to run a PyQt or
# PySide app.
if is_darwin:
# Version of the currently installed Qt 5.x shared library.
qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR')
if is_module_satisfies('Qt < 5.4', qt_version):
datas = [(qt_menu_nib_dir('PyQt5'), '')]
| 42.813953
| 90
| 0.669745
| 257
| 1,841
| 4.642023
| 0.455253
| 0.030176
| 0.022632
| 0.031852
| 0.043588
| 0.043588
| 0
| 0
| 0
| 0
| 0
| 0.014887
| 0.160782
| 1,841
| 42
| 91
| 43.833333
| 0.757282
| 0.501901
| 0
| 0
| 0
| 0
| 0.091111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7abfc40ef422e33ab3c8284edc61617b59e3dc
| 1,165
|
py
|
Python
|
skimage/segmentation/tests/test_felzenszwalb.py
|
jaberg/scikits-image
|
2ab3e2dfb341189ef2ff9370c6cf3d33ef6ec88d
|
[
"BSD-3-Clause"
] | 2
|
2020-02-17T18:54:33.000Z
|
2021-09-28T15:18:23.000Z
|
skimage/segmentation/tests/test_felzenszwalb.py
|
jaberg/scikits-image
|
2ab3e2dfb341189ef2ff9370c6cf3d33ef6ec88d
|
[
"BSD-3-Clause"
] | 1
|
2020-03-30T12:31:55.000Z
|
2020-03-30T12:31:55.000Z
|
skimage/segmentation/tests/test_felzenszwalb.py
|
emmanuelle/scikit-image
|
eccc41907135cf81b99c4be18a480a9bc705485d
|
[
"BSD-3-Clause"
] | 1
|
2019-12-20T19:19:59.000Z
|
2019-12-20T19:19:59.000Z
|
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from nose.tools import assert_greater
from skimage.segmentation import felzenszwalb
def test_grey():
# very weak tests. This algorithm is pretty unstable.
img = np.zeros((20, 21))
img[:10, 10:] = 0.2
img[10:, :10] = 0.4
img[10:, 10:] = 0.6
seg = felzenszwalb(img, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
# that mostly respect the 4 regions:
for i in range(4):
hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]
assert_greater(hist[i], 40)
def test_color():
# very weak tests. This algorithm is pretty unstable.
img = np.zeros((20, 21, 3))
img[:10, :10, 0] = 1
img[10:, :10, 1] = 1
img[10:, 10:, 2] = 1
seg = felzenszwalb(img, sigma=0)
# we expect 4 segments:
assert_equal(len(np.unique(seg)), 4)
assert_array_equal(seg[:10, :10], 0)
assert_array_equal(seg[10:, :10], 2)
assert_array_equal(seg[:10, 10:], 1)
assert_array_equal(seg[10:, 10:], 3)
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| 29.125
| 73
| 0.628326
| 190
| 1,165
| 3.710526
| 0.342105
| 0.056738
| 0.059574
| 0.04539
| 0.479433
| 0.479433
| 0.348936
| 0.348936
| 0.348936
| 0.348936
| 0
| 0.092715
| 0.222318
| 1,165
| 39
| 74
| 29.871795
| 0.68543
| 0.156223
| 0
| 0.142857
| 0
| 0
| 0.008188
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7ac7f87e160e8f864dafce2acd68a6454b8a68
| 1,419
|
py
|
Python
|
tests/middleware/test_csrf_middleware.py
|
w3x10e8/core
|
d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49
|
[
"MIT"
] | null | null | null |
tests/middleware/test_csrf_middleware.py
|
w3x10e8/core
|
d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49
|
[
"MIT"
] | null | null | null |
tests/middleware/test_csrf_middleware.py
|
w3x10e8/core
|
d8f0ca29c2bd5e86d199391fa916ce2f5c9b0f49
|
[
"MIT"
] | null | null | null |
from masonite.request import Request
from masonite.view import View
from masonite.auth.Csrf import Csrf
from masonite.app import App
from masonite.middleware import CsrfMiddleware
from masonite.testsuite.TestSuite import generate_wsgi
import pytest
from masonite.exceptions import InvalidCSRFToken
class TestCSRFMiddleware:
def setup_method(self):
self.app = App()
self.request = Request(generate_wsgi())
self.view = View(self.app)
self.app.bind('Request', self.request)
self.request = self.app.make('Request')
self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view)
def test_middleware_shares_correct_input(self):
self.middleware.before()
assert 'csrf_field' in self.view.dictionary
assert self.view.dictionary['csrf_field'].startswith("<input type='hidden' name='__token' value='")
def test_middleware_throws_exception_on_post(self):
self.request.environ['REQUEST_METHOD'] = 'POST'
self.middleware.exempt = []
with pytest.raises(InvalidCSRFToken):
self.middleware.before()
def test_incoming_token_does_not_throw_exception_with_token(self):
self.request.environ['REQUEST_METHOD'] = 'POST'
self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')})
self.middleware.exempt = []
self.middleware.before()
| 36.384615
| 107
| 0.718816
| 170
| 1,419
| 5.817647
| 0.323529
| 0.100101
| 0.045501
| 0.044489
| 0.086957
| 0.086957
| 0.086957
| 0.086957
| 0
| 0
| 0
| 0
| 0.176885
| 1,419
| 38
| 108
| 37.342105
| 0.846747
| 0
| 0
| 0.233333
| 0
| 0
| 0.091614
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.133333
| false
| 0
| 0.266667
| 0
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7bd23662f4d2b0b0c83db0df08df0f16f7923c
| 690
|
py
|
Python
|
phoible/views.py
|
ltxom/phoible
|
7ce6f5e62d885f142dba61937d920e68fa7f9fca
|
[
"Apache-2.0"
] | 31
|
2015-01-20T01:36:22.000Z
|
2022-03-11T16:47:30.000Z
|
phoible/views.py
|
ltxom/phoible
|
7ce6f5e62d885f142dba61937d920e68fa7f9fca
|
[
"Apache-2.0"
] | 22
|
2015-03-09T11:11:31.000Z
|
2022-03-07T14:08:29.000Z
|
phoible/views.py
|
ltxom/phoible
|
7ce6f5e62d885f142dba61937d920e68fa7f9fca
|
[
"Apache-2.0"
] | 12
|
2015-11-16T18:28:43.000Z
|
2021-05-20T21:55:49.000Z
|
from pyramid.view import view_config
import os
@view_config(route_name='faq', renderer='faq.mako')
def faq_view(request):
dir_path = os.path.dirname(__file__)
faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html')
with open(faq_file, 'r') as f:
faq_page = f.read()
return {'content': faq_page}
@view_config(route_name='conventions', renderer='conventions.mako')
def conventions_view(request):
dir_path = os.path.dirname(__file__)
conventions_file = os.path.join(dir_path, 'static/conventions.html')
with open(conventions_file, 'r') as file:
conventions_page = file.read().replace('\n', '')
return {'content': conventions_page}
| 32.857143
| 72
| 0.708696
| 98
| 690
| 4.693878
| 0.336735
| 0.06087
| 0.065217
| 0.082609
| 0.269565
| 0.269565
| 0.269565
| 0.152174
| 0
| 0
| 0
| 0
| 0.149275
| 690
| 20
| 73
| 34.5
| 0.783646
| 0
| 0
| 0.125
| 0
| 0
| 0.155072
| 0.073913
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7c5a43d05f7336921551d124cf954c34bc06e5
| 46,013
|
py
|
Python
|
tests/restapi/test_routes.py
|
aiace9/aiida-core
|
09ac91654648adb684a58d5d2d7b1c11a503dae8
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
tests/restapi/test_routes.py
|
aiace9/aiida-core
|
09ac91654648adb684a58d5d2d7b1c11a503dae8
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
tests/restapi/test_routes.py
|
aiace9/aiida-core
|
09ac91654648adb684a58d5d2d7b1c11a503dae8
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=too-many-lines
"""Unittests for REST API."""
import tempfile
from flask_cors.core import ACL_ORIGIN
from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import json
from aiida.common.links import LinkType
from aiida.restapi.run_api import configure_api
class RESTApiTestCase(AiidaTestCase):
"""
Setup of the tests for the AiiDA RESTful-api
"""
_url_prefix = '/api/v4'
_dummy_data = {}
_PERPAGE_DEFAULT = 20
_LIMIT_DEFAULT = 400
@classmethod
def setUpClass(cls, *args, **kwargs): # pylint: disable=too-many-locals, too-many-statements
"""
Add objects to the database for different requests/filters/orderings etc.
"""
super().setUpClass()
api = configure_api(catch_internal_server=True)
cls.app = api.app
cls.app.config['TESTING'] = True
# create test inputs
cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
structure = orm.StructureData(cell=cell)
structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
structure.store()
structure.add_comment('This is test comment.')
structure.add_comment('Add another comment.')
cif = orm.CifData(ase=structure.get_ase())
cif.store()
parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
parameter1.store()
parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
parameter2.store()
kpoint = orm.KpointsData()
kpoint.set_kpoints_mesh([4, 4, 4])
kpoint.store()
resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}
calcfunc = orm.CalcFunctionNode(computer=cls.computer)
calcfunc.store()
calc = orm.CalcJobNode(computer=cls.computer)
calc.set_option('resources', resources)
calc.set_attribute('attr1', 'OK')
calc.set_attribute('attr2', 'OK')
calc.set_extra('extra1', False)
calc.set_extra('extra2', 'extra_info')
calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure')
calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')
aiida_in = 'The input file\nof the CalcJob node'
# Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_in)
handle.flush()
handle.seek(0)
calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True)
calc.store()
# create log message for calcjob
import logging
from aiida.common.log import LOG_LEVEL_REPORT
from aiida.common.timezone import now
from aiida.orm import Log
log_record = {
'time': now(),
'loggername': 'loggername',
'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
'dbnode_id': calc.id,
'message': 'This is a template record message',
'metadata': {
'content': 'test'
},
}
Log(**log_record)
aiida_out = 'The output file\nof the CalcJob node'
retrieved_outputs = orm.FolderData()
# Add the calcjob_outputs folder with the aiida.out file to the FolderData node
with tempfile.NamedTemporaryFile(mode='w+') as handle:
handle.write(aiida_out)
handle.flush()
handle.seek(0)
retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True)
retrieved_outputs.store()
retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved')
kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')
calc1 = orm.CalcJobNode(computer=cls.computer)
calc1.set_option('resources', resources)
calc1.store()
dummy_computers = [{
'label': 'test1',
'hostname': 'test1.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'pbspro',
}, {
'label': 'test2',
'hostname': 'test2.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'torque',
}, {
'label': 'test3',
'hostname': 'test3.epfl.ch',
'transport_type': 'local',
'scheduler_type': 'slurm',
}, {
'label': 'test4',
'hostname': 'test4.epfl.ch',
'transport_type': 'ssh',
'scheduler_type': 'slurm',
}]
for dummy_computer in dummy_computers:
computer = orm.Computer(**dummy_computer)
computer.store()
# Prepare typical REST responses
cls.process_dummy_data()
def get_dummy_data(self):
return self._dummy_data
def get_url_prefix(self):
return self._url_prefix
@classmethod
def process_dummy_data(cls):
# pylint: disable=fixme
"""
This function prepares atomic chunks of typical responses from the
REST API and puts them into class attributes.
"""
# TODO: Storing the different nodes as lists and accessing them
# by their list index is very fragile and a pain to debug.
# Please change this!
computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']
computers = orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({
'comp': [{
'id': {
'order': 'asc'
}
}]
}).dict()
# Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
computers = [_['comp'] for _ in computers]
for comp in computers:
if comp['uuid'] is not None:
comp['uuid'] = str(comp['uuid'])
cls._dummy_data['computers'] = computers
calculation_projections = ['id', 'uuid', 'user_id', 'node_type']
calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc',
project=calculation_projections).order_by({
'calc': [{
'id': {
'order': 'desc'
}
}]
}).dict()
calculations = [_['calc'] for _ in calculations]
for calc in calculations:
if calc['uuid'] is not None:
calc['uuid'] = str(calc['uuid'])
cls._dummy_data['calculations'] = calculations
data_projections = ['id', 'uuid', 'user_id', 'node_type']
data_types = {
'cifdata': orm.CifData,
'parameterdata': orm.Dict,
'structuredata': orm.StructureData,
'data': orm.Data,
}
for label, dataclass in data_types.items():
data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({
'data': [{
'id': {
'order': 'desc'
}
}]
}).dict()
data = [_['data'] for _ in data]
for datum in data:
if datum['uuid'] is not None:
datum['uuid'] = str(datum['uuid'])
cls._dummy_data[label] = data
def split_path(self, url):
# pylint: disable=no-self-use
"""
Split the url on "?" to get the url path and its parameters
:param url: Web url
:return: url path and url parameters
"""
parts = url.split('?')
path = ''
query_string = ''
if parts:
path = parts[0]
if len(parts) > 1:
query_string = parts[1]
return path, query_string
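# Illustrative example (not part of the original tests):
#   self.split_path('/api/v4/computers?limit=2&offset=1')
#   # -> ('/api/v4/computers', 'limit=2&offset=1')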
def compare_extra_response_data(self, node_type, url, response, uuid=None):
"""
In the url response, we pass some extra information/data along with the
node results, e.g. url method, node_type, path, pk, query_string, url,
url_root, etc.
:param node_type: url requested for the type of the node
:param url: web url
:param response: url response
:param uuid: url requested for the node pk
"""
path, query_string = self.split_path(url)
self.assertEqual(response['method'], 'GET')
self.assertEqual(response['resource_type'], node_type)
self.assertEqual(response['path'], path)
self.assertEqual(response['id'], uuid)
self.assertEqual(response['query_string'], query_string)
self.assertEqual(response['url'], f'http://localhost{url}')
self.assertEqual(response['url_root'], 'http://localhost/')
# node details and list with limit, offset, page, perpage
def process_test(
self,
entity_type,
url,
full_list=False,
empty_list=False,
expected_list_ids=None,
expected_range=None,
expected_errormsg=None,
uuid=None,
result_node_type=None,
result_name=None
):
# pylint: disable=too-many-arguments
"""
Check whether response matches expected values.
:param entity_type: url requested for the type of the node
:param url: web url
:param full_list: if url is requested to get full list
:param empty_list: if the response list is empty
:param expected_list_ids: list of expected ids from data
:param expected_range: [start, stop] range of expected ids from data
:param expected_errormsg: expected error message in response
:param uuid: url requested for the node pk
:param result_node_type: node type in response data
:param result_name: result name in response e.g. incoming, outgoing
"""
if expected_list_ids is None:
expected_list_ids = []
if expected_range is None:
expected_range = []
if result_node_type is None and result_name is None:
result_node_type = entity_type
result_name = entity_type
url = self._url_prefix + url
with self.app.test_client() as client:
rv_response = client.get(url)
response = json.loads(rv_response.data)
if expected_errormsg:
self.assertEqual(response['message'], expected_errormsg)
else:
if full_list:
expected_data = self._dummy_data[result_node_type]
elif empty_list:
expected_data = []
elif expected_list_ids:
expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
elif expected_range != []:
expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
else:
from aiida.common.exceptions import InputValidationError
raise InputValidationError('Pass the expected range of the dummydata')
expected_node_uuids = [node['uuid'] for node in expected_data]
result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
self.assertEqual(expected_node_uuids, result_node_uuids)
self.compare_extra_response_data(entity_type, url, response, uuid)
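# Illustrative call (mirrors how the tests below use this helper; the url and
# expected values are placeholders):
#   self.process_test('computers', '/computers?limit=2&orderby=+id',
#                     expected_range=[None, 2])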
class RESTApiTestSuite(RESTApiTestCase):
# pylint: disable=too-many-public-methods
"""
Define unittests for rest api
"""
############### generic endpoints ########################
def test_server(self):
"""
Test that /server endpoint returns AiiDA version
"""
url = f'{self.get_url_prefix()}/server'
from aiida import __version__
with self.app.test_client() as client:
response = client.get(url)
data = json.loads(response.data)['data']
self.assertEqual(__version__, data['AiiDA_version'])
self.assertEqual(self.get_url_prefix(), data['API_prefix'])
def test_base_url(self):
"""
Test that / returns list of endpoints
"""
with self.app.test_client() as client:
data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']
self.assertTrue(len(data_base['available_endpoints']) > 0)
self.assertDictEqual(data_base, data_server)
def test_cors_headers(self):
"""
Test that REST API sets cross-origin resource sharing headers
"""
url = f'{self.get_url_prefix()}/server'
with self.app.test_client() as client:
response = client.get(url)
headers = response.headers
self.assertEqual(headers.get(ACL_ORIGIN), '*')
############### computers endpoint ########################
def test_computers_details(self):
"""
Requests the details of single computer
"""
node_uuid = self.get_dummy_data()['computers'][1]['uuid']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
)
def test_computers_list(self):
"""
Get the full list of computers from database
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_list_limit_offset(self):
"""
Get the list of computers from database using limit
and offset parameter.
It should return the no. of rows specified in limit from the
database, starting from the no. specified in offset.
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
)
def test_computers_list_limit_only(self):
"""
Get the list of computers from database using limit
parameter.
It should return the no. of rows specified in limit from the
database.
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])
def test_computers_list_offset_only(self):
"""
Get the list of computers from database using offset
parameter
It should return all the rows from the database starting from
the no. specified in offset.
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])
def test_computers_list_limit_offset_perpage(self):
"""
If we pass limit, offset and perpage at the same time, it
returns an error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
)
def test_computers_list_page_limit_offset(self):
"""
If we use page, limit and offset at the same time, it
returns an error message.
"""
expected_error = 'requesting a specific page is incompatible with ' \
'limit and offset'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
)
def test_complist_pagelimitoffset_perpage(self):
"""
If we use page, limit, offset and perpage at the same time, it
returns an error message.
"""
expected_error = 'perpage key is incompatible with limit and offset'
RESTApiTestCase.process_test(
self,
'computers',
'/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
expected_errormsg=expected_error
)
def test_computers_list_page_default(self):
"""
It returns the no. of rows defined by the default perpage option
from the database.
no. of pages = total no. of computers in database / perpage
"/page" acts as "/page/1?perpage=default_value"
"""
RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)
def test_computers_list_page_perpage(self):
"""
no. of pages = total no. of computers in database / perpage
Using this formula it returns the no. of rows for the requested page.
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
)
def test_computers_list_page_perpage_exceed(self):
"""
no.of pages = total no. of computers in database / perpage
If we request a page which exceeds the total no. of pages, then
an error message is returned.
"""
expected_error = 'Non existent page requested. The page range is [1 : ' \
'3]'
RESTApiTestCase.process_test(
self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
)
############### list filters ########################
def test_computers_filter_id1(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_id2(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id > 2)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
)
def test_computers_filter_pk(self):
"""
Add filter on the id of computer and get the filtered computer
list (e.g. id=1)
"""
node_pk = self.get_dummy_data()['computers'][1]['id']
RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])
def test_computers_filter_name(self):
"""
Add filter for the name of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])
def test_computers_filter_hostname(self):
"""
Add filter for the hostname of computer and get the filtered computer
list
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])
def test_computers_filter_transport_type(self):
"""
Add filter for the transport_type of computer and get the filtered
computer
list
"""
RESTApiTestCase.process_test(
self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
)
############### list orderby ########################
def test_computers_orderby_id_asc(self):
"""
Returns the computers list ordered by "id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)
def test_computers_orderby_id_asc_sign(self):
"""
Returns the computers list ordered by "+id" in ascending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)
def test_computers_orderby_id_desc(self):
"""
Returns the computers list ordered by "id" in descending
order
"""
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])
def test_computers_orderby_name_asc(self):
"""
Returns the computers list ordered by "name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_asc_sign(self):
"""
Returns the computers list ordered by "+name" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
)
def test_computers_orderby_name_desc(self):
"""
Returns the computers list ordered by "name" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
)
def test_computers_orderby_scheduler_type_asc(self):
"""
Returns the computers list ordered by "scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_comp_orderby_scheduler_ascsign(self):
"""
Returns the computers list ordered by "+scheduler_type" in ascending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=+scheduler_type",
expected_list_ids=[1, 4, 2]
)
def test_computers_orderby_schedulertype_desc(self):
"""
Returns the computers list ordered by "scheduler_type" in descending
order
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?pk>{str(node_pk)}&transport_type=\"ssh\"&orderby=-scheduler_type",
expected_list_ids=[2, 4, 1]
)
############### list orderby combinations #######################
def test_computers_orderby_mixed1(self):
"""
Returns the computers list first order by "transport_type" in
ascending order and if it is having same transport_type, order it
by "id"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=transport_type,id',
expected_list_ids=[3, 1, 2, 4]
)
def test_computers_orderby_mixed2(self):
"""
Returns the computers list first order by "scheduler_type" in
descending order and if it is having same scheduler_type, order it
by "name"
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name',
expected_list_ids=[2, 3, 4, 1]
)
def test_computers_orderby_mixed3(self):
"""
Returns the computers list first order by "scheduler_type" in
ascending order and if it is having same scheduler_type, order it
by "hostname" descending order
Response::
test4 slurm
test3 slurm
test2 torque
test1 pbspro
localhost pbspro
==========
Expected::
test1 pbspro
localhost pbspro
test4 slurm
test3 slurm
test2 torque
test1 test4
RESTApiTestCase.process_test(self, "computers",
"/computers?orderby=+scheduler_type,
-hostname",
expected_list_ids=[1,0,4,3,2])
"""
############### list filter combinations #######################
def test_computers_filter_mixed1(self):
"""
Add filter for the hostname and id of computer and get the
filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f"/computers?id>{str(node_pk)}&hostname=\"test1.epfl.ch\"", expected_list_ids=[1]
)
def test_computers_filter_mixed2(self):
"""
Add filter for the id, hostname and transport_type of the computer
and get the filtered computer list
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>{str(node_pk)}&hostname=\"test3.epfl.ch\"&transport_type=\"ssh\"",
empty_list=True
)
############### list all parameter combinations #######################
def test_computers_mixed1(self):
"""
url parameters: id, limit and offset
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
)
def test_computers_mixed2(self):
"""
url parameters: id, page, perpage
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
)
def test_computers_mixed3(self):
"""
url parameters: id, transport_type, orderby
"""
node_pk = self.get_dummy_data()['computers'][0]['id']
RESTApiTestCase.process_test(
self,
'computers',
f"/computers?id>={str(node_pk)}&transport_type=\"ssh\"&orderby=-id&limit=2",
expected_list_ids=[4, 2]
)
########## pass unknown url parameter ###########
def test_computers_unknown_param(self):
"""
url parameters: id, limit and offset
from aiida.common.exceptions import InputValidationError
RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
"""
############### calculation retrieved_inputs and retrieved_outputs #############
def test_calculation_retrieved_inputs(self):
"""
Get the list of given calculation retrieved_inputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])
def test_calculation_retrieved_outputs(self):
"""
Get the list of given calculation retrieved_outputs
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])
############### calculation incoming #############
def test_calculation_inputs(self):
"""
Get the list of incoming links for the given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
expected_list_ids=[5, 3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_input_filters(self):
"""
Get filtered incoming list for given calculations
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
self.process_test(
'nodes',
f"/nodes/{str(node_uuid)}/links/incoming?node_type=\"data.dict.Dict.\"",
expected_list_ids=[3],
uuid=node_uuid,
result_node_type='data',
result_name='incoming'
)
def test_calculation_iotree(self):
"""
Get the link tree (incoming and outgoing) for the given calculation
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(len(response['data']['nodes']), 1)
self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
self.assertEqual(len(response['data']['metadata']), 1)
expected_attr = [
'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
]
received_attr = response['data']['nodes'][0].keys()
for attr in expected_attr:
self.assertIn(attr, received_attr)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation attributes #############
def test_calculation_attributes(self):
"""
Get list of calculation attributes
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], attributes)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_contents_attributes_filter(self):
"""
Get list of calculation attributes with filter attributes_filter
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\"attr1\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
############### calculation node attributes filter #############
def test_calculation_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
attributes = {
'attr1': 'OK',
'attr2': 'OK',
'resources': {
'num_machines': 1,
'num_mpiprocs_per_machine': 1
},
}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)
############### calculation node extras_filter #############
def test_calculation_extras_filter(self):
"""
Get the list of given calculation extras filtered
"""
extras = {'extra1': False, 'extra2': 'extra_info'}
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])
############### structure node attributes filter #############
def test_structure_attributes_filter(self):
"""
Get the list of given calculation attributes filtered
"""
cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)
############### node attributes_filter with pagination #############
def test_node_attributes_filter_pagination(self):
"""
Check that node attributes specified in attributes_filter are
returned as a dictionary when pagination is set
"""
expected_attributes = ['resources', 'cell']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('attributes', node)
self.assertNotIn('attributes.resources', node)
self.assertNotIn('attributes.cell', node)
self.assertEqual(len(node['attributes']), len(expected_attributes))
for attr in expected_attributes:
self.assertIn(attr, node['attributes'])
############### node get one attributes_filter with pagination #############
def test_node_single_attributes_filter(self):
"""
Check that when only one node attribute is specified in attributes_filter
only this attribute is returned as a dictionary when pagination is set
"""
expected_attribute = ['resources']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['attributes'].keys()), expected_attribute)
############### node extras_filter with pagination #############
def test_node_extras_filter_pagination(self):
"""
Check that node extras specified in extras_filter are
returned as a dictionary when pagination is set
"""
expected_extras = ['extra1', 'extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertIn('extras', node)
self.assertNotIn('extras.extra1', node)
self.assertNotIn('extras.extra2', node)
self.assertEqual(len(node['extras']), len(expected_extras))
for extra in expected_extras:
self.assertIn(extra, node['extras'])
############### node get one extras_filter with pagination #############
def test_node_single_extras_filter(self):
"""
Check that when only one node extra is specified in extras_filter
only this extra is returned as a dictionary when pagination is set
"""
expected_extra = ['extra2']
url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertNotEqual(len(response['data']['nodes']), 0)
for node in response['data']['nodes']:
self.assertEqual(list(node['extras'].keys()), expected_extra)
############### node full_type filter #############
def test_nodes_full_type_filter(self):
"""
Get the list of nodes filtered by full_type
"""
expected_node_uuids = []
for calc in self.get_dummy_data()['calculations']:
if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.':
expected_node_uuids.append(calc['uuid'])
url = f"{self.get_url_prefix()}/nodes/?full_type=\"process.calculation.calcjob.CalcJobNode.|\""
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
for node in response['data']['nodes']:
self.assertIn(node['uuid'], expected_node_uuids)
############### Structure visualization and download #############
def test_structure_derived_properties(self):
"""
Get the derived properties of a given structure node
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
self.assertEqual(
response['data']['derived_properties']['dimensionality'], {
'dim': 3,
'value': 8.0,
'label': 'volume'
}
)
self.assertEqual(response['data']['derived_properties']['formula'], 'Ba')
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
def test_structure_download(self):
"""
Test download of structure file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf'
with self.app.test_client() as client:
rv_obj = client.get(url)
structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, structure_data)
def test_cif(self):
"""
Test download of cif file
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['cifdata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif'
with self.app.test_client() as client:
rv_obj = client.get(url)
cif = load_node(node_uuid)._prepare_cif()[0] # pylint: disable=protected-access
self.assertEqual(rv_obj.data, cif)
############### projectable_properties #############
def test_projectable_properties(self):
"""
test projectable_properties endpoint
"""
for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']:
url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
self.assertNotIn('message', response)
expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']
# check fields
for _, pinfo in response['data']['fields'].items():
available_keys = pinfo.keys()
for prop in expected_keys:
self.assertIn(prop, available_keys)
# check order
available_properties = response['data']['fields'].keys()
for prop in response['data']['ordering']:
self.assertIn(prop, available_properties)
def test_node_namespace(self):
"""
Test the REST API call to get the list of available node namespaces
"""
url = f'{self.get_url_prefix()}/nodes/full_types'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)
expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type']
response_keys = response['data'].keys()
for dkey in expected_data_keys:
self.assertIn(dkey, response_keys)
RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)
def test_comments(self):
"""
Get the node comments
"""
node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'
with self.app.test_client() as client:
rv_obj = client.get(url)
response = json.loads(rv_obj.data)['data']['comments']
all_comments = []
for comment in response:
all_comments.append(comment['message'])
self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))
def test_repo(self):
"""
Test to get repo list or repo file contents for given node
"""
from aiida.orm import load_node
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\"calcjob_inputs\""
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])
url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\"calcjob_inputs/aiida.in\""
with self.app.test_client() as client:
response_obj = client.get(url)
input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb')
self.assertEqual(response_obj.data, input_file)
def test_process_report(self):
"""
Test process report
"""
node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
expected_keys = response['data'].keys()
for key in ['logs']:
self.assertIn(key, expected_keys)
expected_log_keys = response['data']['logs'][0].keys()
for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']:
self.assertIn(key, expected_log_keys)
def test_download_formats(self):
"""
test for download format endpoint
"""
url = f'{self.get_url_prefix()}/nodes/download_formats'
with self.app.test_client() as client:
response_value = client.get(url)
response = json.loads(response_value.data)
for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']:
self.assertIn(key, response['data'].keys())
for key in ['cif', 'xsf', 'xyz']:
self.assertIn(key, response['data']['data.structure.StructureData.|'])
self.assertIn('cif', response['data']['data.cif.CifData.|'])
| 40.186026
| 117
| 0.583878
| 5,197
| 46,013
| 4.981335
| 0.090437
| 0.021361
| 0.016224
| 0.021014
| 0.585522
| 0.554465
| 0.518812
| 0.472922
| 0.432401
| 0.410229
| 0
| 0.008314
| 0.283768
| 46,013
| 1,144
| 118
| 40.221154
| 0.777218
| 0.181949
| 0
| 0.330827
| 0
| 0.009023
| 0.193589
| 0.097403
| 0
| 0
| 0
| 0.001748
| 0.094737
| 1
| 0.103759
| false
| 0.001504
| 0.02406
| 0.003008
| 0.141353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7d500dd98fa04ac32ae6b712ad22a261bd4d52
| 3,644
|
py
|
Python
|
processmonitor.py
|
yletallec/processmonitor
|
95db3416ec35fcb1325a1ac6c5a26807e4c3a474
|
[
"MIT"
] | null | null | null |
processmonitor.py
|
yletallec/processmonitor
|
95db3416ec35fcb1325a1ac6c5a26807e4c3a474
|
[
"MIT"
] | null | null | null |
processmonitor.py
|
yletallec/processmonitor
|
95db3416ec35fcb1325a1ac6c5a26807e4c3a474
|
[
"MIT"
] | null | null | null |
"""Process Monitor
Usage:
processmonitor.py <process_name> <overall_duration> [<sampling_interval>]
processmonitor.py -h|--help
processmonitor.py -v|--version
Options:
<process_name> Process name argument.
<overall_duration> Overall duration of the monitoring in seconds.
<sampling_interval> Sampling interval in seconds (optional, default 5).
-h --help Show this screen.
-v --version Show version.
"""
from docopt import docopt
from utils import string_to_integer
from process import Process
from threading import Event, Thread
from datetime import datetime
import os
import sys
import csv
import time
from enum import IntEnum
class ExitStatus(IntEnum):
OK = 0
BAD_DURATION = 1
BAD_INTERVAL = 2
INTERVAL_GT_DURATION = 3
def call_repeatedly(interval, func, *args):
stopped = Event()
def loop():
iteration = 1
while not stopped.wait(interval - time.time() % interval):
func(*args, iteration)
iteration = iteration + 1
Thread(target=loop).start()
return stopped.set
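# Minimal sketch (not part of the original script) of how call_repeatedly can be used on
# its own: schedule a callable on a fixed 1-second cadence and cancel it a few seconds later.
def _example_call_repeatedly():  # hypothetical helper; never invoked by this script
    stop = call_repeatedly(1, print, "tick")  # prints "tick 1", "tick 2", ... once per second
    time.sleep(3)
    stop()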
def print_average():
cpu_avg, mem_avg, files_avg = Process.metrics_average()
if cpu_avg is not None and mem_avg is not None and files_avg is not None:
print(f"Metrics Avg.: %CPU: {cpu_avg}, MEMORY(B): {mem_avg}, OPEN FILES: {files_avg}")
return True
return False
def generate_report(name, duration, interval):
if len(Process.metrics) == 0:
return False
ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
filename = f"{ts}_process-metrics-report_{name}_{duration}_{interval}.csv"
with open(f"{filename}", mode='w') as report:
writer = csv.writer(report, delimiter=',')
writer.writerow(['ITERATION', '%CPU', 'MEMORY(B)', 'OPEN FILES'])
iteration = 1
for metric in Process.metrics:
writer.writerow([
iteration,
metric.cpu,
metric.mem,
metric.files])
iteration = iteration + 1
reportpath = f"./{filename}"
print(f"Metrics report: {reportpath}")
return True
def raise_memory_leak_warning(name):
if Process.has_memory_leaks(name):
print(f"WARNING: possible memory leaks detected for process \'{name}\'")
return True
return False
def main():
args = docopt(__doc__, version='Process Monitor 1.0')
if not args['<sampling_interval>']:
args['<sampling_interval>'] = 5
name = args['<process_name>']
try:
duration = string_to_integer(args['<overall_duration>'])
except Exception:  # string_to_integer is assumed to raise on invalid input
print("duration parameter is not an integer")
return ExitStatus.BAD_DURATION
try:
interval = string_to_integer(args['<sampling_interval>'])
except Exception:
print("interval parameter is not an integer")
return ExitStatus.BAD_INTERVAL
if interval > duration:
print("interval parameter is greater than duration parameter")
return ExitStatus.INTERVAL_GT_DURATION
print("---------------------------------------------")
print(" Process Monitor")
print("---------------------------------------------")
print(f"Monitoring process \'{name}\' every {interval} sec for {duration} sec")
cancel_future_calls = call_repeatedly(interval, Process.monitor, name)
time.sleep(duration)
cancel_future_calls()
print_average()
generate_report(name, duration, interval)
raise_memory_leak_warning(name)
return ExitStatus.OK
def init():
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.append('-h')
sys.exit(main())
init()
| 31.145299
| 94
| 0.630626
| 430
| 3,644
| 5.181395
| 0.304651
| 0.029623
| 0.020197
| 0.035009
| 0.113106
| 0.037702
| 0.037702
| 0.037702
| 0
| 0
| 0
| 0.005014
| 0.233809
| 3,644
| 116
| 95
| 31.413793
| 0.79298
| 0.118002
| 0
| 0.179775
| 0
| 0.011236
| 0.227627
| 0.046773
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078652
| false
| 0
| 0.11236
| 0
| 0.370787
| 0.134831
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7d668b99ceea74e75c844a87347ac04ef02b71
| 6,740
|
py
|
Python
|
Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | 2
|
2020-12-05T07:42:55.000Z
|
2021-01-06T23:23:18.000Z
|
Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | null | null | null |
Projects/DeepLearningTechniques/MobileNet_v2/tiny_imagenet/data_loader.py
|
Tim232/Python-Things
|
05f0f373a4cf298e70d9668c88a6e3a9d1cd8146
|
[
"MIT"
] | null | null | null |
import os
import re
import numpy as np
from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import *
class DataLoader:
# todo train/test/validation => (500/50/50 per class)
def __init__(self):
self.image_width = flags.FLAGS.image_width
self.image_height = flags.FLAGS.image_height
self.batch_size = flags.FLAGS.batch_size
self.data_path = flags.FLAGS.data_path
self.img_reg = re.compile('.*\\.jpeg', re.IGNORECASE)
self.init_class()
self.init_annotation()
def init_class(self):
self.cls = {}
for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))):
self.cls[dir] = idx
def init_annotation(self):
self.anno = {}
for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')):
filename, label, *_ = line.split('\t')
self.anno[filename] = label
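# Illustrative annotation line as parsed above (an assumption about the tiny-imagenet
# layout): tab-separated filename, class label, then bounding-box fields that are ignored:
# "val_0.JPEG\tn03444034\t0\t32\t44\t62"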
def init_train(self):
train_x, train_y = [], []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')):
for file in files:
if self.img_reg.match(file):
train_x.append(os.path.join(path, file))
train_y.append(self.cls[re.match(r'(.+)_\d+\.jpeg', file, re.IGNORECASE).group(1)])
self.train_len = len(train_y)
#todo train data random sort
random_sort = np.random.permutation(self.train_len)
train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort]
# todo convert (Numpy / List) => Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x')
self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y')
def init_validation(self):
valid_x, valid_y = [], []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')):
for file in files:
if self.img_reg.match(file):
valid_x.append(os.path.join(path, file))
valid_y.append(self.cls[self.anno[file]])
self.valid_len = len(valid_y)
# todo validation data random sort
random_sort = np.random.permutation(self.valid_len)
valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort]
# todo convert (Numpy / List) -> Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x')
self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y')
def init_test(self):
test_x = []
for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'test')):
for file in files:
test_x.append(os.path.join(path, file))
self.test_len = len(test_x)
# todo convert (Numpy / List) -> Tensor
with tf.variable_scope(name_or_scope='data_tensor'):
self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x')
def train_normal(self, x, y):
with tf.variable_scope(name_or_scope='train_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def train_random_crop(self, x, y):
with tf.variable_scope(name_or_scope='train_random_crop'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.pad(x, [[0, 0], [4, 4], [4, 4], [0, 0]], name='padding')
# x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3))
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def valid_normal(self, x, y):
with tf.variable_scope(name_or_scope='valid_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x, y
def test_normal(self, x):
with tf.variable_scope(name_or_scope='test_normal'):
x = tf.read_file(filename=x)
x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
x = tf.divide(tf.cast(x, tf.float32), 255.)
x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
x = tf.divide(x, [0.2465, 0.2431, 0.2610])
return x
def dataset_batch_loader(self, dataset, ref_func, name):
with tf.variable_scope(name_or_scope=name):
dataset_map = dataset.map(ref_func).batch(self.batch_size)
iterator = dataset_map.make_one_shot_iterator()
batch_input = iterator.get_next()
return batch_input
def train_loader(self):
with tf.variable_scope('train_loader'):
'''
repeat(): restarts the dataset from the beginning once its end is reached.
shuffle(): randomly shuffles the dataset (if the buffer size passed in is larger than the total number of samples, the whole dataset is shuffled).
'''
dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch')
random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch')
return normal_batch, random_crop_batch
def valid_loader(self):
with tf.variable_scope('valid_loader'):
dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch')
return normal_batch
def test_loader(self):
with tf.variable_scope('test_loader'):
dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat()
normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch')
return normal_batch
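# Minimal usage sketch (an assumption, TF1-style; `tf` and `flags` are expected to come
# from the star-import of the constants module above):
# loader = DataLoader()
# loader.init_train()
# normal_batch, crop_batch = loader.train_loader()
# with tf.Session() as sess:
#     images, labels = sess.run(normal_batch)  # one batch of decoded, normalized images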
| 41.863354
| 145
| 0.616914
| 990
| 6,740
| 3.989899
| 0.162626
| 0.022785
| 0.038987
| 0.052911
| 0.622025
| 0.562532
| 0.506329
| 0.437722
| 0.398734
| 0.338987
| 0
| 0.034026
| 0.25
| 6,740
| 161
| 146
| 41.863354
| 0.747379
| 0.050593
| 0
| 0.294643
| 0
| 0
| 0.051732
| 0
| 0
| 0
| 0
| 0.006211
| 0
| 1
| 0.125
| false
| 0
| 0.035714
| 0
| 0.241071
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7d81f9fd3f30534398ff05abd7412a6f78b709
| 4,035
|
py
|
Python
|
MarkReport/MarkReport.py
|
dedukun/MarkReport
|
2d92c87a69db5868d14b7a59e815b9ee72d439f9
|
[
"MIT"
] | null | null | null |
MarkReport/MarkReport.py
|
dedukun/MarkReport
|
2d92c87a69db5868d14b7a59e815b9ee72d439f9
|
[
"MIT"
] | null | null | null |
MarkReport/MarkReport.py
|
dedukun/MarkReport
|
2d92c87a69db5868d14b7a59e815b9ee72d439f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Converts Markdown in the current folder to an elegant PDF report
import os
import glob
import re
import pyinotify
import subprocess
from sys import stdout, stderr
from time import time, sleep
from tempfile import gettempdir
from distutils.dir_util import copy_tree
from shutil import copyfile
from weasyprint import HTML
import argparse
parser = argparse.ArgumentParser(
description='Converts Markdown to elegant PDF reports')
parser.add_argument('--basic', dest='basic', action='store_true',
help='Do not enrich HTML with LaTeX and syntax highlighting (faster builds)')
parser.add_argument('--watch', dest='watch', action='store_true',
help='Watch the current folder for changes and rebuild automatically')
parser.add_argument('--quiet', dest='quiet', action='store_true',
help='Do not output any information')
parser.add_argument("--timeout", type=int, default=2,
help='Page generation timeout')
parser.add_argument("--base-html", type=str, default="",
help='The path to the base HTML file')
parser.set_defaults(watch=False)
args = parser.parse_args()
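# Example invocations (illustrative): build once, rebuild on changes, or use a custom base:
#   python3 MarkReport.py
#   python3 MarkReport.py --watch --timeout 5
#   python3 MarkReport.py --basic --base-html my_base.html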
# Check directory
ok = False
for file in os.listdir("."):
if file.endswith(".md"):
ok = True
break
if not ok:
stderr.write("No markdown file found in the current folder")
exit(1)
if args.base_html != "":
if not os.path.isfile(args.base_html):
stderr.write("The given base HTML file doesn't exist")
exit(1)
script_path = os.path.dirname(os.path.realpath(__file__))
# Temp dir
timestamp = str(int(time()))
tmp_dir = gettempdir() + "/" + timestamp + "_md-report/"
os.makedirs(tmp_dir, exist_ok=True)
# Headless browser
if not args.basic:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
options = Options()
options.headless = True
options.log.level = "trace"
d = DesiredCapabilities.FIREFOX
d['loggingPrefs'] = {'browser': 'ALL'}
driver = webdriver.Firefox(options=options, capabilities=d)
driver.set_page_load_timeout(args.timeout)
prev_compile_time = 0
def recompile(notifier):
if notifier is not None and (notifier.maskname != "IN_MODIFY" or notifier.pathname.endswith(".pdf")):
return
global prev_compile_time
if time() - prev_compile_time < 1:
return
prev_compile_time = time()
if not args.quiet:
stdout.write("\rBuilding the PDF file...")
stdout.flush()
files = glob.glob(tmp_dir + '/*.md')
for f in files:
os.remove(f)
if args.base_html == "":
copyfile(script_path + "/base.html", tmp_dir + "/base.html")
else:
copyfile(args.base_html, tmp_dir + "/base.html")
if not os.path.islink(tmp_dir + "/src"):
os.symlink(script_path + "/src", tmp_dir + "/src")
copy_tree(".", tmp_dir)
# Markdown parsing
subprocess.check_output(script_path + "/md-parsing " +
tmp_dir, shell=True).decode('utf-8')
html_file_name = tmp_dir + "output.html"
# Interpret JS code
if not args.basic:
driver.get("file:///" + html_file_name)
sleep(2)
elem = driver.find_element_by_xpath("//*")
interpreted_html = elem.get_attribute("outerHTML")
with open(html_file_name, "w") as html_out_file:
html_out_file.write(interpreted_html)
# Create final PDF file
pdf = HTML(html_file_name).write_pdf()
f = open("output.pdf", 'wb')
f.write(pdf)
if not args.quiet:
stdout.write("\rDone. ")
stdout.flush()
recompile(None)
if not args.watch:
if not args.basic:
driver.quit()
exit(0)
watch_manager = pyinotify.WatchManager()
event_notifier = pyinotify.Notifier(watch_manager, recompile)
watch_manager.add_watch(os.path.abspath("."), pyinotify.ALL_EVENTS, rec=True)
event_notifier.loop()
if not args.basic:
driver.quit()
| 27.827586
| 105
| 0.662949
| 533
| 4,035
| 4.874296
| 0.35272
| 0.030793
| 0.024249
| 0.021555
| 0.092379
| 0.084681
| 0
| 0
| 0
| 0
| 0
| 0.002847
| 0.216605
| 4,035
| 144
| 106
| 28.020833
| 0.819045
| 0.034449
| 0
| 0.138614
| 0
| 0
| 0.164352
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009901
| false
| 0
| 0.148515
| 0
| 0.178218
| 0.009901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7e18d0d0b30bb03c5125997bb7d29ab2737184
| 902
|
py
|
Python
|
DFS/13023.py
|
kjh9267/BOJ_Python
|
b4d2ae09c252cc9280df93ccecbd07880947827e
|
[
"Apache-2.0"
] | null | null | null |
DFS/13023.py
|
kjh9267/BOJ_Python
|
b4d2ae09c252cc9280df93ccecbd07880947827e
|
[
"Apache-2.0"
] | null | null | null |
DFS/13023.py
|
kjh9267/BOJ_Python
|
b4d2ae09c252cc9280df93ccecbd07880947827e
|
[
"Apache-2.0"
] | null | null | null |
# https://www.acmicpc.net/problem/13023
import sys
sys.setrecursionlimit(999999999)
def dfs_all():
is_possible = [False]
for node in range(N):
visited = [False for _ in range(N)]
dfs(node, 0, visited, is_possible)
if is_possible[0]:
return 1
return 0
def dfs(cur, depth, visited, is_possible):
if visited[cur]:
return
if depth == target_depth:
is_possible[0] = True
return
visited[cur] = True
for nxt in graph[cur]:
dfs(nxt, depth + 1, visited, is_possible)
visited[cur] = False
if __name__ == '__main__':
input = __import__('sys').stdin.readline
target_depth = 4
N, M = map(int, input().split())
graph = [list() for _ in range(N)]
for _ in range(M):
a, b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
print(dfs_all())
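# Worked example (illustrative): for the input
#   5 4
#   0 1
#   1 2
#   2 3
#   3 4
# the chain 0-1-2-3-4 reaches depth 4 (== target_depth), so the script prints 1.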
| 19.191489
| 49
| 0.578714
| 124
| 902
| 4.008065
| 0.379032
| 0.120724
| 0.04829
| 0.044266
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03271
| 0.288248
| 902
| 46
| 50
| 19.608696
| 0.741433
| 0.04102
| 0
| 0.066667
| 0
| 0
| 0.012746
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.266667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7ecd71a92cf19cd5b6422ac30a671d4195653c
| 1,358
|
py
|
Python
|
experiments/bst/setup.py
|
bigchaindb/privacy-protocols
|
d220f642c7c056e5ec179b47a8d0863dbc373d9d
|
[
"CC-BY-4.0"
] | 68
|
2017-08-02T14:22:59.000Z
|
2022-02-19T05:27:42.000Z
|
experiments/bst/setup.py
|
bigchaindb/privacy-protocols
|
d220f642c7c056e5ec179b47a8d0863dbc373d9d
|
[
"CC-BY-4.0"
] | 6
|
2017-08-05T18:30:14.000Z
|
2017-08-22T19:54:53.000Z
|
experiments/bst/setup.py
|
bigchaindb/privacy-protocols
|
d220f642c7c056e5ec179b47a8d0863dbc373d9d
|
[
"CC-BY-4.0"
] | 15
|
2017-08-22T16:04:26.000Z
|
2022-03-13T10:36:02.000Z
|
"""bst: BigchainDB Sharing Tools"""
from setuptools import setup, find_packages
install_requires = [
'base58~=0.2.2',
'PyNaCl~=1.1.0',
'bigchaindb-driver',
'click==6.7',
'colorama',
]
setup(
name='bst',
version='0.1.0',
description='bst: BigchainDB Sharing Tools',
long_description=(
'A collection of scripts with different patterns to share '
'private data on BigchainDB.'),
url='https://github.com/vrde/bst/',
author='Alberto Granzotto',
author_email='[email protected]',
license='AGPLv3',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'bst=bst.cli:main'
],
},
install_requires=install_requires
)
| 26.115385
| 74
| 0.594993
| 141
| 1,358
| 5.659574
| 0.64539
| 0.056391
| 0.093985
| 0.097744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020937
| 0.261414
| 1,358
| 51
| 75
| 26.627451
| 0.774676
| 0.021355
| 0
| 0.047619
| 0
| 0
| 0.540438
| 0.016629
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02381
| 0
| 0.02381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7f7c81cefa2649d2218e763e7fb484932406a9
| 8,498
|
py
|
Python
|
voting_ml/main.py
|
tommy-waltmann/voting-ml
|
327de4515d8f2f7b8e072833df20eca651621ea6
|
[
"BSD-3-Clause"
] | null | null | null |
voting_ml/main.py
|
tommy-waltmann/voting-ml
|
327de4515d8f2f7b8e072833df20eca651621ea6
|
[
"BSD-3-Clause"
] | 2
|
2021-04-20T19:04:36.000Z
|
2021-04-24T22:33:47.000Z
|
voting_ml/main.py
|
tommy-waltmann/voting-ml
|
327de4515d8f2f7b8e072833df20eca651621ea6
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import sklearn
import subprocess
from sklearn import model_selection, tree
import data
import feature_selection
import model_sel
import os
import matplotlib.pyplot as plt
import seaborn as sns
def main():
#parameter space
list_test_size = [0.1,0.15,0.2] # decide this
list_ftsel_method = ['chi2','mutlinfo','pca','dt']
list_num_features = [10,15,20] # decide this
list_Kfold = [3,5]
list_corr_threshold = [1,0.5,0.6,0.7] # decide this
param_space = {
'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 7],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [2, 5, 10],
'max_leaf_nodes': [2, 4, 6, 8, 10, 12, 15],
}
repeat = 1
#output dictrionary list
list_output_dict = []
# output directory path
outdir = "../results/run1/"
if(not os.path.isdir(outdir)):
os.mkdir(outdir)
o_models_file = open(outdir+"models.csv","w")
o_models_file.write("test size,run num,ftsel method,Kfold,number of features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test accuracy\n")
#splitting data and weights into train, test (refer to optimal_params.py)
poll_data = data.PollDataProxy(remove_nan=False, convert_to_float=False)
acc = []
'''refer to optimal_params.py. Functions from this python scripts are transferred here. (get_bad_questions() and separate_weights().)'''
for ts in list_test_size:
for run_num in range(repeat):
all_data, all_data_questions = poll_data.all_data_except(get_bad_questions())
X = all_data[:, :-1]
y = all_data[:, -1]
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y,
test_size=ts,
shuffle=True)
X_train, weights_train, questions = separate_weights(X_train, all_data_questions[:-1])
X_test, weights_test, _ = separate_weights(X_test, all_data_questions[:-1])
print("Number of Training Samples:", len(X_train))
print("Number of Testing Samples:", len(X_test))
data_dict = {
'X_train': X_train,
'X_test': X_test,
'y_train': y_train,
'y_test': y_test
}
weights_dict = {
'weights_train': weights_train,
'weights_test': weights_test}
for meth in list_ftsel_method:
'''Create class objects of the current selection method'''
for thres in list_corr_threshold:
data_ranked_dict, ranked_questions = {}, []
ftsel_obj =None
if(meth=='chi2'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_chi2"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres)
elif(meth=='mutlinfo'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_mutlinfo"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_mutlinfo(data_dict, thres)
elif(meth=='pca'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_pca"
)
data_ranked_dict,_ = ftsel_obj.ftsel_pca(data_dict)
fts = data_ranked_dict['X_train'].shape[1]  # data_sel_dict is not defined yet here; use the PCA-transformed training matrix
questions_int = list(map(str, list(range(1,fts+1,1))))
ranked_questions = ["ft_"+x for x in questions_int]
elif(meth=='dt'):
ftsel_obj = feature_selection.FeatureSelection(
necess_que_file="../extern/manage_data/list_all_questions.txt",
unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
bool_necess_que=False,
run_name="test_dt"
)
data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres)
for num in list_num_features:
data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions)
ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions)
for K in list_Kfold:
'''Here create a class onject of "model_sel" and output all the best parameters and values into "list_output_dict". Then, can create a .csv file to list all the models and accuracies.'''
model_obj = model_sel.model_sel(ts, run_num, meth, param_space, K, num, thres, data_sel_dict ,weights_dict, sel_questions, outdir).select_model()
# intermediate = model_obj.select_model()
acc.append(model_obj['test_acc'])
o_models_file.write(str(ts)+",")
o_models_file.write(str(run_num)+",")
o_models_file.write(meth+",")
o_models_file.write(str(K)+",")
o_models_file.write(str(num)+",")
o_models_file.write(str(thres)+",")
for ii in range(len(model_obj['best_features'])):
o_models_file.write(model_obj['best_features'][ii]+" ")
o_models_file.write(",")
o_models_file.write(model_obj['best_params']['criterion']+",")
o_models_file.write(str(model_obj['best_params']['max_depth'])+",")
o_models_file.write(str(model_obj['best_params']['max_leaf_nodes'])+",")
o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+",")
o_models_file.write(str(model_obj['best_params']['min_samples_split'])+",")
o_models_file.write(str(model_obj['train_acc'])+",")
o_models_file.write(str(model_obj['test_acc'])+",")
o_models_file.write("\n")
list_output_dict.append(model_obj)
'''Once all the models are run, select the model with best test accuracy and return the output dict for that model.'''
o_models_file.close()
best_index = np.argmax(acc)
best_model_dict = list_output_dict[best_index]
print("The best model parameters:")
print(best_model_dict)
def get_bad_questions():
f = open("../extern/manage_data/list_unnecessary_columns.txt", 'r')
bad_questions = f.readline().split(',')
bad_questions[-1] = bad_questions[-1][:-1] # chop the \n off the end
bad_questions.remove('weight') # need weight for training
return bad_questions
def separate_weights(X_train, column_names):
"""
Removes the column containing weights from X_train, and returns it as
a separate array.
"""
weight_column_idx = column_names.index('weight')
weights = X_train[:, weight_column_idx]
new_X_train = np.delete(X_train, weight_column_idx, axis=1)
new_questions = column_names
new_questions.remove('weight')
return new_X_train, weights, new_questions
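# Minimal sketch (illustrative values, not from the survey data) of what separate_weights
# does: drop the 'weight' column and hand it back as a separate array.
# X = np.array([[1.0, 0.5, 3.0], [2.0, 0.7, 4.0]])
# cols = ['q1', 'weight', 'q2']
# X_new, w, qs = separate_weights(X, cols)
# X_new -> [[1.0, 3.0], [2.0, 4.0]], w -> [0.5, 0.7], qs -> ['q1', 'q2']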
if __name__ == "__main__":
main()
| 47.741573
| 222
| 0.564603
| 997
| 8,498
| 4.469408
| 0.201605
| 0.029847
| 0.046903
| 0.061041
| 0.342011
| 0.278276
| 0.27693
| 0.236535
| 0.194345
| 0.194345
| 0
| 0.011119
| 0.333255
| 8,498
| 177
| 223
| 48.011299
| 0.775327
| 0.040833
| 0
| 0.117647
| 0
| 0.007353
| 0.151889
| 0.067288
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022059
| false
| 0
| 0.073529
| 0
| 0.110294
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7f9273d28271b0f56005e762e91504d2293322
| 12,334
|
py
|
Python
|
src/the_tale/the_tale/game/heroes/tests/test_logic.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/game/heroes/tests/test_logic.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
src/the_tale/the_tale/game/heroes/tests/test_logic.py
|
al-arz/the-tale
|
542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5
|
[
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class HeroDescriptionTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def test_no_description(self):
self.assertEqual(logic.get_hero_description(self.hero.id), '')
def test_has_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla')
def test_update_description(self):
logic.set_hero_description(self.hero.id, 'bla-bla')
logic.set_hero_description(self.hero.id, 'new description')
self.assertEqual(logic.get_hero_description(self.hero.id), 'new description')
class CreateHero(utils_testcase.TestCase):
def setUp(self):
super().setUp()
game_logic.create_test_map()
self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx',
email='[email protected]',
is_fast=False)
self.attributes = {'is_fast': False,
'is_bot': False,
'might': 0,
'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3),
'premium_state_end_at': datetime.datetime.fromtimestamp(0),
'ban_state_end_at': datetime.datetime.fromtimestamp(0)}
def test_default(self):
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.id, self.account.id)
self.assertEqual(hero.account_id, self.account.id)
self.assertIn(hero.gender, (game_relations.GENDER.MALE,
game_relations.GENDER.FEMALE))
self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration)
self.assertEqual(hero.habit_honor.raw_value, 0)
self.assertEqual(hero.habit_peacefulness.raw_value, 0)
self.assertTrue(hero.preferences.archetype.is_NEUTRAL)
self.assertTrue(hero.upbringing.is_PHILISTINE)
self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS)
self.assertTrue(hero.death_age.is_MATURE)
def test_account_attributes_required(self):
for attribute in self.attributes.keys():
with self.assertRaises(exceptions.HeroAttributeRequiredError):
logic.create_hero(account_id=self.account.id,
attributes={key: value for key, value in self.attributes.items() if key != attribute })
def test_account_attributes(self):
attributes = {'is_fast': random.choice((True, False)),
'is_bot': random.choice((True, False)),
'might': random.randint(1, 1000),
'active_state_end_at': datetime.datetime.fromtimestamp(1),
'premium_state_end_at': datetime.datetime.fromtimestamp(2),
'ban_state_end_at': datetime.datetime.fromtimestamp(3)}
logic.create_hero(account_id=self.account.id, attributes=attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.is_fast, attributes['is_fast'])
self.assertEqual(hero.is_bot, attributes['is_bot'])
self.assertEqual(hero.might, attributes['might'])
self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at'])
self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at'])
self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at'])
def test_attributes(self):
self.attributes.update({'race': game_relations.RACE.random(),
'gender': game_relations.GENDER.random(),
'name': game_names.generator().get_name(game_relations.RACE.random(),
game_relations.GENDER.random()),
'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),
'archetype': game_relations.ARCHETYPE.random(),
'upbringing': tt_beings_relations.UPBRINGING.random(),
'first_death': tt_beings_relations.FIRST_DEATH.random(),
'death_age': tt_beings_relations.AGE.random()})
logic.create_hero(account_id=self.account.id, attributes=self.attributes)
hero = logic.load_hero(self.account.id)
self.assertEqual(hero.race, self.attributes['race'])
self.assertEqual(hero.gender, self.attributes['gender'])
self.assertEqual(hero.utg_name, self.attributes['name'])
self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness'])
self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor'])
self.assertEqual(hero.preferences.archetype, self.attributes['archetype'])
self.assertEqual(hero.upbringing, self.attributes['upbringing'])
self.assertEqual(hero.first_death, self.attributes['first_death'])
self.assertEqual(hero.death_age, self.attributes['death_age'])
class RegisterSpendingTests(utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account()
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
self.hero.premium_state_end_at
game_tt_services.debug_clear_service()
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_not_in_place(self):
self.hero.position.set_position(0, 0)
self.assertEqual(self.hero.position.place_id, None)
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False)
def test_can_not_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(impacts, [])
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 100)
self.assertTrue(impacts[0].target_type.is_PLACE)
self.assertEqual(impacts[0].target_id, self.places[0].id)
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)
def test_can_change_place_power__below_zero(self):
self.hero.position.set_place(self.places[0])
logic.register_spending(self.hero, 100)
logic.register_spending(self.hero, -50)
impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])
self.assertEqual(len(impacts), 1)
self.assertEqual(impacts[0].amount, 150)
class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin,
utils_testcase.TestCase):
def setUp(self):
super().setUp()
self.places = game_logic.create_test_map()
account = self.accounts_factory.create_account(is_fast=True)
self.storage = game_logic_storage.LogicStorage()
self.storage.load_account_data(account)
self.hero = self.storage.accounts_to_heroes[account.id]
def place_0_cost(self):
return logic.get_places_path_modifiers(self.hero)[self.places[0].id]
def test_every_place_has_modifier(self):
modifiers = logic.get_places_path_modifiers(self.hero)
self.assertEqual(set(modifiers.keys()), {place.id for place in self.places})
def test_race_bonus(self):
self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,))
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].race = self.hero.race
def test_modifier_bonus(self):
self.assertFalse(self.places[0].is_modifier_active())
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):
self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT)
self.create_effect(self.places[0].id,
value=100500,
attribute=places_relations.ATTRIBUTE.MODIFIER_FORT,
delta=0)
self.places[0].refresh_attributes()
self.assertTrue(self.places[0].is_modifier_active())
def test_home_place(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0])
def test_friend(self):
with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0])
def test_enemy(self):
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0])
def test_tax(self):
self.places[0].attrs.size = 10
self.places[0].refresh_attributes()
self.assertEqual(self.places[0].attrs.tax, 0)
with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):
self.create_effect(self.places[0].id,
value=100,
attribute=places_relations.ATTRIBUTE.TAX,
delta=0)
self.places[0].refresh_attributes()
HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA),
(-1, 0, 0),
(-1, +1, +c.PATH_MODIFIER_MINOR_DELTA),
( 0, -1, 0),
( 0, 0, 0),
( 0, +1, 0),
(+1, -1, +c.PATH_MODIFIER_MINOR_DELTA),
(+1, 0, 0),
(+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)]
def test_habits__honor(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_honor.set_habit(0)
self.hero.habit_honor.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER)
def test_habits__peacefulness(self):
for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:
self.places[0].habit_peacefulness.set_habit(0)
self.hero.habit_peacefulness.set_habit(0)
with self.check_almost_delta(self.place_0_cost, expected_delta):
self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER)
self.hero.habit_peacefulness.set_habit(hero_direction * c.HABITS_BORDER)
| 44.688406
| 137
| 0.652749
| 1,499
| 12,334
| 5.098065
| 0.12475
| 0.064774
| 0.040304
| 0.01675
| 0.654148
| 0.632426
| 0.587804
| 0.503402
| 0.474483
| 0.443339
| 0
| 0.012953
| 0.236339
| 12,334
| 275
| 138
| 44.850909
| 0.798386
| 0
| 0
| 0.328283
| 0
| 0
| 0.053191
| 0.018163
| 0
| 0
| 0
| 0
| 0.212121
| 1
| 0.126263
| false
| 0
| 0.010101
| 0.005051
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a7fb88f2b8f8ab7d00332f23a58d29ccc1392ee
| 1,346
|
py
|
Python
|
postcipes/hydraulic_jump.py
|
timofeymukha/postcipes
|
f37b349038e26bb0295a2511295a46ef63fcd851
|
[
"MIT"
] | null | null | null |
postcipes/hydraulic_jump.py
|
timofeymukha/postcipes
|
f37b349038e26bb0295a2511295a46ef63fcd851
|
[
"MIT"
] | null | null | null |
postcipes/hydraulic_jump.py
|
timofeymukha/postcipes
|
f37b349038e26bb0295a2511295a46ef63fcd851
|
[
"MIT"
] | 1
|
2019-03-20T22:39:55.000Z
|
2019-03-20T22:39:55.000Z
|
# This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
from scipy.interpolate import interp1d
import numpy as np
import h5py
__all__ = ["HydraulicJump"]
class HydraulicJump(Postcipe):
def __init__(self, path):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.case['alphag'] = 1 - self.case['alpha.waterMean']
self.U = self.case.boundary_data("inlet", sort="y")[1]['UMean'][0, 0]
y_inlet = self.case.boundary_data("inlet", sort="y")[0][:, 1]
inlet_edge_length = tbl.edge_lengths(self.case, "inlet")
self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1]
self.Fr1 = self.U/np.sqrt(9.81*self.d)
self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2
self.Fr2 = self.U/np.sqrt(9.81*self.d2)
iso05 = tbl.isoline(self.case, "alpha.waterMean", 0.5)
idx = iso05[:, 0].argsort()
self.xfs = iso05[idx, 0]
self.yfs = iso05[idx, 1]
idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)]))
self.xtoe = self.xfs[idx_toe]
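# Numerical check (illustrative, not part of the original file) of the sequent-depth
# relation used above, d2 = d*(sqrt(1 + 8*Fr1**2) - 1)/2 (the Belanger equation):
# for d = 0.1 and Fr1 = 2, d2 = 0.1*(sqrt(33) - 1)/2 ≈ 0.237, i.e. the free surface is
# roughly 2.4x deeper downstream of the jump.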
| 33.65
| 81
| 0.653046
| 211
| 1,346
| 3.990521
| 0.412322
| 0.066508
| 0.057007
| 0.052257
| 0.114014
| 0.114014
| 0.114014
| 0
| 0
| 0
| 0
| 0.04116
| 0.205795
| 1,346
| 39
| 82
| 34.512821
| 0.746492
| 0.122585
| 0
| 0
| 0
| 0
| 0.060374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.296296
| 0
| 0.37037
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a80b1c774bd44450fbb371648857468404e7e42
| 3,350
|
py
|
Python
|
aesara/gpuarray/optdb.py
|
anirudhacharya/aesara
|
cbf91122296b68ee2ad592b2312d56f6ff65ba53
|
[
"BSD-3-Clause"
] | 1
|
2021-11-09T10:19:46.000Z
|
2021-11-09T10:19:46.000Z
|
aesara/gpuarray/optdb.py
|
anirudhacharya/aesara
|
cbf91122296b68ee2ad592b2312d56f6ff65ba53
|
[
"BSD-3-Clause"
] | null | null | null |
aesara/gpuarray/optdb.py
|
anirudhacharya/aesara
|
cbf91122296b68ee2ad592b2312d56f6ff65ba53
|
[
"BSD-3-Clause"
] | null | null | null |
from aesara.compile import optdb
from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer
from aesara.graph.optdb import (
EquilibriumDB,
LocalGroupDB,
OptimizationDatabase,
SequenceDB,
)
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
# Not used for an EquilibriumOptimizer. It has the "tracks" that we need for GraphToGPUDB.
gpu_optimizer2 = EquilibriumDB()
gpu_seqopt = SequenceDB()
# do not add 'fast_run' to these two as this would always enable gpuarray mode
optdb.register(
"gpuarray_opt",
gpu_seqopt,
optdb.__position__.get("add_destroy_handler", 49.5) - 1,
"gpuarray",
)
pool_db = LocalGroupDB()
pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
pool_db2.__name__ = "pool_db2"
matrix_ops_db = LocalGroupDB()
matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
matrix_ops_db2.__name__ = "matrix_ops_db2"
abstract_batch_norm_db = LocalGroupDB()
abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)
abstract_batch_norm_db2.__name__ = "abstract_batch_norm_db2"
abstract_batch_norm_groupopt = LocalGroupDB()
abstract_batch_norm_groupopt.__name__ = "gpuarray_batchnorm_opts"
def register_opt(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
gpu_optimizer.register(name, local_opt, "fast_run", "gpuarray", *tags)
return local_opt
return f
def register_opt2(tracks, *tags, **kwargs):
"""
Decorator for the new GraphToGPU optimizer.
Takes an extra parameter(Op) compared to register_opt decorator.
Parameters
----------
tracks : List of Op class Or Op instance or None
The Node's Op to which optimization is being applied.
tags : String
The optimization tag to which the optimizer will be registered.
"""
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
if isinstance(local_opt, OptimizationDatabase):
opt = local_opt
else:
opt = local_optimizer(tracks)(local_opt)
gpu_optimizer2.register(name, opt, "fast_run", "gpuarray", *tags)
return local_opt
return f
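# Illustrative usage (hypothetical Op and function names; the parameter list is elided):
# @register_opt2([SomeOp], 'fast_compile')
# def local_gpua_some_op(...):
#     ...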
def register_inplace(*tags, **kwargs):
def f(local_opt):
name = (kwargs and kwargs.pop("name")) or local_opt.__name__
optdb.register(
name,
TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace),
60,
"fast_run",
"inplace",
"gpuarray",
*tags,
)
return local_opt
return f
# Register GPU convolution implementation
# They are tried in a specific order so we can control
# which ones take precedence over others.
abstractconv_groupopt = LocalGroupDB()
abstractconv_groupopt.__name__ = "gpuarray_abstractconv_opts"
register_opt("fast_compile")(abstractconv_groupopt)
class GraphToGPUDB(OptimizationDatabase):
"""
Retrieves the list local optimizers based on the optimizer flag's value
from EquilibriumOptimizer by calling the method query.
"""
def query(self, *tags, **kwtags):
from aesara.gpuarray.opt import GraphToGPU
opt = gpu_optimizer2.query(*tags, **kwtags)
return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)
| 28.632479
| 90
| 0.711343
| 406
| 3,350
| 5.573892
| 0.349754
| 0.060097
| 0.045073
| 0.030491
| 0.201061
| 0.14008
| 0.14008
| 0.125497
| 0.125497
| 0.125497
| 0
| 0.007124
| 0.203881
| 3,350
| 116
| 91
| 28.87931
| 0.841395
| 0.223284
| 0
| 0.238806
| 0
| 0
| 0.083695
| 0.028425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104478
| false
| 0
| 0.059701
| 0
| 0.283582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a810acd6b334888a1432a3e590727946894d380
| 4,579
|
py
|
Python
|
jenkinsapi/node.py
|
imsardine/jenkinsapi
|
d4bfac62a4d01394ff41540c4d8d897ab566f4eb
|
[
"MIT"
] | null | null | null |
jenkinsapi/node.py
|
imsardine/jenkinsapi
|
d4bfac62a4d01394ff41540c4d8d897ab566f4eb
|
[
"MIT"
] | null | null | null |
jenkinsapi/node.py
|
imsardine/jenkinsapi
|
d4bfac62a4d01394ff41540c4d8d897ab566f4eb
|
[
"MIT"
] | null | null | null |
"""
Module for jenkinsapi Node class
"""
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.custom_exceptions import PostRequired
import logging
try:
from urllib import quote as urlquote
except ImportError:
# Python3
from urllib.parse import quote as urlquote
log = logging.getLogger(__name__)
class Node(JenkinsBase):
"""
Class to hold information on nodes that are attached as slaves
to the master jenkins instance
"""
def __init__(self, baseurl, nodename, jenkins_obj):
"""
Init a node object by providing all relevant pointers to it
:param baseurl: basic url for querying information on a node
:param nodename: hostname of the node
:param jenkins_obj: ref to the jenkins obj
:return: Node obj
"""
self.name = nodename
self.jenkins = jenkins_obj
JenkinsBase.__init__(self, baseurl)
def get_jenkins_obj(self):
return self.jenkins
def __str__(self):
return self.name
def is_online(self):
return not self.poll(tree='offline')['offline']
def is_temporarily_offline(self):
return self.poll(tree='temporarilyOffline')['temporarilyOffline']
def is_jnlpagent(self):
return self._data['jnlpAgent']
def is_idle(self):
return self._data['idle']
def set_online(self):
"""
Set node online.
Before changing state, verify the client state: if the node is 'offline'
but 'temporarilyOffline' is not set, the client has connection problems
and an AssertionError is raised.
If the node state has not changed after the call, an AssertionError is raised.
"""
self.poll()
# Before change state check if client is connected
if self._data['offline'] and not self._data['temporarilyOffline']:
raise AssertionError("Node is offline and not marked as "
"temporarilyOffline, check client "
"connection: offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
elif self._data['offline'] and self._data['temporarilyOffline']:
self.toggle_temporarily_offline()
if self._data['offline']:
raise AssertionError("The node state is still offline, "
"check client connection:"
" offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
def set_offline(self, message="requested from jenkinsapi"):
"""
Set node offline.
If the node state has not changed after the call, an AssertionError is raised.
:param message: optional string explaining why you are taking this
node offline
"""
if not self._data['offline']:
self.toggle_temporarily_offline(message)
data = self.poll(tree='offline,temporarilyOffline')
if not data['offline']:
raise AssertionError("The node state is still online:" +
"offline = %s , temporarilyOffline = %s" %
(data['offline'],
data['temporarilyOffline']))
def toggle_temporarily_offline(self, message="requested from jenkinsapi"):
"""
Switches state of connected node (online/offline) and
set 'temporarilyOffline' property (True/False)
Calling the same method again will bring node status back.
:param message: optional string that can be used to explain why you
are taking this node offline
"""
initial_state = self.is_temporarily_offline()
url = self.baseurl + \
"/toggleOffline?offlineMessage=" + urlquote(message)
try:
html_result = self.jenkins.requester.get_and_confirm_status(url)
except PostRequired:
html_result = self.jenkins.requester.post_and_confirm_status(
url,
data={})
self.poll()
log.debug(html_result)
state = self.is_temporarily_offline()
if initial_state == state:
raise AssertionError(
"The node state has not changed: temporarilyOffline = %s" %
state)
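# Illustrative usage sketch (an assumption: the surrounding jenkinsapi package provides a
# Jenkins object that can hand out Node instances; the names below are illustrative):
# node = jenkins_obj.get_node('build-agent-1')
# if node.is_online():
#     node.set_offline(message='maintenance window')
# ...
# node.set_online()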
| 37.227642
| 79
| 0.580913
| 466
| 4,579
| 5.575107
| 0.281116
| 0.036952
| 0.034642
| 0.017321
| 0.265974
| 0.208622
| 0.177059
| 0.177059
| 0.148576
| 0.110855
| 0
| 0.000331
| 0.340249
| 4,579
| 122
| 80
| 37.532787
| 0.859649
| 0.233675
| 0
| 0.144928
| 0
| 0
| 0.198838
| 0.017131
| 0
| 0
| 0
| 0
| 0.057971
| 1
| 0.144928
| false
| 0
| 0.086957
| 0.086957
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8396f2f3ab51a489f606b57146366f183507ea
| 14,346
|
py
|
Python
|
virtualscreening/vina/spark/buried_areas.py
|
rodrigofaccioli/drugdesign
|
de15880af361a010729b1f4fbc8a75a2b36688a6
|
[
"Apache-2.0"
] | 3
|
2015-01-19T20:12:59.000Z
|
2019-02-21T18:43:04.000Z
|
virtualscreening/vina/spark/buried_areas.py
|
rodrigofaccioli/drugdesign
|
de15880af361a010729b1f4fbc8a75a2b36688a6
|
[
"Apache-2.0"
] | 22
|
2015-01-05T16:48:54.000Z
|
2017-01-21T16:36:10.000Z
|
virtualscreening/vina/spark/buried_areas.py
|
rodrigofaccioli/drugdesign
|
de15880af361a010729b1f4fbc8a75a2b36688a6
|
[
"Apache-2.0"
] | 11
|
2015-03-03T13:32:24.000Z
|
2020-04-03T11:22:24.000Z
|
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
import ConfigParser as configparser
from subprocess import Popen, PIPE
from datetime import datetime
from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter
import os, sys
from os_utils import preparing_path
from gromacs_utils import get_value_from_xvg_sasa
from pdb_io import replace_chain_atom_line
from database_io import load_database
def sorting_buried_area(sc, buried_areaRDD):
sqlCtx = SQLContext(sc)
buried_areaRDD = sc.parallelize(buried_areaRDD)
#buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5]) ) )
buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1]) ) )
buried_area_table = sqlCtx.createDataFrame(buried_areaRDD)
buried_area_table.registerTempTable("buried_area")
buried_area_sorted_by_buried_total = sqlCtx.sql("SELECT * FROM buried_area ORDER BY buried_total DESC") #buried_lig_lig_perc
return buried_area_sorted_by_buried_total
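# Illustrative input (an assumption, based on the parsing helpers below): buried_areaRDD
# arrives as a list of (pose, buried_area_total) tuples read from the .area files, e.g.
# [("compl_receptor_-_ligand01_model_1", 12.3456), ("compl_receptor_-_ligand02_model_3", 9.87)]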
def save_receptor_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
f_buried_area = open(path_file_buried_area,"w")
for area in buried_area_sorted_by_lig_rec_perc:
#splited_line = area[0].split("_-_")
#aux_recep = splited_line[0]
#aux_lig = str(splited_line[1])
#preparing receptor
#receptor = str(str(aux_recep).replace("compl_", " ")).strip()
#preparing ligand
#splited_aux_lig = str(aux_lig).split(get_separator_filename_mode())
#ligand = splited_aux_lig[0]
#model = splited_aux_lig[1]
pose = area[0]
buried_total = "{:.4f}".format(area[1])
#line = receptor+"\t"+ligand+"\t"+model+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\n"
line = pose+"\t"+str(buried_total)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
f_buried_area = open(path_file_buried_area,"w")
line = "# buried_area_total[nm2]\tpose"+"\n"
f_buried_area.write(line)
for area in buried_area_sorted_by_lig_rec_perc:
#receptor = area[0]
#ligand = area[1]
#model = area[2]
pose = str(str(area[0]).replace("compl_", " ")).strip()
buried_total = "{:.4f}".format(area[1])
#buried_lig_rec_perc = "{:.4f}".format(area[4])
#buried_lig_lig_perc = "{:.4f}".format(area[5])
#line = receptor+"\t"+ligand+"\t"+str(model)+"\t"+str(buried_lig_rec)+"\t"+str(buried_lig_rec_perc)+"\t"+str(buried_lig_lig_perc)+"\n"
line = str(buried_total)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def save_normalized_buried_area(path_file_buried_area, full_dataRDD):
f_buried_area = open(path_file_buried_area,"w")
line = "# normalized_buried_area_total[nm2]\tpose"+"\n"
f_buried_area.write(line)
for area in full_dataRDD.collect():
pose = str(str(area[0]).replace("compl_", " ")).strip()
normalized_buried_total = "{:.4f}".format(area[1])
line = str(normalized_buried_total)+"\t"+str(pose)+"\n"
f_buried_area.write(line)
f_buried_area.close()
def loading_lines_from_area_files(line):
line_splited = str(line).split()
#line_ret = ( str(line_splited[0]), str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) )
line_ret = ( str(line_splited[0]), float(line_splited[1]) )
return line_ret
def get_files_area(mypath):
only_mol2_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
if file.endswith(".area"):
f_path = os.path.join(root,file)
only_mol2_file.append(f_path)
return only_mol2_file
def save_log(finish_time, start_time):
log_file_name = 'vs_buried_areas.log'
current_path = os.getcwd()
path_file = os.path.join(current_path, log_file_name)
log_file = open(path_file, 'w')
diff_time = finish_time - start_time
msg = 'Starting ' + str(start_time) +'\n'
log_file.write(msg)
msg = 'Finishing ' + str(finish_time) +'\n'
log_file.write(msg)
msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) +'\n'
log_file.write(msg)
def main():
config = configparser.ConfigParser()
config.read('config.ini')
#Path for Gromacs project
gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
#Path where the PDB ligands are - they did NOT participate in docking
pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path')
#Path that contains all files for analysis
path_analysis = config.get('DEFAULT', 'path_analysis')
#Ligand Database file
ligand_database = config.get('DEFAULT', 'ligand_database_path_file')
#Path where all pdb receptors are
path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
#Path for saving pdb files of models generated by VS
path_analysis_pdb = get_directory_pdb_analysis(path_analysis)
# Create SPARK config
maxResultSize = str(config.get('SPARK', 'maxResultSize'))
conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))
# Create context
sc = SparkContext(conf=conf)
sqlCtx = SQLContext(sc)
#Adding Python source files
#Path for drugdesign project
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
sc.addPyFile(os.path.join(path_spark_drugdesign,"vina_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"os_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"gromacs_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"pdb_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"database_io.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign,"json_utils.py"))
#Adding bash scripts
sc.addFile(os.path.join(path_spark_drugdesign,"make_ndx_buried_area_total.sh"))
sc.addFile(os.path.join(path_spark_drugdesign,"make_sasa_rec_buried_area_total.sh"))
#Parameters from command line
#Indicates probe. Example: 0.14
probe = float(sys.argv[1])
#Indicates ndots. Example: 24
ndots = int(sys.argv[2])
#Broadcast
path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb)
gromacs_path = sc.broadcast(gromacs_path)
pdb_ligand_path = sc.broadcast(pdb_ligand_path)
probe = sc.broadcast(probe)
ndots = sc.broadcast(ndots)
start_time = datetime.now()
os.environ["GMX_MAXBACKUP"]="-1"
#Loading all PDB receptor files into memory
list_all_pdb_receptor_files_path = []
all_receptor_for_complex = get_files_pdb(path_receptor_pdb)
for receptor in all_receptor_for_complex:
list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor))
#Computing Buried areas
for pdb_receptor_files in list_all_pdb_receptor_files_path:
#Getting receptor name from full path
base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0]))
#PDB file loaded into memory is sent by broadcast
pdb_file_receptor = pdb_receptor_files[1]
pdb_file_receptor = sc.broadcast(pdb_file_receptor)
#Loading PDB model files based on receptor into memory
base_file_name_receptor_for_filter = base_file_name_receptor+"_-_"
all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter)
all_model_for_complexRDD = sc.parallelize(all_model_for_complex)
all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect()
# ********** Starting function **********************************************************
def compute_buried_area(pdb_complex):
chZ = "chZ"
sasa_complex = -1.0
sasa_rec = -1.0
sasa_lig = -1.0
buried_total = -1.0
returned_list = []
try:
base_name = get_name_model_pdb(pdb_complex)
ligand_name = get_ligand_from_receptor_ligand_model(base_name)
f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+".pdb")
f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+".ndx")
f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_complex.xvg")
f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_rec.xvg")
f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value,base_name+"_sasa_lig.xvg")
# Makes the index file with the ligand (chain z) and the rest (non chain z)
script_make_ndx = SparkFiles.get("make_ndx_buried_area_total.sh") #Getting bash script that was copied by addFile command
command = script_make_ndx + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
command = gromacs_path.value +"gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface System " + " -output System "+ " -xvg none " + " -o " + f_temp_sasa_complex
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
# Makes f_temp_sasa_rec file
script_make_sasa_rec = SparkFiles.get("make_sasa_rec_buried_area_total.sh") #Getting bash script that was copied by addFile command
command = script_make_sasa_rec + " " + gromacs_path.value + " "+ pdb_complex + " "+ f_ndx + " " + f_temp_sasa_rec
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
command = gromacs_path.value +"gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface chZ " + " -output chZ "+ " -xvg none " + " -o " + f_temp_sasa_lig
process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex)
sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec)
sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig)
buried_total = sasa_rec + sasa_lig - sasa_complex
#Generating result - see column sorting because the resulting file will be created based on this sorting
returned_list = (base_name, buried_total)
except Exception:
returned_list = (base_name, float(0))
#Deleting files
if os.path.exists(f_ndx):
os.remove(f_ndx)
if os.path.exists(f_temp_sasa_complex):
os.remove(f_temp_sasa_complex)
if os.path.exists(f_temp_sasa_rec):
os.remove(f_temp_sasa_rec)
if os.path.exists(f_temp_sasa_lig):
os.remove(f_temp_sasa_lig)
return returned_list
# ********** Finish function **********************************************************
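#Worked example of the buried-area bookkeeping above (illustrative numbers only):
#if SASA(receptor) = 150.0 nm2, SASA(ligand) = 4.0 nm2 and SASA(complex) = 150.8 nm2,
#then buried_total = 150.0 + 4.0 - 150.8 = 3.2 nm2, i.e. the surface hidden on binding.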
# ********** Starting function **********************************************************
def save_model_receptor(list_receptor_model_file):
receptor_file = pdb_file_receptor.value #Obtained from broadcast
model_file = list_receptor_model_file[0]
full_path_for_save_complex = list_receptor_model_file[1]
#Open file for writing the complex
f_compl = open(full_path_for_save_complex, "w")
#Insert lines of receptor
for item in receptor_file:
f_compl.write(item)
#Insert lines of model and insert Z chain
for item in model_file:
item = replace_chain_atom_line(item,"d","z")
f_compl.write(item)
f_compl.close()
# ********** Finish function **********************************************************
# ********** Starting function **********************************************************
def build_list_model_for_complex(model):
full_path_model = model[0]
model_file = model[1]
path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from broadcast
#Building complex file based on model file name
base_name_model = get_name_model_pdb(full_path_model)
complex_name = "compl_"+base_name_model+".pdb"
full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name)
list_receptor_model_file = (model_file, full_path_for_save_complex)
save_model_receptor(list_receptor_model_file)
list_ret = compute_buried_area(full_path_for_save_complex)
os.remove(full_path_for_save_complex)
return list_ret
# ********** Finish function **********************************************************
all_model_filesRDD = sc.parallelize(all_model_filesRDD)
all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect()
#Saving buried area of receptor
full_area_file = os.path.join(path_analysis,base_file_name_receptor+".area")
save_receptor_buried_area(full_area_file, all_model_filesRDD)
#Loading all area files
all_area_file = os.path.join(path_analysis,"*.area")
buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect()
#Sorting by buried_total column
buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD)
buried_area_sorted_by_buried_total.cache()
buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total) ).collect()
#Saving buried area file
path_file_buried_area = os.path.join(path_analysis, "summary_buried_areas_total.dat")
save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST)
#Calculating normalized buried area
#Loading database
rdd_database = load_database(sc, ligand_database)
#Creating Dataframe
database_table = sqlCtx.createDataFrame(rdd_database)
database_table.registerTempTable("database")
number_pose_ligandRDD = buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect()
number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD)
number_pose_ligand_table.registerTempTable("buried_area_total_sort")
sql = """
SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area
FROM database a
JOIN buried_area_total_sort b ON b.ligand = a.ligand
ORDER BY normalized_buried_area DESC
"""
#Getting all data
full_dataRDD = sqlCtx.sql(sql)
#Saving normalized buried area file
path_file_buried_area = os.path.join(path_analysis, "summary_normalized_buried_areas.dat")
save_normalized_buried_area(path_file_buried_area, full_dataRDD)
#Removing all area files
all_area_files = get_files_area(path_analysis)
for area_file in all_area_files:
os.remove(area_file)
finish_time = datetime.now()
save_log(finish_time, start_time)
main()
| 43.87156
| 253
| 0.748292
| 2,164
| 14,346
| 4.58549
| 0.123845
| 0.061473
| 0.020155
| 0.023985
| 0.424972
| 0.3636
| 0.293964
| 0.249723
| 0.217878
| 0.201552
| 0
| 0.005366
| 0.116688
| 14,346
| 326
| 254
| 44.006135
| 0.777699
| 0.20856
| 0
| 0.149533
| 0
| 0
| 0.106228
| 0.037034
| 0.009346
| 0
| 0
| 0
| 0
| 1
| 0.051402
| false
| 0
| 0.051402
| 0
| 0.126168
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a85a524c6381c0f4e277dd284d072a8b41daaac
| 3,427
|
py
|
Python
|
queue/animal_queue.py
|
cozek/code-practice
|
bf3098dbeb502cab2e22ce7ea73c2aa05a3caf80
|
[
"MIT"
] | null | null | null |
queue/animal_queue.py
|
cozek/code-practice
|
bf3098dbeb502cab2e22ce7ea73c2aa05a3caf80
|
[
"MIT"
] | null | null | null |
queue/animal_queue.py
|
cozek/code-practice
|
bf3098dbeb502cab2e22ce7ea73c2aa05a3caf80
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from typing import Any, Union
class Animal:
def __init__(self, name: str) -> None:
self.name = name
def set_order(self, order: int) -> None:
self.order = order
def peek_order(self) -> int:
return self.order
def __str__(self) -> str:
return f"{self.name}"
class Node:
def __init__(self, data: Any):
self.data = data
self.next_node = None
class LinkedList:
def __init__(self) -> None:
self.head = None
self.tail = None
def __str__(self) -> str:
current = self.head
string = f""
while current.next_node is not None:
string += f"{current.data} -> "
current = current.next_node
return string + "END"
def is_empty(self) -> bool:
if self.head is None:
return True
else:
return False
def insert(self, item: Any) -> None:
if self.is_empty():
self.head = Node(item)
self.tail = self.head
else:
new_node = Node(item)
self.tail.next_node = new_node
self.tail = self.tail.next_node
def remove(self) -> Any:
if self.head is None:
raise IndexError("Empty LinkedList!")
else:
data = self.head.data
self.head = self.head.next_node
return data
def peak(self):
return self.head.data
class Dog(Animal):
def __init__(self, name: str):
super().__init__(name)
class Cat(Animal):
def __init__(self, name: str):
super().__init__(name)
class AnimalQueue:
def __init__(self) -> None:
self.dogs = LinkedList()
self.cats = LinkedList()
self.order = 0
def enqueue(self, animal: Union[Dog, Cat]) -> None:
if not isinstance(animal, (Dog, Cat)):
raise Exception("Expected Dog or Cat!")
else:
animal.set_order(self.order)
self.order += 1
if isinstance(animal, Dog):
self.dogs.insert(animal)
elif isinstance(animal, Cat):
self.cats.insert(animal)
def dequeAny(self) -> Union[Dog, Cat]:
if self.dogs.is_empty():
return self.dequeCat()
elif self.cats.is_empty():
return self.dequeDog()
if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order():
return self.dequeCat()
else:
return self.dequeDog()
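# dequeAny returns whichever species has waited longest: orders are assigned at
# enqueue time, so comparing the two heads' peek_order() picks the earlier arrival.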
def print_cats(self) -> str:
string = ""
cat = self.cats.head
while cat is not None:
string += f"{cat.data.name} {cat.data.peek_order()} | "
cat = cat.next_node
return string
def dequeDog(self) -> Dog:
return self.dogs.remove()
def dequeCat(self) -> Cat:
return self.cats.remove()
def main():
q = AnimalQueue()
dogs = [Dog("d1"), Dog("d2"), Dog("d3")]
cats = [Cat("c1"), Cat("c2"), Cat("c3")]
both = []
while cats != []:
both.append(cats.pop())
both.append(dogs.pop())
[q.enqueue(animal) for animal in both]
string = ""
for anim in both:
string += f"{anim.name} {anim.order} | "
print(string)
# print(q.print_cats())
get = q.dequeDog()
print(get.order,get.name)
get = q.dequeAny()
print(get.order,get.name)
if __name__ == "__main__":
main()
| 24.133803
| 79
| 0.541873
| 424
| 3,427
| 4.216981
| 0.188679
| 0.044743
| 0.036913
| 0.028523
| 0.139821
| 0.060403
| 0.04698
| 0.04698
| 0.04698
| 0.04698
| 0
| 0.003915
| 0.329151
| 3,427
| 141
| 80
| 24.304965
| 0.773815
| 0.012547
| 0
| 0.214953
| 0
| 0
| 0.046718
| 0.006801
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186916
| false
| 0
| 0.009346
| 0.046729
| 0.383178
| 0.037383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8756b0429224a6d5fdf07d18eb3a9eed2f7a05
| 2,373
|
py
|
Python
|
auth_iam/dashboard/auth/routes.py
|
santiher/dash-auth-example
|
9854bfe953f86a0c7ed97660da30b7b7d1d3069f
|
[
"MIT"
] | 11
|
2020-03-05T18:50:07.000Z
|
2022-02-16T19:45:35.000Z
|
auth_iam/dashboard/auth/routes.py
|
santiher/dash-auth-example
|
9854bfe953f86a0c7ed97660da30b7b7d1d3069f
|
[
"MIT"
] | null | null | null |
auth_iam/dashboard/auth/routes.py
|
santiher/dash-auth-example
|
9854bfe953f86a0c7ed97660da30b7b7d1d3069f
|
[
"MIT"
] | null | null | null |
import os
from functools import wraps
from os.path import join as join_path
from dash import Dash
from flask import make_response, render_template_string, redirect
excluded_resources_endpoints = (
'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',
'/_user', '/auth')
def add_routes(app, authorizer):
"""Adds authentication endpoints to a flask app.
Decorates other endpoints to grant access.
The endpoints are:
* /login
* Method: GET
* /logout
* Method: GET
* Erases cookies
* /auth
* Method: GET
* Validates cookies if present or header authentication
* Header:
'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'
* Sets cookies on login
* Rejects unauthorized users
Parameters
----------
app: flask.Flask or dash.Dash
The flask or dash application
excluded_resources_endpoints: tuple(str)
Tuple with endpoints where access must not be checked.
"""
def login():
ok, _ = authorizer.validate()
if ok:
return make_response(redirect('/'), 307)
return render_template_string(login_template)
def logout():
_, response = authorizer.clean_cookie()
return response
def auth():
_, response = authorizer.validate()
return response
def authorize_endpoint(function):
@wraps(function)
def authorized_function(*args, **kwargs):
ok, response = authorizer.validate()
if ok:
return function(*args, **kwargs)
return response
return authorized_function
if isinstance(app, Dash):
app = app.server
login_template = load_template('login.html')
app.add_url_rule('/auth', '/auth', auth)
app.add_url_rule('/login', '/login', login)
app.add_url_rule('/logout', '/logout', logout)
for endpoint, function in app.view_functions.items():
if endpoint not in excluded_resources_endpoints:
app.view_functions[endpoint] = authorize_endpoint(function)
def load_template(filename):
"""Loads the login html template."""
pyfile_path = os.path.dirname(os.path.abspath(__file__))
path = join_path(pyfile_path, 'templates', filename)
with open(path, 'r') as f:
return f.read().strip()
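# Hypothetical wiring sketch (names are illustrative; the authorizer object is assumed
# to expose validate() and clean_cookie() as used above):
#   from dash import Dash
#   app = Dash(__name__)
#   add_routes(app, authorizer=my_authorizer)
# After this call, /login, /logout and /auth exist and every other endpoint is wrapped
# by authorize_endpoint.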
| 29.296296
| 77
| 0.634218
| 268
| 2,373
| 5.447761
| 0.391791
| 0.012329
| 0.053425
| 0.026712
| 0.038356
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001702
| 0.257059
| 2,373
| 80
| 78
| 29.6625
| 0.826432
| 0.268858
| 0
| 0.119048
| 0
| 0
| 0.072605
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.119048
| 0
| 0.47619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8988f59a7e29aadd9cfcc08e9db137ae34f210
| 3,677
|
py
|
Python
|
2021/day15/aoc-2021-d15.py
|
bbornstein/aoc
|
624dacfe591a46aa34e3071b894076cf60091e7d
|
[
"MIT"
] | null | null | null |
2021/day15/aoc-2021-d15.py
|
bbornstein/aoc
|
624dacfe591a46aa34e3071b894076cf60091e7d
|
[
"MIT"
] | null | null | null |
2021/day15/aoc-2021-d15.py
|
bbornstein/aoc
|
624dacfe591a46aa34e3071b894076cf60091e7d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15)
# Author: Ben Bornstein
import collections
import heapq
Point = collections.namedtuple('Point', ['x', 'y'])
Point.__add__ = lambda self, q: Point(self[0] + q[0], self[1] + q[1])
class RiskMap:
def __init__ (self):
"""Creates a new (empty) risk-level map.
Individual risk-levels at specific positions are accessible via
`RiskMap[Point]`.
See also `RiskMap.load()`
"""
self._factor = 1
self._levels = [ ]
self._nrows = 0
self._ncols = 0
def __getitem__ (self, pos):
"""Returns the risk-level at position `pos`, i.e. `RiskMap[pos]`."""
if self._factor > 1:
risk = self._levels[pos.y % self._nrows][pos.x % self._ncols]
risk += pos.y // self._nrows
risk += pos.x // self._ncols
if risk > 9:
risk = risk % 9
else:
risk = self._levels[pos.y][pos.x]
return risk
@staticmethod
def load (filename):
"""Creates a new risk-level map from `filename`."""
rmap = RiskMap()
with open(filename) as stream:
for line in stream.readlines():
rmap.append([ int(c) for c in line.strip() ])
return rmap
@property
def ncols (self):
"""The number of columns in this `RiskMap`."""
return self._factor * self._ncols
@property
def nrows (self):
"""The number of rows in this `RiskMap`."""
return self._factor * self._nrows
def append (self, row):
"""Appends `row` to this `RiskMap`."""
if len(self._levels) == 0:
self._ncols = len(row)
self._levels.append(row)
self._nrows += 1
def neighbors (self, pos):
"""Iterable 4-neighbors (up, down, left, right) for `pos`ition."""
deltas = (0, -1), (0, 1), (-1, 0), (1, 0)
adjacent = ( pos + Point(*delta) for delta in deltas )
yield from ( p for p in adjacent if self.valid(p) )
def resize (self, factor):
"""Resizes this `RiskMap` by setting its expansion factor to `factor`
copies both horizontally and vertically.
"""
self._factor = factor
def valid (self, pos):
"""Indicates whether or not `pos` is valid (inside this `RiskMap`)."""
return pos.y in range(0, self.nrows) and pos.x in range(0, self.ncols)
def search (rmap, start, end):
"""Searches `RiskMap` `rmap` (breadth-first) to find the least risky
path from `start` to `end`. Returns the total risk of that path.
"""
risk = 0
queue = [ (rmap[p], p) for p in rmap.neighbors(start) ]
visited = { start }
heapq.heapify(queue)
while len(queue) > 0:
risk, current = heapq.heappop(queue)
if current == end:
break
for pos in rmap.neighbors(current):
if pos not in visited:
heapq.heappush( queue, ((rmap[pos] + risk), pos) )
visited.add(pos)
return risk
filename = 'aoc-2021-d15.txt'
rmap = RiskMap.load(filename)
start = Point(0, 0)
end = Point(rmap.ncols - 1, rmap.nrows - 1)
# Part 1
#
# Q: Lowest total risk of any path from the top left to the bottom right?
# A: Total Risk = 755
print(f'Part 1: Total Risk = {search(rmap, start, end):4}')
# Part 2
#
# Q: Lowest total risk of any path from the top left to the bottom right?
# A: Total Risk = 3016
rmap.resize(factor=5)
end = Point(rmap.ncols - 1, rmap.nrows - 1)
print(f'Part 2: Total Risk = {search(rmap, start, end)}')
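# Worked example of the Part 2 tiling in RiskMap.__getitem__ (illustrative): a cell
# with base risk 8, read two tile-copies to the right and one tile-copy down, becomes
# 8 + 2 + 1 = 11, which is > 9 and wraps via 11 % 9 to a risk of 2.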
| 25.184932
| 78
| 0.56731
| 499
| 3,677
| 4.116232
| 0.312625
| 0.030672
| 0.014606
| 0.02629
| 0.166504
| 0.148978
| 0.122687
| 0.090555
| 0.063291
| 0.063291
| 0
| 0.024504
| 0.300789
| 3,677
| 145
| 79
| 25.358621
| 0.774407
| 0.283927
| 0
| 0.088235
| 0
| 0
| 0.047354
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.029412
| 0
| 0.279412
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8a2f0c0a2dfbb11e77c498d88fd4e6f73817b2
| 2,168
|
py
|
Python
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py
|
limingu/azure-cli-extensions
|
1bc29f089f4da42ab8905e440f2f46d6b5b0aa97
|
[
"MIT"
] | 2
|
2021-06-05T17:51:26.000Z
|
2021-11-17T11:17:56.000Z
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py
|
limingu/azure-cli-extensions
|
1bc29f089f4da42ab8905e440f2f46d6b5b0aa97
|
[
"MIT"
] | 1
|
2020-06-12T01:39:40.000Z
|
2020-06-12T01:39:40.000Z
|
src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/database_account_list_keys_result_py3.py
|
anpaz-msft/azure-cli-extensions
|
847fd487fe61e83f2a4163a9393edc9555267bc2
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult
class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult):
"""The access keys for the given database account.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar primary_readonly_master_key: Base 64 encoded value of the primary
read-only key.
:vartype primary_readonly_master_key: str
:ivar secondary_readonly_master_key: Base 64 encoded value of the
secondary read-only key.
:vartype secondary_readonly_master_key: str
:ivar primary_master_key: Base 64 encoded value of the primary read-write
key.
:vartype primary_master_key: str
:ivar secondary_master_key: Base 64 encoded value of the secondary
read-write key.
:vartype secondary_master_key: str
"""
_validation = {
'primary_readonly_master_key': {'readonly': True},
'secondary_readonly_master_key': {'readonly': True},
'primary_master_key': {'readonly': True},
'secondary_master_key': {'readonly': True},
}
_attribute_map = {
'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'},
'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'},
'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'},
'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(DatabaseAccountListKeysResult, self).__init__(**kwargs)
self.primary_master_key = None
self.secondary_master_key = None
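    # Descriptive note: every key field above is flagged readonly in _validation, so
    # the values are populated from server responses during deserialization and any
    # value set on the client side is ignored when a request is serialized.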
| 40.90566
| 98
| 0.66928
| 239
| 2,168
| 5.811715
| 0.389121
| 0.116631
| 0.097912
| 0.069114
| 0.267099
| 0.138229
| 0.138229
| 0.138229
| 0.138229
| 0.12671
| 0
| 0.00559
| 0.174816
| 2,168
| 52
| 99
| 41.692308
| 0.770822
| 0.502306
| 0
| 0
| 0
| 0
| 0.342971
| 0.161515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8aa73cf4c767bf7b906925d1382b404b94f301
| 1,834
|
py
|
Python
|
Google/google_books/scrape_google_books.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
Google/google_books/scrape_google_books.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
Google/google_books/scrape_google_books.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
from parsel import Selector
import requests, json, re
params = {
"q": "richard branson",
"tbm": "bks",
"gl": "us",
"hl": "en"
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}
html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)
books_results = []
# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)
for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
title = book_result.css(".DKV0Md::text").get()
link = book_result.css(".bHexk a::attr(href)").get()
displayed_link = book_result.css(".tjvcx::text").get()
snippet = book_result.css(".cmlJmd span::text").get()
author = book_result.css(".fl span::text").get()
author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
date_published = book_result.css(".fl+ span::text").get()
preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()
books_results.append({
"title": title,
"link": link,
"displayed_link": displayed_link,
"snippet": snippet,
"author": author,
"author_link": author_link,
"date_published": date_published,
"preview_link": preview_link,
"more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
"thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
})
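# Minimal inspection of the collected results (a sketch; json is imported above but
# otherwise unused in this snippet):
print(json.dumps(books_results, indent=2, ensure_ascii=False))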
| 39.869565
| 135
| 0.657579
| 254
| 1,834
| 4.606299
| 0.437008
| 0.08547
| 0.1
| 0.05812
| 0.170085
| 0.145299
| 0.107692
| 0.063248
| 0.063248
| 0
| 0
| 0.030225
| 0.152127
| 1,834
| 45
| 136
| 40.755556
| 0.722187
| 0.016903
| 0
| 0
| 0
| 0.054054
| 0.367018
| 0.069406
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054054
| 0
| 0.054054
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8c544c5af946feba8528e8627d4c6fff3edf22
| 3,495
|
py
|
Python
|
werobot/utils.py
|
lilac/WeRobot
|
29fd70631b736a0c339f16f7729ea89f986c8bae
|
[
"MIT"
] | 2
|
2018-06-03T16:32:07.000Z
|
2018-06-03T16:32:10.000Z
|
werobot/utils.py
|
Milleree/WeRoBot
|
f9777f792d55ae70e7262f13e6e3f3667a167036
|
[
"MIT"
] | 9
|
2020-06-05T19:51:33.000Z
|
2022-03-11T23:40:25.000Z
|
werobot/utils.py
|
Milleree/WeRoBot
|
f9777f792d55ae70e7262f13e6e3f3667a167036
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import random
import re
import string
import time
from functools import wraps
from hashlib import sha1
import six
try:
from secrets import choice
except ImportError:
from random import choice
string_types = (six.string_types, six.text_type, six.binary_type)
re_type = type(re.compile("regex_test"))
def get_signature(token, timestamp, nonce, *args):
sign = [token, timestamp, nonce] + list(args)
sign.sort()
sign = to_binary(''.join(sign))
return sha1(sign).hexdigest()
def check_signature(token, timestamp, nonce, signature):
if not (token and timestamp and nonce and signature):
return False
sign = get_signature(token, timestamp, nonce)
return sign == signature
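# Usage sketch with made-up values: WeChat passes timestamp, nonce and signature as
# query parameters, and the token is the one configured on the WeChat side.
#   check_signature('my_token', '1577836800', 'nonce123', signature_from_request)
# returns True only when the supplied signature equals the sha1 hex digest of the
# sorted, concatenated (token, timestamp, nonce) strings computed by get_signature.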
def check_token(token):
return re.match('^[A-Za-z0-9]{3,32}$', token)
def cached_property(method):
prop_name = '_{}'.format(method.__name__)
@wraps(method)
def wrapped_func(self, *args, **kwargs):
if not hasattr(self, prop_name):
setattr(self, prop_name, method(self, *args, **kwargs))
return getattr(self, prop_name)
return property(wrapped_func)
def to_text(value, encoding="utf-8"):
if isinstance(value, six.text_type):
return value
if isinstance(value, six.binary_type):
return value.decode(encoding)
return six.text_type(value)
def to_binary(value, encoding="utf-8"):
if isinstance(value, six.binary_type):
return value
if isinstance(value, six.text_type):
return value.encode(encoding)
return six.binary_type(value)
def is_string(value):
return isinstance(value, string_types)
def byte2int(s, index=0):
"""Get the ASCII int value of a character in a string.
:param s: a string
:param index: the position of desired character
:return: ASCII int value
"""
if six.PY2:
return ord(s[index])
return s[index]
def generate_token(length=''):
if not length:
length = random.randint(3, 32)
length = int(length)
assert 3 <= length <= 32
letters = string.ascii_letters + string.digits
return ''.join(choice(letters) for _ in range(length))
def json_loads(s):
s = to_text(s)
return json.loads(s)
def json_dumps(d):
return json.dumps(d)
def pay_sign_dict(
appid,
pay_sign_key,
add_noncestr=True,
add_timestamp=True,
add_appid=True,
**kwargs
):
"""
Sign payment parameters.
"""
assert pay_sign_key, "PAY SIGN KEY IS EMPTY"
if add_appid:
kwargs.update({'appid': appid})
if add_noncestr:
kwargs.update({'noncestr': generate_token()})
if add_timestamp:
kwargs.update({'timestamp': int(time.time())})
params = kwargs.items()
_params = [
(k.lower(), v) for k, v in kwargs.items() if k.lower() != "appid"
]
_params += [('appid', appid), ('appkey', pay_sign_key)]
_params.sort()
sign = '&'.join(["%s=%s" % (str(p[0]), str(p[1]))
for p in _params]).encode("utf-8")
sign = sha1(sign).hexdigest()
sign_type = 'SHA1'
return dict(params), sign, sign_type
def make_error_page(url):
with io.open(
os.path.join(os.path.dirname(__file__), 'contrib/error.html'),
'r',
encoding='utf-8'
) as error_page:
return error_page.read().replace('{url}', url)
def is_regex(value):
return isinstance(value, re_type)
| 22.403846
| 73
| 0.645207
| 480
| 3,495
| 4.539583
| 0.2875
| 0.041303
| 0.020193
| 0.036714
| 0.117485
| 0.089032
| 0.089032
| 0.089032
| 0
| 0
| 0
| 0.009235
| 0.225465
| 3,495
| 155
| 74
| 22.548387
| 0.795715
| 0.050072
| 0
| 0.058824
| 0
| 0
| 0.04418
| 0
| 0
| 0
| 0
| 0
| 0.019608
| 1
| 0.147059
| false
| 0
| 0.137255
| 0.039216
| 0.490196
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8ce25aff69e17f6f7281d206c301403a98d23f
| 3,208
|
py
|
Python
|
src/tango_scaling_test/TestDeviceServer/__main__.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | null | null | null |
src/tango_scaling_test/TestDeviceServer/__main__.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | null | null | null |
src/tango_scaling_test/TestDeviceServer/__main__.py
|
rtobar/sdp-prototype
|
9f1527b884bf80daa509a7fe3722160c77260f4f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Test Tango device server for use with scaling tests."""
import sys
import time
import argparse
import tango
from tango.server import run
from TestDevice import TestDevice
def init_callback():
"""Report server start up times.
This callback is executed post server initialisation.
"""
# pylint: disable=global-statement
global START_TIME
db = tango.Database()
elapsed = time.time() - START_TIME
list_devices()
exported_devices = list(db.get_device_exported('test/*'))
num_devices = len(exported_devices)
file = open('results.txt', 'a')
file.write(',{},{}\n'.format(elapsed, elapsed / num_devices))
print('>> Time taken to start devices: {:.4f} s ({:.4f} s/dev)'
.format(elapsed, elapsed / num_devices))
def delete_server():
"""Delete the TestDeviceServer from the tango db."""
db = tango.Database()
db.set_timeout_millis(50000)
server = 'TestDeviceServer/1'
server_list = list(db.get_server_list(server))
if server in server_list:
start_time = time.time()
db.delete_server('TestDeviceServer/1')
print('- Delete server: {:.4f} s'.format(time.time() - start_time))
def register(num_devices):
"""Register devices in the tango db."""
db = tango.Database()
device_info = tango.DbDevInfo()
device_info.server = 'TestDeviceServer/1'
# pylint: disable=protected-access
device_info._class = 'TestDevice'
start_time = time.time()
for device_id in range(num_devices):
device_info.name = 'test/test_device/{:05d}'.format(device_id)
db.add_device(device_info)
elapsed = time.time() - start_time
file = open('results.txt', 'a')
file.write('{},{},{}'.format(num_devices, elapsed, elapsed/num_devices))
print('- Register devices: {:.4f} s ({:.4f} s/device)'
.format(elapsed, elapsed / num_devices))
def list_devices():
"""List tango devices associated with the TestDeviceServer."""
db = tango.Database()
server_instance = 'TestDeviceServer/1'
device_class = 'TestDevice'
devices = list(db.get_device_name(server_instance, device_class))
print('- No. registered devices: {}'.format(len(devices)))
exported_devices = list(db.get_device_exported('test/*'))
print('- No. running devices: {}'.format(len(exported_devices)))
def main(args=None, **kwargs):
"""Run (start) the device server."""
run([TestDevice], verbose=True, msg_stream=sys.stdout,
post_init_callback=init_callback, raises=False,
args=args, **kwargs)
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Device registration time.')
PARSER.add_argument('num_devices', metavar='N', type=int,
default=1, nargs='?',
help='Number of devices to start.')
ARGS = PARSER.parse_args()
delete_server()
time.sleep(0.5)
list_devices()
print('* Registering {} devices'.format(ARGS.num_devices))
register(ARGS.num_devices)
list_devices()
print('* Starting server ...')
sys.argv = ['TestDeviceServer', '1', '-v4']
START_TIME = time.time()
main()
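# Example invocation (a sketch; the exact command depends on how the package is laid
# out on the path):
#   python3 -m TestDeviceServer 16
# registers 16 test devices, starts the device server and appends startup timings
# to results.txt.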
| 29.981308
| 77
| 0.65586
| 395
| 3,208
| 5.149367
| 0.303797
| 0.054081
| 0.029499
| 0.047198
| 0.211406
| 0.132743
| 0.075713
| 0.048181
| 0.048181
| 0
| 0
| 0.008956
| 0.199501
| 3,208
| 106
| 78
| 30.264151
| 0.7831
| 0.129676
| 0
| 0.220588
| 0
| 0
| 0.176064
| 0.008367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.088235
| 0
| 0.161765
| 0.102941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8d44634b296be16e3e3fe11b62e194bcce203d
| 14,955
|
py
|
Python
|
test/test_pipeline.py
|
ParikhKadam/haystack
|
8a57f6b16af0bdd41dc02bf1200e0adbdf1da39b
|
[
"Apache-2.0"
] | 1
|
2021-08-04T09:06:43.000Z
|
2021-08-04T09:06:43.000Z
|
test/test_pipeline.py
|
jacksbox/haystack
|
65f1da00cc4b6757752dafb8bf756531fad46dd0
|
[
"Apache-2.0"
] | null | null | null |
test/test_pipeline.py
|
jacksbox/haystack
|
65f1da00cc4b6757752dafb8bf756531fad46dd0
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
import pytest
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \
DocumentSearchPipeline, RootNode
from haystack.retriever.dense import DensePassageRetriever
from haystack.retriever.sparse import ElasticsearchRetriever
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_load_yaml(document_store_with_docs):
# test correct load of indexing pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"),
pipeline_name="indexing_pipeline")
pipeline.run(file_path=Path("samples/pdf/sample_pdf_1.pdf"), top_k_retriever=10, top_k_reader=3)
# test correct load of query pipeline from yaml
pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="query_pipeline")
prediction = pipeline.run(query="Who made the PDF specification?", top_k_retriever=10, top_k_reader=3)
assert prediction["query"] == "Who made the PDF specification?"
assert prediction["answers"][0]["answer"] == "Adobe Systems"
# test invalid pipeline name
with pytest.raises(Exception):
Pipeline.load_from_yaml(path=Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="invalid")
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever_with_docs, document_store_with_docs", [("elasticsearch", "elasticsearch")], indirect=True
)
def test_graph_creation(reader, retriever_with_docs, document_store_with_docs):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])
with pytest.raises(AssertionError):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])
with pytest.raises(Exception):
pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])
with pytest.raises(Exception):
pipeline = Pipeline()
pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Who lives in Berlin?"
assert prediction["answers"][0]["answer"] == "Carla"
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
assert len(prediction["answers"]) == 3
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=5)
assert prediction["answers"][0]["offset_start"] == 11
assert prediction["answers"][0]["offset_end"] == 16
start = prediction["answers"][0]["offset_start"]
end = prediction["answers"][0]["offset_end"]
assert prediction["answers"][0]["context"][start:end] == prediction["answers"][0]["answer"]
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
query = "testing finder"
prediction = pipeline.run(query=query, top_k_retriever=1, top_k_reader=1)
assert prediction is not None
assert len(prediction["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_faq_pipeline(retriever, document_store):
documents = [
{"text": "How to test module-1?", 'meta': {"source": "wiki1", "answer": "Using tests for module-1"}},
{"text": "How to test module-2?", 'meta': {"source": "wiki2", "answer": "Using tests for module-2"}},
{"text": "How to test module-3?", 'meta': {"source": "wiki3", "answer": "Using tests for module-3"}},
{"text": "How to test module-4?", 'meta': {"source": "wiki4", "answer": "Using tests for module-4"}},
{"text": "How to test module-5?", 'meta': {"source": "wiki5", "answer": "Using tests for module-5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = FAQPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=3)
assert len(output["answers"]) == 3
assert output["answers"][0]["query"].startswith("How to")
assert output["answers"][0]["answer"].startswith("Using tests")
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
"retriever,document_store",
[("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
indirect=True,
)
def test_document_search_pipeline(retriever, document_store):
documents = [
{"text": "Sample text for document-1", 'meta': {"source": "wiki1"}},
{"text": "Sample text for document-2", 'meta': {"source": "wiki2"}},
{"text": "Sample text for document-3", 'meta': {"source": "wiki3"}},
{"text": "Sample text for document-4", 'meta': {"source": "wiki4"}},
{"text": "Sample text for document-5", 'meta': {"source": "wiki5"}},
]
document_store.write_documents(documents)
document_store.update_embeddings(retriever)
pipeline = DocumentSearchPipeline(retriever=retriever)
output = pipeline.run(query="How to test this?", top_k_retriever=4)
assert len(output.get('documents', [])) == 4
if isinstance(document_store, ElasticsearchDocumentStore):
output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
assert len(output["documents"]) == 1
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):
base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
pipeline = TranslationWrapperPipeline(
input_translator=de_to_en_translator,
output_translator=en_to_de_translator,
pipeline=base_pipeline
)
prediction = pipeline.run(query="Wer lebt in Berlin?", top_k_retriever=10, top_k_reader=3)
assert prediction is not None
assert prediction["query"] == "Wer lebt in Berlin?"
assert "Carla" in prediction["answers"][0]["answer"]
assert prediction["answers"][0]["probability"] <= 1
assert prediction["answers"][0]["probability"] >= 0
assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_join_document_pipeline(document_store_with_docs, reader):
es = ElasticsearchRetriever(document_store=document_store_with_docs)
dpr = DensePassageRetriever(
document_store=document_store_with_docs,
query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
use_gpu=False,
)
document_store_with_docs.update_embeddings(dpr)
query = "Where does Carla lives?"
# test merge without weights
join_node = JoinDocuments(join_mode="merge")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test merge with weights
join_node = JoinDocuments(join_mode="merge", weights=[1000, 1], top_k_join=2)
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert results["documents"][0].score > 1000
assert len(results["documents"]) == 2
# test concatenate
join_node = JoinDocuments(join_mode="concatenate")
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
results = p.run(query=query)
assert len(results["documents"]) == 3
# test join_node with reader
join_node = JoinDocuments()
p = Pipeline()
p.add_node(component=es, name="R1", inputs=["Query"])
p.add_node(component=dpr, name="R2", inputs=["Query"])
p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
p.add_node(component=reader, name="Reader", inputs=["Join"])
results = p.run(query=query)
assert results["answers"][0]["answer"] == "Berlin"
def test_parallel_paths_in_pipeline_graph():
class A(RootNode):
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
kwargs["output"] = kwargs["inputs"][0]["output"] + kwargs["inputs"][1]["output"]
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="E", component=E(), inputs=["C"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E"])
output = pipeline.run(query="test")
assert output["output"] == "ABDABCE"
pipeline = Pipeline()
pipeline.add_node(name="A", component=A(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A"])
pipeline.add_node(name="C", component=C(), inputs=["B"])
pipeline.add_node(name="D", component=D(), inputs=["B"])
pipeline.add_node(name="E", component=JoinNode(), inputs=["C", "D"])
output = pipeline.run(query="test")
assert output["output"] == "ABCABD"
def test_parallel_paths_in_pipeline_graph_with_branching():
class AWithOutput1(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_1"
class AWithOutput2(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_2"
class AWithOutputAll(RootNode):
outgoing_edges = 2
def run(self, **kwargs):
kwargs["output"] = "A"
return kwargs, "output_all"
class B(RootNode):
def run(self, **kwargs):
kwargs["output"] += "B"
return kwargs, "output_1"
class C(RootNode):
def run(self, **kwargs):
kwargs["output"] += "C"
return kwargs, "output_1"
class D(RootNode):
def run(self, **kwargs):
kwargs["output"] += "D"
return kwargs, "output_1"
class E(RootNode):
def run(self, **kwargs):
kwargs["output"] += "E"
return kwargs, "output_1"
class JoinNode(RootNode):
def run(self, **kwargs):
if kwargs.get("inputs"):
kwargs["output"] = ""
for input_dict in kwargs["inputs"]:
kwargs["output"] += (input_dict["output"])
return kwargs, "output_1"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput1(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ABEABD"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutput2(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "AC"
pipeline = Pipeline()
pipeline.add_node(name="A", component=AWithOutputAll(), inputs=["Query"])
pipeline.add_node(name="B", component=B(), inputs=["A.output_1"])
pipeline.add_node(name="C", component=C(), inputs=["A.output_2"])
pipeline.add_node(name="D", component=E(), inputs=["B"])
pipeline.add_node(name="E", component=D(), inputs=["B"])
pipeline.add_node(name="F", component=JoinNode(), inputs=["D", "E", "C"])
output = pipeline.run(query="test")
assert output["output"] == "ACABEABD"
| 42.126761
| 119
| 0.664527
| 1,844
| 14,955
| 5.232104
| 0.106833
| 0.0341
| 0.052861
| 0.066957
| 0.784411
| 0.689677
| 0.661795
| 0.633603
| 0.598673
| 0.598673
| 0
| 0.011612
| 0.170779
| 14,955
| 354
| 120
| 42.245763
| 0.76639
| 0.014443
| 0
| 0.578014
| 0
| 0
| 0.187873
| 0.022814
| 0
| 0
| 0
| 0
| 0.141844
| 1
| 0.088652
| false
| 0.010638
| 0.021277
| 0
| 0.219858
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a8db025d17d202dce4f03767b8394c4ff63db8d
| 14,254
|
py
|
Python
|
src/telr/TELR_assembly.py
|
dominik-handler/TELR
|
3e34e54fc959c13fa45dc911facf0d5179fbb34b
|
[
"BSD-2-Clause"
] | 22
|
2020-09-22T21:21:17.000Z
|
2022-01-21T17:52:12.000Z
|
src/telr/TELR_assembly.py
|
dominik-handler/TELR
|
3e34e54fc959c13fa45dc911facf0d5179fbb34b
|
[
"BSD-2-Clause"
] | 6
|
2021-05-07T13:52:30.000Z
|
2022-03-27T18:21:10.000Z
|
src/telr/TELR_assembly.py
|
dominik-handler/TELR
|
3e34e54fc959c13fa45dc911facf0d5179fbb34b
|
[
"BSD-2-Clause"
] | 6
|
2020-10-01T12:47:19.000Z
|
2021-08-13T14:38:11.000Z
|
import sys
import os
import subprocess
import shutil
import time
import logging
from Bio import SeqIO
from multiprocessing import Pool
import pysam
from telr.TELR_utility import mkdir, check_exist, format_time
def get_local_contigs(
assembler,
polisher,
contig_dir,
vcf_parsed,
out,
sample_name,
bam,
raw_reads,
thread,
presets,
polish_iterations,
):
"""Perform local assembly using reads from parsed VCF file in parallel"""
# Prepare reads used for local assembly and polishing
sv_reads_dir = os.path.join(out, "sv_reads")
try:
prep_assembly_inputs(
vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir, read_type="sv"
)
except Exception as e:
print(e)
print("Prepare local assembly input data failed, exiting...")
sys.exit(1)
mkdir(contig_dir)
k = 0
asm_pa_list = []
with open(vcf_parsed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
contig_name = "_".join([entry[0], entry[1], entry[2]])
# rename variant reads
sv_reads = sv_reads_dir + "/contig" + str(k)
sv_reads_rename = sv_reads_dir + "/" + contig_name + ".reads.fa"
os.rename(sv_reads, sv_reads_rename)
thread_asm = 1
asm_pa = [
sv_reads_rename,
contig_dir,
contig_name,
thread_asm,
presets,
assembler,
polisher,
polish_iterations,
]
asm_pa_list.append(asm_pa)
k = k + 1
# run assembly in parallel
logging.info("Perform local assembly of non-reference TE loci...")
start_time = time.time()
try:
pool = Pool(processes=thread)
contig_list = pool.map(run_assembly_polishing, asm_pa_list)
pool.close()
pool.join()
except Exception as e:
print(e)
print("Local assembly failed, exiting...")
sys.exit(1)
proc_time = time.time() - start_time
# merge all contigs
assembly_passed_loci = set()
merged_contigs = os.path.join(out, sample_name + ".contigs.fa")
with open(merged_contigs, "w") as merged_output_handle:
for contig in contig_list:
if check_exist(contig):
contig_name = os.path.basename(contig).replace(".cns.fa", "")
assembly_passed_loci.add(contig_name)
parsed_contig = os.path.join(contig_dir, contig_name + ".cns.ctg1.fa")
with open(contig, "r") as input:
records = SeqIO.parse(input, "fasta")
for record in records:
if record.id == "ctg1" or record.id == "contig_1":
record.id = contig_name
record.description = "len=" + str(len(record.seq))
SeqIO.write(record, merged_output_handle, "fasta")
with open(parsed_contig, "w") as parsed_output_handle:
SeqIO.write(record, parsed_output_handle, "fasta")
logging.info("Local assembly finished in " + format_time(proc_time))
return merged_contigs, assembly_passed_loci
def run_assembly_polishing(args):
reads = args[0]
asm_dir = args[1]
contig_name = args[2]
thread = args[3]
presets = args[4]
assembler = args[5]
polisher = args[6]
polish_iterations = args[7]
# run assembly
if assembler == "wtdbg2":
asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets)
else:
asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets)
if not check_exist(asm_cns):
print("assembly failed")
return None
# run polishing
if polish_iterations > 0:
if polisher == "wtdbg2":
asm_cns = run_wtdbg2_polishing(
asm_cns, reads, thread, polish_iterations, presets
)
else:
asm_cns = run_flye_polishing(
asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
)
if check_exist(asm_cns):
return asm_cns
else:
return None
def run_flye_polishing(
asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
"""Run Flye polishing"""
if presets == "pacbio":
presets_flye = "--pacbio-raw"
else:
presets_flye = "--nano-raw"
tmp_out_dir = os.path.join(asm_dir, contig_name)
mkdir(tmp_out_dir)
try:
subprocess.call(
[
"flye",
"--polish-target",
asm_cns,
presets_flye,
reads,
"--out-dir",
tmp_out_dir,
"--thread",
str(thread),
"--iterations",
str(polish_iterations),
]
)
except Exception as e:
print(e)
print("Polishing failed, exiting...")
return None
# rename contig file
polished_contig = os.path.join(
tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
)
if check_exist(polished_contig):
os.rename(polished_contig, asm_cns)
shutil.rmtree(tmp_out_dir)
return asm_cns
else:
return None
def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets):
"""Run wtdbg2 polishing"""
if presets == "pacbio":
presets_minimap2 = "map-pb"
else:
presets_minimap2 = "map-ont"
# polish consensus
threads = str(min(threads, 4))
bam = asm_cns + ".bam"
k = 0
while True:
# align reads to contigs
command = (
"minimap2 -t "
+ threads
+ " -ax "
+ presets_minimap2
+ " -r2k "
+ asm_cns
+ " "
+ reads
+ " | samtools sort -@"
+ threads
+ " > "
+ bam
)
try:
subprocess.run(
command,
shell=True,
timeout=300,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
except subprocess.TimeoutExpired:
print("fail to map reads to contig: " + asm_cns)
return
# run wtpoa-cns to get polished contig
cns_tmp = asm_cns + ".tmp"
command = (
"samtools view -F0x900 "
+ bam
+ " | wtpoa-cns -t "
+ threads
+ " -d "
+ asm_cns
+ " -i - -fo "
+ cns_tmp
)
try:
subprocess.run(
command,
shell=True,
timeout=300,
stdout=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
)
except subprocess.TimeoutExpired:
print("fail to polish contig: " + asm_cns)
return
if check_exist(cns_tmp):
os.rename(cns_tmp, asm_cns)
os.remove(bam)
else:
break
k = k + 1
if k >= polish_iterations:
break
if check_exist(asm_cns):
return asm_cns
else:
print("polishing failed for " + asm_cns + "\n")
return None
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
"""Run Flye assembly"""
if presets == "pacbio":
presets_flye = "--pacbio-raw"
else:
presets_flye = "--nano-raw"
tmp_out_dir = os.path.join(asm_dir, contig_name)
mkdir(tmp_out_dir)
try:
subprocess.call(
[
"flye",
presets_flye,
sv_reads,
"--out-dir",
tmp_out_dir,
"--thread",
str(thread),
"--iterations",
"0",
]
)
except Exception as e:
print(e)
print("Assembly failed, exiting...")
return
# rename contigs
contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
if check_exist(contig_path):
os.rename(contig_path, contig_path_new)
# remove tmp files
shutil.rmtree(tmp_out_dir)
return contig_path_new
else:
print("assembly failed")
return None
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
"""Run wtdbg2 assembly"""
if presets == "pacbio":
presets_wtdbg2 = "rs"
else:
presets_wtdbg2 = "ont"
prefix = sv_reads.replace(".reads.fa", "")
try:
subprocess.run(
[
"wtdbg2",
"-x",
presets_wtdbg2,
"-q",
"-AS",
"1",
"-g",
"30k",
"-t",
str(thread),
"-i",
sv_reads,
"-fo",
prefix,
],
timeout=300,
)
except subprocess.TimeoutExpired:
print("fail to build contig layout for contig: " + contig_name)
return
except Exception as e:
print(e)
print("wtdbg2 failed, exiting...")
return None
# derive consensus
contig_layout = prefix + ".ctg.lay.gz"
if check_exist(contig_layout):
cns_thread = str(min(thread, 4))
consensus = prefix + ".cns.fa"
try:
subprocess.run(
[
"wtpoa-cns",
"-q",
"-t",
cns_thread,
"-i",
contig_layout,
"-fo",
consensus,
],
timeout=300,
)
except subprocess.TimeoutExpired:
print("fail to assemble contig: " + contig_name)
return None
if check_exist(consensus):
consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
os.rename(consensus, consensus_rename)
return consensus_rename
else:
return None
def prep_assembly_inputs(
vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
"""Prepare reads for local assembly"""
# logging.info("Prepare reads for local assembly")
if read_type == "sv": # TODO: figure out what this does
# extract read IDs
read_ids = os.path.join(out, sample_name + ".id")
with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
for line in input:
entry = line.replace("\n", "").split("\t")
read_list = entry[8].split(",")
for read in read_list:
output.write(read + "\n")
else: # TODO: think about using this for assembly, filter for cigar reads
window = 1000
samfile = pysam.AlignmentFile(bam, "rb")
read_ids = os.path.join(out, sample_name + ".id")
vcf_parsed_new = vcf_parsed + ".new"
with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
vcf_parsed_new, "w"
) as VCF:
for line in input:
entry = line.replace("\n", "").split("\t")
# get sniffles read list
read_list = entry[8].split(",")
reads_sniffles = set(read_list)
ins_chr = entry[0]
ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
start = ins_breakpoint - window
end = ins_breakpoint + window
reads = set()
# coverage = 0
for read in samfile.fetch(ins_chr, start, end):
reads.add(read.query_name)
for read in reads:
output.write(read + "\n")
# write
out_line = line.replace("\n", "") + "\t" + str(len(reads))
VCF.write(out_line + "\n")
vcf_parsed = vcf_parsed_new
# generate unique ID list
read_ids_unique = read_ids + ".unique"
command = "cat " + read_ids + " | sort | uniq"
with open(read_ids_unique, "w") as output:
subprocess.call(command, stdout=output, shell=True)
# filter raw reads using read list
subset_fa = os.path.join(out, sample_name + ".subset.fa")
command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
with open(subset_fa, "w") as output:
subprocess.call(command, stdout=output, shell=True)
# reorder reads
subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
extract_reads(subset_fa, read_ids, subset_fa_reorder)
# separate reads into multiple files, using csplit
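    # Each read occupies two lines (header + sequence) in the reordered FASTA,
    # so the cumulative offsets collected in "m" mark the first line of each
    # locus' reads and are passed to csplit as split points.
    # (Descriptive comment added for clarity.)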
mkdir(reads_dir)
csplit_prefix = reads_dir + "/contig"
m = []
k = 1
with open(vcf_parsed, "r") as input:
for line in input:
entry = line.replace("\n", "").split("\t")
if read_type == "sv":
k = k + 2 * (len(entry[8].split(",")))
else:
k = k + 2 * int(entry[14])
m.append(k)
if len(m) == 1:
subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
elif len(m) == 0:
print("No insertion detected, exiting...")
else:
m = m[:-1]
index = " ".join(str(i) for i in m)
command = (
"csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
)
subprocess.call(command, shell=True)
# remove tmp files
os.remove(read_ids)
os.remove(read_ids_unique)
os.remove(subset_fa)
os.remove(subset_fa_reorder)
def extract_reads(reads, id_list, out):
    """Extract reads from a FASTA file using a read ID list"""
    record_dict = SeqIO.index(reads, "fasta")
    with open(out, "wb") as output_handle, open(id_list, "r") as id_file:
        for entry in id_file:
            entry = entry.replace("\n", "")
            output_handle.write(record_dict.get_raw(entry))
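# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration only; the file paths and values
# below are hypothetical and this block is not part of the original pipeline).
# It shows how a single locus could be assembled and polished outside the
# Pool-based driver above.
if __name__ == "__main__":
    demo_args = [
        "chr1_100_INS.reads.fa",  # hypothetical reads supporting one insertion
        "contig_assembly",        # output directory for the assembler
        "chr1_100_INS",           # contig/locus name
        1,                        # threads for this single job
        "ont",                    # presets: "ont" or "pacbio"
        "wtdbg2",                 # assembler: "wtdbg2", anything else -> flye
        "wtdbg2",                 # polisher: "wtdbg2", anything else -> flye
        1,                        # polishing iterations
    ]
    print(run_assembly_polishing(demo_args))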
| 30.32766
| 88
| 0.525116
| 1,601
| 14,254
| 4.480325
| 0.15178
| 0.020075
| 0.023561
| 0.022306
| 0.352293
| 0.300293
| 0.269483
| 0.249268
| 0.212463
| 0.185418
| 0
| 0.009323
| 0.367897
| 14,254
| 469
| 89
| 30.392324
| 0.786792
| 0.059913
| 0
| 0.428934
| 0
| 0
| 0.084258
| 0
| 0
| 0
| 0
| 0.002132
| 0
| 1
| 0.020305
| false
| 0.007614
| 0.025381
| 0
| 0.093909
| 0.045685
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a900957322aa8d59dab3c2935590611098dad34
| 28,015
|
py
|
Python
|
pygmt/tests/test_clib.py
|
aliciaha1997/pygmt
|
a10af5d8deb3bf3090eab4b6492bcf8cf722cb71
|
[
"BSD-3-Clause"
] | null | null | null |
pygmt/tests/test_clib.py
|
aliciaha1997/pygmt
|
a10af5d8deb3bf3090eab4b6492bcf8cf722cb71
|
[
"BSD-3-Clause"
] | null | null | null |
pygmt/tests/test_clib.py
|
aliciaha1997/pygmt
|
a10af5d8deb3bf3090eab4b6492bcf8cf722cb71
|
[
"BSD-3-Clause"
] | 1
|
2021-11-03T07:47:18.000Z
|
2021-11-03T07:47:18.000Z
|
# pylint: disable=protected-access
"""
Test the wrappers for the C API.
"""
import os
from contextlib import contextmanager
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
import xarray as xr
from packaging.version import Version
from pygmt import Figure, clib
from pygmt.clib.conversion import dataarray_to_matrix
from pygmt.clib.session import FAMILIES, VIAS
from pygmt.exceptions import (
GMTCLibError,
GMTCLibNoSessionError,
GMTInvalidInput,
GMTVersionError,
)
from pygmt.helpers import GMTTempFile
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
with clib.Session() as _lib:
gmt_version = Version(_lib.info["version"])
@contextmanager
def mock(session, func, returns=None, mock_func=None):
"""
Mock a GMT C API function to make it always return a given value.
Used to test that exceptions are raised when API functions fail by
producing a NULL pointer as output or non-zero status codes.
Needed because it's not easy to get some API functions to fail without
inducing a Segmentation Fault (which is a good thing because libgmt usually
only fails with errors).
"""
if mock_func is None:
def mock_api_function(*args): # pylint: disable=unused-argument
"""
A mock GMT API function that always returns a given value.
"""
return returns
mock_func = mock_api_function
get_libgmt_func = session.get_libgmt_func
def mock_get_libgmt_func(name, argtypes=None, restype=None):
"""
Return our mock function.
"""
if name == func:
return mock_func
return get_libgmt_func(name, argtypes, restype)
setattr(session, "get_libgmt_func", mock_get_libgmt_func)
yield
setattr(session, "get_libgmt_func", get_libgmt_func)
def test_getitem():
"""
Test that I can get correct constants from the C lib.
"""
ses = clib.Session()
assert ses["GMT_SESSION_EXTERNAL"] != -99999
assert ses["GMT_MODULE_CMD"] != -99999
assert ses["GMT_PAD_DEFAULT"] != -99999
assert ses["GMT_DOUBLE"] != -99999
with pytest.raises(GMTCLibError):
ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement
def test_create_destroy_session():
"""
Test that create and destroy session are called without errors.
"""
# Create two session and make sure they are not pointing to the same memory
session1 = clib.Session()
session1.create(name="test_session1")
assert session1.session_pointer is not None
session2 = clib.Session()
session2.create(name="test_session2")
assert session2.session_pointer is not None
assert session2.session_pointer != session1.session_pointer
session1.destroy()
session2.destroy()
# Create and destroy a session twice
ses = clib.Session()
for __ in range(2):
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
ses.create("session1")
assert ses.session_pointer is not None
ses.destroy()
with pytest.raises(GMTCLibNoSessionError):
ses.session_pointer # pylint: disable=pointless-statement
def test_create_session_fails():
"""
Check that an exception is raised when failing to create a session.
"""
ses = clib.Session()
with mock(ses, "GMT_Create_Session", returns=None):
with pytest.raises(GMTCLibError):
ses.create("test-session-name")
# Should fail if trying to create a session before destroying the old one.
ses.create("test1")
with pytest.raises(GMTCLibError):
ses.create("test2")
def test_destroy_session_fails():
"""
Fail to destroy session when given bad input.
"""
ses = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
ses.destroy()
ses.create("test-session")
with mock(ses, "GMT_Destroy_Session", returns=1):
with pytest.raises(GMTCLibError):
ses.destroy()
ses.destroy()
def test_call_module():
"""
Run a command to see if call_module works.
"""
data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
out_fname = "test_call_module.txt"
with clib.Session() as lib:
with GMTTempFile() as out_fname:
lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
assert os.path.exists(out_fname.name)
output = out_fname.read().strip()
assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"
def test_call_module_invalid_arguments():
"""
Fails for invalid module arguments.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("info", "bogus-data.bla")
def test_call_module_invalid_name():
"""
Fails when given bad input.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.call_module("meh", "")
def test_call_module_error_message():
"""
    Check that the GMT error message was captured.
"""
with clib.Session() as lib:
try:
lib.call_module("info", "bogus-data.bla")
except GMTCLibError as error:
assert "Module 'info' failed with status code" in str(error)
assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)
def test_method_no_session():
"""
Fails when not in a session.
"""
# Create an instance of Session without "with" so no session is created.
lib = clib.Session()
with pytest.raises(GMTCLibNoSessionError):
lib.call_module("gmtdefaults", "")
with pytest.raises(GMTCLibNoSessionError):
lib.session_pointer # pylint: disable=pointless-statement
def test_parse_constant_single():
"""
Parsing a single family argument correctly.
"""
lib = clib.Session()
for family in FAMILIES:
parsed = lib._parse_constant(family, valid=FAMILIES)
assert parsed == lib[family]
def test_parse_constant_composite():
"""
Parsing a composite constant argument (separated by |) correctly.
"""
lib = clib.Session()
test_cases = ((family, via) for family in FAMILIES for via in VIAS)
for family, via in test_cases:
composite = "|".join([family, via])
expected = lib[family] + lib[via]
parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS)
assert parsed == expected
def test_parse_constant_fails():
"""
Check if the function fails when given bad input.
"""
lib = clib.Session()
test_cases = [
"SOME_random_STRING",
"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR",
"GMT_IS_DATASET|NOT_A_PROPER_VIA",
"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX",
"NOT_A_PROPER_FAMILY|ALSO_INVALID",
]
for test_case in test_cases:
with pytest.raises(GMTInvalidInput):
lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)
# Should also fail if not given valid modifiers but is using them anyway.
# This should work...
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS
)
# But this shouldn't.
with pytest.raises(GMTInvalidInput):
lib._parse_constant(
"GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None
)
def test_create_data_dataset():
"""
Run the function to make sure it doesn't fail badly.
"""
with clib.Session() as lib:
# Dataset from vectors
data_vector = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_VECTOR",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0], # columns, rows, layers, dtype
)
# Dataset from matrices
data_matrix = lib.create_data(
family="GMT_IS_DATASET|GMT_VIA_MATRIX",
geometry="GMT_IS_POINT",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
assert data_vector != data_matrix
def test_create_data_grid_dim():
"""
Create a grid ignoring range and inc.
"""
with clib.Session() as lib:
# Grids from matrices using dim
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[10, 20, 1, 0],
)
def test_create_data_grid_range():
"""
Create a grid specifying range and inc instead of dim.
"""
with clib.Session() as lib:
        # Grids from matrices using range and inc
lib.create_data(
family="GMT_IS_GRID|GMT_VIA_MATRIX",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
def test_create_data_fails():
"""
Check that create_data raises exceptions for invalid input and output.
"""
# Passing in invalid mode
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="Not_a_valid_mode",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# Passing in invalid geometry
with pytest.raises(GMTInvalidInput):
with clib.Session() as lib:
lib.create_data(
family="GMT_IS_GRID",
geometry="Not_a_valid_geometry",
mode="GMT_CONTAINER_ONLY",
dim=[0, 0, 1, 0],
ranges=[150.0, 250.0, -20.0, 20.0],
inc=[0.1, 0.2],
)
# If the data pointer returned is None (NULL pointer)
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
with mock(lib, "GMT_Create_Data", returns=None):
lib.create_data(
family="GMT_IS_DATASET",
geometry="GMT_IS_SURFACE",
mode="GMT_CONTAINER_ONLY",
dim=[11, 10, 2, 0],
)
def test_virtual_file():
"""
Test passing in data via a virtual file with a Dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (5, 3)
for dtype in dtypes:
with clib.Session() as lib:
family = "GMT_IS_DATASET|GMT_VIA_MATRIX"
geometry = "GMT_IS_POINT"
dataset = lib.create_data(
family=family,
geometry=geometry,
mode="GMT_CONTAINER_ONLY",
dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype
)
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
lib.put_matrix(dataset, matrix=data)
# Add the dataset to a virtual file and pass it along to gmt info
vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset)
with lib.open_virtual_file(*vfargs) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtual_file_fails():
"""
    Check that opening and closing virtual files raises an exception for
    non-zero return codes.
"""
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IN|GMT_IS_REFERENCE",
None,
)
# Mock Open_VirtualFile to test the status check when entering the context.
# If the exception is raised, the code won't get to the closing of the
# virtual file.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
print("Should not get to this code")
# Test the status check when closing the virtual file
# Mock the opening to return 0 (success) so that we don't open a file that
# we won't close later.
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
lib, "GMT_Close_VirtualFile", returns=1
):
with pytest.raises(GMTCLibError):
with lib.open_virtual_file(*vfargs):
pass
print("Shouldn't get to this code either")
def test_virtual_file_bad_direction():
"""
Test passing an invalid direction argument.
"""
with clib.Session() as lib:
vfargs = (
"GMT_IS_DATASET|GMT_VIA_MATRIX",
"GMT_IS_POINT",
"GMT_IS_GRID", # The invalid direction argument
0,
)
with pytest.raises(GMTInvalidInput):
with lib.open_virtual_file(*vfargs):
print("This should have failed")
def test_virtualfile_from_vectors():
"""
Test the automation for transforming vectors to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 10
for dtype in dtypes:
x = np.arange(size, dtype=dtype)
y = np.arange(size, size * 2, 1, dtype=dtype)
z = np.arange(size * 2, size * 3, 1, dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
"""
Test passing in one column with string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
assert output == expected
@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
"""
Test passing in two columns of string or object dtype into virtual file
dataset.
"""
size = 5
x = np.arange(size, dtype=np.int32)
y = np.arange(size, size * 2, 1, dtype=np.int32)
strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
with GMTTempFile() as outfile:
lib.call_module("convert", f"{vfile} ->{outfile.name}")
output = outfile.read(keep_tabs=True)
expected = "".join(
f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
)
assert output == expected
def test_virtualfile_from_vectors_transpose():
"""
Test transforming matrix columns to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(*data.T) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
)
expected = "{}\n".format(bounds)
assert output == expected
def test_virtualfile_from_vectors_diff_size():
"""
Test the function fails for arrays of different sizes.
"""
x = np.arange(5)
y = np.arange(6)
with clib.Session() as lib:
with pytest.raises(GMTInvalidInput):
with lib.virtualfile_from_vectors(x, y):
print("This should have failed")
def test_virtualfile_from_matrix():
"""
Test transforming a matrix to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (7, 5)
for dtype in dtypes:
data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
assert output == expected
def test_virtualfile_from_matrix_slice():
"""
Test transforming a slice of a larger array to virtual file dataset.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
shape = (10, 6)
for dtype in dtypes:
full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
rows = 5
cols = 3
data = full_data[:rows, :cols]
with clib.Session() as lib:
with lib.virtualfile_from_matrix(data) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
)
expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds)
assert output == expected
def test_virtualfile_from_vectors_pandas():
"""
Pass vectors to a dataset using pandas Series.
"""
dtypes = "float32 float64 int32 int64 uint32 uint64".split()
size = 13
for dtype in dtypes:
data = pd.DataFrame(
data=dict(
x=np.arange(size, dtype=dtype),
y=np.arange(size, size * 2, 1, dtype=dtype),
z=np.arange(size * 2, size * 3, 1, dtype=dtype),
)
)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
[
"<{:.0f}/{:.0f}>".format(i.min(), i.max())
for i in (data.x, data.y, data.z)
]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_virtualfile_from_vectors_arraylike():
"""
Pass array-like vectors to a dataset.
"""
size = 13
x = list(range(0, size, 1))
y = tuple(range(size, size * 2, 1))
z = range(size * 2, size * 3, 1)
with clib.Session() as lib:
with lib.virtualfile_from_vectors(x, y, z) as vfile:
with GMTTempFile() as outfile:
lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
output = outfile.read(keep_tabs=True)
bounds = "\t".join(
["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)]
)
expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
assert output == expected
def test_extract_region_fails():
"""
Check that extract region fails if nothing has been plotted.
"""
Figure()
with pytest.raises(GMTCLibError):
with clib.Session() as lib:
lib.extract_region()
def test_extract_region_two_figures():
"""
Extract region should handle multiple figures existing at the same time.
"""
# Make two figures before calling extract_region to make sure that it's
# getting from the current figure, not the last figure.
fig1 = Figure()
region1 = np.array([0, 10, -20, -10])
fig1.coast(region=region1, projection="M6i", frame=True, land="black")
fig2 = Figure()
fig2.basemap(region="US.HI+r5", projection="M6i", frame=True)
# Activate the first figure and extract the region from it
# Use in a different session to avoid any memory problems.
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig1._name))
with clib.Session() as lib:
wesn1 = lib.extract_region()
npt.assert_allclose(wesn1, region1)
# Now try it with the second one
with clib.Session() as lib:
lib.call_module("figure", "{} -".format(fig2._name))
with clib.Session() as lib:
wesn2 = lib.extract_region()
npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))
def test_write_data_fails():
"""
Check that write data raises an exception for non-zero return codes.
"""
    # It's hard to make the C API function fail without causing a Segmentation
    # Fault. Can't test this by giving a bad file name because if output == "",
    # GMT will just write to stdout, and spaces are valid file names.
    # Use a mock instead just to exercise this part of the code.
with clib.Session() as lib:
with mock(lib, "GMT_Write_Data", returns=1):
with pytest.raises(GMTCLibError):
lib.write_data(
"GMT_IS_VECTOR",
"GMT_IS_POINT",
"GMT_WRITE_SET",
[1] * 6,
"some-file-name",
None,
)
def test_dataarray_to_matrix_works():
"""
Check that dataarray_to_matrix returns correct output.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flipud(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])
def test_dataarray_to_matrix_negative_x_increment():
"""
Check if dataarray_to_matrix returns correct output with flipped x.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=5, stop=9, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1)))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=0, stop=4, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=data)
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_negative_x_and_y_increment():
"""
Check that dataarray_to_matrix returns correct output with flipped x/y.
"""
data = np.diag(v=np.arange(3))
x = np.linspace(start=4, stop=0, num=3)
y = np.linspace(start=9, stop=5, num=3)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
matrix, region, inc = dataarray_to_matrix(grid)
npt.assert_allclose(actual=matrix, desired=np.fliplr(data))
npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])
npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])
def test_dataarray_to_matrix_dims_fails():
"""
Check that it fails for > 2 dims.
"""
# Make a 3D regular grid
data = np.ones((10, 12, 11), dtype="float32")
x = np.arange(11)
y = np.arange(12)
z = np.arange(10)
grid = xr.DataArray(data, coords=[("z", z), ("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_dataarray_to_matrix_inc_fails():
"""
Check that it fails for variable increments.
"""
data = np.ones((4, 5), dtype="float64")
x = np.linspace(0, 1, 5)
y = np.logspace(2, 3, 4)
grid = xr.DataArray(data, coords=[("y", y), ("x", x)])
with pytest.raises(GMTInvalidInput):
dataarray_to_matrix(grid)
def test_get_default():
"""
Make sure get_default works without crashing and gives reasonable results.
"""
with clib.Session() as lib:
assert lib.get_default("API_GRID_LAYOUT") in ["rows", "columns"]
assert int(lib.get_default("API_CORES")) >= 1
assert Version(lib.get_default("API_VERSION")) >= Version("6.2.0")
def test_get_default_fails():
"""
Make sure get_default raises an exception for invalid names.
"""
with clib.Session() as lib:
with pytest.raises(GMTCLibError):
lib.get_default("NOT_A_VALID_NAME")
def test_info_dict():
"""
Make sure the clib.Session.info dict is working.
"""
# Check if there are no errors or segfaults from getting all of the
# properties.
with clib.Session() as lib:
assert lib.info
    # Mock GMT_Get_Default to always return the same string
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Put 'bla' in the value buffer.
"""
value.value = b"bla"
return 0
ses = clib.Session()
ses.create("test-session")
with mock(ses, "GMT_Get_Default", mock_func=mock_defaults):
        # Make sure the info dictionary is not empty
assert ses.info
for key in ses.info:
assert ses.info[key] == "bla"
ses.destroy()
def test_fails_for_wrong_version():
"""
Make sure the clib.Session raises an exception if GMT is too old.
"""
# Mock GMT_Get_Default to return an old version
def mock_defaults(api, name, value): # pylint: disable=unused-argument
"""
Return an old version.
"""
if name == b"API_VERSION":
value.value = b"5.4.3"
else:
value.value = b"bla"
return 0
lib = clib.Session()
with mock(lib, "GMT_Get_Default", mock_func=mock_defaults):
with pytest.raises(GMTVersionError):
with lib:
assert lib.info["version"] != "5.4.3"
# Make sure the session is closed when the exception is raised.
with pytest.raises(GMTCLibNoSessionError):
assert lib.session_pointer
| 34.332108
| 85
| 0.60464
| 3,696
| 28,015
| 4.443182
| 0.126623
| 0.032152
| 0.030142
| 0.034161
| 0.563451
| 0.510961
| 0.46523
| 0.440324
| 0.419072
| 0.37876
| 0
| 0.021568
| 0.271783
| 28,015
| 815
| 86
| 34.374233
| 0.783393
| 0.179083
| 0
| 0.470019
| 0
| 0
| 0.111762
| 0.020874
| 0
| 0
| 0
| 0
| 0.088975
| 1
| 0.088975
| false
| 0.001934
| 0.025145
| 0
| 0.123791
| 0.007737
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9169fbe2dd0a7e667174a77f2109a3f57e8580
| 1,808
|
py
|
Python
|
Prime Factorization/prime_factorization_II.py
|
rayvantsahni/Let-us-Math
|
571ee70452feae0b15f37d46de658b0c0251bd3d
|
[
"MIT"
] | 2
|
2020-08-06T07:09:38.000Z
|
2020-09-12T02:32:23.000Z
|
Prime Factorization/prime_factorization_II.py
|
rayvantsahni/Math-is-Fun
|
571ee70452feae0b15f37d46de658b0c0251bd3d
|
[
"MIT"
] | null | null | null |
Prime Factorization/prime_factorization_II.py
|
rayvantsahni/Math-is-Fun
|
571ee70452feae0b15f37d46de658b0c0251bd3d
|
[
"MIT"
] | 1
|
2021-08-30T14:17:28.000Z
|
2021-08-30T14:17:28.000Z
|
def get_primes(n):
    primes = []  # stores the prime numbers within the range of the number
    sieve = [False] * (n + 1)  # True marks a number as composite (not prime)
    sieve[0] = sieve[1] = True  # 0 and 1 are not prime
    for i in range(2, n + 1):  # check every number in the range for primality
        if sieve[i]:  # already marked as composite
            continue  # skip numbers that are not prime
        primes.append(i)  # i is prime, record it
        for j in range(i ** 2, n + 1, i):  # mark multiples of i, starting from i squared
            sieve[j] = True  # multiples of a prime are composite
    return primes  # returns the list containing prime numbers
def get_factorization(n):
    prime_factors = []  # stores the prime factorization of the number
    for prime in get_primes(n):  # loop over the primes in ascending order
        while n != 1:  # keep dividing by the current prime until it no longer divides n
            if n % prime == 0:  # checks if the number is divisible by this prime
                prime_factors.append(prime)  # record the prime factor
                n //= prime  # reduce n (integer division keeps n an int)
            else:
                break  # move on to the next prime; the loops stop once n has been reduced to 1
    return prime_factors  # returns the list containing the prime factorization of the number
if __name__ == "__main__":
n = int(input("Enter a number: "))
print(get_factorization(n))
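# Worked example (comment added for illustration, not in the original file):
#   get_factorization(60)
#   -> 60 / 2 = 30, 30 / 2 = 15   (two factors of 2)
#   -> 15 / 3 = 5                 (one factor of 3)
#   -> 5 / 5 = 1                  (one factor of 5)
#   returns [2, 2, 3, 5]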
| 54.787879
| 198
| 0.672566
| 286
| 1,808
| 4.199301
| 0.318182
| 0.089925
| 0.045795
| 0.032473
| 0.121565
| 0.053289
| 0
| 0
| 0
| 0
| 0
| 0.009893
| 0.27323
| 1,808
| 32
| 199
| 56.5
| 0.90411
| 0.606195
| 0
| 0
| 0
| 0
| 0.034632
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a91ba22fcba12ba8237fcf117a449485cdd3de1
| 31,466
|
py
|
Python
|
pandas/core/indexes/range.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2017-12-14T19:50:52.000Z
|
2020-04-07T16:47:23.000Z
|
pandas/core/indexes/range.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-07-24T17:35:03.000Z
|
2021-07-24T17:35:03.000Z
|
pandas/core/indexes/range.py
|
mujtahidalam/pandas
|
526468c8fe6fc5157aaf2fce327c5ab2a3350f49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2018-01-26T08:33:54.000Z
|
2018-01-26T08:33:54.000Z
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(target):
# checks/conversions/roundings are delegated to general method
return super()._get_indexer(target, method=method, tolerance=tolerance)
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
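        # Illustrative comment (not in the original source): for RangeIndex(0, 10, 2),
        # get_indexer([0, 3, 4]) computes locs = [0, 3, 4] - 0, keeps only entries that
        # are non-negative multiples of the step and below stop, and returns [0, -1, 2].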
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
)
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
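        # Illustrative comment (not in the original source): intersecting
        # RangeIndex(0, 10, 2) with RangeIndex(0, 10, 3) keeps the common
        # elements {0, 6}, i.e. a RangeIndex whose step is lcm(2, 3) = 6.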
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
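        # Illustrative comment (not in the original source): _extended_gcd(4, 6)
        # walks r through 6, 4, 2, 0 and returns (2, -1, 1), i.e. gcd(4, 6) = 2
        # with 4*(-1) + 6*1 = 2.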
def _union(self, other: Index, sort):
"""
        Form the union of two Index objects and sort it if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self.rename(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
if not isinstance(overlap, RangeIndex):
# We won't end up with RangeIndex, so fall back
return super()._difference(other, sort=sort)
if overlap.step != first.step:
# In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super()._difference(other, sort=sort)
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
elif len(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
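        # Illustrative comment (not in the original source):
        #   RangeIndex(0, 10, 2) // 2 -> RangeIndex(start=0, stop=5, step=1),
        # because both the start and the step are divisible by the divisor.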
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return op(self._int64index, other)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
other = extract_array(other, extract_numpy=True, extract_range=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
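# A minimal behaviour sketch for the arithmetic above, assuming a pandas version
# that still exposes Int64Index/Float64Index (pre-2.0); the values are illustrative.
import pandas as pd

idx = pd.RangeIndex(start=0, stop=10, step=2, name="x")
print(idx + 3)   # addition shifts start/stop, keeps the step: still a RangeIndex(3, 13, 2)
print(idx * 2)   # mul/truediv go through the `step = op` override: RangeIndex(0, 20, 4)
print(idx / 2)   # step divides evenly, descriptors stay integral: RangeIndex(0, 5, 1)
print(idx / 4)   # non-integer step raises ValueError internally and falls back to Int64Index / 4 -> Float64Index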
| 32.777083
| 88
| 0.556664
| 3,741
| 31,466
| 4.520984
| 0.144079
| 0.016496
| 0.008455
| 0.005321
| 0.203689
| 0.140306
| 0.119435
| 0.113404
| 0.090345
| 0.068941
| 0
| 0.012967
| 0.326003
| 31,466
| 959
| 89
| 32.811262
| 0.784515
| 0.205841
| 0
| 0.1875
| 0
| 0
| 0.029477
| 0
| 0
| 0
| 0
| 0.001043
| 0.003472
| 1
| 0.102431
| false
| 0.001736
| 0.039931
| 0.020833
| 0.326389
| 0.001736
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a921ddf5fe02b1831b2b73b31bdcdcfebea2ba6
| 708
|
py
|
Python
|
model.py
|
Hasanweight/pytorch-chatbot-master
|
7a3b58af7e5284f1f3f7f7b0aeb3f19d9ee3cbc1
|
[
"MIT"
] | null | null | null |
model.py
|
Hasanweight/pytorch-chatbot-master
|
7a3b58af7e5284f1f3f7f7b0aeb3f19d9ee3cbc1
|
[
"MIT"
] | null | null | null |
model.py
|
Hasanweight/pytorch-chatbot-master
|
7a3b58af7e5284f1f3f7f7b0aeb3f19d9ee3cbc1
|
[
"MIT"
] | 1
|
2020-11-17T07:04:35.000Z
|
2020-11-17T07:04:35.000Z
|
import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, hidden_size)
self.l4 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.l4(out)
# no activation and no softmax at the end
return out
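# A minimal smoke test for the network above, assuming PyTorch is installed;
# the layer sizes and targets below are illustrative placeholders.
model = NeuralNet(input_size=54, hidden_size=8, num_classes=7)
x = torch.randn(2, 54)                       # batch of 2 bag-of-words vectors
logits = model(x)                            # raw scores, shape (2, 7)
loss = nn.CrossEntropyLoss()(logits, torch.tensor([0, 3]))  # softmax is applied inside the loss
print(logits.shape, loss.item())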
| 30.782609
| 61
| 0.59887
| 102
| 708
| 3.970588
| 0.323529
| 0.17284
| 0.123457
| 0.133333
| 0.328395
| 0.328395
| 0.276543
| 0
| 0
| 0
| 0
| 0.015905
| 0.289548
| 708
| 23
| 62
| 30.782609
| 0.789264
| 0.055085
| 0
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9277485abaa1ad23562bb5f41c412cb9cb7cd7
| 6,927
|
py
|
Python
|
jwql/utils/logging_functions.py
|
hover2pi/jwql
|
0a97fe618c007883ffbced88ac1cb45a667fcb3c
|
[
"BSD-3-Clause"
] | null | null | null |
jwql/utils/logging_functions.py
|
hover2pi/jwql
|
0a97fe618c007883ffbced88ac1cb45a667fcb3c
|
[
"BSD-3-Clause"
] | null | null | null |
jwql/utils/logging_functions.py
|
hover2pi/jwql
|
0a97fe618c007883ffbced88ac1cb45a667fcb3c
|
[
"BSD-3-Clause"
] | null | null | null |
""" Logging functions for the ``jwql`` automation platform.
This module provides decorators to log the execution of modules. Log
files are written to the ``logs/`` directory in the ``jwql`` central
storage area, named by module name and timestamp, e.g.
``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``
Authors
-------
- Catherine Martlin, 2018
- Alex Viana, 2013 (WFC3 QL Version)
Use
---
To log the execution of a module, use:
::
import os
import logging
from jwql.logging.logging_functions import configure_logging
from jwql.logging.logging_functions import log_info
from jwql.logging.logging_functions import log_fail
@log_info
@log_fail
def my_main_function():
pass
if __name__ == '__main__':
module = os.path.basename(__file__).replace('.py', '')
configure_logging(module)
my_main_function()
Dependencies
------------
The user must have a configuration file named ``config.json``
placed in the ``utils`` directory.
References
----------
This code is adapted and updated from the Python routine
``logging_functions.py`` written by Alex Viana, 2013 for the WFC3
Quicklook automation platform.
"""
import datetime
import getpass
import importlib
import logging
import os
import pwd
import socket
import sys
import time
import traceback
from functools import wraps
from jwql.utils.permissions import set_permissions
from jwql.utils.utils import get_config, ensure_dir_exists
LOG_FILE_LOC = ''
PRODUCTION_BOOL = ''
def configure_logging(module, production_mode=True, path='./'):
"""Configure the log file with a standard logging format.
Parameters
----------
module : str
The name of the module being logged.
production_mode : bool
Whether or not the output should be written to the production
environment.
path : str
Where to write the log if a user-supplied path is given; defaults to the working directory.
"""
# Determine log file location
if production_mode:
log_file = make_log_file(module)
else:
log_file = make_log_file(module, production_mode=False, path=path)
global LOG_FILE_LOC
global PRODUCTION_BOOL
LOG_FILE_LOC = log_file
PRODUCTION_BOOL = production_mode
# Create the log file and set the permissions
logging.basicConfig(filename=log_file,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S %p',
level=logging.INFO)
set_permissions(log_file)
def make_log_file(module, production_mode=True, path='./'):
"""Create the log file name based on the module name.
The name of the ``log_file`` is a combination of the name of the
module being logged and the current datetime.
Parameters
----------
module : str
The name of the module being logged.
production_mode : bool
Whether or not the output should be written to the production
environment.
path : str
Where to write the log if a user-supplied path is given; defaults to
the working directory.
Returns
-------
log_file : str
The full path to where the log file will be written to.
"""
timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')
filename = '{0}_{1}.log'.format(module, timestamp)
user = pwd.getpwuid(os.getuid()).pw_name
settings = get_config()
admin_account = settings['admin_account']
log_path = settings['log_dir']
exempt_modules = []
if user != admin_account and module not in exempt_modules and production_mode:
module = os.path.join('dev', module)
if production_mode:
log_file = os.path.join(log_path, module, filename)
else:
log_file = os.path.join(path, filename)
ensure_dir_exists(os.path.dirname(log_file))
return log_file
def log_info(func):
"""Decorator to log useful system information.
This function can be used as a decorator to log user environment
and system information. Future packages we want to track can be
added or removed as necessary.
Parameters
----------
func : func
The function to decorate.
Returns
-------
wrapped : func
The wrapped function.
"""
@wraps(func)
def wrapped(*a, **kw):
# Log environment information
logging.info('User: ' + getpass.getuser())
logging.info('System: ' + socket.gethostname())
logging.info('Python Version: ' + sys.version.replace('\n', ''))
logging.info('Python Executable Path: ' + sys.executable)
# Read in setup.py file to build list of required modules
settings = get_config()
setup_file_name = settings['setup_file']
with open(setup_file_name) as setup:
for line in setup:
if line[0:8] == "REQUIRES":
module_required = line[12:-2]
module_list = module_required.split(',')
# Clean up the module list
module_list = [module.replace('"', '').replace("'", '').replace(' ', '') for module in module_list]
module_list = [module.split('=')[0] for module in module_list]
# Log common module version information
for module in module_list:
try:
mod = importlib.import_module(module)
logging.info(module + ' Version: ' + mod.__version__)
logging.info(module + ' Path: ' + mod.__path__[0])
except ImportError as err:
logging.warning(err)
# Call the function and time it
# (time.clock() was removed in Python 3.8; time.process_time() is the closest replacement)
t1_cpu = time.process_time()
t1_time = time.time()
func(*a, **kw)
t2_cpu = time.process_time()
t2_time = time.time()
# Log execution time
hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)
minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)
hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)
minutes_time, seconds_time = divmod(remainder_time, 60)
logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))
logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))
return wrapped
def log_fail(func):
"""Decorator to log crashes in the decorated code.
Parameters
----------
func : func
The function to decorate.
Returns
-------
wrapped : func
The wrapped function.
"""
@wraps(func)
def wrapped(*a, **kw):
try:
# Run the function
func(*a, **kw)
logging.info('Completed Successfully')
except Exception:
logging.critical(traceback.format_exc())
logging.critical('CRASHED')
return wrapped
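# A minimal usage sketch for the decorators above, writing the log next to the
# script instead of the central storage area; a valid config.json (with
# 'log_dir', 'admin_account' and 'setup_file') is still required by get_config().
import logging

@log_info
@log_fail
def my_monitor():
    logging.info('Doing the actual work here.')

if __name__ == '__main__':
    configure_logging('my_monitor', production_mode=False, path='.')
    my_monitor()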
| 28.044534
| 111
| 0.627111
| 877
| 6,927
| 4.801596
| 0.270239
| 0.036571
| 0.011874
| 0.011399
| 0.260271
| 0.20209
| 0.180005
| 0.142009
| 0.142009
| 0.131085
| 0
| 0.013223
| 0.268515
| 6,927
| 246
| 112
| 28.158537
| 0.817841
| 0.420384
| 0
| 0.177778
| 0
| 0
| 0.083025
| 0.011105
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.022222
| 0.166667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a92d260f5ba3c3243955569573ecad3cecaf8e9
| 2,079
|
py
|
Python
|
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
|
jiaxiaolei/my_snap_demo
|
0444077c763e029eb67af7242537cebb3c3d6aa4
|
[
"Apache-2.0"
] | null | null | null |
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
|
jiaxiaolei/my_snap_demo
|
0444077c763e029eb67af7242537cebb3c3d6aa4
|
[
"Apache-2.0"
] | 4
|
2019-11-20T02:45:19.000Z
|
2019-12-03T03:14:15.000Z
|
bcloud-snap/bcloud-3.9.1/bcloud/hasher.py
|
jiaxiaolei/my_snap_demo
|
0444077c763e029eb67af7242537cebb3c3d6aa4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2014-2015 LiuLang <[email protected]>
# Use of this source code is governed by GPLv3 license that can be found
# in http://www.gnu.org/licenses/gpl-3.0.html
import hashlib
import os
import zlib
CHUNK = 2 ** 20
def crc(path):
_crc = 0
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_crc = zlib.crc32(chunk, _crc)
fh.close()
return '%X' % (_crc & 0xFFFFFFFF)
def md5(path, start=0, stop=-1):
_md5 = hashlib.md5()
fh = open(path, 'rb')
if start > 0:
fh.seek(start)
if stop == -1:
stop = os.path.getsize(path)
pos = start
while pos < stop:
size = min(CHUNK, stop - pos)
chunk = fh.read(size)
if not chunk:
break
pos += len(chunk)
_md5.update(chunk)
fh.close()
return _md5.hexdigest()
def sha1(path):
_sha1 = hashlib.sha1()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha1.update(chunk)
fh.close()
return _sha1.hexdigest()
def sha224(path):
_sha224 = hashlib.sha224()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha224.update(chunk)
fh.close()
return _sha224.hexdigest()
def sha256(path):
_sha256 = hashlib.sha256()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha256.update(chunk)
fh.close()
return _sha256.hexdigest()
def sha384(path):
_sha384 = hashlib.sha384()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha384.update(chunk)
fh.close()
return _sha384.hexdigest()
def sha512(path):
_sha512 = hashlib.sha512()
fh = open(path, 'rb')
while True:
chunk = fh.read(CHUNK)
if not chunk:
break
_sha512.update(chunk)
fh.close()
return _sha512.hexdigest()
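# The md5/sha* readers above all repeat the same chunked loop; a possible
# consolidation (a sketch, not part of the original module) could look like this.
import hashlib

def _digest(path, algorithm, chunk_size=CHUNK):
    """Hash an entire file with any hashlib algorithm, reading chunk_size bytes at a time."""
    h = hashlib.new(algorithm)
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_size)
            if not chunk:
                break
            h.update(chunk)
    return h.hexdigest()

# e.g. _digest('/tmp/file.bin', 'sha256') matches sha256('/tmp/file.bin') above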
| 21.65625
| 72
| 0.556037
| 268
| 2,079
| 4.231343
| 0.272388
| 0.080247
| 0.061728
| 0.074074
| 0.402116
| 0.275132
| 0.275132
| 0.275132
| 0.275132
| 0.275132
| 0
| 0.065341
| 0.322751
| 2,079
| 95
| 73
| 21.884211
| 0.740057
| 0.082732
| 0
| 0.493827
| 0
| 0
| 0.008412
| 0
| 0
| 0
| 0.005258
| 0
| 0
| 1
| 0.08642
| false
| 0
| 0.037037
| 0
| 0.209877
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a963372962a426bfe2a29c3f4ef8694684f359b
| 1,448
|
py
|
Python
|
Simulator/Geometry/RectOverlap.py
|
cuixiongyi/RBE595
|
fc5c6aa6c479eb14186a9168e47724b7b3d06cde
|
[
"MIT"
] | null | null | null |
Simulator/Geometry/RectOverlap.py
|
cuixiongyi/RBE595
|
fc5c6aa6c479eb14186a9168e47724b7b3d06cde
|
[
"MIT"
] | null | null | null |
Simulator/Geometry/RectOverlap.py
|
cuixiongyi/RBE595
|
fc5c6aa6c479eb14186a9168e47724b7b3d06cde
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot
__author__ = 'xiongyi'
line1 = [(200, 100), (200, 400)]
line2 = [(190, 190), (210, 210)]
def overlap():
l1p1x = line1[0][0]
l1p1y = line1[0][1]
l1p2x = line1[1][0]
l1p2y = line1[1][1]
# make sure p1x < p2x
if l1p1x > l1p2x:
tmp = l1p1x
l1p1x = l1p2x
l1p2x = tmp
# make sure p1y < p2y
if l1p1y > l1p2y:
tmp = l1p1y
l1p1y = l1p2y
l1p2y = tmp
l2p1x = line2[0][0]
l2p1y = line2[0][1]
l2p2x = line2[1][0]
l2p2y = line2[1][1]
# make sure p1x < p2x
if l2p1x > l2p2x:
tmp = l2p1x
l2p1x = l2p2x
l2p2x = tmp
# make sure p1y < p2y
if l2p1y > l2p2y:
tmp = l2p1y
l2p1y = l2p2y
l2p2y = tmp
# line2 rectangle is inside line1 rect
if l1p1x < l2p2x and l1p2x > l2p1x and l1p1y < l2p2y and l1p2y > l2p1y:
return True
# line1 rectangle is inside line2 rect
if l1p1x > l2p2x and l1p2x < l2p1x and l1p1y > l2p2y and l1p2y < l2p1y:
return True
if l1p1x > l2p2x or l1p2x < l2p1x:
return False
if l1p1y > l2p2y or l1p2y < l2p1y:
return False
return True
if __name__ == '__main__':
matplotlib.pyplot.plot((line1[0][0],line1[1][0]),(line1[0][1],line1[1][1]))
# matplotlib.pyplot.hold() was removed in Matplotlib 3.0; successive plot() calls overlay by default
matplotlib.pyplot.plot((line2[0][0],line2[1][0]),(line2[0][1],line2[1][1]))
print(overlap())
matplotlib.pyplot.show()
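# A reusable variant of the same axis-aligned bounding-box test, without the
# module-level line1/line2 globals (a sketch; segments are [(x1, y1), (x2, y2)]).
def rects_overlap(seg_a, seg_b):
    (ax1, ay1), (ax2, ay2) = seg_a
    (bx1, by1), (bx2, by2) = seg_b
    ax1, ax2 = sorted((ax1, ax2))
    ay1, ay2 = sorted((ay1, ay2))
    bx1, bx2 = sorted((bx1, bx2))
    by1, by2 = sorted((by1, by2))
    # boxes overlap unless one lies entirely left/right/above/below the other
    return not (ax1 > bx2 or ax2 < bx1 or ay1 > by2 or ay2 < by1)

# rects_overlap(line1, line2) reproduces overlap() for the example segments above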
| 26.814815
| 79
| 0.566989
| 201
| 1,448
| 4.024876
| 0.233831
| 0.098888
| 0.044499
| 0.024722
| 0.318912
| 0.318912
| 0.271941
| 0.227441
| 0.227441
| 0.227441
| 0
| 0.197015
| 0.305939
| 1,448
| 53
| 80
| 27.320755
| 0.60796
| 0.105663
| 0
| 0.113636
| 0
| 0
| 0.011646
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022727
| false
| 0
| 0.022727
| 0
| 0.159091
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a96a020d6c369841c24ae3ddad9a09c8b54550c
| 4,434
|
py
|
Python
|
gino/loader.py
|
p4l1ly/gino
|
bbe63ed841bf989a0f47b6cae64db85b0b606794
|
[
"BSD-3-Clause"
] | null | null | null |
gino/loader.py
|
p4l1ly/gino
|
bbe63ed841bf989a0f47b6cae64db85b0b606794
|
[
"BSD-3-Clause"
] | null | null | null |
gino/loader.py
|
p4l1ly/gino
|
bbe63ed841bf989a0f47b6cae64db85b0b606794
|
[
"BSD-3-Clause"
] | null | null | null |
from sqlalchemy import select
from sqlalchemy.schema import Column
from .declarative import Model
class Loader:
@classmethod
def get(cls, value):
from .crud import Alias
if isinstance(value, Loader):
rv = value
elif isinstance(value, type) and issubclass(value, Model):
rv = ModelLoader(value)
elif isinstance(value, Alias):
rv = AliasLoader(value)
elif isinstance(value, Column):
rv = ColumnLoader(value)
elif isinstance(value, tuple):
rv = TupleLoader(value)
elif callable(value):
rv = CallableLoader(value)
else:
rv = ValueLoader(value)
return rv
@property
def query(self):
rv = select(self.get_columns())
from_clause = self.get_from()
if from_clause is not None:
rv = rv.select_from(from_clause)
return rv.execution_options(loader=self)
def do_load(self, row, context):
raise NotImplementedError
def get_columns(self):
return []
def get_from(self):
return None
def __getattr__(self, item):
return getattr(self.query, item)
class ModelLoader(Loader):
def __init__(self, model, *column_names, **extras):
self.model = model
self._distinct = None
if column_names:
self.columns = [getattr(model, name) for name in column_names]
else:
self.columns = model
self.extras = dict((key, self.get(value))
for key, value in extras.items())
self.on_clause = None
def _do_load(self, row):
rv = self.model()
for c in self.columns:
if c in row:
rv.__values__[c.name] = row[c]
return rv
def do_load(self, row, context):
distinct = True
if self._distinct:
if context is None:
context = {}
ctx = context.setdefault(self._distinct, {})
key = tuple(row[col] for col in self._distinct)
if key == (None,) * len(key):
return None, None
rv = ctx.get(key)
if rv is None:
rv = self._do_load(row)
ctx[key] = rv
else:
distinct = False
else:
rv = self._do_load(row)
for key, value in self.extras.items():
value, distinct_ = value.do_load(row, context)
if distinct_ is not None:
setattr(rv, key, value)
return rv, distinct
def get_columns(self):
yield from self.columns
for subloader in self.extras.values():
yield from subloader.get_columns()
def get_from(self):
rv = self.model
for key, subloader in self.extras.items():
from_clause = subloader.get_from()
if from_clause is not None:
rv = rv.outerjoin(from_clause,
getattr(subloader, 'on_clause', None))
return rv
def load(self, *column_names, **extras):
if column_names:
self.columns = [getattr(self.model, name) for name in column_names]
self.extras.update((key, self.get(value))
for key, value in extras.items())
return self
def on(self, on_clause):
self.on_clause = on_clause
return self
def distinct(self, *columns):
self._distinct = columns
return self
class AliasLoader(ModelLoader):
def __init__(self, alias, *column_names, **extras):
super().__init__(alias, *column_names, **extras)
class ColumnLoader(Loader):
def __init__(self, column):
self.column = column
def do_load(self, row, context):
return row[self.column], True
class TupleLoader(Loader):
def __init__(self, values):
self.loaders = (self.get(value) for value in values)
def do_load(self, row, context):
return tuple(loader.do_load(row, context)[0]
for loader in self.loaders), True
class CallableLoader(Loader):
def __init__(self, func):
self.func = func
def do_load(self, row, context):
return self.func(row, context), True
class ValueLoader(Loader):
def __init__(self, value):
self.value = value
def do_load(self, row, context):
return self.value, True
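# A small illustration of the dispatch in Loader.get, using only the loaders that
# need no database model; the row is a plain dict standing in for a SQLAlchemy row,
# and the gino package itself must be importable for the Alias check inside get().
row = {"total": 42}
print(Loader.get("constant").do_load(row, None))                      # ('constant', True)  -> ValueLoader
print(Loader.get(lambda r, ctx: r["total"] * 2).do_load(row, None))   # (84, True)          -> CallableLoader
print(Loader.get(("constant", lambda r, ctx: r["total"])).do_load(row, None))  # (('constant', 42), True) -> TupleLoader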
| 28.063291
| 79
| 0.570591
| 525
| 4,434
| 4.655238
| 0.152381
| 0.027005
| 0.025777
| 0.037234
| 0.195581
| 0.176759
| 0.13257
| 0.085106
| 0.058101
| 0.058101
| 0
| 0.000339
| 0.334687
| 4,434
| 157
| 80
| 28.242038
| 0.828136
| 0
| 0
| 0.225806
| 0
| 0
| 0.00203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185484
| false
| 0
| 0.032258
| 0.056452
| 0.403226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9862396c2189c4e0deacb6232ab6ab3fc808e2
| 5,999
|
py
|
Python
|
lib/ioe_pot.py
|
ifurusato/ros
|
77b1361e78f68f00ba2d3e3db908bb5ce0f973f5
|
[
"MIT"
] | 9
|
2020-10-12T08:49:55.000Z
|
2021-07-23T14:20:05.000Z
|
lib/ioe_pot.py
|
fanmuzhi/ros
|
04534a35901341c4aaa9084bff3d46851795357d
|
[
"MIT"
] | 12
|
2020-07-22T19:08:58.000Z
|
2022-02-03T03:17:03.000Z
|
lib/ioe_pot.py
|
fanmuzhi/ros
|
04534a35901341c4aaa9084bff3d46851795357d
|
[
"MIT"
] | 3
|
2020-07-19T20:43:19.000Z
|
2022-03-02T09:15:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2020-09-19
# modified: 2020-09-19
#
import sys, colorsys
import ioexpander as io
from colorama import init, Fore, Style
init()
from lib.logger import Logger
# ..............................................................................
class Potentiometer(object):
'''
Configures an IO Expander Potentiometer breakout, returning an analog
value scaled to a specified range. For a center-zero pot simply
specify the minimum value as (-1.0 * out_max).
'''
def __init__(self, config, level):
super().__init__()
self._log = Logger('ioe', level)
if config is None:
raise ValueError('no configuration provided.')
_config = config['ros'].get('ioe_potentiometer')
# 0x18 for IO Expander, 0x0E for the potentiometer breakout
# self._i2c_addr = 0x0E
self._i2c_addr = _config.get('i2c_address')
self._pin_red = _config.get('pin_red')
self._pin_green = _config.get('pin_green')
self._pin_blue = _config.get('pin_blue')
self._log.info("pins: red: {}; green: {}; blue: {}".format(self._pin_red, self._pin_green, self._pin_blue))
self._pot_enc_a = 12
self._pot_enc_b = 3
self._pot_enc_c = 11
self._max_value = 3.3 # maximum voltage (3.3v supply)
self._brightness = _config.get('brightness') # effectively max fraction of period LED will be on
self._period = int(255 / self._brightness) # add a period large enough to get 0-255 steps at the desired brightness
_in_min = _config.get('in_min') # minimum analog value from IO Expander
_in_max = _config.get('in_max') # maximum analog value from IO Expander
self.set_input_limits(_in_min, _in_max)
_out_min = _config.get('out_min') # minimum scaled output value
_out_max = _config.get('out_max') # maximum scaled output value
self.set_output_limits(_out_min, _out_max)
# now configure IO Expander
self._ioe = io.IOE(i2c_addr=self._i2c_addr)
self._ioe.set_mode(self._pot_enc_a, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_b, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_c, io.ADC)
self._ioe.output(self._pot_enc_a, 1)
self._ioe.output(self._pot_enc_b, 0)
self._ioe.set_pwm_period(self._period)
self._ioe.set_pwm_control(divider=2) # PWM as fast as we can to avoid LED flicker
self._ioe.set_mode(self._pin_red, io.PWM, invert=True)
self._ioe.set_mode(self._pin_green, io.PWM, invert=True)
self._ioe.set_mode(self._pin_blue, io.PWM, invert=True)
self._log.info("running LED with {} brightness steps.".format(int(self._period * self._brightness)))
self._log.info("ready.")
# ..........................................................................
def set_input_limits(self, in_min, in_max):
self._in_min = in_min
self._in_max = in_max
self._log.info('input range:\t{:>5.2f}-{:<5.2f}'.format(self._in_min, self._in_max))
# ..........................................................................
def set_output_limits(self, out_min, out_max):
self._out_min = out_min
self._out_max = out_max
self._log.info('output range:\t{:>5.2f}-{:<5.2f}'.format(self._out_min, self._out_max))
# ..........................................................................
def get_value(self):
value = self._max_value - self._ioe.input(self._pot_enc_c)
self._log.debug(Fore.BLACK + 'value: {:<5.2f}'.format(value))
return value
# ..........................................................................
def set_rgb(self, value):
h = value / self._max_value # time.time() / 10.0
r, g, b = [int(c * self._period * self._brightness) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
self._ioe.output(self._pin_red, r)
self._ioe.output(self._pin_green, g)
self._ioe.output(self._pin_blue, b)
self._log.debug('value: {:<5.2f}; rgb: {},{},{}'.format(value, r, g, b))
# ..........................................................................
def get_scaled_value(self, update_led=True):
'''
Return a scaled value while also updating the RGB LED if the
argument is True (the default).
'''
_value = self.get_value()
if update_led:
self.set_rgb(_value)
return self.scale_value(_value) # as float
# # ..........................................................................
# def x_get_scaled_value(self):
# '''
# (out_max - out_min)(value - in_min)
# f(x) = ----------------------------------- + out_min
# in_max - in_min
# where: a = 0.0, b = 1.0, min = 0, max = 330.
# '''
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# ..........................................................................
def scale_value(self, value):
'''
(out_max - out_min)(value - in_min)
f(x) = ----------------------------------- + out_min
in_max - in_min
where e.g.: a = 0.0, b = 1.0, min = 0, max = 330.
'''
return (( self._out_max - self._out_min ) * ( value - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
#EOF
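# A worked instance of the scaling formula in scale_value(); the limits below are
# illustrative (raw 0-3.3 V mapped onto 0-100).
in_min, in_max = 0.0, 3.3       # raw analog range from the IO Expander
out_min, out_max = 0.0, 100.0   # desired scaled output range

def scale(x):
    return ((out_max - out_min) * (x - in_min) / (in_max - in_min)) + out_min

print(scale(0.0))    # 0.0
print(scale(1.65))   # 50.0 (mid-travel)
print(scale(3.3))    # 100.0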
| 44.437037
| 138
| 0.543091
| 787
| 5,999
| 3.818297
| 0.233799
| 0.028286
| 0.02995
| 0.030283
| 0.300166
| 0.209983
| 0.183028
| 0.175042
| 0.160399
| 0.160399
| 0
| 0.020466
| 0.234372
| 5,999
| 134
| 139
| 44.768657
| 0.633791
| 0.399067
| 0
| 0
| 0
| 0
| 0.08856
| 0.014518
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102941
| false
| 0
| 0.058824
| 0
| 0.220588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9978555063ed5f44aba19723290d6745163dd2
| 2,806
|
py
|
Python
|
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
|
LukHad/AccountBook
|
8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8
|
[
"MIT"
] | null | null | null |
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
|
LukHad/AccountBook
|
8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8
|
[
"MIT"
] | null | null | null |
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
|
LukHad/AccountBook
|
8da3ebbd2a824efb9d50f7695ceaaa6cf2370cd8
|
[
"MIT"
] | null | null | null |
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
import matplotlib.pyplot as plt
import matplotlib
import datetime
from TransactionBook.model.Filter import Filter
from datetime import datetime
from kivy.uix.popup import Popup
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.uix.checkbox import CheckBox
from kivy.core.window import Window
class MultiSelectPopUp(Popup):
pHint_x = NumericProperty(0.7)
pHint_y = NumericProperty(0.7)
pHint = ReferenceListProperty(pHint_x, pHint_y)
def __init__(self, title, option_list, option_init=None, callback=None, multiselect=True, **kwargs):
super().__init__(**kwargs)
self.title = title
self.callback = callback
self.main_layout = AnchorLayout()
if option_init is None:
option_init = [True] * len(option_list)
self.grid = GridLayout(cols=1)
self.opt_boxes = []
self.labels = []
for i, opt in enumerate(option_list):
box = BoxLayout(orientation='horizontal')
check_box = CheckBox(active=option_init[i])
if not multiselect:
check_box.group = "Single_Select_Only_Group"
label = Label(text=str(opt))
self.opt_boxes.append(check_box)
self.labels.append(label)
box.add_widget(check_box)
box.add_widget(label)
self.grid.add_widget(box)
cancel_button = Button(text="Cancel")
cancel_button.bind(on_press=self.cancel_callback)
ok_button = Button(text="Ok")
ok_button.bind(on_press=self.ok_callback)
box = BoxLayout(orientation='horizontal')
box.add_widget(cancel_button)
box.add_widget(ok_button)
self.grid.add_widget(box)
self.main_layout.add_widget(self.grid)
self.content = self.main_layout
self.size_hint = self.pHint
Window.release_all_keyboards()
self.open()
def ok_callback(self, _):
selection = []
for i, check_box in enumerate(self.opt_boxes):
if check_box.active:
selection.append(self.labels[i].text)
self.callback(selection)
self.dismiss()
def cancel_callback(self, _):
self.dismiss()
if __name__ == "__main__":
from kivy.base import runTouchApp
def cb(list_of_selection):
print(list_of_selection)
c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True])
runTouchApp(c)
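# A single-choice variation of the __main__ example above: with multiselect=False the
# check boxes share a group so only one can be active; this still has to run inside a
# Kivy session such as the runTouchApp() call above.
def on_choice(selection):
    print("picked:", selection)   # list with at most one entry

MultiSelectPopUp(
    title="Pick one",
    option_list=["Item1", "Item2", "Item3"],
    option_init=[True, False, False],
    callback=on_choice,
    multiselect=False,
)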
| 35.075
| 125
| 0.679259
| 349
| 2,806
| 5.252149
| 0.297994
| 0.052373
| 0.048009
| 0.024004
| 0.044735
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003688
| 0.227014
| 2,806
| 80
| 126
| 35.075
| 0.841402
| 0
| 0
| 0.086957
| 0
| 0
| 0.028144
| 0.00855
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.246377
| 0
| 0.362319
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9ada50ee04b4224d0c5731fe46fe28317d335c
| 19,192
|
py
|
Python
|
lib/tuner_interface.py
|
jefflundberg/locast2plex
|
3ab747a13c47888507c08f17d0afacad09894019
|
[
"MIT"
] | null | null | null |
lib/tuner_interface.py
|
jefflundberg/locast2plex
|
3ab747a13c47888507c08f17d0afacad09894019
|
[
"MIT"
] | null | null | null |
lib/tuner_interface.py
|
jefflundberg/locast2plex
|
3ab747a13c47888507c08f17d0afacad09894019
|
[
"MIT"
] | null | null | null |
import subprocess
import threading
import time
import errno
import socket
import urllib
import pathlib
from io import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer
import lib.stations as stations
import lib.epg2xml as epg2xml
import lib.channels_m3u as channels_m3u
from lib.templates import templates
# with help from https://www.acmesystems.it/python_http
# and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler
class PlexHttpHandler(BaseHTTPRequestHandler):
# using class variables since this should only be set once
config = None
hdhr_station_scan = False
rmg_station_scans = []
local_locast = None
location = None
def do_GET(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
# paths and logic mostly pulled from telly:routes.go: https://github.com/tellytv/telly
if (contentPath == '/') and (not self.config['main']['use_old_plex_interface']):
self.do_response(200,
'application/xml',
templates['xmlRmgIdentification'].format(self.config['main']['reporting_friendly_name']))
elif (contentPath == '/') or (contentPath == '/device.xml'):
templateName = 'xmlDiscover'
if self.config['main']['use_old_plex_interface']:
templateName = 'xmlDiscoverOld'
self.do_response(200,
'application/xml',
templates[templateName].format(self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['uuid'],
base_url))
elif contentPath == '/discover.json':
self.do_response(200,
'application/json',
templates['jsonDiscover'].format(self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['reporting_firmware_name'],
self.config['main']['tuner_count'],
self.config['main']['reporting_firmware_ver'],
self.config['main']['uuid'],
base_url))
elif contentPath == '/lineup_status.json':
if self.hdhr_station_scan:
returnJSON = templates['jsonLineupStatus']
else:
returnJSON = templates['jsonLineupComplete'].replace("Antenna", self.config['main']['tuner_type'])
self.do_response(200, 'application/json', returnJSON)
elif contentPath == '/lineup.json': # TODO
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
returnJSON = ''
for index, list_key in enumerate(station_list):
sid = str(list_key)
returnJSON = returnJSON + templates['jsonLineupItem'].format(station_list[sid]['channel'], station_list[sid]['friendlyName'], base_url + '/watch/' + sid)
if (index + 1) != len(station_list):
returnJSON = returnJSON + ','
returnJSON = "[" + returnJSON + "]"
self.do_response(200, 'application/json', returnJSON)
elif contentPath == '/lineup.xml': # TODO
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
returnXML = ''
for list_key in station_list:
sid = str(list_key)
returnXML = returnXML + templates['xmlLineupItem'].format(station_list[sid]['channel'], station_list[sid]['friendlyName'], base_url + '/watch/' + sid)
returnXML = "<Lineup>" + returnXML + "</Lineup>"
self.do_response(200, 'application/xml', returnXML)
elif contentPath.startswith('/watch'):
self.do_tuning(contentPath.replace('/watch/', ''))
elif contentPath.startswith('/auto/v'):
self.do_tuning(contentPath.replace('/auto/v', ''))
elif ((contentPath.startswith('/devices/' + self.config['main']['uuid'] + '/media/')) and
(not self.config['main']['use_old_plex_interface'])):
channel_no = contentPath.replace('/devices/' + self.config['main']['uuid'] + '/media/', '')
channel_no = urllib.parse.unquote(channel_no).replace('id://', '').replace('/', '')
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
for sid in station_list:
if station_list[sid]['channel'] == channel_no:
break
self.do_tuning(sid)
elif contentPath == '/xmltv.xml':
self.do_response(200, 'application/xml', epg2xml.get_epg(self.config, self.location))
elif contentPath == '/channels.m3u':
self.do_response(200, 'application/vnd.apple.mpegurl', channels_m3u.get_channels_m3u(self.config, self.location, base_url))
elif contentPath == '/debug.json':
self.do_response(200, 'application/json')
elif ((contentPath == '/devices/' + self.config['main']['uuid']) and
(not self.config['main']['use_old_plex_interface'])):
tuner_list = ""
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
tuner_list = tuner_list + templates['xmlRmgTunerIdle'].format(str(index))
elif scan_status == 'Scan':
tuner_list = tuner_list + templates['xmlRmgTunerScanning'].format(str(index))
else:
# otherwise, we're streaming, and the value will be the channel triplet
formatted_xml = templates['xmlRmgTunerStreaming'].format(str(index), scan_status)
tuner_list = tuner_list + formatted_xml
self.do_response(200,
'application/xml',
templates['xmlRmgDeviceIdentity'].format(self.config['main']['uuid'],
self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['tuner_count'],
base_url,
tuner_list))
elif((contentPath == '/devices/' + self.config['main']['uuid'] + '/channels') and
(not self.config['main']['use_old_plex_interface'])):
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
channelXML = ''
for index, list_key in enumerate(station_list):
sid = str(list_key)
tmpXML = templates['xmlRmgDeviceChannelItem'].format(station_list[sid]['channel'],
station_list[sid]['friendlyName'])
channelXML = channelXML + tmpXML
self.do_response(200, 'application/xml', templates['xmlRmgDeviceChannels'].format(index + 1, channelXML))
elif ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scanners') and
(not self.config['main']['use_old_plex_interface'])):
self.do_response(200, 'application/xml', templates['xmlRmgScanProviders'].format(self.location['city']))
else:
print("Unknown request to " + contentPath)
self.do_response(501, 'text/html', templates['htmlError'].format('501 - Not Implemented'))
return
def do_POST(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if contentPath == '/lineup.post':
if queryData['scan'] == 'start':
self.hdhr_station_scan = True
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
self.rmg_station_scans[index] = "Scan"
self.do_response(200, 'text/html')
# putting this here after the response on purpose
stations.refresh_dma_stations_and_channels(self.config, self.local_locast, self.location)
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
elif queryData['scan'] == 'abort':
self.do_response(200, 'text/html')
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
else:
print("Unknown scan command " + queryData['scan'])
self.do_response(400, 'text/html', templates['htmlError'].format(queryData['scan'] + ' is not a valid scan command'))
elif ((contentPath.startswith('/devices/discover') or contentPath.startswith('/devices/probe')) and
(not self.config['main']['use_old_plex_interface'])):
self.do_response(200,
'application/xml',
templates['xmlRmgDeviceDiscover'].format(self.config['main']['uuid'],
self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['tuner_count'],
base_url))
elif ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scan') and
(not self.config['main']['use_old_plex_interface'])):
self.hdhr_station_scan = True
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
self.rmg_station_scans[index] = "Scan"
self.do_response(200,
'application/xml',
templates['xmlRmgScanStatus'])
# putting this here after the response on purpose
stations.refresh_dma_stations_and_channels(self.config, self.local_locast, self.location)
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
else:
print("Unknown request to " + contentPath)
return
def do_DELETE(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scan') and
(not self.config['main']['use_old_plex_interface'])):
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
def do_tuning(self, sid):
channelUri = self.local_locast.get_station_stream_uri(sid)
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
tuner_found = False
# keep track of how many tuners we can use at a time
for index, scan_status in enumerate(self.rmg_station_scans):
# the first idle tuner gets it
if scan_status == 'Idle':
self.rmg_station_scans[index] = station_list[sid]['channel']
tuner_found = True
break
if tuner_found:
self.send_response(200)
self.send_header('Content-type', 'video/mpeg; codecs="avc1.4D401E')
self.end_headers()
ffmpeg_command = [self.config['main']['ffmpeg_path'],
"-i", channelUri,
"-c:v", "copy",
"-c:a", "copy",
"-f", "mpegts",
"-nostats", "-hide_banner",
"-loglevel", "warning",
"pipe:1"]
ffmpeg_proc = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)
# get initial videodata. if that works, then keep grabbing it
videoData = ffmpeg_proc.stdout.read(int(self.config['main']['bytes_per_read']))
while True:
if not videoData:
break
else:
# from https://stackoverflow.com/questions/9932332
try:
self.wfile.write(videoData)
time.sleep(0.1)
except IOError as e:
# Check we hit a broken pipe when trying to write back to the client
if e.errno in [errno.EPIPE, errno.ECONNABORTED, errno.ECONNRESET, errno.ECONNREFUSED]:
break
else:
raise
videoData = ffmpeg_proc.stdout.read(int(self.config['main']['bytes_per_read']))
# Send SIGTERM to shutdown ffmpeg
ffmpeg_proc.terminate()
try:
# ffmpeg writes a bit of data out to stderr after it terminates,
# need to read any hanging data to prevent a zombie process.
ffmpeg_proc.communicate()
except ValueError:
print("Connection Closed")
self.rmg_station_scans[index] = 'Idle'
else:
self.send_response(400, 'All tuners already in use.')
self.send_header('Content-type', 'text/html')
self.end_headers()
reply_str = templates['htmlError'].format('All tuners already in use.')
self.wfile.write(reply_str.encode('utf-8'))
def do_response(self, code, mime, reply_str):
self.send_response(code)
self.send_header('Content-type', mime)
self.end_headers()
if reply_str:
self.wfile.write(reply_str.encode('utf-8'))
# mostly from https://github.com/ZeWaren/python-upnp-ssdp-example
# and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port
class PlexHttpServer(threading.Thread):
def __init__(self, serverSocket, config, locast_service, location):
threading.Thread.__init__(self)
PlexHttpHandler.config = config
self.bind_ip = config["main"]["bind_ip"]
self.bind_port = config["main"]["bind_port"]
PlexHttpHandler.stations = stations
PlexHttpHandler.local_locast = locast_service
PlexHttpHandler.location = location
# init station scans
tmp_rmg_scans = []
for x in range(int(config['main']['tuner_count'])):
tmp_rmg_scans.append('Idle')
PlexHttpHandler.rmg_station_scans = tmp_rmg_scans
self.socket = serverSocket
self.daemon = True
self.start()
def run(self):
httpd = HTTPServer((self.bind_ip, int(self.bind_port)), PlexHttpHandler, False)
httpd.socket = self.socket
httpd.server_bind = self.server_close = lambda self: None
httpd.serve_forever()
def start(config, locast, location):
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind((config["main"]['bind_ip'], int(config["main"]['bind_port'])))
serverSocket.listen(int(config["main"]["concurrent_listeners"]))
print("Now listening for requests.")
for i in range(int(config["main"]["concurrent_listeners"])):
PlexHttpServer(serverSocket, config, locast, location)
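# A sketch of the configuration shape start() expects, built from the keys referenced
# above; every value here is a placeholder, and a real locast service object plus a
# location dict (with at least a 'city' entry) are still required.
example_config = {
    "main": {
        "bind_ip": "0.0.0.0",
        "bind_port": "6077",
        "concurrent_listeners": "5",
        "tuner_count": "4",
        "plex_accessible_ip": "127.0.0.1",
        "plex_accessible_port": "6077",
        "uuid": "locast2plex-example",
        "reporting_friendly_name": "locast2plex",
        "reporting_model": "l2p",
        "reporting_firmware_name": "locast2plex",
        "reporting_firmware_ver": "1.0",
        "tuner_type": "Antenna",
        "use_old_plex_interface": False,
        "ffmpeg_path": "/usr/bin/ffmpeg",
        "bytes_per_read": "1152000",
    }
}
# start(example_config, locast_service, location) then binds the socket and spawns the listeners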
| 43.12809
| 169
| 0.546321
| 1,868
| 19,192
| 5.445931
| 0.190578
| 0.052099
| 0.060552
| 0.026737
| 0.572594
| 0.518333
| 0.493758
| 0.446378
| 0.433599
| 0.412956
| 0
| 0.010612
| 0.33712
| 19,192
| 444
| 170
| 43.225225
| 0.789027
| 0.056534
| 0
| 0.474026
| 0
| 0
| 0.135914
| 0.022671
| 0
| 0
| 0
| 0.002252
| 0
| 1
| 0.025974
| false
| 0
| 0.042208
| 0
| 0.097403
| 0.016234
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9cd2106529aad0ea2a1405ec139e1af2cab3e4
| 1,130
|
py
|
Python
|
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py
|
dcs3spp/cookiecutter-django-api
|
d575dda07930743c05a27eb968489867831d97de
|
[
"Apache-1.1"
] | null | null | null |
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py
|
dcs3spp/cookiecutter-django-api
|
d575dda07930743c05a27eb968489867831d97de
|
[
"Apache-1.1"
] | null | null | null |
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py
|
dcs3spp/cookiecutter-django-api
|
d575dda07930743c05a27eb968489867831d97de
|
[
"Apache-1.1"
] | null | null | null |
from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
@login_required(login_url="/login/")
def index(request):
context = {}
context["segment"] = "index"
html_template = loader.get_template("index.html")
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split("/")[-1]
context["segment"] = load_template
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template("page-404.html")
return HttpResponse(html_template.render(context, request))
except: # noqa: E722
html_template = loader.get_template("page-500.html")
return HttpResponse(html_template.render(context, request))
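# These views are typically wired up in the app's urls.py; a minimal sketch with
# placeholder route names (any *.html path falls through to pages()).
from django.urls import path, re_path
from . import views

urlpatterns = [
    path("", views.index, name="home"),
    re_path(r"^.*\.html$", views.pages, name="pages"),
]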
| 35.3125
| 71
| 0.718584
| 137
| 1,130
| 5.781022
| 0.343066
| 0.121212
| 0.090909
| 0.106061
| 0.512626
| 0.439394
| 0.282828
| 0.282828
| 0
| 0
| 0
| 0.010741
| 0.176106
| 1,130
| 31
| 72
| 36.451613
| 0.839957
| 0.09646
| 0
| 0.333333
| 0
| 0
| 0.06883
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9d8f1b16e1dbb065ddd8280ce1c889563a6417
| 4,831
|
py
|
Python
|
JupyterHTMLSlides/core.py
|
williamegomezo/JupyterSlides
|
403fe15e360eb1d79bf813b923eb569a81ab0934
|
[
"MIT"
] | 1
|
2019-07-26T20:59:47.000Z
|
2019-07-26T20:59:47.000Z
|
JupyterHTMLSlides/core.py
|
williamegomezo/JupyterSlides
|
403fe15e360eb1d79bf813b923eb569a81ab0934
|
[
"MIT"
] | null | null | null |
JupyterHTMLSlides/core.py
|
williamegomezo/JupyterSlides
|
403fe15e360eb1d79bf813b923eb569a81ab0934
|
[
"MIT"
] | null | null | null |
import random
import string
import os
from IPython.display import display, HTML
from .utils import html_loader
from .utils import get_content
from jinja2 import Template
class JupyterSlides:
def __init__(
self,
content_path='./content.yaml',
table_contents=False
):
self.set_base_dirs()
self.set_source_dirs()
self.content = get_content(content_path)
self.render_init_templates()
if table_contents:
self.render_table_contents()
def set_base_dirs(self):
self.module_path = os.path.dirname(os.path.realpath(__file__))
self.base_template_dir = f'{self.module_path}/src/templates'
self.base_css_dir = f'{self.module_path}/src/assets/css'
self.base_js_dir = f'{self.module_path}/src/js'
def set_source_dirs(self):
self.called_from_path = os.getcwd()
folders = self.called_from_path.split('/')
self.source_path = '/'.join(folders[:folders.index('talks')])
self.template_dir = f'{self.source_path}/src/templates'
self.css_dir = f'{self.source_path}/src/css'
self.js_dir = f'{self.source_path}/src/js'
def render_init_templates(self):
self.render(
template='init',
data={'dir': self.module_path},
template_dir=self.base_template_dir
)
if os.path.isfile(f'{self.template_dir}/init.html'):
self.render(
template=f'init',
data=self.content.get('init_vars', {})
)
id = JupyterSlides.randomUUID()
self.render(
template='eye',
data={'eye_id': id},
template_dir=self.base_template_dir
)
def render_table_contents(self):
if os.path.isfile(f'{self.template_dir}/table-contents.html'):
contents_template_dir = self.template_dir
else:
contents_template_dir = self.base_template_dir
self.render(
template='table-contents',
data=self.generate_table_contents(),
template_dir=contents_template_dir,
render_type='slide'
)
def parse_template(self, template=None, data={}, template_dir=None, render_type=None):
if not template_dir:
if os.path.isfile(f'{self.template_dir}/{template}.html'):
html = html_loader(f'file:{self.template_dir}/{template}.html')
else:
template = 'basic-slide'
html = html_loader(f'file:{self.base_template_dir}/{template}.html')
else:
if not os.path.isfile(f'{template_dir}/{template}.html'):
template = 'basic-slide'
template_dir = self.base_template_dir
html = html_loader(
f'file:{template_dir}/{template}.html')
if render_type == 'slide':
html = '<div id="{{ data["slide_id"] }}" class="slide-container">' + \
html + '</div>'
tm = Template(html)
return tm.render(data=data)
def render(self, template=None, data={}, navigation=False, template_dir=None, render_type=None):
html = self.parse_template(
template=template,
data=data,
template_dir=template_dir,
render_type=render_type
)
if navigation:
navigation_template = self.parse_template(
template='navigation',
template_dir=template_dir
)
html += navigation_template
display(HTML(html))
def render_content(self, key):
data = self.content.get(key)
id = JupyterSlides.randomUUID()
self.render(
template='eye',
data={'eye_id': id},
template_dir=self.base_template_dir
)
if data.get('slides'):
for el in data.get('slides'):
template = el.get('template')
self.render(template=template, data=el, render_type='slide')
@staticmethod
def randomUUID(stringLength=20):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def generate_table_contents(self):
table = {}
items = []
for _, item in self.content.items():
for sub_item in item['slides']:
sub_item['slide_id'] = \
str(item['indice']) + '.' + str(sub_item['indice']) +\
sub_item['content_title']
item['slide_id'] = item['slides'][0]['slide_id']
items.append(item)
table['title'] = 'Table of Contents'
table['eyebrow'] = 'Table of Contents'
table['items'] = items
return table
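# A small illustration of the two helpers the class leans on: the random slide id
# generator and jinja2 templating (the template string and data are placeholders).
from jinja2 import Template

print(JupyterSlides.randomUUID(8))   # e.g. 'qhzkpxwt' - a random lowercase id
tm = Template('<div id="{{ data["slide_id"] }}">{{ data["title"] }}</div>')
print(tm.render(data={"slide_id": "1.1intro", "title": "Intro"}))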
| 33.783217
| 100
| 0.580211
| 556
| 4,831
| 4.818345
| 0.169065
| 0.12318
| 0.041807
| 0.049645
| 0.263531
| 0.222844
| 0.114595
| 0.106756
| 0.095558
| 0.095558
| 0
| 0.001182
| 0.299524
| 4,831
| 142
| 101
| 34.021127
| 0.790485
| 0.00828
| 0
| 0.165289
| 0
| 0
| 0.1551
| 0.094064
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082645
| false
| 0
| 0.057851
| 0
| 0.173554
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9e11dd86387cdd76e5db9dfd7ce9770e952aef
| 30,203
|
py
|
Python
|
tests/test_wallet.py
|
NickeZ/lightning
|
f376a9c24cc71d139393196dea86b5a39aee7db8
|
[
"MIT"
] | 1
|
2020-05-07T22:28:20.000Z
|
2020-05-07T22:28:20.000Z
|
tests/test_wallet.py
|
satoshinakamoto007/lightning
|
ff968e773074061d6f76cb81c6c61a1047ffaef1
|
[
"MIT"
] | 1
|
2020-05-03T00:56:31.000Z
|
2020-05-03T00:56:31.000Z
|
tests/test_wallet.py
|
satoshinakamoto007/lightning
|
ff968e773074061d6f76cb81c6c61a1047ffaef1
|
[
"MIT"
] | null | null | null |
from decimal import Decimal
from fixtures import * # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky # noqa: F401
from pyln.client import RpcError, Millisatoshi
from utils import (
only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT,
VALGRIND
)
import os
import pytest
import subprocess
import time
import unittest
@unittest.skipIf(TEST_NETWORK != 'regtest', "Test relies on a number of example addresses valid only in regtest")
def test_withdraw(node_factory, bitcoind):
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
l2 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Reach around into the db to check that outputs were added
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10
waddr = l1.bitcoin.rpc.getnewaddress()
# Now attempt to withdraw some (making sure we collect multiple inputs)
with pytest.raises(RpcError):
l1.rpc.withdraw('not an address', amount)
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, 'not an amount')
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, -amount)
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, amount * 100)
out = l1.rpc.withdraw(waddr, 2 * amount)
# Make sure bitcoind received the withdrawal
unspent = l1.bitcoin.rpc.listunspent(0)
withdrawal = [u for u in unspent if u['txid'] == out['txid']]
assert(withdrawal[0]['amount'] == Decimal('0.02'))
l1.bitcoin.generate_block(1)
sync_blockheight(l1.bitcoin, [l1])
# Check that there are no unconfirmed outputs (change should be confirmed)
for o in l1.rpc.listfunds()['outputs']:
assert o['status'] == 'confirmed'
# Now make sure two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2
# Now send some money to l2.
# lightningd uses P2SH-P2WPKH
waddr = l2.rpc.newaddr('bech32')['bech32']
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Make sure l2 received the withdrawal.
wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 2 * amount
# Now make sure an additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4
# Simple test for withdrawal to P2WPKH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6
# Simple test for withdrawal to P2WSH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8
# failure testing for invalid SegWit addresses, from BIP173
# HRP character out of range
with pytest.raises(RpcError):
l1.rpc.withdraw(' 1nwldj5', 2 * amount)
# overall max length exceeded
with pytest.raises(RpcError):
l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', 2 * amount)
# No separator character
with pytest.raises(RpcError):
l1.rpc.withdraw('pzry9x0s0muk', 2 * amount)
# Empty HRP
with pytest.raises(RpcError):
l1.rpc.withdraw('1pzry9x0s0muk', 2 * amount)
# Invalid witness version
with pytest.raises(RpcError):
l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', 2 * amount)
# Invalid program length for witness version 0 (per BIP141)
with pytest.raises(RpcError):
l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', 2 * amount)
# Mixed case
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', 2 * amount)
# Non-zero padding in 8-to-5 conversion
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', 2 * amount)
# Should have 6 outputs available.
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 6
# Test withdrawal to self.
l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
bitcoind.generate_block(1)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1
l1.rpc.withdraw(waddr, 'all', minconf=0)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0
# This should fail, can't even afford fee.
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, 'all')
# Add some funds to withdraw
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
withdrawal = l1.rpc.withdraw(waddr, 2 * amount, utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(withdrawal['tx'])
assert decode['txid'] == withdrawal['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
def test_minconf_withdraw(node_factory, bitcoind):
"""Issue 2518: ensure that ridiculous confirmation levels don't overflow
The number of confirmations is used to compute a maximum height that is to
be accepted. If the current height is smaller than the number of
confirmations we wrap around and just select everything. The fix is to
clamp the maxheight parameter to a positive small number.
"""
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
with pytest.raises(RpcError):
l1.rpc.withdraw(destination=addr, satoshi=10000, feerate='normal', minconf=9999999)
def test_addfunds_from_block(node_factory, bitcoind):
"""Send funds to the daemon without telling it explicitly
"""
# Previous runs with same bitcoind can leave funds!
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 0.1)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 10000000
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
# Send all our money to a P2WPKH address this time.
addr = l1.rpc.newaddr("bech32")['bech32']
l1.rpc.withdraw(addr, "all")
bitcoind.generate_block(1)
time.sleep(1)
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
@unittest.skipIf(not COMPAT, "needs COMPAT=1")
def test_deprecated_txprepare(node_factory, bitcoind):
"""Test the deprecated old-style:
txprepare {destination} {satoshi} {feerate} {minconf}
"""
amount = 10**4
l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
addr = l1.rpc.newaddr()['bech32']
for i in range(7):
l1.fundwallet(10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 7)
# Array type
with pytest.raises(RpcError, match=r'.* should be an amount in satoshis or all, not .*'):
l1.rpc.call('txprepare', [addr, 'slow'])
with pytest.raises(RpcError, match=r'Need set \'satoshi\' field.'):
l1.rpc.call('txprepare', [addr])
with pytest.raises(RpcError, match=r'Could not parse destination address.*'):
l1.rpc.call('txprepare', [Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'normal'])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), None, 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100)])
# Object type
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'destination': addr, 'feerate': 'slow'})
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'satoshi': Millisatoshi(amount * 100), 'feerate': '10perkw', 'minconf': 2})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw', 'minconf': 1})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw'})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100)})
def test_txprepare_multi(node_factory, bitcoind):
amount = 10000000
l1 = node_factory.get_node(random_hsm=True)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = []
for i in range(9):
outputs.append({l1.rpc.newaddr()['bech32']: Millisatoshi(amount * 100)})
prep = l1.rpc.txprepare(outputs=outputs)
l1.rpc.txdiscard(prep['txid'])
def test_txprepare(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare(outputs=[{addr: Millisatoshi(amount * 3 * 1000)}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# 4 inputs, 2 outputs (3 if we have a fee output).
assert len(decode['vin']) == 4
assert len(decode['vout']) == (2 if not chainparams['feeoutput'] else 3)
# One output will be correct.
outnum = [i for i, o in enumerate(decode['vout']) if o['value'] == Decimal(amount * 3) / 10**8][0]
for i, o in enumerate(decode['vout']):
if i == outnum:
assert o['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert o['scriptPubKey']['addresses'] == [addr]
else:
assert o['scriptPubKey']['type'] in ['witness_v0_keyhash', 'fee']
# Now prepare one with no change.
prep2 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep2['unsigned_tx'])
assert decode['txid'] == prep2['txid']
# 6 inputs, 1 output.
assert len(decode['vin']) == 6
assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)
# Some fees will be paid.
assert decode['vout'][0]['value'] < Decimal(amount * 6) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 6) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# If I cancel the first one, I can get those first 4 outputs.
discard = l1.rpc.txdiscard(prep['txid'])
assert discard['txid'] == prep['txid']
assert discard['unsigned_tx'] == prep['unsigned_tx']
prep3 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep3['unsigned_tx'])
assert decode['txid'] == prep3['txid']
# 4 inputs, 1 output.
assert len(decode['vin']) == 4
assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 4) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 4) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# Cannot discard twice.
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
# Discard everything, we should now spend all inputs.
l1.rpc.txdiscard(prep2['txid'])
l1.rpc.txdiscard(prep3['txid'])
prep4 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep4['unsigned_tx'])
assert decode['txid'] == prep4['txid']
# 10 inputs, 1 output.
assert len(decode['vin']) == 10
assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 10) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 10) / 10**8 - Decimal(0.0003)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
l1.rpc.txdiscard(prep4['txid'])
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
prep5 = l1.rpc.txprepare([{addr:
Millisatoshi(amount * 3.5 * 1000)}], utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
# We should have a change output, so this is exact
assert len(decode['vout']) == (3 if chainparams['feeoutput'] else 2)
assert decode['vout'][1]['value'] == Decimal(amount * 3.5) / 10**8
assert decode['vout'][1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][1]['scriptPubKey']['addresses'] == [addr]
# Discard prep5 and get all funds again
l1.rpc.txdiscard(prep5['txid'])
with pytest.raises(RpcError, match=r'this destination wants all satoshi. The count of outputs can\'t be more than 1'):
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)},
{addr: 'all'}])
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 500 + 100000)},
{addr: Millisatoshi(amount * 3 * 500 - 100000)}])
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# 4 inputs, 3 outputs (including change).
assert len(decode['vin']) == 4
assert len(decode['vout']) == (4 if chainparams['feeoutput'] else 3)
# One output will be correct.
for i in range(3 + chainparams['feeoutput']):
if decode['vout'][i - 1]['value'] == Decimal('0.01500100'):
outnum1 = i - 1
elif decode['vout'][i - 1]['value'] == Decimal('0.01499900'):
outnum2 = i - 1
else:
changenum = i - 1
assert decode['vout'][outnum1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum1]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][outnum2]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum2]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][changenum]['scriptPubKey']['type'] == 'witness_v0_keyhash'
def test_txsend(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)}])
out = l1.rpc.txsend(prep['txid'])
# Cannot discard after send!
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
wait_for(lambda: prep['txid'] in bitcoind.rpc.getrawmempool())
# Signed tx should have same txid
decode = bitcoind.rpc.decoderawtransaction(out['tx'])
assert decode['txid'] == prep['txid']
bitcoind.generate_block(1)
# Change output should appear.
if decode['vout'][0]['value'] == Decimal(amount * 3) / 10**8:
changenum = 1
elif decode['vout'][1]['value'] == Decimal(amount * 3) / 10**8:
changenum = 0
else:
assert False
# Those spent outputs are gone, but change output has arrived.
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10 - len(decode['vin']) + 1)
# Change address should appear in listfunds()
assert decode['vout'][changenum]['scriptPubKey']['addresses'][0] in [f['address'] for f in l1.rpc.listfunds()['outputs']]
def test_txprepare_restart(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(may_fail=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# L1 will forget all about it.
l1.restart()
# It goes backwards in blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# This will also work if we simply kill it.
l1.restart(clean=False)
# It goes backwards in blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
# It should have logged this for each output.
for i in decode['vin']:
assert l1.daemon.is_in_log('wallet: reserved output {}/{} reset to available'.format(i['txid'], i['vout']))
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee outputs throw off our output matching logic")
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Tests annotations which are compiled only with experimental features")
def test_transaction_annotations(node_factory, bitcoind):
l1, l2, l3 = node_factory.get_nodes(3)
l1.fundwallet(10**6)
# We should now have a transaction that gave us the funds in the
# transactions table...
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
out = outputs[0]
idx = out['output']
assert(idx in [0, 1] and out['value'] == 10**6)
# ... and it should have an annotation on the output reading 'deposit'
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 1)
tx = txs[0]
output = tx['outputs'][idx]
assert(output['type'] == 'deposit' and output['satoshis'] == '1000000000msat')
# ... and all other outputs should be change, and have no annotations
types = []
for i, o in enumerate(tx['outputs']):
if i == idx:
continue
if 'type' in o:
types.append(o['type'])
else:
types.append(None)
assert(set([None]) == set(types))
##########################################################################
# Let's now open a channel. The opener should get the funding transaction
# annotated as channel open and deposit.
l1.connect(l2)
fundingtx = l1.rpc.fundchannel(l2.info['id'], 10**5)
# We should have one output available, and it should be unconfirmed
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'unconfirmed')
# It should also match the funding txid:
assert(outputs[0]['txid'] == fundingtx['txid'])
# Confirm the channel and check that the output changed to confirmed
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l1, l2])
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
# We should have 2 transactions, the second one should be the funding tx
# (we are ordering by blockheight and txindex, so that order should be ok)
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 2 and txs[1]['hash'] == fundingtx['txid'])
# Check the annotated types
types = [o['type'] for o in txs[1]['outputs']]
changeidx = 0 if types[0] == 'deposit' else 1
fundidx = 1 - changeidx
assert(types[changeidx] == 'deposit' and types[fundidx] == 'channel_funding')
# And check the channel annotation on the funding output
peers = l1.rpc.listpeers()['peers']
assert(len(peers) == 1 and len(peers[0]['channels']) == 1)
scid = peers[0]['channels'][0]['short_channel_id']
assert(txs[1]['outputs'][fundidx]['channel'] == scid)
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsm_secret_encryption(node_factory):
l1 = node_factory.get_node(may_fail=True) # May fail when started without key
password = "reckful\n"
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Test we can encrypt an already-existing and not encrypted hsm_secret
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
id = l1.rpc.getinfo()["id"]
l1.stop()
# Test we cannot start the same wallet without specifying --encrypted-hsm
l1.daemon.opts.pop("encrypted-hsm")
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
# Test we cannot restore the same wallet with another password
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password[2:].encode("utf-8"))
assert(l1.daemon.proc.wait() == 1)
assert(l1.daemon.is_in_log("Wrong password for encrypted hsm_secret."))
# Test we can restore the same wallet with the same password
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert id == l1.rpc.getinfo()["id"]
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsmtool_secret_decryption(node_factory):
l1 = node_factory.get_node()
password = "reckless\n"
hsm_path = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "hsm_secret")
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Encrypt the master seed
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
node_id = l1.rpc.getinfo()["id"]
l1.stop()
# We can't use a wrong password!
cmd_line = ["tools/hsmtool", "decrypt", hsm_path, "A wrong pass"]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(cmd_line)
# Decrypt it with hsmtool
cmd_line[3] = password[:-1]
subprocess.check_call(cmd_line)
# Then test we can now start it without password
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# Test we can encrypt it offline
cmd_line[1] = "encrypt"
subprocess.check_call(cmd_line)
# Now we need to pass the encrypted-hsm startup option
l1.stop()
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
l1.daemon.opts.update({"encrypted-hsm": None})
master_fd, slave_fd = os.openpty()
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# And finally test that we can also decrypt if encrypted with hsmtool
cmd_line[1] = "decrypt"
subprocess.check_call(cmd_line)
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
# This test does a 'listtransactions' on a channel that is not yet confirmed
def test_fundchannel_listtransaction(node_factory, bitcoind):
l1, l2 = node_factory.get_nodes(2)
l1.fundwallet(10**6)
l1.connect(l2)
txid = l1.rpc.fundchannel(l2.info['id'], 10**5)['txid']
# The next call used to warn about SQL accessing a null column
# and crashed the daemon by accessing random or null memory
txs = l1.rpc.listtransactions()['transactions']
tx = [t for t in txs if t['hash'] == txid][0]
assert tx['blockheight'] == 0
def test_withdraw_nlocktime(node_factory):
"""
Test that we don't set the nLockTime to 0 for withdrawal transactions.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**4)
addr = l1.rpc.newaddr()["bech32"]
tx = l1.rpc.withdraw(addr, 10**3)["tx"]
nlocktime = node_factory.bitcoind.rpc.decoderawtransaction(tx)["locktime"]
tip = node_factory.bitcoind.rpc.getblockcount()
assert nlocktime > 0 and nlocktime <= tip
@flaky
@unittest.skipIf(VALGRIND, "A big loop is used to check fuzz.")
def test_withdraw_nlocktime_fuzz(node_factory, bitcoind):
"""
Test that we eventually fuzz nLockTime for withdrawal transactions.
Marked flaky "just in case" as we fuzz from 0 to 100 with a 10%
probability.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**8)
for i in range(100):
addr = l1.rpc.newaddr()["bech32"]
withdraw = l1.rpc.withdraw(addr, 10**3)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.
format(withdraw["txid"]))
decoded = bitcoind.rpc.decoderawtransaction(withdraw["tx"])
tip = node_factory.bitcoind.rpc.getblockcount()
assert decoded["locktime"] > 0
if decoded["locktime"] < tip:
return
else:
raise Exception("No transaction with fuzzed nLockTime !")
| 40.704852
| 130
| 0.65957
| 4,049
| 30,203
| 4.854532
| 0.129662
| 0.028744
| 0.026048
| 0.035409
| 0.61045
| 0.553368
| 0.523555
| 0.474664
| 0.439866
| 0.394129
| 0
| 0.043469
| 0.196437
| 30,203
| 741
| 131
| 40.759784
| 0.766419
| 0.171241
| 0
| 0.474359
| 0
| 0
| 0.194274
| 0.02964
| 0
| 0
| 0
| 0
| 0.196581
| 1
| 0.029915
| false
| 0.021368
| 0.023504
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9ed02f0755897cb2a1b2ac5fabcbb264f6bbee
| 18,025
|
py
|
Python
|
microbepy/plot/mutation_plot.py
|
ScienceStacks/MicrobEPy
|
704435e66c58677bab24f27820458870092924e2
|
[
"MIT"
] | 1
|
2019-05-04T00:31:05.000Z
|
2019-05-04T00:31:05.000Z
|
microbepy/plot/mutation_plot.py
|
ScienceStacks/MicrobEPy
|
704435e66c58677bab24f27820458870092924e2
|
[
"MIT"
] | null | null | null |
microbepy/plot/mutation_plot.py
|
ScienceStacks/MicrobEPy
|
704435e66c58677bab24f27820458870092924e2
|
[
"MIT"
] | null | null | null |
"""Provides plots of mutations for Isolates and Lines."""
from microbepy.common import constants as cn
from microbepy.common.dataframe_sorter import DataframeSorter
from microbepy.common.isolate import Isolate
from microbepy.common import util
from microbepy.correlation import genome_correlation
from microbepy.data.model_data_provider import ModelDataProvider
from microbepy.data import util_data
from microbepy.plot.mutation_cofraction import MutationCofraction
from microbepy.plot.util_plot import PlotParms
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
COLORS = ['red', 'green', 'blue']
SPECIES = {cn.SPECIES_MIX_DVH: "DVH",
cn.SPECIES_MIX_MMP: "MMP",
None: "both"}
FONTSIZE_TITLE = 16
FONTSIZE_LABEL = 8
MAX_LINES = 9
MIN_FRACTION = 0.25
THRESHOLD_FRAC = 0.2
MAX_SIGLVL = 0.01
COLORBAR_MIN = 1.0
COLORBAR_MAX = 4.0
class MutationLinePlot(object):
"""
Plot mutations by occurrences within Lines.
"""
def __init__(self, mutation_column=cn.GGENE_ID, species=None,
is_plot=True):
"""
:param str mutation_column:
:param bool is_plot:
"""
self._mutation_column = mutation_column
self._is_plot = is_plot
self._species = species
self.cofraction = MutationCofraction(species=self._species,
mutation_column=mutation_column)
def plotTransfers(self,
parms=PlotParms(is_initialize=False),
is_unit_fraction = False,
is_cluster_mutations=True):
"""
Does a stacked bar plot of mutation frequency for all transfers.
:param bool is_unit_fraction: round fraction to 1
:param bool is_cluster_mutations: Group similar mutations together
:return pd.DataFrame: row=mutation, col=line + transfer, value is fraction
"""
permitted_mutations = self.cofraction.ordered_mutations
transfers = self.cofraction.transfers
num_transfers = len(transfers)
fig, axes = plt.subplots(nrows=num_transfers, ncols=1)
dfs = []
for idx, transfer in enumerate(transfers):
parms[cn.PLT_YTICKLABELS] = True
if self._species is None:
parms[cn.PLT_TITLE] = "%d" % transfer
else:
parms[cn.PLT_TITLE] = "%s, %d" % (self._species, transfer)
if idx == 0:
parms[cn.PLT_YLABEL] = True
else:
parms[cn.PLT_YLABEL] = False
if idx < num_transfers - 1:
parms[cn.PLT_LEGEND] = False
parms[cn.PLT_XLABEL] = False
parms[cn.PLT_XTICKLABELS] = False
else:
parms[cn.PLT_LEGEND] = True
parms[cn.PLT_XLABEL] = True
parms[cn.PLT_XTICKLABELS] = True
df = self.plotLine(transfer,
parms=parms, is_plot=False,
ax=axes[idx], permitted_mutations=permitted_mutations,
is_unit_fraction=is_unit_fraction)
df[cn.TRANSFER] = transfer
dfs.append(df)
if self._is_plot:
plt.show()
return pd.concat(dfs)
def plotLine(self, transfer,
parms=PlotParms(is_initialize=False),
is_unit_fraction=False,
is_plot=None, ax=None, permitted_mutations=None):
"""
Does a stacked bar plot of mutation frequency by line
with colors
:params int transfer:
:params PlotParms parms:
:params Axis ax: axis to use in plot
:param list-str permitted_mutations: mutations to use and their order;
if None, alphabetical order is used
:param bool is_unit_fraction: round non-zero fraction to 1
:return pd.DataFrame: row=mutation, col=line, value is fraction
"""
if is_plot is None:
is_plot = self._is_plot
parms.setTrueIfAbsent(cn.PLT_XLABEL)
parms.setTrueIfAbsent(cn.PLT_XTICKLABELS)
#
df_plot = self.cofraction.makeLineDF(
permitted_mutations=permitted_mutations,
transfer=transfer)
if is_unit_fraction:
df_plot = df_plot.applymap(
lambda v: 1 if v> MIN_FRACTION else v)
# Do the plot
if cn.PLT_FIGSIZE not in parms:
parms[cn.PLT_FIGSIZE] = (12, 8)
if ax is None:
ax = df_plot.plot(kind='bar', stacked=True,
figsize=parms[cn.PLT_FIGSIZE], legend=None)
else:
df_plot.plot(kind='bar', stacked=True,
legend=None, ax=ax, figsize=parms[cn.PLT_FIGSIZE])
ax.set_xlabel("", fontsize=FONTSIZE_LABEL) # Eliminate implicit label
if parms.isFalse(cn.PLT_XTICKLABELS):
labels = ax.get_xticklabels()
new_labels = np.repeat("", len(labels))
ax.set_xticklabels(new_labels)
if parms.isFalse(cn.PLT_YTICKLABELS):
labels = ax.get_yticklabels()
new_labels = np.repeat("", len(labels))
ax.set_yticklabels(new_labels)
if cn.PLT_TITLE in parms:
title = parms[cn.PLT_TITLE]
else:
title = "%s Mutations" % SPECIES[self._species]
xpos = int(len(df_plot)*0.5)
ypos = MAX_LINES - 3
ax.text(xpos, ypos, title, fontsize=FONTSIZE_TITLE)
ax.set_ylim([0, MAX_LINES])
if parms.isTrue(cn.PLT_YLABEL):
if is_unit_fraction:
label = "No. Lines"
else:
label = "Fraction"
ax.set_ylabel(label , fontsize=FONTSIZE_LABEL)
if parms.isTrue(cn.PLT_XLABEL):
ax.set_xlabel(self._mutation_column, fontsize=FONTSIZE_LABEL)
if parms.isTrue(cn.PLT_LEGEND):
ax.legend(loc=(1,2))
#ax.legend()
if is_plot:
plt.show()
return df_plot
def _makeMutationSiglvlMatrix(self,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None, min_fraction=MIN_FRACTION):
"""
Creates a significance level matrix for mutations.
:param int transfer: transfer time for row mutations
:param int other_transfer: transfer time for column mutations
:param float min_fraction: minimum fractional occurrence of
a mutation within a line for it to be considered
:return pd.DataFrame: row index and columns are mutations
"""
def makeDF(transfer):
df_line = self.cofraction.makeLineDF(transfer=transfer)
df_binary = df_line.applymap(
lambda v: 0 if np.isnan(v) else v)
df_binary = df_binary.applymap(
lambda v: 1.0 if v > min_fraction else 0)
return df_binary.transpose()
#
if other_transfer is None:
other_transfer = transfer
#
df_binary_rows = makeDF(transfer)
df_binary_columns = makeDF(other_transfer)
df_matrix = genome_correlation.makeSiglvlDF(df_binary_rows,
df_other=df_binary_columns)
return df_matrix
def _plotSiglvlDF(self, transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
max_siglvl=MAX_SIGLVL):
"""
Constructs the dataframe used for the heatmap.
:param int transfer:
:param float max_siglvl:
:return pd.DataFrame: mutations, mutations,
values are -log10 significance level
"""
df_matrix = self._makeMutationSiglvlMatrix(transfer=transfer,
other_transfer=other_transfer)
sorter = DataframeSorter(df_matrix)
df_sort = sorter.orderBoth()
#
df_transformed = df_sort.applymap(lambda v: np.log10(v))
df_transformed = df_transformed.applymap(lambda v: -v)
ubound = -np.log10(max_siglvl)
df_plot = df_transformed.applymap(
lambda v: np.nan if v < ubound else v)
sorter = DataframeSorter(df_plot)
df_plot = sorter.deleteNanRowsAndColumns()
return df_plot
def plotCofractions(self, is_time_lag=False,
threshold_frac=THRESHOLD_FRAC,
is_difference_frac=False,
is_differenced=False,
is_compress=False,
parms=PlotParms(), **kwargs):
"""
Does subplots of the fraction of lines in which mutations co-occur.
:param bool is_time_lag: construct time lag subplots
:param bool is_differenced: Computes the difference in
count fractions
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
def funcDF(transfer, other_transfer):
if is_differenced:
df = self.cofraction.makeCofractionDifferencedDF(
transfer=transfer, other_transfer=other_transfer,
threshold_frac=threshold_frac)
else:
df = self.cofraction.makeCofractionDF(transfer=transfer,
is_difference_frac=is_difference_frac,
other_transfer=other_transfer)
if is_compress:
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
return df
#
return self._plotTransfers(funcDF, is_time_lag,
parms=parms, heat_range=[0, 1.0], **kwargs)
def plotSiglvls(self, is_time_lag=False, max_siglvl=MAX_SIGLVL,
parms=PlotParms(), **kwargs):
"""
Does subplots of mutation correlation significance levels.
:param bool is_time_lag: construct time lag subplots
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
def funcDF(transfer, other_transfer):
return self._plotSiglvlDF(transfer=transfer,
max_siglvl=max_siglvl,
other_transfer=other_transfer)
#
return self._plotTransfers(funcDF, is_time_lag,
parms=parms,
heat_range = [COLORBAR_MIN, COLORBAR_MAX],
**kwargs)
def _plotTransfers(self, funcDF, is_time_lag,
parms=PlotParms(), **kwargs):
"""
Does subplots of mutation comparisons over transfers.
:param Function funcDF: has kwargs transfer, other_transfer;
returns a dataframe of mutations as columns and index;
values are used in the heatmap.
:param bool is_time_lag: construct time lag subplots
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
NCOLS = 3
plot_pos = {1:1, 2:3, 3:4, 4:6}
NPLOTS = 6
transfers = self.cofraction.transfers
if is_time_lag:
pairs = [p for p in zip(transfers[:-1], transfers[1:])]
else:
pairs = [p for p in zip(transfers[:-1], transfers[:-1])]
#
# Calculate the column order
df = funcDF(transfer=cn.TRANSFER_1000G,
other_transfer=cn.TRANSFER_1000G)
df = df.fillna(0)
# Set up for plots
nrows = 2 if (len(pairs) == 4) else 3
fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE])
result = {}
for idx, pair in enumerate(pairs):
idx += 1
ax = fig.add_subplot(nrows, NCOLS, plot_pos[idx])
if idx < len(pairs):
is_plot = False
else:
is_plot = True
if idx in [1, 2, 5]:
parms[cn.PLT_XAXISTICKTOP] = True
else:
parms[cn.PLT_XAXISTICKTOP] = False
if idx == 4:
parms[cn.PLT_COLORBAR] = True
else:
parms[cn.PLT_COLORBAR] = False
transfer = pair[0]
other_transfer = pair[1]
df = funcDF(transfer=transfer, other_transfer=other_transfer)
df = df.applymap(lambda v: np.nan if v == 0 else v)
self._plotTransferCompare(df,
transfer=transfer, other_transfer=other_transfer,
ordered_columns=self.cofraction.ordered_mutations,
is_center_colorbar=True,
fig=fig, ax=ax, parms=parms, is_plot=is_plot, **kwargs)
result[pair] = df
return result
def plotSiglvl(self, max_siglvl=MAX_SIGLVL,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
is_center_colorbar = True,
**kwargs):
"""
Constructs a heatmap of the mutation co-occurrence significance
levels.
:param float max_siglvl: maximum significance level
:return pd.DataFrame: columns, rows are mutations
"""
df_plot = self._plotSiglvlDF(transfer=transfer,
other_transfer=other_transfer,
max_siglvl=max_siglvl)
self._plotTransferCompare(df_plot,
heat_range = [COLORBAR_MIN, COLORBAR_MAX],
ordered_mutations=self.cofraction.ordered_mutations,
transfer=transfer, other_transfer=other_transfer,
is_center_colorbar=is_center_colorbar,
**kwargs)
return df_plot
def plotCofraction(self,
threshold_frac=THRESHOLD_FRAC,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
is_difference_frac=False,
is_differenced=False,
is_center_colorbar=True,
is_compress=False,
parms=PlotParms(),
**kwargs):
"""
Constructs a heatmap of the mutation co-occurrence fractions.
:param int transfer: Transfer for which plot is done
:param bool is_differenced: Computes the difference in
count fractions
:param bool is_compress: Eliminate rows/columns
with 0 values
:return pd.DataFrame: columns, rows are mutations
"""
if is_differenced:
df = self.cofraction.makeCofractionDifferencedDF(
threshold_frac=threshold_frac,
transfer=transfer, other_transfer=other_transfer,
**kwargs)
df = df.applymap(lambda v: np.nan
if np.abs(v) < threshold_frac else v)
else:
df = self.cofraction.makeCofractionDF(transfer=transfer,
is_difference_frac=is_difference_frac,
other_transfer=other_transfer, **kwargs)
df = df.applymap(lambda v: np.nan if v < threshold_frac else v)
if is_compress:
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
is_include_missing_mutations = False
else:
is_include_missing_mutations = True
ordered_columns = self.cofraction.ordered_mutations
self._plotTransferCompare(df,
heat_range=[0, 1.0],
ordered_columns=ordered_columns,
parms=parms,
transfer=transfer, other_transfer=other_transfer,
is_center_colorbar=is_center_colorbar,
is_include_missing_mutations=is_include_missing_mutations,
**kwargs)
return df
def _plotTransferCompare(self,
df_plot,
heat_range,
ordered_columns=None,
is_center_colorbar=True,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
ax=None,
fig=None,
is_include_missing_mutations=True,
parms=PlotParms(),
is_plot=None):
"""
Constructs a heatmap comparing values for mutations from two transfers.
:param pd.DataFrame df_plot: index and columns are mutations;
values are plotted on the heatmap
:param list-str ordered_columns: order in which columns appear
:param bool is_center_colorbar: center the colorbar in the plot
:param list-float heat_range: [min, max] range for values shown on the heatmap
:param int transfer:
:param int other_transfer: Allow comparisons across time
:param Matplotlib.Axes ax:
:param PlotParms parms: Parameters for the plot
:param bool is_plot: Overrides constructor plotting directive
:param bool is_include_missing_mutations:
"""
def makeLabel(transfer, column, is_include_column=False):
if is_include_column:
label = "%d-%s" % (transfer, column)
else:
label = "%d" % transfer
return label
def setValue(a_dict, key, default):
if key not in a_dict:
a_dict[key] = default
#
if is_plot is None:
is_plot = self._is_plot
elif not self._is_plot:
is_plot = self._is_plot
if ordered_columns is None:
ordered_columns = list(set(df_plot.columns.tolist()).union(
df_plot.index))
# Do the plot
if cn.PLT_COLORBAR not in parms:
parms[cn.PLT_COLORBAR] = True
if other_transfer is None:
other_transfer = transfer
if ax is None:
if fig is None:
fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE])
ax = fig.add_subplot(1, 1, 1)
# Order the columns
if is_include_missing_mutations:
columns = df_plot.columns.tolist()
missing_columns = set(ordered_columns).difference(columns)
extended_ordered_columns = list(ordered_columns)
extended_ordered_columns.extend(
set(columns).difference(ordered_columns))
for col in missing_columns:
df_plot[col] = np.nan
df_plot.loc[col, :] = np.nan
df_plot = df_plot.reindex(extended_ordered_columns)
df_plot = df_plot[extended_ordered_columns]
rows = df_plot.columns.tolist()
columns = df_plot.columns.tolist()
else:
extended_ordered_columns = ordered_columns
rows = df_plot.index.tolist()
columns = df_plot.columns.tolist()
mutations = df_plot.columns.tolist()
# Set up plot information
parms[cn.PLT_XLABEL] = ""
setValue(parms, cn.PLT_COLORBAR, True)
xpos = 1.05*len(columns)
ypos = -0.05*len(rows)
parms[cn.PLT_XLABEL] = ""
xlabel = makeLabel(other_transfer, self._mutation_column)
parms[cn.PLT_YLABEL] = makeLabel(
transfer, self._mutation_column)
ax.text(xpos, ypos, xlabel, fontsize=parms.fontsize_label)
#
# Construct the plot
plot = ax.pcolor(df_plot, cmap='jet', vmin=heat_range[0],
vmax=heat_range[1])
if parms.isTrue(cn.PLT_COLORBAR):
if is_center_colorbar:
# Colorbar positions: left, bottom, width, height
cbaxes = fig.add_axes([.45, 0.2, 0.01, 0.5])
cb = fig.colorbar(plot, cax = cbaxes, cmap='jet')
cb.ax.tick_params(labelsize=parms.fontsize_label)
else:
cb = fig.colorbar(plot, cmap='jet')
cb.ax.tick_params(labelsize=parms.fontsize_label)
row_labels = df_plot.columns.tolist()
col_labels = df_plot.index.tolist()
if parms.isTrue(cn.PLT_XAXISTICKTOP):
ax.xaxis.tick_top()
ax.set_xticks(np.arange(0.5, len(row_labels)))
ax.set_xticklabels(row_labels, rotation=90,
fontsize=parms.fontsize_label)
ax.set_yticks(np.arange(0.5, len(col_labels)))
ax.set_yticklabels(col_labels,
fontsize=parms.fontsize_label)
#parms[cn.PLT_YLABEL] = ""
parms.do(is_plot=False)
if is_plot:
parms[cn.PLT_YLABEL] = ""
parms.do(is_plot=False)
ylabel = makeLabel(transfer, self._mutation_column)
xpos = -3
ypos = 0.5*len(rows)
ypos = -1
ax.set_ylabel(ylabel, fontsize=parms.fontsize_label,
x=xpos, y=ypos)
#plt.show()
parms.do(is_plot=is_plot)
else:
parms.do(is_plot=is_plot)
| 35.62253
| 78
| 0.676782
| 2,384
| 18,025
| 4.918624
| 0.134648
| 0.017056
| 0.023879
| 0.02439
| 0.409006
| 0.32381
| 0.262664
| 0.218489
| 0.14907
| 0.140884
| 0
| 0.00837
| 0.231123
| 18,025
| 505
| 79
| 35.693069
| 0.837723
| 0.198391
| 0
| 0.349333
| 0
| 0
| 0.00663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042667
| false
| 0
| 0.034667
| 0.002667
| 0.114667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9ed7740bcb98fbae13ca6bc7e08c9cb1a32fd1
| 4,384
|
py
|
Python
|
semantic-segmentation/deeplabv3plus/dataset_utils.py
|
shikisawamura/nnabla-examples
|
baf4e4cc620dedbf4368683325c0fb868676850d
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:49:25.000Z
|
2020-08-03T12:49:25.000Z
|
semantic-segmentation/deeplabv3plus/dataset_utils.py
|
takuseno/nnabla-examples
|
070d25078ad3d5458744dbfd390cdd926e20e573
|
[
"Apache-2.0"
] | null | null | null |
semantic-segmentation/deeplabv3plus/dataset_utils.py
|
takuseno/nnabla-examples
|
070d25078ad3d5458744dbfd390cdd926e20e573
|
[
"Apache-2.0"
] | 1
|
2020-04-25T06:11:28.000Z
|
2020-04-25T06:11:28.000Z
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
from scipy.misc import imread
from args import get_args
import matplotlib.pyplot as plt
def get_color():
# RGB format
return np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [120, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128], [224, 224, 192]])
def encode_label(label):
'''
Convert pixel values to corresponding class numbers. Assumes the input label is 3-dim (h, w, c) and in BGR format as read by cv2
'''
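# Illustrative mapping (based on the palette returned by get_color() below):
#   a pixel equal to get_color()[1] (e.g. [128, 0, 0]) is assigned class 1,
#   while the last palette entry (index 21, [224, 224, 192]) is mapped to 255 (ignore/void).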
h, w, c = label.shape
new_label = np.zeros((h, w, 1), dtype=np.int32)
cls_to_clr_map = get_color()
for i in range(cls_to_clr_map.shape[0]):
#new_label[(label == cls_to_clr_map[i])[:,:,0]] = i
#new_label[np.argwhere((label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))]=i
print(np.where((label.astype(np.int32) == [120, 0, 128]).all(axis=2)))
if i == 21:
new_label[np.where(
(label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = 255
else:
new_label[np.where(
(label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = i
return new_label
# this method should generate train-image.txt and train-label.txt
def generate_path_files(data_dir, train_file, val_file):
ti = open('train_image.txt', 'w')
tl = open('train_label.txt', 'w')
vi = open('val_image.txt', 'w')
vl = open('val_label.txt', 'w')
rootdir = data_dir
train_text_file = open(train_file, "r")
lines = [line[:-1] for line in train_text_file]
for line in lines:
if os.path.exists(data_dir+'JPEGImages/'+line+'.jpg'):
ti.write(data_dir+'JPEGImages/'+line+'.jpg' + '\n')
assert (os.path.isfile(data_dir+'SegmentationClass/encoded/'+line +
'.npy')), "No matching label file for image : " + line + '.jpg'
tl.write(data_dir+'SegmentationClass/encoded/'+line + '.npy' + '\n')
val_text_file = open(val_file, "r")
lines = [line[:-1] for line in val_text_file]
for line in lines:
if os.path.exists(data_dir+'JPEGImages/'+line+'.jpg'):
vi.write(data_dir+'JPEGImages/'+line+'.jpg' + '\n')
assert (os.path.isfile(data_dir+'SegmentationClass/encoded/'+line +
'.npy')), "No matching label file for image : " + line + '.jpg'
vl.write(data_dir+'SegmentationClass/encoded/'+line + '.npy' + '\n')
ti.close()
tl.close()
vi.close()
vl.close()
def main():
'''
Arguments:
train-file = txt file containing randomly selected image filenames to be taken as training set.
val-file = txt file containing randomly selected image filenames to be taken as validation set.
data-dir = dataset directory
Usage: python dataset_utils.py --train-file="" --val-file="" --data_dir=""
'''
args = get_args()
data_dir = args.data_dir
if not os.path.exists(data_dir+'SegmentationClass/' + 'encoded/'):
os.makedirs(data_dir+'SegmentationClass/' + 'encoded/')
for filename in os.listdir(data_dir+'SegmentationClass/'):
if os.path.isdir(data_dir+'SegmentationClass/' + filename):
continue
label = imread(data_dir+'SegmentationClass/' +
filename).astype('float32')
label = encode_label(label)
np.save(data_dir+'SegmentationClass/' + 'encoded/' +
filename.split('.')[0] + '.npy', label)
generate_path_files(args.data_dir, args.train_file, args.val_file)
if __name__ == '__main__':
main()
| 38.79646
| 334
| 0.619297
| 641
| 4,384
| 4.109204
| 0.296412
| 0.055809
| 0.091116
| 0.082384
| 0.353455
| 0.31549
| 0.290812
| 0.290812
| 0.23918
| 0.23918
| 0
| 0.05344
| 0.227418
| 4,384
| 112
| 335
| 39.142857
| 0.72424
| 0.280338
| 0
| 0.16129
| 0
| 0
| 0.155239
| 0.033635
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.064516
| false
| 0
| 0.080645
| 0.016129
| 0.177419
| 0.016129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9edfbe7de3c135419c8254312b876a5177e47f
| 10,044
|
py
|
Python
|
train.py
|
shamilcm/fairseq-py
|
ceb2f1200c9e5b8bf42a1033e7638d3e8586609a
|
[
"BSD-3-Clause"
] | 1
|
2021-04-20T07:33:12.000Z
|
2021-04-20T07:33:12.000Z
|
train.py
|
shamilcm/fairseq-py
|
ceb2f1200c9e5b8bf42a1033e7638d3e8586609a
|
[
"BSD-3-Clause"
] | null | null | null |
train.py
|
shamilcm/fairseq-py
|
ceb2f1200c9e5b8bf42a1033e7638d3e8586609a
|
[
"BSD-3-Clause"
] | 3
|
2018-04-20T11:00:16.000Z
|
2020-04-25T09:31:14.000Z
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import collections
import os
import torch
import math
from fairseq import bleu, data, options, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.multiprocessing_trainer import MultiprocessingTrainer
from fairseq.progress_bar import progress_bar
from fairseq.sequence_generator import SequenceGenerator
def main():
parser = options.get_parser('Trainer')
dataset_args = options.add_dataset_args(parser)
dataset_args.add_argument('--max-tokens', default=0, type=int, metavar='N',
help='maximum number of tokens in a batch')
dataset_args.add_argument('--batch-size', default=32, type=int, metavar='N',
help='batch size')
dataset_args.add_argument('--test-batch-size', default=32, type=int, metavar='N',
help='batch size for test set')
dataset_args.add_argument('--valid-batch-size', default=32, type=int, metavar='N',
help='batch size for validation set')
dataset_args.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
dataset_args.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets '
'to use for validation (train, valid, valid1, test, test1)')
dataset_args.add_argument('--test-subset', default='test', metavar='SPLIT',
help='comma separated list of data subsets '
'to use for testing (train, valid, test)')
dataset_args.add_argument('--valid-script', nargs='+', metavar='PATH', help='path to external validation script (optional).')
options.add_optimization_args(parser)
options.add_checkpoint_args(parser)
options.add_model_args(parser)
args = utils.parse_args_and_arch(parser)
print(args)
if args.no_progress_bar:
progress_bar.enabled = False
progress_bar.print_interval = args.log_interval
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
torch.manual_seed(args.seed)
# Setting args.max_tokens to infinity (same as setting it to None)
if args.max_tokens == 0:
args.max_tokens = None
# Load dataset
dataset = data.load_with_check(args.data, args.source_lang, args.target_lang)
if args.source_lang is None or args.target_lang is None:
# record inferred languages in args, so that it's saved in checkpoints
args.source_lang, args.target_lang = dataset.src, dataset.dst
print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
for split in dataset.splits:
print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
num_gpus = torch.cuda.device_count()
print('| using {} GPUs (with max tokens per GPU = {})'.format(num_gpus, args.max_tokens))
# Build model
print('| model {}'.format(args.arch))
model = utils.build_model(args, dataset)
criterion = utils.build_criterion(args, dataset)
# Start multiprocessing
trainer = MultiprocessingTrainer(args, model)
# Load the latest checkpoint if one is available
epoch, batch_offset = trainer.load_checkpoint(os.path.join(args.save_dir, args.restore_file))
# Train until the learning rate gets too small
val_loss = None
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch <= max_epoch:
# train for one epoch
train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus)
# evaluate on validate set
for k, subset in enumerate(args.valid_subset.split(',')):
val_loss = validate(args, epoch, trainer, criterion, dataset, subset, num_gpus)
if k == 0:
if not args.no_save:
# save checkpoint
trainer.save_checkpoint(args, epoch, 0, val_loss, validation_script=args.valid_script)
# only use first validation loss to update the learning schedule
lr = trainer.lr_step(val_loss, epoch)
epoch += 1
batch_offset = 0
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
# Generate on test set and compute BLEU score
for beam in [1, 5, 10, 20]:
for subset in args.test_subset.split(','):
scorer = score_test(args, trainer.get_model(), dataset, subset, beam,
cuda_device=(0 if num_gpus > 0 else None))
print('| Test on {} with beam={}: {}'.format(subset, beam, scorer.result_string()))
# Stop multiprocessing
trainer.stop()
def train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus):
"""Train the model for one epoch."""
itr = dataset.dataloader(args.train_subset, batch_size=args.batch_size,
test_batch_size=args.test_batch_size,
valid_batch_size=args.valid_batch_size,
num_workers=args.workers,
max_tokens=args.max_tokens, seed=args.seed, epoch=epoch,
max_positions=args.max_positions,
sample_without_replacement=args.sample_without_replacement)
loss_meter = AverageMeter()
bsz_meter = AverageMeter() # sentences per batch
wpb_meter = AverageMeter() # words per batch
wps_meter = TimeMeter() # words per second
clip_meter = AverageMeter() # % of updates clipped
gnorm_meter = AverageMeter() # gradient norm
desc = '| epoch {:03d}'.format(epoch)
lr = trainer.get_lr()
with progress_bar(itr, desc, leave=False) as t:
for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):
loss, grad_norm = trainer.train_step(sample, criterion)
ntokens = sum(s['ntokens'] for s in sample)
src_size = sum(s['src_tokens'].size(0) for s in sample)
loss_meter.update(loss, ntokens)
bsz_meter.update(src_size)
wpb_meter.update(ntokens)
wps_meter.update(ntokens)
clip_meter.update(1 if grad_norm > args.clip_norm else 0)
gnorm_meter.update(grad_norm)
t.set_postfix(collections.OrderedDict([
('loss', '{:.2f} ({:.2f})'.format(loss, loss_meter.avg)),
('wps', '{:5d}'.format(round(wps_meter.avg))),
('wpb', '{:5d}'.format(round(wpb_meter.avg))),
('bsz', '{:5d}'.format(round(bsz_meter.avg))),
('lr', lr),
('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),
('gnorm', '{:.4f}'.format(gnorm_meter.avg)),
]))
if i == 0:
# ignore the first mini-batch in words-per-second calculation
wps_meter.reset()
if args.save_interval > 0 and (i + 1) % args.save_interval == 0:
trainer.save_checkpoint(args, epoch, i + 1)
fmt = desc + ' | train loss {:2.2f} | train ppl {:3.2f}'
fmt += ' | s/checkpoint {:7d} | words/s {:6d} | words/batch {:6d}'
fmt += ' | bsz {:5d} | lr {:0.6f} | clip {:3.0f}% | gnorm {:.4f}'
t.write(fmt.format(loss_meter.avg, math.pow(2, loss_meter.avg),
round(wps_meter.elapsed_time),
round(wps_meter.avg),
round(wpb_meter.avg),
round(bsz_meter.avg),
lr, clip_meter.avg * 100,
gnorm_meter.avg))
def validate(args, epoch, trainer, criterion, dataset, subset, ngpus):
"""Evaluate the model on the validation set and return the average loss."""
itr = dataset.dataloader(subset, batch_size=None,
max_tokens=args.max_tokens,
max_positions=args.max_positions)
loss_meter = AverageMeter()
desc = '| epoch {:03d} | valid on \'{}\' subset'.format(epoch, subset)
with progress_bar(itr, desc, leave=False) as t:
for _, sample in data.skip_group_enumerator(t, ngpus):
ntokens = sum(s['ntokens'] for s in sample)
loss = trainer.valid_step(sample, criterion)
loss_meter.update(loss, ntokens)
t.set_postfix(loss='{:.2f}'.format(loss_meter.avg))
val_loss = loss_meter.avg
t.write(desc + ' | valid loss {:2.2f} | valid ppl {:3.2f}'
.format(val_loss, math.pow(2, val_loss)))
# update and return the learning rate
return val_loss
def score_test(args, model, dataset, subset, beam, cuda_device):
"""Evaluate the model on the test set and return the BLEU scorer."""
translator = SequenceGenerator([model], dataset.dst_dict, beam_size=beam)
if torch.cuda.is_available():
translator.cuda()
scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
itr = dataset.dataloader(subset, batch_size=4, max_positions=args.max_positions)
for _, _, ref, hypos in translator.generate_batched_itr(itr, cuda_device=cuda_device):
scorer.add(ref.int().cpu(), hypos[0]['tokens'].int().cpu())
return scorer
if __name__ == '__main__':
main()
| 44.052632
| 129
| 0.616587
| 1,268
| 10,044
| 4.72082
| 0.219243
| 0.020047
| 0.01871
| 0.029402
| 0.244571
| 0.150017
| 0.118276
| 0.078851
| 0.055129
| 0.055129
| 0
| 0.010172
| 0.26593
| 10,044
| 227
| 130
| 44.246696
| 0.801709
| 0.107427
| 0
| 0.062893
| 0
| 0.012579
| 0.124958
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025157
| false
| 0
| 0.056604
| 0
| 0.09434
| 0.056604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9f03cac960929d8e8a292c8e92367e90e1a3eb
| 7,311
|
py
|
Python
|
storm_control/sc_library/log_timing.py
|
jeffmoffitt/storm-control
|
522add1e196e0b7964f574481fd90c20a74b575e
|
[
"MIT"
] | null | null | null |
storm_control/sc_library/log_timing.py
|
jeffmoffitt/storm-control
|
522add1e196e0b7964f574481fd90c20a74b575e
|
[
"MIT"
] | null | null | null |
storm_control/sc_library/log_timing.py
|
jeffmoffitt/storm-control
|
522add1e196e0b7964f574481fd90c20a74b575e
|
[
"MIT"
] | 1
|
2020-11-10T06:39:18.000Z
|
2020-11-10T06:39:18.000Z
|
#!/usr/bin/env python
"""
This parses a log file series (i.e. log, log.1, log.2, etc.) and
outputs timing and call frequency information for HAL messages.
Hazen 5/18
"""
from datetime import datetime
import os
pattern = '%Y-%m-%d %H:%M:%S,%f'
class Message(object):
"""
Storage for the timing of a single message.
"""
def __init__(self, m_type = None, source = None, time = None, zero_time = None, **kwds):
super().__init__(**kwds)
self.created_time = None
self.m_type = m_type
self.n_workers = 0
self.processing_time = None
self.queued_time = None
self.source = source
self.temp = self.parseTime(time)
self.created(zero_time)
def created(self, time):
t_time = self.parseTime(time)
self.created_time = (self.temp - t_time).total_seconds()
def getCreatedTime(self):
"""
Returns the time when the message was created relative to first
time in the log file in seconds.
"""
return self.created_time
def getNWorkers(self):
"""
Return the number of workers (QRunnables) that were employed
to process this message.
"""
return self.n_workers
def getProcessingTime(self):
"""
Return time to process in seconds.
"""
return self.processing_time
def getQueuedTime(self):
"""
Return time queued in seconds.
"""
return self.queued_time
def getSource(self):
"""
Returns the source of a message.
"""
return self.source
def getType(self):
"""
Return the message type.
"""
return self.m_type
def incNWorkers(self):
self.n_workers += 1
def isComplete(self):
"""
Returns true if we have all the timing data for this message.
"""
return (self.processing_time != None)
def parseTime(self, time):
return datetime.strptime(time, pattern)
def processed(self, time):
t_time = self.parseTime(time)
self.processing_time = (t_time - self.temp).total_seconds()
def sent(self, time):
t_time = self.parseTime(time)
self.queued_time = (t_time - self.temp).total_seconds()
self.temp = t_time
def getIterable(dict_or_list):
"""
Returns an iterable given a dictionary or a list.
"""
if isinstance(dict_or_list, dict):
iterable = list(dict_or_list.values())
elif isinstance(dict_or_list, list):
iterable = dict_or_list
else:
raise Exception("Unknown type '" + str(type(dict_or_list)) + "'")
return iterable
def groupByMsgType(messages):
"""
Returns a dictionary keyed by message type, with a list of one or
more message objects per message type.
"""
return groupByX(lambda x : x.getType(),
messages)
def groupBySource(messages):
"""
Returns a dictionary keyed by message source, with a list of one or
more message objects per message source.
"""
return groupByX(lambda x : x.getSource(),
messages)
def groupByX(grp_fn, messages, ignore_incomplete=False):
"""
Returns a dictionary keyed by the requested group.
"""
m_grp = {}
for msg in getIterable(messages):
# Ignore messages that we don't have all the timing for.
if msg.isComplete() or not ignore_incomplete:
m_type = grp_fn(msg)
if m_type in m_grp:
m_grp[m_type].append(msg)
else:
m_grp[m_type] = [msg]
return m_grp
def logTiming(basename, ignore_incomplete = False):
"""
Returns a dictionary of Message objects keyed by their ID number.
"""
zero_time = None
messages = {}
for ext in [".5", ".4", ".3", ".2", ".1", ""]:
fname = basename + ".out" + ext
if not os.path.exists(fname):
print(fname, "not found.")
continue
with open(fname) as fp:
for line in fp:
try:
[time, command] = map(lambda x: x.strip(), line.split(":hal4000:INFO:"))
except ValueError:
continue
if zero_time is None:
zero_time = time
# Message queued.
if (command.startswith("queued,")):
[m_id, source, m_type] = command.split(",")[1:]
messages[m_id] = Message(m_type = m_type,
source = source,
time = time,
zero_time = zero_time)
# Message sent.
elif (command.startswith("sent,")):
m_id = command.split(",")[1]
messages[m_id].sent(time)
# Message processed.
elif (command.startswith("processed,")):
m_id = command.split(",")[1]
messages[m_id].processed(time)
elif (command.startswith("worker done,")):
m_id = command.split(",")[1]
messages[m_id].incNWorkers()
# Ignore messages that we don't have all the timing for.
if not ignore_incomplete:
temp = {}
for m_id in messages:
msg = messages[m_id]
if msg.isComplete():
temp[m_id] = msg
return temp
else:
return messages
def processingTime(messages):
"""
Returns the total processing time for a collection of messages.
"""
accum_time = 0
for msg in getIterable(messages):
if isinstance(msg, list):
for elt in msg:
accum_time += elt.getProcessingTime()
else:
accum_time += msg.getProcessingTime()
return accum_time
def queuedTime(messages):
"""
Returns the total queued time for a collection of messages.
"""
accum_time = 0
for msg in getIterable(messages):
if isinstance(msg, list):
for elt in msg:
accum_time += elt.getQueuedTime()
else:
accum_time += msg.getQueuedTime()
return accum_time
if (__name__ == "__main__"):
import sys
if (len(sys.argv) != 2):
print("usage: <log file>")
exit()
messages = logTiming(sys.argv[1])
groups = groupByMsgType(messages)
print()
print("All messages:")
for key in sorted(groups):
grp = groups[key]
print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
print("Total queued time {0:.3f} seconds".format(queuedTime(groups)))
print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
print()
print("Film messages:")
groups = groupByMsgType(groupBySource(messages)["film"])
for key in sorted(groups):
grp = groups[key]
print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
| 27.90458
| 93
| 0.548078
| 843
| 7,311
| 4.637011
| 0.221827
| 0.01407
| 0.015349
| 0.016628
| 0.303914
| 0.271681
| 0.257099
| 0.221796
| 0.174981
| 0.174981
| 0
| 0.007938
| 0.345233
| 7,311
| 261
| 94
| 28.011494
| 0.808648
| 0.169744
| 0
| 0.248276
| 0
| 0
| 0.058793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.02069
| 0.006897
| 0.275862
| 0.075862
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8a9f1f85d541893b6f50e7a4580e2b294f4022fb
| 1,830
|
py
|
Python
|
django_simple_jsonschema/management/commands/check_schema.py
|
38elements/django-simple-jsonschema
|
ab08aaa3453c40a41d443869643113f23eb40db6
|
[
"MIT"
] | 1
|
2017-04-27T20:15:46.000Z
|
2017-04-27T20:15:46.000Z
|
django_simple_jsonschema/management/commands/check_schema.py
|
38elements/django-simple-jsonschema
|
ab08aaa3453c40a41d443869643113f23eb40db6
|
[
"MIT"
] | null | null | null |
django_simple_jsonschema/management/commands/check_schema.py
|
38elements/django-simple-jsonschema
|
ab08aaa3453c40a41d443869643113f23eb40db6
|
[
"MIT"
] | 2
|
2016-02-20T10:53:09.000Z
|
2018-07-12T14:47:01.000Z
|
from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError
import json
class Command(BaseCommand):
can_import_settings = True
@property
def _jsonschema_exist(self):
from django.conf import settings
if not hasattr(settings, 'SIMPLE_JSONSCHEMA'):
return False
return True
@property
def _jsonschema_errors(self):
from django.conf import settings
errors = []
schemas = settings.SIMPLE_JSONSCHEMA
for url, schema in schemas.items():
try:
Draft4Validator.check_schema(schema)
except SchemaError as e:
errors.append({
'url': url,
'error': e,
'schema': json.dumps(schema, indent=4, sort_keys=True)
})
return errors
def handle(self, *args, **options):
success = termcolors.make_style(fg='green')
error = termcolors.make_style(fg='red')
if not self._jsonschema_exist:
not_exist = '[' + error('ERROR') + '] SIMPLE_JSONSCHEMA does not exist in settings.'
self.stdout.write(not_exist)
return
errors = self._jsonschema_errors
if len(errors):
for e in errors:
title = '\n[' + error('ERROR') + '] schema of ' + str(e['url']) + ' is invalid.'
self.stdout.write(title)
self.stdout.write('path: ' + str(list(e['error'].path)))
self.stdout.write('message: ' + e['error'].message)
self.stdout.write('schema:\n' + e['schema'] + '\n')
else:
self.stdout.write('[' + success('SUCCESS') + '] All jsonschemas are OK.')
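For reference, a minimal sketch of the SIMPLE_JSONSCHEMA setting this command validates; the key and schema below are illustrative assumptions, since the command only iterates the mapping and runs Draft4Validator.check_schema() on each value.
# settings.py fragment (illustrative; any mapping of keys to Draft-4 schemas works)
SIMPLE_JSONSCHEMA = {
    ('post', '/articles/'): {
        'type': 'object',
        'properties': {
            'title': {'type': 'string'},
            'body': {'type': 'string'},
        },
        'required': ['title'],
    },
}
# Then validate every schema with: python manage.py check_schema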
| 35.192308
| 96
| 0.572678
| 196
| 1,830
| 5.25
| 0.382653
| 0.058309
| 0.087464
| 0.048591
| 0.062196
| 0.062196
| 0
| 0
| 0
| 0
| 0
| 0.002394
| 0.315301
| 1,830
| 51
| 97
| 35.882353
| 0.818835
| 0
| 0
| 0.088889
| 0
| 0
| 0.10929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.177778
| 0
| 0.377778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa0f73f3e1949691f35856c47f4d0a99caef5b9
| 4,247
|
py
|
Python
|
lib/interface.py
|
keke185321/combine-copy-
|
de2eba77d8db5c9c1908aac1262590b80c2348ce
|
[
"Apache-2.0"
] | null | null | null |
lib/interface.py
|
keke185321/combine-copy-
|
de2eba77d8db5c9c1908aac1262590b80c2348ce
|
[
"Apache-2.0"
] | null | null | null |
lib/interface.py
|
keke185321/combine-copy-
|
de2eba77d8db5c9c1908aac1262590b80c2348ce
|
[
"Apache-2.0"
] | null | null | null |
import cv2, time
import numpy as np
import Tkinter
from PIL import Image, ImageTk  # needed by imshow() below
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""
def resize(*args, **kwargs):
return cv2.resize(*args, **kwargs)
def moveWindow(*args,**kwargs):
return
def imshow(root,args,kwargs):
# Assumes the BGR frame is passed as 'args'; convert it to an RGB PhotoImage
# so it can be shown in a Tkinter label instead of a cv2 window.
image = cv2.cvtColor(args, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
return Tkinter.Label(root, image=image).pack()
#return cv2.imshow(*args,**kwargs)
def destroyWindow(*args,**kwargs):
return cv2.destroyWindow(*args,**kwargs)
def waitKey(*args,**kwargs):
return cv2.waitKey(*args,**kwargs)
"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
"""Stack images horizontally.
"""
h = max(left.shape[0], right.shape[0])
w = left.shape[1] + right.shape[1]
hoff = left.shape[0]
shape = list(left.shape)
shape[0] = h
shape[1] = w
comb = np.zeros(tuple(shape),left.dtype)
# left will be on left, aligned top, with right on right
comb[:left.shape[0],:left.shape[1]] = left
comb[:right.shape[0],left.shape[1]:] = right
return comb
def plotXY(data,size = (280,640),margin = 25,name = "data",labels=[], skip = [],
showmax = [], bg = None,label_ndigits = [], showmax_digits=[]):
for x,y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0]/float(n_plots)
z = np.zeros((size[0],size[1],3))
if isinstance(bg,np.ndarray):
wd = int(bg.shape[1]/bg.shape[0]*h )
bg = cv2.resize(bg,(wd,int(h)))
if len(bg.shape) == 3:
r = combine(bg[:,:,0],z[:,:,0])
g = combine(bg[:,:,1],z[:,:,1])
b = combine(bg[:,:,2],z[:,:,2])
else:
r = combine(bg,z[:,:,0])
g = combine(bg,z[:,:,1])
b = combine(bg,z[:,:,2])
z = cv2.merge([r,g,b])[:,:-wd,]
i = 0
P = []
for x,y in data:
x = np.array(x)
y = -np.array(y)
xx = (w-2*margin)*(x - x.min()) / (x.max() - x.min())+margin
yy = (h-2*margin)*(y - y.min()) / (y.max() - y.min())+margin + i*h
mx = max(yy)
if labels:
if labels[i]:
for ii in range(len(x)):
if ii%skip[i] == 0:
col = (255,255,255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z,ss,(int(xx[ii]),int((i+1)*h)),
cv2.FONT_HERSHEY_PLAIN,1,col)
if showmax:
if showmax[i]:
col = (0,255,0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
#"%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z,ss,(int(xx[ii]),int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN,2,col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx,yy)],np.int32)
i+=1
P.append(pts)
except ValueError:
pass #temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
#hack-y alternative:
for p in P:
for i in range(len(p)-1):
cv2.line(z,tuple(p[i]),tuple(p[i+1]), (255,255,255),1)
return z
#cv2.imshow(name,z)
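A hedged sketch of driving the plotting helper above; the sample signal, the label options, and the import path are assumptions. plotXY() returns the rendered image array rather than displaying it, so the caller decides how to show it.
# Usage sketch (assumed import path for lib/interface.py).
import numpy as np
from lib.interface import plotXY

t = np.linspace(0.0, 10.0, 200)            # sample x data (e.g. seconds)
sig = np.sin(2.0 * np.pi * 1.2 * t)        # sample y data
frame = plotXY([[t, sig]],
               size=(280, 640),
               labels=[True], skip=[20], label_ndigits=[1],
               showmax=["bpm"], showmax_digits=[1])
# 'frame' is a (280, 640, 3) array; e.g. cv2.imshow("data", frame) could display it.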
| 31.932331
| 82
| 0.533318
| 622
| 4,247
| 3.61254
| 0.327974
| 0.040053
| 0.028482
| 0.025367
| 0.078327
| 0.032933
| 0.020472
| 0.020472
| 0
| 0
| 0
| 0.036128
| 0.309159
| 4,247
| 132
| 83
| 32.174242
| 0.729721
| 0.046621
| 0
| 0.072289
| 0
| 0
| 0.007341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084337
| false
| 0.012048
| 0.036145
| 0.048193
| 0.216867
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa1a1e63a87d2e580e76379c3a2ac6b8f3e051d
| 16,125
|
py
|
Python
|
nltk/tag/brill.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | null | null | null |
nltk/tag/brill.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | null | null | null |
nltk/tag/brill.py
|
FGDBTKD/nltk
|
384e46e82789c7f47a7fb521ef976f82c3c4c3fb
|
[
"Apache-2.0"
] | 1
|
2019-10-18T08:58:45.000Z
|
2019-10-18T08:58:45.000Z
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Marcus Uneson <[email protected]>
# based on previous (nltk2) version by
# Christopher Maloof, Edward Loper, Steven Bird
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division
from collections import defaultdict, Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
def describe_template_sets():
"""
Print the available template sets in this demo, with a short description.
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
though, as many low-score rules will appear towards the end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set(int(tid) for tid in tids)
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, str(tpl)))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
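The BrillTagger docstring above notes that these taggers are usually learned from a training corpus with one of the TaggerTrainers; a hedged sketch of that workflow follows (the corpus, baseline tagger, and rule budget are assumptions, and nltk.download('treebank') may be needed first).
# Training sketch: learn a BrillTagger from the Penn Treebank sample.
from nltk.corpus import treebank
from nltk.tag import UnigramTagger
from nltk.tag.brill import brill24
from nltk.tag.brill_trainer import BrillTaggerTrainer

train_sents = treebank.tagged_sents()[:3000]
test_sents = treebank.tagged_sents()[3000:3100]

initial = UnigramTagger(train_sents)                  # baseline tagger to correct
trainer = BrillTaggerTrainer(initial, brill24(), trace=1)
tagger = trainer.train(train_sents, max_rules=100)    # returns a BrillTagger

print(tagger.evaluate(test_sents))                    # tagging accuracy on held-out data
tagger.print_template_statistics()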
| 37.941176
| 107
| 0.57631
| 1,908
| 16,125
| 4.780398
| 0.207547
| 0.043416
| 0.035522
| 0.02423
| 0.329679
| 0.276614
| 0.255126
| 0.242079
| 0.198224
| 0.182655
| 0
| 0.022901
| 0.276961
| 16,125
| 424
| 108
| 38.03066
| 0.759413
| 0.284155
| 0
| 0.37247
| 0
| 0.008097
| 0.094085
| 0.002066
| 0
| 0
| 0
| 0
| 0.004049
| 1
| 0.080972
| false
| 0
| 0.02834
| 0.012146
| 0.194332
| 0.093117
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa1f2759e7626cdb380e9f05aa634b55bf1bbc2
| 7,812
|
py
|
Python
|
superglue_parsers/wsc.py
|
agentsolaris/xlnn
|
0ab07d1ac526cadc2964379aef0a44927e0618eb
|
[
"Apache-2.0"
] | null | null | null |
superglue_parsers/wsc.py
|
agentsolaris/xlnn
|
0ab07d1ac526cadc2964379aef0a44927e0618eb
|
[
"Apache-2.0"
] | null | null | null |
superglue_parsers/wsc.py
|
agentsolaris/xlnn
|
0ab07d1ac526cadc2964379aef0a44927e0618eb
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WSC"
def get_char_index(text, span_text, span_index):
tokens = text.replace("\n", " ").lower().split(" ")
span_tokens = span_text.replace("\n", " ").lower().split(" ")
# Token exact match
if tokens[span_index : span_index + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(span_text)
return st, ed
if span_index < len(tokens):
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_text.startswith(char_in_span):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
# ed = st + len(char_in_span)
ed = st + len(char_in_text)
return st, ed
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_span.startswith(char_in_text):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(char_in_text)
return st, ed
# Index out of range
if span_index >= len(tokens):
span_index -= 10
# Token fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx : idx + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(span_text)
return st, ed
# Token best fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx] == span_tokens[0]:
for length in range(1, len(span_tokens)):
if tokens[idx : idx + length] != span_tokens[:length]:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(" ".join(span_tokens[: length - 1]))
return st, ed
return None
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence text
sentences = []
# span1
span1s = []
# span2
span2s = []
# span1 idx
span1_idxs = []
# span2 idx
span2_idxs = []
# label
labels = []
token1_idxs = []
token2_idxs = []
xlnet_tokens = []
xlnet_token_ids = []
xlnet_token_masks = []
xlnet_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
text = row["text"]
span1_text = row["target"]["span1_text"]
span2_text = row["target"]["span2_text"]
span1_index = row["target"]["span1_index"]
span2_index = row["target"]["span2_index"]
label = row["label"] if "label" in row else True
span1_char_index = get_char_index(text, span1_text, span1_index)
span2_char_index = get_char_index(text, span2_text, span2_index)
assert span1_char_index is not None, f"Check example {index} in {jsonl_path}"
assert span2_char_index is not None, f"Check example {index} in {jsonl_path}"
# Tokenize sentences
xlnet_tokens_sub1 = tokenizer.tokenize(
text[: min(span1_char_index[0], span2_char_index[0])]
)
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
token1_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
else:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
token2_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
sub3_st = (
span1_char_index[1]
if span1_char_index[0] < span2_char_index[0]
else span2_char_index[1]
)
sub3_ed = (
span1_char_index[0]
if span1_char_index[0] > span2_char_index[0]
else span2_char_index[0]
)
xlnet_tokens_sub3 = tokenizer.tokenize(text[sub3_st:sub3_ed])
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token2_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
else:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token1_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span2_char_index[1] :])
else:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span1_char_index[1] :])
tokens = (
["[CLS]"]
+ xlnet_tokens_sub1
+ xlnet_tokens_sub2
+ xlnet_tokens_sub3
+ xlnet_tokens_sub4
+ xlnet_tokens_sub5
+ ["[SEP]"]
)
if len(tokens) > max_len:
max_len = len(tokens)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_segments = [0] * len(token_ids)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
token1_idxs.append(token1_idx)
token2_idxs.append(token2_idx)
sentences.append(text)
span1s.append(span1_text)
span2s.append(span2_text)
span1_idxs.append(span1_index)
span2_idxs.append(span2_index)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
xlnet_tokens.append(tokens)
xlnet_token_ids.append(torch.LongTensor(token_ids))
xlnet_token_masks.append(torch.LongTensor(token_masks))
xlnet_token_segments.append(torch.LongTensor(token_segments))
token1_idxs = torch.from_numpy(np.array(token1_idxs))
token2_idxs = torch.from_numpy(np.array(token2_idxs))
labels = torch.from_numpy(np.array(labels))
logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence": sentences,
"span1": span1s,
"span2": span2s,
"span1_idx": span1_idxs,
"span2_idx": span2_idxs,
"token1_idx": token1_idxs,
"token2_idx": token2_idxs,
"tokens": xlnet_tokens,
"token_ids": xlnet_token_ids,
"token_masks": xlnet_token_masks,
"token_segments": xlnet_token_segments,
},
Y_dict={"labels": labels},
)
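A small, hedged illustration of get_char_index() defined above; the sentence is an assumption, chosen so that the token-exact-match branch applies.
# Example: map a whitespace-token index back to character offsets in the text
# (assumes get_char_index from the module above is in scope).
text = "The city councilmen refused the demonstrators a permit"
st, ed = get_char_index(text, "the demonstrators", 4)
print(st, ed, text[st:ed])   # 28 45 "the demonstrators"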
| 33.101695
| 88
| 0.592422
| 996
| 7,812
| 4.344378
| 0.13755
| 0.068639
| 0.041599
| 0.031199
| 0.510515
| 0.458516
| 0.403282
| 0.403282
| 0.393113
| 0.373007
| 0
| 0.031467
| 0.300307
| 7,812
| 235
| 89
| 33.242553
| 0.760154
| 0.059268
| 0
| 0.24
| 0
| 0
| 0.056753
| 0
| 0
| 0
| 0
| 0
| 0.011429
| 1
| 0.011429
| false
| 0
| 0.04
| 0
| 0.091429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa22dad95839c5aa4e52f5c6ec5b084424226d6
| 1,534
|
py
|
Python
|
simplimental/simplimental.py
|
TimmyCarbone/simplimental
|
e46a0e63ce33e36b1e4ca3a473ad15d0732614ed
|
[
"MIT"
] | 2
|
2015-11-25T15:12:05.000Z
|
2017-06-22T16:36:58.000Z
|
simplimental/simplimental.py
|
TimmyCarbone/simplimental
|
e46a0e63ce33e36b1e4ca3a473ad15d0732614ed
|
[
"MIT"
] | null | null | null |
simplimental/simplimental.py
|
TimmyCarbone/simplimental
|
e46a0e63ce33e36b1e4ca3a473ad15d0732614ed
|
[
"MIT"
] | null | null | null |
import re
import json
__all__ = ["Simplimental"]
class Simplimental:
def __init__(self, text="This is not a bad idea"):
self.text = text
with open('simplimental/data/afinn.json') as data_file:
self.dictionary = json.load(data_file)
no_punctuation = re.sub(r"[^a-zA-Z ]+", " ", self.text)
self.tokens = no_punctuation.lower().split(" ")
# Drop very short tokens, but keep "no" since it negates the following word.
self.tokens = [t for t in self.tokens if len(t) >= 3 or t in ["no"]]
def negativity(self):
hits = 0
words = []
for i in range(len(self.tokens)):
word = self.tokens[i]
score = self.dictionary.get(word, 0)
if i > 0 and self.tokens[i-1] in ["no", "not"]:
word = "not_" + word
score = -score if score > 0 else 0
if score < 0:
hits -= score
words.append(word)
return {
"score": hits,
"comparative": float(hits) / len(self.tokens),
"words": words
}
def positivity(self):
hits = 0
words = []
for i in range(len(self.tokens)):
word = self.tokens[i]
score = self.dictionary.get(word, 0)
if i > 0 and self.tokens[i-1] in ["no", "not"]:
word = "not_" + word
score = -score if score < 0 else 0
if score > 0:
hits += score
words.append(word)
return {
"score": hits,
"comparative": float(hits) / len(self.tokens),
"words": words
}
def analyze(self):
negativity = self.negativity()
positivity = self.positivity()
return {
"score": positivity["score"] - negativity["score"],
"comparative": positivity["comparative"] - negativity["comparative"],
}
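A hedged usage sketch for the class above; the sentence is arbitrary, and the import path assumes the simplimental/simplimental.py layout with the bundled AFINN word list reachable from the working directory (the class opens it with a relative path).
from simplimental.simplimental import Simplimental

s = Simplimental("This is not a bad idea")
print(s.negativity())   # {'score': ..., 'comparative': ..., 'words': [...]}
print(s.positivity())
print(s.analyze())      # overall score = positivity - negativity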
| 21.605634
| 72
| 0.612777
| 216
| 1,534
| 4.287037
| 0.268519
| 0.118791
| 0.056156
| 0.030238
| 0.50324
| 0.50324
| 0.50324
| 0.50324
| 0.50324
| 0.50324
| 0
| 0.012648
| 0.226858
| 1,534
| 70
| 73
| 21.914286
| 0.768128
| 0
| 0
| 0.462963
| 0
| 0
| 0.1206
| 0.018253
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.037037
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa2d7e8d015afdc94844a8b1cce4b350015d579
| 3,637
|
py
|
Python
|
Python/Examples/Macros/SettingsAxesOptimization.py
|
archformco/RoboDK-API
|
b3d0cad6a83f505811e2be273453ccb4579324f1
|
[
"MIT"
] | 161
|
2018-03-23T01:27:08.000Z
|
2022-03-23T12:18:35.000Z
|
Python/Examples/Macros/SettingsAxesOptimization.py
|
OxideDevX/RoboDK-API
|
50357c38b2fcf58cf82d9b7bf61021cb900fd358
|
[
"MIT"
] | 26
|
2018-11-19T10:18:58.000Z
|
2022-03-28T18:37:11.000Z
|
Python/Examples/Macros/SettingsAxesOptimization.py
|
OxideDevX/RoboDK-API
|
50357c38b2fcf58cf82d9b7bf61021cb900fd358
|
[
"MIT"
] | 85
|
2018-03-22T19:25:35.000Z
|
2022-03-30T04:46:59.000Z
|
# This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html
from robolink import * # RoboDK API
# JSON tools
import json
# Start the RoboDK API
RDK = Robolink()
# Ask the user to select a robot arm (6-axis robot which can have external axes)
robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM)
# Default optimization settings test template
AxesOptimSettings = {
# Optimization parameters:
"Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled
"Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
"MaxIter": 650, # Max. number of iterations
"Tol": 0.0016, # Tolerance to stop iterations
# Absolute Reference joints (double):
"AbsJnt_1": 104.17,
"AbsJnt_2": 11.22,
"AbsJnt_3": 15.97,
"AbsJnt_4": -87.48,
"AbsJnt_5": -75.36,
"AbsJnt_6": 63.03,
"AbsJnt_7": 174.13,
"AbsJnt_8": 173.60,
"AbsJnt_9": 0,
# Using Absolute reference joints (0: No, 1: Yes):
"AbsOn_1": 1,
"AbsOn_2": 1,
"AbsOn_3": 1,
"AbsOn_4": 1,
"AbsOn_5": 1,
"AbsOn_6": 1,
"AbsOn_7": 1,
"AbsOn_8": 1,
"AbsOn_9": 1,
# Weight for absolute reference joints (double):
"AbsW_1": 100,
"AbsW_2": 100,
"AbsW_3": 100,
"AbsW_4": 89,
"AbsW_5": 90,
"AbsW_6": 92,
"AbsW_7": 92,
"AbsW_8": 96,
"AbsW_9": 50,
# Using for relative joint motion smoothing (0: No, 1: Yes):
"RelOn_1": 1,
"RelOn_2": 1,
"RelOn_3": 1,
"RelOn_4": 1,
"RelOn_5": 1,
"RelOn_6": 1,
"RelOn_7": 1,
"RelOn_8": 1,
"RelOn_9": 1,
# Weight for relative joint motion (double):
"RelW_1": 5,
"RelW_2": 47,
"RelW_3": 44,
"RelW_4": 43,
"RelW_5": 36,
"RelW_6": 47,
"RelW_7": 53,
"RelW_8": 59,
"RelW_9": 0,
}
# Update one value, for example, make it active:
ToUpdate = {}
ToUpdate["Active"] = 1
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Example to make a partial or full update
count = 1
while True:
for i in range(7):
# Partial update
ToUpdate = {}
ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4
ToUpdate["AbsOn_" + str(i+1)] = count % 2
ToUpdate["AbsW_" + str(i+1)] = (count+i)
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Full update
#OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4
#OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i)
#OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2
# Full update
#print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
count = count + 1
# Read settings
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
# Example to read the current axes optimization settings:
while True:
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
| 28.414063
| 133
| 0.62854
| 532
| 3,637
| 4.167293
| 0.31015
| 0.021651
| 0.013532
| 0.027064
| 0.204781
| 0.167794
| 0.156969
| 0.156969
| 0.156969
| 0.156969
| 0
| 0.063401
| 0.236734
| 3,637
| 127
| 134
| 28.637795
| 0.735231
| 0.428375
| 0
| 0.225
| 0
| 0
| 0.200098
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025
| 0
| 0.025
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa372fac8202953aac93a2529989a1508f2b506
| 1,072
|
py
|
Python
|
tests/test_grammar.py
|
Vipul97/SLR-Parser
|
3de5609235d173d29ad9bd9ed7bdfe2a813ab1bd
|
[
"MIT"
] | 5
|
2018-10-30T04:09:46.000Z
|
2020-03-17T04:47:06.000Z
|
tests/test_grammar.py
|
Vipul97/SLR-Parser
|
3de5609235d173d29ad9bd9ed7bdfe2a813ab1bd
|
[
"MIT"
] | null | null | null |
tests/test_grammar.py
|
Vipul97/SLR-Parser
|
3de5609235d173d29ad9bd9ed7bdfe2a813ab1bd
|
[
"MIT"
] | 5
|
2019-06-16T20:16:46.000Z
|
2020-04-14T06:44:32.000Z
|
from slr_parser.grammar import Grammar
import unittest
class TestGrammar(unittest.TestCase):
def test_grammar(self):
with open('tests/test_grammar.txt') as grammar_file:
self.G = Grammar(grammar_file.read())
self.assertDictEqual(
{'E': {('E', '+', 'T'), ('T',)}, 'T': {('T', '*', 'F'), ('F',)}, 'F': {('(', 'E', ')'), ('id',)}},
self.G.grammar)
self.assertEqual('E', self.G.start)
self.assertSetEqual({'+', '*', '(', ')', 'id'}, self.G.terminals)
self.assertSetEqual({'E', 'T', 'F'}, self.G.nonterminals)
self.assertSetEqual({'+', '*', '(', ')', 'id', 'E', 'T', 'F'}, self.G.symbols)
self.grammar_str = ["""E -> E + T
e -> T
T -> T * F | F
F -> ( E )
F -> id""", """E -> E ^ + T
E -> T
T -> T * F | F
F -> ( E )
F -> id"""]
with self.assertRaises(ValueError):
Grammar(self.grammar_str[0])
with self.assertRaises(ValueError):
Grammar(self.grammar_str[1])
if __name__ == '__main__':
unittest.main()
| 29.777778
| 114
| 0.488806
| 128
| 1,072
| 3.96875
| 0.304688
| 0.027559
| 0.023622
| 0.023622
| 0.301181
| 0.269685
| 0.269685
| 0.255906
| 0.055118
| 0.055118
| 0
| 0.002558
| 0.270522
| 1,072
| 35
| 115
| 30.628571
| 0.647059
| 0
| 0
| 0.214286
| 0
| 0
| 0.157649
| 0.020522
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.035714
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa613f84bb4cdd381d01e4e99ee1eab1597c53c
| 1,732
|
py
|
Python
|
tests/test_merge.py
|
jmerizia/parallel-pytorch
|
d27b2fd145d25f1329a039c99b8895783bfc71e5
|
[
"MIT"
] | null | null | null |
tests/test_merge.py
|
jmerizia/parallel-pytorch
|
d27b2fd145d25f1329a039c99b8895783bfc71e5
|
[
"MIT"
] | null | null | null |
tests/test_merge.py
|
jmerizia/parallel-pytorch
|
d27b2fd145d25f1329a039c99b8895783bfc71e5
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
from mpi4py import MPI
from parallel_pytorch.ops import tensor_merge
from parallel_pytorch.utils import abort_on_exception
@abort_on_exception
def test_1():
worker_shape = [2, 2]
world = MPI.COMM_WORLD
num_workers = np.array(worker_shape).prod()
comm = MPI.COMM_WORLD.Split(color=0 if world.Get_rank() < num_workers else 1, key=world.Get_rank())
if world.Get_rank() < num_workers:
if comm.Get_rank() == 0:
x = torch.tensor([[0, 1], [4, 5]])
elif comm.Get_rank() == 1:
x = torch.tensor([[2, 3], [6, 7]])
elif comm.Get_rank() == 2:
x = torch.tensor([[8, 9], [12, 13]])
elif comm.Get_rank() == 3:
x = torch.tensor([[10, 11], [14, 15]])
x = tensor_merge(x, comm=comm, worker_shape=worker_shape)
if comm.Get_rank() == 0:
e = torch.tensor([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15],
])
assert torch.allclose(x, e), f'{x} != {e}'
@abort_on_exception
def test_2():
x_shape = [2, 2]
worker_shape = [1, 1]
world = MPI.COMM_WORLD
num_workers = np.array(worker_shape).prod()
comm = MPI.COMM_WORLD.Split(color=0 if world.Get_rank() < num_workers else 1, key=world.Get_rank())
if world.Get_rank() < num_workers:
volume = np.array(x_shape).prod()
x = torch.arange(volume).view(x_shape)
x = tensor_merge(x, comm=comm, worker_shape=worker_shape)
e = torch.tensor([[0, 1], [2, 3]])
assert torch.allclose(x, e), f'{x} != {e}'
def run_all():
test_1()
test_2()
if __name__ == '__main__':
run_all()
| 29.355932
| 103
| 0.561778
| 261
| 1,732
| 3.509579
| 0.241379
| 0.084061
| 0.078603
| 0.061135
| 0.576419
| 0.495633
| 0.495633
| 0.460699
| 0.408297
| 0.408297
| 0
| 0.054575
| 0.2806
| 1,732
| 58
| 104
| 29.862069
| 0.680578
| 0
| 0
| 0.340426
| 0
| 0
| 0.016166
| 0
| 0
| 0
| 0
| 0
| 0.042553
| 1
| 0.06383
| false
| 0
| 0.106383
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa6533a09d6a4b3ba6f06626bf481622c2da357
| 542
|
py
|
Python
|
day07/main.py
|
tebriel/aoc2021
|
65ca19be3ad66dc52eee9ca31cf12306695a24e9
|
[
"Unlicense"
] | null | null | null |
day07/main.py
|
tebriel/aoc2021
|
65ca19be3ad66dc52eee9ca31cf12306695a24e9
|
[
"Unlicense"
] | null | null | null |
day07/main.py
|
tebriel/aoc2021
|
65ca19be3ad66dc52eee9ca31cf12306695a24e9
|
[
"Unlicense"
] | null | null | null |
"""Day 07"""
def process(filename):
with open(filename) as infile:
positions = [int(x) for x in infile.readline().strip().split(',')]
min_x = min(positions)
max_x = max(positions)
costs = {x: 0 for x in range(min_x, max_x + 1)}
for pos in costs.keys():
for crab in positions:
distance = abs(pos - crab)
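# Moving n steps costs 1 + 2 + ... + n = n*(n+1)/2 fuel (triangular number).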
costs[pos] += ((distance * distance) + distance) // 2
print(f"Day 07: {min(costs.values())}")
if __name__ == '__main__':
process('test.txt')
process('input.txt')
| 25.809524
| 74
| 0.573801
| 75
| 542
| 3.986667
| 0.506667
| 0.033445
| 0.040134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017413
| 0.258303
| 542
| 20
| 75
| 27.1
| 0.726368
| 0.01107
| 0
| 0
| 0
| 0
| 0.103774
| 0.039623
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.071429
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa6ff7f14bd0c2736eb3afb641dd73452250888
| 1,276
|
py
|
Python
|
src/ceres_infer/utils.py
|
pritchardlabatpsu/cga
|
0a71c672b1348cebc724560643fd908d636fc133
|
[
"MIT"
] | null | null | null |
src/ceres_infer/utils.py
|
pritchardlabatpsu/cga
|
0a71c672b1348cebc724560643fd908d636fc133
|
[
"MIT"
] | null | null | null |
src/ceres_infer/utils.py
|
pritchardlabatpsu/cga
|
0a71c672b1348cebc724560643fd908d636fc133
|
[
"MIT"
] | 1
|
2022-02-08T01:06:20.000Z
|
2022-02-08T01:06:20.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
utilities
@author: boyangzhao
"""
import pandas as pd
import re
def int2ordinal(n):
# partially based on https://stackoverflow.com/questions/9647202/ordinal-numbers-replacement
if (type(n) is int) or n.isdigit():
if type(n) is not int:
n = int(n)
return "%d%s"%(n,{1:"st",2:"nd",3:"rd"}.get(n if n<20 else n%10,"th"))
else:
return n
def getFeatGene(x, firstOnly = False):
# get gene
if pd.isnull(x):
return ''
r = re.findall('([^,\()]*)\s(\(([^,]*)\)\s)*\[([^,]*)\]',x)
if firstOnly:
return r[0][0]
else:
return [n[0] for n in r]
def getFeatSource(x, firstOnly = False):
# get the data source
if(pd.isnull(x)):
return ''
r = re.findall('[^,\()]*\s(\([^,]*\)\s)*\[([^,]*)\]',x)
if firstOnly:
return [n[1] for n in r][0]
else:
return [n[1] for n in r]
def pd_filter(df, idx):
# filters a pandas data frame, given idx
# this is a safe filter such that if one of the idx is not found, they are ignored
if idx is None:
return df
if type(idx) is not list:
idx = [idx]
idx = [n for n in idx if n in df.index]
return df.loc[idx, :]
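A hedged illustration of the helpers above; the feature string follows the "GENE (alias) [SOURCE]" pattern the regular expressions expect, and the concrete values are made up.
feat = "KRAS (K-Ras) [CERES]"
print(int2ordinal(2))                        # "2nd"
print(int2ordinal(11))                       # "11th"
print(getFeatGene(feat, firstOnly=True))     # "KRAS"
print(getFeatSource(feat, firstOnly=True))   # "CERES"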
| 24.075472
| 96
| 0.530564
| 198
| 1,276
| 3.414141
| 0.434343
| 0.022189
| 0.035503
| 0.031065
| 0.193787
| 0.174556
| 0.174556
| 0.139053
| 0.139053
| 0.139053
| 0
| 0.025219
| 0.285266
| 1,276
| 52
| 97
| 24.538462
| 0.716009
| 0.245298
| 0
| 0.21875
| 0
| 0
| 0.090622
| 0.077977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa76a43878c4baa56da24cd2df4e08dd1f12800
| 4,779
|
py
|
Python
|
MAIN/Screens/Settings/category_2/__init__.py
|
aragubas/fogoso
|
bd24e049ee994410320e87fb3706c95bd8c9801f
|
[
"Apache-2.0"
] | null | null | null |
MAIN/Screens/Settings/category_2/__init__.py
|
aragubas/fogoso
|
bd24e049ee994410320e87fb3706c95bd8c9801f
|
[
"Apache-2.0"
] | null | null | null |
MAIN/Screens/Settings/category_2/__init__.py
|
aragubas/fogoso
|
bd24e049ee994410320e87fb3706c95bd8c9801f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3.7
# Copyright 2020 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# -- Imports -- #
from ENGINE import APPDATA as reg
from ENGINE import UTILS as utils
import ENGINE as tge
from Fogoso.MAIN import ClassesUtils as gameObjs
from Fogoso import MAIN as gameMain
import pygame, sys
import importlib
import time
from random import randint
OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton
OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton
OptionsScreen_NumberFormatting = gameObjs.UpDownButton
ElementsX = 0
ElementsY = 0
def Initialize():
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton(0,0,14)
OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton(0,0,14)
OptionsScreen_NumberFormatting = gameObjs.UpDownButton(0,0,14)
def Update():
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
global ElementsX
global ElementsY
if OptionsScreen_DebugModeEnabled.ButtonState == 2 or OptionsScreen_DebugModeEnabled.ButtonState == 1:
current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/debug_enabled", bool)
if current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/debug_enabled", "False")
if not current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/debug_enabled", "True")
if OptionsScreen_RandomWindowTitle.ButtonState == 2 or OptionsScreen_RandomWindowTitle.ButtonState == 1:
current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/random_title", bool)
if current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/random_title", "False")
if not current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/random_title", "True")
if OptionsScreen_NumberFormatting.ButtonState == 2 or OptionsScreen_NumberFormatting.ButtonState == 1:
current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/format_numbers", bool)
if current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/format_numbers", "False")
if not current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/format_numbers", "True")
OptionsScreen_DebugModeEnabled.Set_X(ElementsX + 20)
OptionsScreen_RandomWindowTitle.Set_X(ElementsX + 20)
OptionsScreen_NumberFormatting.Set_X(ElementsX + 20)
OptionsScreen_DebugModeEnabled.Set_Y(ElementsY + 50)
OptionsScreen_RandomWindowTitle.Set_Y(ElementsY + 75)
OptionsScreen_NumberFormatting.Set_Y(ElementsY + 100)
def Render(DISPLAY):
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
OptionsScreen_DebugModeEnabled.Render(DISPLAY)
OptionsScreen_RandomWindowTitle.Render(DISPLAY)
OptionsScreen_NumberFormatting.Render(DISPLAY)
# -- Debug Mode -- #
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/debug_mode") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/debug_enabled")), (240, 240, 240), ElementsX + 95, ElementsY + 52, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa"))
# -- Random Title -- #
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/random_title") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/random_title")), (240, 240, 240), ElementsX + 95, ElementsY + 77, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa"))
# -- Number Formatting -- #
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/number_formatting") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/format_numbers")), (240, 240, 240), ElementsX + 95, ElementsY + 102, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa"))
def EventUpdate(event):
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
OptionsScreen_DebugModeEnabled.Update(event)
OptionsScreen_RandomWindowTitle.Update(event)
OptionsScreen_NumberFormatting.Update(event)
| 42.669643
| 306
| 0.765432
| 527
| 4,779
| 6.776091
| 0.26945
| 0.105853
| 0.070568
| 0.090731
| 0.540185
| 0.509941
| 0.462335
| 0.380006
| 0.380006
| 0.239429
| 0
| 0.022555
| 0.146474
| 4,779
| 112
| 307
| 42.669643
| 0.852905
| 0.138523
| 0
| 0.264706
| 0
| 0
| 0.118006
| 0.087222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.132353
| 0
| 0.191176
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aa8401fd27f8fa99c12308b325e2e4f0cfa3068
| 2,872
|
py
|
Python
|
tests/test.py
|
kjanik70/tflearn
|
db5176773299b67a2a75c5889fb2aba7fd0fea8a
|
[
"MIT"
] | 10,882
|
2016-03-31T16:03:11.000Z
|
2022-03-26T03:00:27.000Z
|
tests/test.py
|
min0355/tflearn
|
db5176773299b67a2a75c5889fb2aba7fd0fea8a
|
[
"MIT"
] | 1,079
|
2016-04-02T06:14:16.000Z
|
2022-02-27T10:04:47.000Z
|
tests/test.py
|
min0355/tflearn
|
db5176773299b67a2a75c5889fb2aba7fd0fea8a
|
[
"MIT"
] | 3,014
|
2016-03-31T16:03:26.000Z
|
2022-03-30T20:36:53.000Z
|
'''
This file contains test cases for tflearn
'''
import tensorflow.compat.v1 as tf
import tflearn
import unittest
class TestActivations(unittest.TestCase):
'''
This class contains test cases for the functions in tflearn/activations.py
'''
PLACES = 4 # Number of places to match when testing floating point values
def test_linear(self):
f = tflearn.linear
# Case 1
x = tf.placeholder(tf.float32, shape=())
self.assertEqual(f(x), x)
# Case 2
x = tf.placeholder(tf.int64, shape=())
self.assertEqual(f(x), x)
def test_tanh(self):
f = tflearn.tanh
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:0.5}),
0.4621, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-0.25}),
-0.2449, places=TestActivations.PLACES)
def test_leaky_relu(self):
f = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:1}),
1, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-1}),
-0.2, places=TestActivations.PLACES)
# Case 4
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-5}),
-1, places=TestActivations.PLACES)
def test_apply_activation(self):
lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.constant(-0.25, tf.float32)
with tf.Session() as sess:
# Case 1: 'linear'
self.assertEqual(
sess.run(tflearn.activation(x, 'linear')),
-0.25)
# Case 2: 'relu'
self.assertEqual(
sess.run(tflearn.activation(x, 'relu')),
0)
# Case 3: 'leaky_relu'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'leaky_relu')),
-0.025, places=TestActivations.PLACES)
# Case 4: 'tanh'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'tanh')),
-0.2449, places=TestActivations.PLACES)
# Case 5: lrelu_02 (callable)
self.assertAlmostEqual(
sess.run(tflearn.activation(x, lrelu_02)),
-0.05, places=TestActivations.PLACES)
if __name__ == "__main__":
unittest.main()
| 30.88172
| 82
| 0.547354
| 344
| 2,872
| 4.488372
| 0.218023
| 0.054404
| 0.129534
| 0.145078
| 0.653497
| 0.533031
| 0.488342
| 0.331606
| 0.331606
| 0.306347
| 0
| 0.04186
| 0.326253
| 2,872
| 93
| 83
| 30.88172
| 0.756072
| 0.11734
| 0
| 0.326923
| 0
| 0
| 0.012846
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 1
| 0.076923
| false
| 0
| 0.057692
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aaa6ef648c6ab0a8f38e3df5ebf0a4f712b233a
| 2,313
|
py
|
Python
|
infrastructure-provisioning/src/general/api/install_libs.py
|
roolrd/incubator-datalab
|
2045207ecd1b381193f1a1ec143cc968716ad989
|
[
"Apache-2.0"
] | 66
|
2020-10-03T08:36:48.000Z
|
2022-03-20T23:16:20.000Z
|
infrastructure-provisioning/src/general/api/install_libs.py
|
roolrd/incubator-datalab
|
2045207ecd1b381193f1a1ec143cc968716ad989
|
[
"Apache-2.0"
] | 48
|
2019-02-28T12:11:33.000Z
|
2020-09-15T08:27:08.000Z
|
infrastructure-provisioning/src/general/api/install_libs.py
|
roolrd/incubator-datalab
|
2045207ecd1b381193f1a1ec143cc968716ad989
|
[
"Apache-2.0"
] | 44
|
2019-01-14T10:31:55.000Z
|
2020-09-22T17:53:33.000Z
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import os
import sys
import subprocess
if __name__ == "__main__":
success = True
try:
subprocess.run('cd /root; fab install-libs', shell=True, check=True)
except:
success = False
reply = dict()
reply['request_id'] = os.environ['request_id']
if success:
reply['status'] = 'ok'
else:
reply['status'] = 'err'
reply['response'] = dict()
try:
with open("/root/result.json") as f:
reply['response']['result'] = json.loads(f.read())
except:
reply['response']['result'] = {"error": "Failed to open result.json"}
reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'],
os.environ['project_name'],
os.environ['request_id'])
with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id']), 'w') as response_file:
response_file.write(json.dumps(reply))
try:
subprocess.run('chmod 666 /response/*', shell=True, check=True)
except:
success = False
if not success:
sys.exit(1)
| 35.584615
| 105
| 0.565932
| 267
| 2,313
| 4.820225
| 0.47191
| 0.048951
| 0.037296
| 0.041958
| 0.156954
| 0.156954
| 0.156954
| 0.10101
| 0.10101
| 0.10101
| 0
| 0.007545
| 0.25508
| 2,313
| 65
| 106
| 35.584615
| 0.739408
| 0.400778
| 0
| 0.235294
| 0
| 0
| 0.232064
| 0.043192
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aab4acf40735c2dc3547887c3be02d0b2808eff
| 1,584
|
py
|
Python
|
model_zoo/official/nlp/bert_thor/src/evaluation_config.py
|
GuoSuiming/mindspore
|
48afc4cfa53d970c0b20eedfb46e039db2a133d5
|
[
"Apache-2.0"
] | 55
|
2020-12-17T10:26:06.000Z
|
2022-03-28T07:18:26.000Z
|
model_zoo/official/nlp/bert_thor/src/evaluation_config.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 1
|
2020-12-29T06:46:38.000Z
|
2020-12-29T06:46:38.000Z
|
model_zoo/official/nlp/bert_thor/src/evaluation_config.py
|
forwhat461/mindspore
|
59a277756eb4faad9ac9afcc7fd526e8277d4994
|
[
"Apache-2.0"
] | 14
|
2021-01-29T02:39:47.000Z
|
2022-03-23T05:00:26.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig
cfg = edict({
'task': 'NER',
'num_labels': 41,
'data_file': '',
'schema_file': None,
'finetune_ckpt': '',
'use_crf': False,
'clue_benchmark': False,
})
bert_net_cfg = BertConfig(
batch_size=8 if not cfg.clue_benchmark else 1,
seq_length=512,
vocab_size=30522,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
input_mask_from_dataset=True,
token_type_ids_from_dataset=True,
dtype=mstype.float32,
compute_type=mstype.float16,
)
| 28.8
| 78
| 0.693813
| 219
| 1,584
| 4.835616
| 0.643836
| 0.056657
| 0.024551
| 0.030217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035823
| 0.171717
| 1,584
| 54
| 79
| 29.333333
| 0.771341
| 0.431818
| 0
| 0
| 0
| 0
| 0.085324
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.09375
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aad801ac3abc226337a71ef38e5ff434b1f3490
| 1,052
|
py
|
Python
|
portal/apps/core/management/commands/sync_articleviewedby.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 8
|
2020-12-15T17:11:08.000Z
|
2021-12-13T22:08:33.000Z
|
portal/apps/core/management/commands/sync_articleviewedby.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 28
|
2020-12-15T17:34:03.000Z
|
2022-02-01T04:09:10.000Z
|
portal/apps/core/management/commands/sync_articleviewedby.py
|
Artis-Physis/utopia-cms
|
5cb8d941d0b2df53fddc566a52e9d3baee4a007e
|
[
"BSD-3-Clause"
] | 7
|
2020-12-15T19:59:17.000Z
|
2021-11-24T16:47:06.000Z
|
# -*- coding: utf-8 -*-
# utopia-cms 2020. Aníbal Pacheco.
from django.core.management import BaseCommand
from django.db.utils import IntegrityError
from apps import core_articleviewedby_mdb
from core.models import ArticleViewedBy
class Command(BaseCommand):
help = "Moves article viewed by data from mongodb to Django model"
def handle(self, *args, **options):
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
while mdb_view:
try:
avb = ArticleViewedBy.objects.get(article=mdb_view['article'], user=mdb_view['user'])
avb.viewed_at = mdb_view['viewed_at']
avb.save()
except ArticleViewedBy.DoesNotExist:
try:
ArticleViewedBy.objects.create(
article_id=mdb_view['article'], user_id=mdb_view['user'], viewed_at=mdb_view['viewed_at'])
except IntegrityError:
pass
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
| 37.571429
| 114
| 0.640684
| 121
| 1,052
| 5.347107
| 0.46281
| 0.097372
| 0.102009
| 0.080371
| 0.225657
| 0.225657
| 0.15456
| 0.15456
| 0.15456
| 0.15456
| 0
| 0.006477
| 0.26616
| 1,052
| 27
| 115
| 38.962963
| 0.831606
| 0.051331
| 0
| 0.2
| 0
| 0
| 0.097487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0.05
| 0.2
| 0
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8aad8dc0d7dead55101c7087ad08700bb763b130
| 7,900
|
py
|
Python
|
examples/minkunet.py
|
dendisuhubdy/MinkowskiEngine
|
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
|
[
"MIT"
] | 1
|
2019-05-12T00:06:10.000Z
|
2019-05-12T00:06:10.000Z
|
examples/minkunet.py
|
dendisuhubdy/MinkowskiEngine
|
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
|
[
"MIT"
] | null | null | null |
examples/minkunet.py
|
dendisuhubdy/MinkowskiEngine
|
a1cdcba68ef925bfefed2fe161f62e1ec78573b9
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.common import data_loader
from examples.resnet import ResNetBase
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, in_channels, out_channels, D=3):
ResNetBase.__init__(self, in_channels, out_channels, D)
def network_initialization(self, in_channels, out_channels, D):
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7],
out_channels,
kernel_size=1,
has_bias=True,
dimension=D)
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat((out, out_b3p8))
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat((out, out_b2p4))
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat((out, out_b1p2))
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat((out, out_p1))
out = self.block8(out)
return self.final(out)
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
if __name__ == '__main__':
# loss and network
criterion = nn.CrossEntropyLoss()
net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-2)
for i in range(10):
optimizer.zero_grad()
# Get new data
coords, feat, label = data_loader(is_classification=False)
input = ME.SparseTensor(feat, coords=coords).to(device)
label = label.to(device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, label)
print('Iteration: ', i, ', Loss: ', loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), 'test.pth')
net.load_state_dict(torch.load('test.pth'))
| 30.501931
| 80
| 0.603291
| 1,006
| 7,900
| 4.651093
| 0.191849
| 0.046377
| 0.011541
| 0.011113
| 0.446463
| 0.368241
| 0.290447
| 0.209446
| 0.163283
| 0.142766
| 0
| 0.087391
| 0.277215
| 7,900
| 258
| 81
| 30.620155
| 0.732049
| 0.053797
| 0
| 0.161677
| 0
| 0
| 0.006704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017964
| false
| 0
| 0.041916
| 0
| 0.317365
| 0.011976
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ab02ecbf400acde29e043cc50c322067db1b570
| 1,654
|
py
|
Python
|
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py
|
Preethinaidu14/greyatom-python-for-data-science
|
5b758dd6123d9fc50031c43771b30d69e366c044
|
[
"MIT"
] | null | null | null |
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py
|
Preethinaidu14/greyatom-python-for-data-science
|
5b758dd6123d9fc50031c43771b30d69e366c044
|
[
"MIT"
] | null | null | null |
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py
|
Preethinaidu14/greyatom-python-for-data-science
|
5b758dd6123d9fc50031c43771b30d69e366c044
|
[
"MIT"
] | null | null | null |
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
path  # NOTE (assumption): `path` is expected to be predefined by the exercise environment as the CSV file path
# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop('Loan_ID',axis = 1)
print(banks)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillna(bank_mode)
#code ends here
# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here
# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).value_counts()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).value_counts()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply (lambda x : int(x)/12)
print(loan_term.value_counts())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = len(big_loan)
print(big_loan_term)
#[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25]
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby['ApplicantIncome','Credit_History']
mean_values = loan_groupby.mean()
# code ends here
| 19.458824
| 105
| 0.688634
| 239
| 1,654
| 4.527197
| 0.351464
| 0.051756
| 0.077634
| 0.073937
| 0.260628
| 0.212569
| 0.16451
| 0.081331
| 0.081331
| 0
| 0
| 0.015193
| 0.124547
| 1,654
| 84
| 106
| 19.690476
| 0.732044
| 0.255744
| 0
| 0
| 0
| 0
| 0.137531
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.103448
| 0
| 0.103448
| 0.241379
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ab2d6d56bce4e65f9e2921fdc0ec8fdc7ecb7fb
| 855
|
py
|
Python
|
venv/Lib/site-packages/patsy/test_regressions.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 710
|
2015-01-07T20:08:59.000Z
|
2022-03-08T14:30:13.000Z
|
venv/Lib/site-packages/patsy/test_regressions.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 142
|
2015-01-07T02:20:27.000Z
|
2021-11-15T04:23:02.000Z
|
venv/Lib/site-packages/patsy/test_regressions.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 101
|
2015-01-15T16:35:12.000Z
|
2022-02-19T06:50:02.000Z
|
# This file is part of Patsy
# Copyright (C) 2013 Nathaniel Smith <[email protected]>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
PatsyError, Origin)
def test_issue_11():
# Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
env = EvalEnvironment.capture()
data = {"X" : [0,1,2,3], "Y" : [1,2,3,4]}
formula = "C(X) + Y"
new_data = {"X" : [0,0,1,2,3,3,4], "Y" : [1,2,3,4,5,6,7]}
info = dmatrix(formula, data)
try:
build_design_matrices([info.design_info], new_data)
except PatsyError as e:
assert e.origin == Origin(formula, 0, 4)
else:
assert False
| 34.2
| 78
| 0.645614
| 131
| 855
| 4.145038
| 0.641221
| 0.014733
| 0.022099
| 0.014733
| 0.018416
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045593
| 0.230409
| 855
| 24
| 79
| 35.625
| 0.779635
| 0.375439
| 0
| 0
| 0
| 0
| 0.022814
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ab404c67e6f07e674ae9c5b07f6e6e0e0f914ac
| 7,764
|
py
|
Python
|
skimage/io/_plugins/pil_plugin.py
|
smheidrich/scikit-image
|
e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb
|
[
"BSD-3-Clause"
] | 3
|
2019-02-28T16:05:36.000Z
|
2020-04-03T17:29:07.000Z
|
Lib/site-packages/skimage/io/_plugins/pil_plugin.py
|
caiyongji/Anaconda-py36.5-tensorflow-built-env
|
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
|
[
"PSF-2.0"
] | 26
|
2020-03-24T18:07:06.000Z
|
2022-03-12T00:12:27.000Z
|
Lib/site-packages/skimage/io/_plugins/pil_plugin.py
|
caiyongji/Anaconda-py36.5-tensorflow-built-env
|
f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2
|
[
"PSF-2.0"
] | 3
|
2019-12-31T23:21:40.000Z
|
2020-04-03T17:29:08.000Z
|
__all__ = ['imread', 'imsave']
import numpy as np
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str or file
File name or file-like-object.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
Additional keyword arguments to pass through.
Notes
-----
Files are read using the Python Imaging Library.
See PIL docs [1]_ for a list of supported formats.
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if isinstance(fname, str):
with open(fname, 'rb') as f:
im = Image.open(f)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
else:
im = Image.open(fname)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
def pil_to_ndarray(image, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
try:
# this will raise an IOError if the file is not readable
image.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s'
% (image.filename, pillow_error_message, site))
raise ValueError(error_message)
frames = []
grayscale = None
i = 0
while 1:
try:
image.seek(i)
except EOFError:
break
frame = image
if img_num is not None and img_num != i:
image.getdata()[0]
i += 1
continue
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
dtype = 'uint16'
if image.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(image)
if grayscale:
frame = image.convert('L')
else:
if image.format == 'PNG' and 'transparency' in image.info:
frame = image.convert('RGBA')
else:
frame = image.convert('RGB')
elif image.mode == '1':
frame = image.convert('L')
elif 'A' in image.mode:
frame = image.convert('RGBA')
elif image.mode == 'CMYK':
frame = image.convert('RGB')
if image.mode.startswith('I;16'):
shape = image.size
dtype = '>u2' if image.mode.endswith('B') else '<u2'
if 'S' in image.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(image, 'fp') and image.fp:
image.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
elif img_num:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop + 1]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
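A short usage sketch for the palette check above (a hypothetical, self-contained example; it relies on the `Image` and `np` imports at the top of this module and builds an all-gray 256-entry palette, so the helper should return True):
# Hedged sketch: _palette_is_grayscale on a synthetic palette-mode image.
# gray_palette repeats each value v as (R, G, B) = (v, v, v), i.e. 768 ints.
gray_palette = [v for v in range(256) for _ in range(3)]
img = Image.new('P', (4, 4))        # palette-mode image, all pixels 0
img.putpalette(gray_palette)
print(_palette_is_grayscale(img))   # True: R == G == B for every used entry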
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
format_str: str
Format to save as. Defaults to PNG if a file-like object is given;
otherwise it is derived from the extension of fname.
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
Use the Python Imaging Library.
See PIL docs [1]_ for a list of other supported formats.
All images besides single channel PNGs are converted using `img_as_ubyte`.
Single channel PNGs have the following behavior:
- Integer values in [0, 255] and Boolean types -> img_as_ubyte
- Floating point and other integers -> img_as_uint
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, str) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, str)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr)
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
| 29.861538
| 93
| 0.579341
| 1,034
| 7,764
| 4.263056
| 0.246615
| 0.017695
| 0.02314
| 0.016334
| 0.161071
| 0.106171
| 0.071234
| 0.071234
| 0.071234
| 0.071234
| 0
| 0.014155
| 0.308475
| 7,764
| 259
| 94
| 29.976834
| 0.806854
| 0.328568
| 0
| 0.295455
| 0
| 0
| 0.073508
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 1
| 0.037879
| false
| 0
| 0.022727
| 0
| 0.106061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ab863848d8379f82bfc5f650de33e10615f3285
| 8,132
|
py
|
Python
|
machine.py
|
yukti07/Dell_Hire_hack
|
9422b7aaa0b96292191b4b880c0a8fb772fd1864
|
[
"MIT"
] | null | null | null |
machine.py
|
yukti07/Dell_Hire_hack
|
9422b7aaa0b96292191b4b880c0a8fb772fd1864
|
[
"MIT"
] | null | null | null |
machine.py
|
yukti07/Dell_Hire_hack
|
9422b7aaa0b96292191b4b880c0a8fb772fd1864
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from flask import flash
import numpy as np
def check(X, clf):
# print("TTTTTTTTTTTTThis is XXXXXXXXXXXX")
# print(X)
X = np.array(X)
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
labelencoder_X_5 = LabelEncoder()
X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5])
labelencoder_X_6 = LabelEncoder()
X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6])
labelencoder_X_7 = LabelEncoder()
X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7])
labelencoder_X_9 = LabelEncoder()
X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9])
labelencoder_X_12 = LabelEncoder()
X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12])
p = clf.predict(X)
t = ()
for x in p:
if x == 0:
a = 'No'
else:
a = 'Yes'
t = t+(a,)
return t
def analyze(df, clf):
feature_importances = pd.DataFrame(clf.feature_importances_, index=['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'],columns=['importance']).sort_values('importance',ascending=False)
feature_importances['x1'] = feature_importances.index
ax = feature_importances.plot.bar(x='x1', y='importance', rot=90)
plt.savefig('templates/graphs/raw/feature_importances.png', frameon=True)
intervals = [x for x in range(0, 22000, 2000)]
categories = ['<'+str(x) for x in range(2000, 22000, 2000)]
df1 = df
df1['Income_Categories'] = pd.cut(df.MonthlyIncome, intervals, labels=categories)
ax = sns.countplot(x="Income_Categories", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Monthly Income vs Attrition", xlabel="Income group", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/MIvsAttr.png')
intervals = [x for x in range(18,63,3)]
categories = ['<'+str(x) for x in range(21,63,3)]
df1 = df
df1['Age_Categories'] = pd.cut(df.Age, intervals, labels=categories)
ax = sns.countplot(x="Age_Categories", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Age vs Attrition", xlabel="Age group", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/AgevsAttr.png')
intervals = [x for x in range(0,32,2)]
categories = ['<'+str(x) for x in range(2,32,2)]
df1 = df
df1['Distance_from_home'] = pd.cut(df.DistanceFromHome, intervals, labels=categories)
ax = sns.countplot(x="Distance_from_home", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Distance from home vs Attrition", xlabel="Distance", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/DistanceFromHomevsAttr.png')
ax = sns.countplot(x="PercentSalaryHike", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Salary Hike Percentage vs Attrition", xlabel="Salary Hike Percentage", ylabel="Total")
plt.savefig('templates/graphs/raw/PercentSalaryHikevsAttr.png')
ax = sns.countplot(x="NumCompaniesWorked", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number Of Previously Worked Companies vs Attrition", xlabel="Number Of Previously Worked Companies", ylabel="Total")
plt.savefig('templates/graphs/raw/NPWCvsAttr.png')
intervals = [x for x in range(0,22,2)]
categories = ['<'+str(x) for x in range(2,22,2)]
df1 = df
df1['Current_Role'] = pd.cut(df.YearsInCurrentRole, intervals, labels=categories)
ax = sns.countplot(x="Current_Role", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number Of Years in Current Role vs Attrition", xlabel="Number Of Years in Current Role", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/YICRvsAttr.png')
ax = sns.countplot(x="OverTime", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Over Time vs Attrition", xlabel="Over Time", ylabel="Total")
plt.savefig('templates/graphs/raw/OverTimevsAttr.png')
ax = sns.countplot(x="JobRole", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Job Role vs Attrition", xlabel="Job Role", ylabel="Total")
plt.xticks(rotation=70)
plt.savefig('templates/graphs/raw/JobRolevsAttr.png')
intervals = [x for x in range(0,18,2)]
categories = ['<'+str(x) for x in range(2,18,2)]
df1 = df
df1['Promotion'] = pd.cut(df.YearsSinceLastPromotion, intervals, labels=categories)
ax = sns.countplot(x="Promotion", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number of Years since Promotion vs Attrition", xlabel="Number of Years since Promotion", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/YSCPvsAttr.png')
ax = sns.countplot(x="MaritalStatus", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Marital Status vs Attrition", xlabel="Marital Status", ylabel="Total")
plt.savefig('templates/graphs/raw/MSvsAttr.png')
def run(data):
df = pd.read_csv('original_dataset.csv')
skills = df['Skills'].tolist()
# print("SKKKKKKKKKKKKKKKILLLLLLLLLLLLLLLS")
# print(skills)
df = df.drop(['DailyRate', 'EmployeeCount', 'YearsAtCompany', 'TotalWorkingYears', 'JobLevel', 'HourlyRate', 'MonthlyRate', 'Over18', 'StandardHours', 'EnvironmentSatisfaction', 'JobInvolvement', 'PerformanceRating', 'TrainingTimesLastYear', 'RelationshipSatisfaction', 'StockOptionLevel', 'WorkLifeBalance', 'YearsWithCurrManager'], axis=1)
df = df[['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']]
#print("These re SKILSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")
#print(skills)
X = df.iloc[:, 1:].values
y = df.iloc[:, 0].values
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
labelencoder_X_5 = LabelEncoder()
X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5])
labelencoder_X_6 = LabelEncoder()
X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6])
labelencoder_X_7 = LabelEncoder()
X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7])
labelencoder_X_9 = LabelEncoder()
X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9])
labelencoder_X_12 = LabelEncoder()
X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12])
X = X.astype(float)
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40,random_state=0)
clf = RandomForestClassifier(n_estimators=200)
clf.fit(X_train,y_train)
p = clf.predict(X_test)
acc = accuracy_score(y_test,p)*100
flash(acc)
X = [list(elem) for elem in data]
[r.pop(0) for r in X]
#print("####### THIS IS XXXX##########")
#print(X)
att = check(X, clf)
skills = skills[:(len(att)):]
print("LLLLLLLLLLLLLLLENGHT" + str(len(att)) +" " + str(len(skills)))
i = 0
for row in att:
X[i].insert(0, row)
i = i+1
df1 = pd.DataFrame(X)
df1.columns=['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']
analyze(df, clf)
df1.to_csv('dataset1.csv')
return att, skills
| 47.835294
| 405
| 0.684702
| 1,047
| 8,132
| 5.211079
| 0.209169
| 0.100073
| 0.033358
| 0.050403
| 0.577529
| 0.531892
| 0.510264
| 0.427419
| 0.413673
| 0.36217
| 0
| 0.027814
| 0.155558
| 8,132
| 169
| 406
| 48.118343
| 0.76671
| 0.025578
| 0
| 0.273381
| 0
| 0
| 0.28944
| 0.070651
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021583
| false
| 0
| 0.093525
| 0
| 0.129496
| 0.007194
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ab8993b826c4cf13cc7b962623c2d00cc2adcf7
| 6,435
|
py
|
Python
|
TM-GCN-master/experiment_bitcoin_baseline_link_prediction.py
|
OsmanMalik/TM-GCN
|
31b19a538f264f6c30b5503ecefb497ee865b4d7
|
[
"Apache-2.0"
] | 14
|
2020-11-04T17:10:19.000Z
|
2022-03-04T07:48:22.000Z
|
TM-GCN-master/experiment_bitcoin_baseline_link_prediction.py
|
OsmanMalik/TM-GCN
|
31b19a538f264f6c30b5503ecefb497ee865b4d7
|
[
"Apache-2.0"
] | 2
|
2021-09-06T09:38:12.000Z
|
2021-09-06T09:50:52.000Z
|
TensorGCN-master/experiment_bitcoin_baseline_link_prediction.py
|
NaimahmedNesaragi/TM-GCN
|
275d057a7261d8e6b544dad66b7daa7943d11c4f
|
[
"Apache-2.0"
] | 6
|
2021-01-11T23:42:39.000Z
|
2022-01-31T08:37:13.000Z
|
# This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline
# The point of this script is to do link prediction
# Imports and aliases
import pickle
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import cProfile
import pandas as pd
import datetime
from scipy.sparse import csr_matrix
import os.path
import embedding_help_functions as ehf
import scipy.io as sio
unsq = t.unsqueeze
sq = t.squeeze
# Settings
alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95]
no_layers = 1
dataset = "OTC" # OTC or Alpha
no_epochs = 1000
mat_f_name = "saved_content_bitcoin_otc.mat"
no_trials = 1
beta1 = 19
beta2 = 19
cutoff = 95
eval_type = "MAP-MRR" # "MAP-MRR" or "F1"
data_loc = "data/Bitcoin_" + dataset + "/"
S_train, S_val, S_test = 95, 20, 20
lr = 0.01
momentum = 0.9
# Load and return relevant data
A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)
# Create features for the nodes
X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False)
# Extract edges and labels from A_labels, and augment with nonexisting edges
# edges, beta
edges = A_labels._indices()
edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff)
# Divide adjacency matrices and labels into training, validation and testing sets
edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False)
if no_trials > 1:
ep_acc_loss_vec = []
for tr in range(no_trials):
for alpha in alpha_vec:
class_weights = t.tensor([alpha, 1.0-alpha])
save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction"
# Create gcn for training
if no_layers == 2:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu")
elif no_layers == 1:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2])
# Train
optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target)
if eval_type == "F1":
ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test)
elif eval_type == "MAP-MRR":
ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test)
for ep in range(no_epochs):
# Compute loss and take step
optimizer.zero_grad()
output_train = gcn()
loss_train = criterion(output_train, target_train[edges_train[0]!=0])
loss_train.backward()
optimizer.step()
# Things that don't require gradient
with t.no_grad():
if ep % 100 == 0:
# Compute stats for training data; no point in doing more often than this
guess_train = t.argmax(output_train, dim=1)
if eval_type == "F1":
precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0])
elif eval_type == "MAP-MRR":
MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0])
# Compute stats for validation data
output_val = gcn(C_val[:-1], X_val[:-1], e_val)
guess_val = t.argmax(output_val, dim=1)
if eval_type == "F1":
precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0])
elif eval_type == "MAP-MRR":
MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0])
loss_val = criterion(output_val, target_val[edges_val[0]!=0])
# Compute stats for test data
output_test = gcn(C_test[:-1], X_test[:-1], e_test)
guess_test = t.argmax(output_test, dim=1)
if eval_type == "F1":
precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0])
elif eval_type == "MAP-MRR":
MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0])
loss_test = criterion(output_test, target_test[edges_test[0]!=0])
# Print
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep)
elif eval_type == "MAP-MRR":
print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train))
print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val))
print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test))
# Store values with results
if eval_type == "F1":
ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test]
elif eval_type == "MAP-MRR":
ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test]
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True)
elif eval_type == "MAP-MRR":
print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train))
print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val))
print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test))
if no_trials == 1:
pickle.dump(ep_acc_loss, open(save_res_fname, "wb"))
print("Results saved for single trial")
else:
ep_acc_loss_vec.append(ep_acc_loss)
if no_trials > 1:
pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb"))
print("Results saved for all trials")
| 45.638298
| 191
| 0.707537
| 1,084
| 6,435
| 3.928044
| 0.208487
| 0.025364
| 0.019023
| 0.026303
| 0.480977
| 0.437764
| 0.403476
| 0.305543
| 0.220291
| 0.193048
| 0
| 0.035998
| 0.158197
| 6,435
| 141
| 192
| 45.638298
| 0.750046
| 0.150894
| 0
| 0.15534
| 0
| 0.029126
| 0.107149
| 0.009557
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.145631
| 0
| 0.145631
| 0.097087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8abb81ca4107a0dafeae1ce248a3690886bc60c3
| 1,960
|
py
|
Python
|
Coding_Part/bob.py
|
qizhu8/CSCI6230-HW02
|
c889c0532db7ff4f25e134937469e5e6181416f0
|
[
"Apache-2.0"
] | null | null | null |
Coding_Part/bob.py
|
qizhu8/CSCI6230-HW02
|
c889c0532db7ff4f25e134937469e5e6181416f0
|
[
"Apache-2.0"
] | null | null | null |
Coding_Part/bob.py
|
qizhu8/CSCI6230-HW02
|
c889c0532db7ff4f25e134937469e5e6181416f0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
from PKC_Classes import NetworkUser, KDC
from DES import DES
from RSA_Class import RSA
import socket
import os
import sys
import threading
import time
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
def reply_conn(conn, addr):
print('Accept new connection from user {0}'.format(addr));
#conn.settimeout(500)
# conn.send(b'Hi, This is bob. Waiting for your sess key')
buf = conn.recv(1024)
while True:
if buf:
receive_packet = bytes.decode(buf).rstrip('\x00')
reply_packet = bob.process_packet(receive_packet)
conn.send(reply_packet.encode())
buf = conn.recv(1024)
else:
time.sleep(0.5)
conn.close()
bob = NetworkUser('Alice', DES(), RSA(9973, 97), 200)
print('bob:', bob.uid)
# socket communication
kdc_host, kdc_port = 'localhost', 9999
bob_host, bob_port = 'localhost', 9200
# talk to kdc for sess key
try:
sock_with_kdc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_with_kdc.connect((kdc_host, kdc_port))
print(sock_with_kdc.recv(1024))
# send cipher_key
bob_cipher_key_packet = bob.send_cipher_key()
sock_with_kdc.send(bob_cipher_key_packet.encode())
kdc_bob_cipher_key_packet = sock_with_kdc.recv(1024).decode()
print(kdc_bob_cipher_key_packet)
bob.process_packet(kdc_bob_cipher_key_packet)
except socket.error as msg:
print(msg);
sys.exit(1)
# sock_with_kdc.shutdown(socket.SHUT_WR)
# talk to bob
try:
sock_self = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_self.bind((bob_host, bob_port))
sock_self.listen(10)
except socket.error as msg:
print(msg);
sys.exit(1)
while 1:
conn, addr = sock_self.accept()
thread = threading.Thread(target=reply_conn, args=(conn, addr))
thread.start()
# sock_self.close()
| 26.849315
| 69
| 0.694388
| 298
| 1,960
| 4.348993
| 0.38255
| 0.048611
| 0.050926
| 0.069444
| 0.222994
| 0.126543
| 0.126543
| 0.126543
| 0.126543
| 0.058642
| 0
| 0.032663
| 0.187755
| 1,960
| 72
| 70
| 27.222222
| 0.781407
| 0.127551
| 0
| 0.2
| 0
| 0
| 0.051795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.16
| 0
| 0.18
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8abbc734ea1294bef8b90bd4c5b933a5890bb4db
| 10,257
|
py
|
Python
|
proj/scripts/cluster/baselines/triplets_greyscale.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
proj/scripts/cluster/baselines/triplets_greyscale.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
proj/scripts/cluster/baselines/triplets_greyscale.py
|
zqma/IIC
|
9d4e30b51535c6ca381389d9c22ce45be4d11883
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import itertools
import os
import pickle
import sys
from datetime import datetime
import matplotlib
import numpy as np
import torch
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import proj.archs as archs
from proj.utils.cluster.general import config_to_str, get_opt, update_lr
from proj.utils.cluster.baselines.triplets import make_triplets_data, \
triplets_eval, triplets_loss
"""
Triplets.
Makes the output distribution the same as that of the attractor, and
different from that of the repeller.
Greyscale version (no sobel).
"""
# Options ----------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--arch", type=str, required=True)
parser.add_argument("--opt", type=str, default="Adam")
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--dataset_root", type=str, required=True)
parser.add_argument("--gt_k", type=int, required=True)
parser.add_argument("--output_k", type=int, required=True)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--lr_schedule", type=int, nargs="+", default=[])
parser.add_argument("--lr_mult", type=float, default=0.1)
parser.add_argument("--num_epochs", type=int, default=1000)
parser.add_argument("--batch_sz", type=int, required=True) # num pairs
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--restart", dest="restart", default=False,
action="store_true")
parser.add_argument("--test_code", dest="test_code", default=False,
action="store_true")
parser.add_argument("--save_freq", type=int, default=10)
parser.add_argument("--kmeans_on_features", default=False,
action="store_true")
# transforms
# used for "positive" sample
parser.add_argument("--demean", dest="demean", default=False,
action="store_true")
parser.add_argument("--per_img_demean", dest="per_img_demean", default=False,
action="store_true")
parser.add_argument("--data_mean", type=float, nargs="+",
default=[0.5, 0.5, 0.5])
parser.add_argument("--data_std", type=float, nargs="+",
default=[0.5, 0.5, 0.5])
parser.add_argument("--crop_orig", dest="crop_orig", default=False,
action="store_true")
parser.add_argument("--crop_other", dest="crop_other", default=False,
action="store_true")
parser.add_argument("--tf1_crop", type=str, default="random") # type name
parser.add_argument("--tf2_crop", type=str, default="random")
parser.add_argument("--tf1_crop_sz", type=int, default=84)
parser.add_argument("--tf2_crop_szs", type=int, nargs="+",
default=[84]) # allow diff crop for imgs_tf
parser.add_argument("--tf3_crop_diff", dest="tf3_crop_diff", default=False,
action="store_true")
parser.add_argument("--tf3_crop_sz", type=int, default=0)
parser.add_argument("--input_sz", type=int, default=96)
parser.add_argument("--rot_val", type=float, default=0.)
parser.add_argument("--always_rot", dest="always_rot", default=False,
action="store_true")
parser.add_argument("--no_jitter", dest="no_jitter", default=False,
action="store_true")
parser.add_argument("--no_flip", dest="no_flip", default=False,
action="store_true")
config = parser.parse_args()
# Fixed settings and checks ----------------------------------------------------
config.in_channels = 1
if config.output_k != config.gt_k:
assert (config.output_k > config.gt_k)
assert (config.kmeans_on_features)
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
config.dataloader_batch_sz = config.batch_sz
config.num_dataloaders = 1
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if config.restart:
given_config = config
reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle")
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
assert (config.model_ind == given_config.model_ind)
config.restart = True
# copy over new num_epochs and lr schedule
config.num_epochs = given_config.num_epochs
config.lr_schedule = given_config.lr_schedule
if not hasattr(config, "kmeans_on_features"):
config.kmeans_on_features = False
else:
print("Config: %s" % config_to_str(config))
# Data, nets, optimisers -------------------------------------------------------
dataloader_original, dataloader_positive, dataloader_negative, \
dataloader_test = make_triplets_data(config)
train_dataloaders = [dataloader_original, dataloader_positive,
dataloader_negative]
net = archs.__dict__[config.arch](config)
if config.restart:
model_path = os.path.join(config.out_dir, "latest_net.pytorch")
taking_best = not os.path.exists(model_path)
if taking_best:
print("using best instead of latest")
model_path = os.path.join(config.out_dir, "best_net.pytorch")
net.load_state_dict(
torch.load(model_path, map_location=lambda storage, loc: storage))
net.cuda()
net = torch.nn.DataParallel(net)
net.train()
optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)
if config.restart:
opt_path = os.path.join(config.out_dir, "latest_optimiser.pytorch")
if taking_best:
opt_path = os.path.join(config.out_dir, "best_optimiser.pytorch")
optimiser.load_state_dict(torch.load(opt_path))
# Results storage --------------------------------------------------------------
if config.restart:
if not taking_best:
next_epoch = config.last_epoch + 1 # corresponds to last saved model
else:
next_epoch = np.argmax(np.array(config.epoch_acc)) + 1
print("starting from epoch %d" % next_epoch)
config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot
config.epoch_loss = config.epoch_loss[:next_epoch]
config.masses = config.masses[:next_epoch, :]
config.per_class_acc = config.per_class_acc[:next_epoch, :]
else:
config.epoch_acc = []
config.epoch_loss = []
config.masses = None
config.per_class_acc = None
_ = triplets_eval(config, net,
dataloader_test=dataloader_test,
sobel=False)
print("Pre: time %s: \n %s" % (datetime.now(), config.epoch_acc[-1]))
sys.stdout.flush()
next_epoch = 1
fig, axarr = plt.subplots(4, sharex=False, figsize=(20, 20))
# Train ------------------------------------------------------------------------
for e_i in xrange(next_epoch, config.num_epochs):
print("Starting e_i: %d" % (e_i))
if e_i in config.lr_schedule:
optimiser = update_lr(optimiser, lr_mult=config.lr_mult)
avg_loss = 0. # over heads and head_epochs (and sub_heads)
avg_loss_count = 0
sys.stdout.flush()
iterators = (d for d in train_dataloaders)
b_i = 0
for tup in itertools.izip(*iterators):
net.module.zero_grad()
imgs_orig = tup[0][0].cuda()
imgs_pos = tup[1][0].cuda()
imgs_neg = tup[2][0].cuda()
outs_orig = net(imgs_orig)
outs_pos = net(imgs_pos)
outs_neg = net(imgs_neg)
curr_loss = triplets_loss(outs_orig, outs_pos, outs_neg)
if ((b_i % 100) == 0) or (e_i == next_epoch and b_i < 10):
print("Model ind %d epoch %d batch %d "
"loss %f time %s" % \
(config.model_ind, e_i, b_i, curr_loss.item(), datetime.now()))
sys.stdout.flush()
if not np.isfinite(float(curr_loss.item())):
print("Loss is not finite... %s:" % str(curr_loss.item()))
exit(1)
avg_loss += curr_loss.item()
avg_loss_count += 1
curr_loss.backward()
optimiser.step()
b_i += 1
if b_i == 2 and config.test_code:
break
avg_loss = float(avg_loss / avg_loss_count)
config.epoch_loss.append(avg_loss)
# Eval and storage -----------------------------------------------------------
# when epoch over both heads is finished
is_best = triplets_eval(config, net,
dataloader_test=dataloader_test,
sobel=False)
print("Time %s, acc %s" % (datetime.now(), config.epoch_acc[-1]))
sys.stdout.flush()
axarr[0].clear()
axarr[0].plot(config.epoch_acc)
axarr[0].set_title("acc, top: %f" % max(config.epoch_acc))
axarr[1].clear()
axarr[1].plot(config.epoch_loss)
axarr[1].set_title("Loss")
axarr[2].clear()
for c in xrange(config.gt_k):
axarr[2].plot(config.masses[:, c])
axarr[2].set_title("masses")
axarr[3].clear()
for c in xrange(config.gt_k):
axarr[3].plot(config.per_class_acc[:, c])
axarr[3].set_title("per_class_acc")
fig.tight_layout()
fig.canvas.draw_idle()
fig.savefig(os.path.join(config.out_dir, "plots.png"))
if is_best or (e_i % config.save_freq == 0):
net.module.cpu()
if is_best:
torch.save(net.module.state_dict(),
os.path.join(config.out_dir, "best_net.pytorch"))
torch.save(optimiser.state_dict(),
os.path.join(config.out_dir, "best_optimiser.pytorch"))
if e_i % config.save_freq == 0:
torch.save(net.module.state_dict(),
os.path.join(config.out_dir, "latest_net.pytorch"))
torch.save(optimiser.state_dict(),
os.path.join(config.out_dir, "latest_optimiser.pytorch"))
config.last_epoch = e_i # for last saved version
net.module.cuda()
with open(os.path.join(config.out_dir, "config.pickle"),
'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"),
"w") as text_file:
text_file.write("%s" % config)
if config.test_code:
exit(0)
| 33.963576
| 82
| 0.632641
| 1,386
| 10,257
| 4.453824
| 0.20202
| 0.049571
| 0.093634
| 0.051029
| 0.359631
| 0.283169
| 0.256601
| 0.233274
| 0.17042
| 0.083266
| 0
| 0.010256
| 0.201521
| 10,257
| 301
| 83
| 34.076412
| 0.743468
| 0.073608
| 0
| 0.169014
| 0
| 0
| 0.11863
| 0.013812
| 0
| 0
| 0
| 0
| 0.014085
| 1
| 0
| false
| 0
| 0.065728
| 0
| 0.065728
| 0.046948
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8abc2535fb59574434dff13ed4c596ed4d606f9e
| 4,279
|
py
|
Python
|
addons/twofactor/tests/test_models.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 11
|
2018-12-11T16:39:40.000Z
|
2022-02-26T09:51:32.000Z
|
addons/twofactor/tests/test_models.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 52
|
2018-04-13T05:03:21.000Z
|
2022-03-22T02:56:19.000Z
|
addons/twofactor/tests/test_models.py
|
tsukaeru/RDM-osf.io
|
2dc3e539322b6110e51772f8bd25ebdeb8e12d0e
|
[
"Apache-2.0"
] | 16
|
2018-07-09T01:44:51.000Z
|
2021-06-30T01:57:16.000Z
|
import unittest
from future.moves.urllib.parse import urlparse, urljoin, parse_qs
import pytest
from addons.twofactor.tests.utils import _valid_code
from nose.tools import (assert_equal, assert_false, assert_is_none,
assert_is_not_none, assert_true)
from osf_tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestCallbacks(unittest.TestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.user = UserFactory()
self.user.add_addon('twofactor')
self.user_settings = self.user.get_addon('twofactor')
def test_add_to_user(self):
assert_equal(self.user_settings.totp_drift, 0)
assert_is_not_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
def test_remove_from_unconfirmed_user(self):
# drift defaults to 0. Change it so we can test it was changed back.
self.user_settings.totp_drift = 1
self.user_settings.save()
self.user.delete_addon('twofactor')
self.user_settings.reload()
assert_equal(self.user_settings.totp_drift, 0)
assert_is_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
def test_remove_from_confirmed_user(self):
# drift defaults to 0. Change it so we can test it was changed back.
self.user_settings.totp_drift = 1
self.user_settings.is_confirmed = True
self.user_settings.save()
self.user.delete_addon('twofactor')
self.user_settings.reload()
assert_equal(self.user_settings.totp_drift, 0)
assert_is_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
class TestUserSettingsModel(unittest.TestCase):
TOTP_SECRET = 'b8f85986068f8079aa9d'
TOTP_SECRET_B32 = 'XD4FTBQGR6AHTKU5'
def setUp(self):
super(TestUserSettingsModel, self).setUp()
self.user = UserFactory()
self.user.add_addon('twofactor')
self.user_settings = self.user.get_addon('twofactor')
self.user_settings.totp_secret = self.TOTP_SECRET
self.user_settings.save()
def tearDown(self):
super(TestUserSettingsModel, self).tearDown()
self.user.__class__.delete(self.user)
def test_b32(self):
assert_equal(self.user_settings.totp_secret_b32, self.TOTP_SECRET_B32)
def test_otpauth_url(self):
url = urlparse(self.user_settings.otpauth_url)
assert_equal(url.scheme, 'otpauth')
assert_equal(url.netloc, 'totp')
assert_equal(url.path, '/RDM:{}'.format(self.user.username))
assert_equal(
parse_qs(url.query),
{'secret': [self.TOTP_SECRET_B32]}
)
def test_json(self):
# url = 'otpauth://totp/RDM:{}?secret=' + self.TOTP_SECRET_B32
settings = self.user_settings.to_json(user=None)
assert_equal(
settings,
{
'is_enabled': True,
'addon_full_name': 'Two-factor Authentication',
'addon_short_name': 'twofactor',
'drift': 0,
'is_confirmed': False,
'nodes': [],
'secret': self.TOTP_SECRET_B32,
'has_auth': False,
}
)
def test_verify_valid_code(self):
assert_true(
self.user_settings.verify_code(_valid_code(self.TOTP_SECRET))
)
def test_verify_valid_core_drift(self):
# use a code from 30 seconds in the future
assert_true(
self.user_settings.verify_code(
_valid_code(self.TOTP_SECRET, drift=1)
)
)
# make sure drift is updated.
assert_equal(self.user_settings.totp_drift, 1)
# use a code from 60 seconds in the future
assert_true(
self.user_settings.verify_code(
_valid_code(self.TOTP_SECRET, drift=2)
)
)
# make sure drift is updated.
assert_equal(self.user_settings.totp_drift, 2)
# use the current code (which is now 2 periods away from the drift)
assert_false(
self.user_settings.verify_code(_valid_code(self.TOTP_SECRET))
)
| 32.416667
| 78
| 0.646646
| 529
| 4,279
| 4.939509
| 0.20794
| 0.122465
| 0.177574
| 0.091848
| 0.562189
| 0.521623
| 0.50287
| 0.487945
| 0.487945
| 0.487945
| 0
| 0.01519
| 0.26151
| 4,279
| 131
| 79
| 32.664122
| 0.811709
| 0.093012
| 0
| 0.361702
| 0
| 0
| 0.05811
| 0
| 0
| 0
| 0
| 0
| 0.244681
| 1
| 0.117021
| false
| 0
| 0.06383
| 0
| 0.223404
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8abed448e30652e384272b8cc640eedca2d718cf
| 1,708
|
py
|
Python
|
lanedet/runner/utils/net_utils.py
|
ztjsw/lanedet
|
c957e1f70695e39063231612637e22fcad2769f5
|
[
"Apache-2.0"
] | 1
|
2021-05-22T09:36:17.000Z
|
2021-05-22T09:36:17.000Z
|
lanedet/runner/utils/net_utils.py
|
ztjsw/lanedet
|
c957e1f70695e39063231612637e22fcad2769f5
|
[
"Apache-2.0"
] | null | null | null |
lanedet/runner/utils/net_utils.py
|
ztjsw/lanedet
|
c957e1f70695e39063231612637e22fcad2769f5
|
[
"Apache-2.0"
] | null | null | null |
import torch
import os
from torch import nn
import numpy as np
import torch.nn.functional
from termcolor import colored
from .logger import get_logger
def save_model(net, optim, scheduler, recorder, is_best=False):
model_dir = os.path.join(recorder.work_dir, 'ckpt')
os.system('mkdir -p {}'.format(model_dir))
epoch = recorder.epoch
ckpt_name = 'best' if is_best else epoch
torch.save({
'net': net.state_dict(),
'optim': optim.state_dict(),
'scheduler': scheduler.state_dict(),
'recorder': recorder.state_dict(),
'epoch': epoch
}, os.path.join(model_dir, '{}.pth'.format(ckpt_name)))
# remove previous pretrained model if the number of models is too big
# pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir)]
# if len(pths) <= 2:
# return
# os.system('rm {}'.format(os.path.join(model_dir, '{}.pth'.format(min(pths)))))
def load_network_specified(net, model_dir, logger=None):
pretrained_net = torch.load(model_dir)['net']
net_state = net.state_dict()
state = {}
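# keep only pretrained weights whose names and shapes match the current network; skip the rest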
for k, v in pretrained_net.items():
if k not in net_state.keys() or v.size() != net_state[k].size():
if logger:
logger.info('skip weights: ' + k)
continue
state[k] = v
net.load_state_dict(state, strict=False)
def load_network(net, model_dir, finetune_from=None, logger=None):
if finetune_from:
if logger:
logger.info('Finetune model from: ' + finetune_from)
load_network_specified(net, finetune_from, logger)
return
pretrained_model = torch.load(model_dir)
net.load_state_dict(pretrained_model['net'], strict=True)
| 34.16
| 84
| 0.648712
| 240
| 1,708
| 4.445833
| 0.325
| 0.067479
| 0.028116
| 0.028116
| 0.088097
| 0.050609
| 0.050609
| 0
| 0
| 0
| 0
| 0.001498
| 0.218384
| 1,708
| 49
| 85
| 34.857143
| 0.797753
| 0.141101
| 0
| 0.052632
| 0
| 0
| 0.065708
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.184211
| 0
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac004a4f19bb41d9cfa8a39529011d30c5a08dc
| 5,455
|
py
|
Python
|
main.py
|
jonodrew/matchex
|
531e7cd1c328cb9dc34b601a06648bd2c3e709e6
|
[
"MIT"
] | null | null | null |
main.py
|
jonodrew/matchex
|
531e7cd1c328cb9dc34b601a06648bd2c3e709e6
|
[
"MIT"
] | null | null | null |
main.py
|
jonodrew/matchex
|
531e7cd1c328cb9dc34b601a06648bd2c3e709e6
|
[
"MIT"
] | null | null | null |
from __future__ import division
from timeit import default_timer as timer
import csv
import numpy as np
import itertools
from munkres import Munkres, print_matrix, make_cost_matrix
import sys
from classes import *
from functions import *
from math import sqrt
import Tkinter as tk
import tkFileDialog as filedialog
root = tk.Tk()
root.withdraw()
p_file = filedialog.askopenfilename(title='Please select the posting file')
c_file = filedialog.askopenfilename(title='Please select the candidate file')
"""for use with /users/java_jonathan/postings_lge.csv and
/Users/java_jonathan/candidates_lge.csv"""
# p_file = raw_input("Please enter the path for the postings file: ")
# p_file = p_file.strip()
# c_file = raw_input("Please enter the path for the candidate file: ")
# c_file = c_file.strip()
start = timer()
with open(p_file,'r') as f:
#with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f:
reader = csv.reader(f)
postingsAll = list(reader)
with open(c_file,'r') as f:
reader = csv.reader(f)
candidatesAll = list(reader)
"""create empty lists to fill with lists of lists output by iterating function
below"""
names = []
totalMatrix = []
for list in candidatesAll:
candidate = Candidate(*list)
names.append(candidate.name)
n = 0
for list in postingsAll:
posting = Posting(*list)
totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate)
+matchLocation(posting,candidate) + matchCompetency(posting,candidate) +
matchSkill(posting,candidate)+matchCohort(posting,candidate))
n += 1
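# pad the candidate names and the score matrix with zeros so the matrix is square (n x n) before reshaping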
l = len(names)
names.extend([0] * (n-l))
totalMatrix.extend([0] * (n**2 - len(totalMatrix)))
totalMatrix = np.asarray(totalMatrix)
totalMatrix = np.reshape(totalMatrix,(n,-1))
#at this point the matrix is structured as candidates down and jobs across
totalMatrix = np.transpose(totalMatrix)
#now it's switched!
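# Munkres minimises cost, so turn the scores into costs by subtracting each entry from the maximum score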
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix)
totalMatrix = np.array(totalMatrix)
minSuitability = 18
check = []
result = []
m = Munkres()
indexes = m.compute(totalMatrix)
#print_matrix(totalMatrix, msg='Lowest cost through this matrix:')
total = 0.0
unhappy_candidates = 0
medium_candidates = 0
tenpc_candidates = 0
qs_candidates = 0
vs_candidates = 0
f = open('output.txt', 'w')
for row, column in indexes:
if column < l:
value = totalMatrix[row][column]
if value > minSuitability*0.9:
tenpc_candidates += 1
elif value > minSuitability*0.75:
medium_candidates += 1
elif value > minSuitability/2:
unhappy_candidates += 1
elif value > minSuitability*0.25:
qs_candidates += 1
elif value > minSuitability*0.1:
vs_candidates += 1
total += value
check.append(column+1)
result.append((row,column))
f.write('For candidate %s: \nOptimal position: %d (score %s)\n'
% (names[column], column+1, value))
else:
pass
globalSatisfaction = 100*(1-(total/(l*minSuitability)))
print('Global satisfaction: %.2f%%' % globalSatisfaction)
print('Candidates who are more than 90%% suitable: %d' % vs_candidates)
print('Candidates who are more than 75%% suitable: %d' % qs_candidates)
print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates))
print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates)
print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates)
#output from excel:
correct = [1,3,5,9,10,2,4,8,6,7]
#this function tests output above against Excel:
#test(correct,check)
topMatrix = topFive(names,totalMatrix)
#print(topMatrix)
np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
end = timer()
print(end-start)
"""
#posting = [Posting(*postingsAll)]
#print(posting[0].anchor)
#print(posting)
#print(candidatesAll)
#print(postingsAll)
#print(postingsAll[0].name)
#print(preferences)
#print(postings)
#split up files into relative blocks
postCode = [lists[0] for lists in postings]
postDept = [lists[1] for lists in postings]
postAnchor = [lists[2] for lists in postings]
postSkills = [lists[3:5] for lists in postings]
postLocation = [lists[5] for lists in postings]
postCompetencies = [lists[7:10] for lists in postings]
postSecurity = [lists[10] for lists in postings]
#with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f:
#gives first column ie candidate a
a=totalMatrix[:,[0]]
#b = totalMatrix[:,[0]]
#print(a)
#converts 1D matrix to list for ease
a = np.array(a).tolist()
#print(a)
#creates list called output containing rank of score
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
#creates tuples of rank, job and appends to list
jobRank = []
# for rank, b in zip(output, postCode):
# jobScore = (rank,b)
# list(jobScore)
# jobRank.append(jobScore)
# print(jobRank)
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
# #print(a)
# jobRank = sorted(jobRank, reverse=False)
# print(jobRank)
# print('For candidate a, the best position is %s') % (jobRank[0][1])
# print(candidate[0].skills)
"""
| 30.646067
| 88
| 0.698075
| 761
| 5,455
| 4.948752
| 0.299606
| 0.01487
| 0.018587
| 0.033457
| 0.260754
| 0.217207
| 0.189326
| 0.123208
| 0.057886
| 0.038768
| 0
| 0.017787
| 0.16517
| 5,455
| 177
| 89
| 30.819209
| 0.809179
| 0.094409
| 0
| 0.044444
| 0
| 0
| 0.141674
| 0.018093
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.011111
| 0.133333
| 0
| 0.133333
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac00891cba917dcea99bd7701a43788bba03334
| 3,142
|
py
|
Python
|
pip_info/setup.py
|
95616ARG/SyReNN
|
19abf589e84ee67317134573054c648bb25c244d
|
[
"MIT"
] | 36
|
2019-08-19T06:17:52.000Z
|
2022-03-11T09:02:40.000Z
|
pip_info/setup.py
|
95616ARG/SyReNN
|
19abf589e84ee67317134573054c648bb25c244d
|
[
"MIT"
] | 8
|
2020-04-09T20:59:04.000Z
|
2022-03-11T23:56:50.000Z
|
pip_info/setup.py
|
95616ARG/SyReNN
|
19abf589e84ee67317134573054c648bb25c244d
|
[
"MIT"
] | 4
|
2021-01-13T11:17:55.000Z
|
2021-06-28T19:36:04.000Z
|
"""Setup script for PySyReNN.
Adapted from:
https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
"""
import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "pysyrenn"
PACKAGES = [
"syrenn_proto",
"pysyrenn",
"pysyrenn.frontend",
"pysyrenn.helpers",
]
META_PATH = "__metadata__.py"
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["torch"]
with open("requirements.txt") as requirements:
reading = False
for line in requirements.readlines():
if line.startswith("# PYSYRENN"):
reading = True
elif line.startswith("# END"):
reading = False
elif line.startswith("#"):
pass
elif reading:
INSTALL_REQUIRES.append(line.strip().split("==")[0])
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "."},
package_data={"": ["pysyrenn/**/*.py"]},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| 30.803922
| 77
| 0.595799
| 329
| 3,142
| 5.507599
| 0.452888
| 0.115342
| 0.151766
| 0.086093
| 0.029801
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007717
| 0.216423
| 3,142
| 101
| 78
| 31.108911
| 0.72827
| 0.084978
| 0
| 0.025641
| 0
| 0
| 0.358303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0.012821
| 0.051282
| 0
| 0.102564
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac22e55a9c9778c66e3a1d86342cccdc465c6de
| 4,117
|
py
|
Python
|
pygears/svgen/modules/sieve.py
|
Risto97/pygears
|
19393e85101a16762cb3bbbf3010946ef69217f2
|
[
"MIT"
] | null | null | null |
pygears/svgen/modules/sieve.py
|
Risto97/pygears
|
19393e85101a16762cb3bbbf3010946ef69217f2
|
[
"MIT"
] | null | null | null |
pygears/svgen/modules/sieve.py
|
Risto97/pygears
|
19393e85101a16762cb3bbbf3010946ef69217f2
|
[
"MIT"
] | null | null | null |
import itertools
from pygears.common.sieve import sieve
from pygears.svgen.inst import SVGenInstPlugin
from pygears.svgen.svmod import SVModuleGen
from functools import partial
from pygears.svgen.svgen import SVGenPlugin
from pygears.svgen.util import svgen_visitor
from pygears.core.hier_node import HierVisitorBase
from pygears.svgen.inst import svgen_inst
from pygears.rtl.gear import RTLGearHierVisitor, is_gear_instance
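# Map a Python index or slice on `dtype` to a SystemVerilog 'high:low' bit-slice string.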
def index_to_sv_slice(dtype, key):
subtype = dtype[key]
if isinstance(key, slice):
key = key.start
if key is None or key == 0:
low_pos = 0
else:
low_pos = int(dtype[:key])
high_pos = low_pos + int(subtype) - 1
return f'{high_pos}:{low_pos}'
class SVGenSieve(SVModuleGen):
@property
def is_generated(self):
return True
def get_module(self, template_env):
def get_stages():
for s in itertools.chain(self.node.pre_sieves, [self.node]):
indexes = s.params['key']
if not isinstance(indexes, tuple):
indexes = (indexes, )
dtype = s.in_ports[0].dtype
out_type = s.out_ports[0].dtype
slices = list(
map(
partial(index_to_sv_slice, dtype),
filter(lambda i: int(dtype[i]) > 0, indexes)))
yield slices, out_type
stages = list(get_stages())
# If any of the sieves has shrunk data to 0 width, there is nothing to
# do
if any(i[0] == [] for i in stages):
stages = []
context = {
'stages': stages,
'module_name': self.sv_module_name,
'intfs': list(self.sv_port_configs())
}
return template_env.render_local(__file__, "sieve.j2", context)
@svgen_visitor
class RemoveEqualReprSieveVisitor(RTLGearHierVisitor):
def sieve(self, node):
pout = node.out_ports[0]
pin = node.in_ports[0]
if pin.dtype == pout.dtype:
node.bypass()
@svgen_visitor
class CollapseSievesVisitor(RTLGearHierVisitor):
def sieve(self, node):
if not hasattr(node, 'pre_sieves'):
node.pre_sieves = []
sieve_cons = [
p for p in node.consumers if is_gear_instance(p.node, sieve)
]
pin = node.in_ports[0]
pout = node.out_ports[0]
iin = pin.producer
iout = pout.consumer
if sieve_cons:
# There is a Sieve connected to this Sieve, hence we can combine
# two of them into a single SV module
# Connect the consumers of this Sieve, which are Sieves themselves,
# to this Sieve's predecessor
for cons_pin in iout.consumers.copy():
consumer = cons_pin.node
if is_gear_instance(consumer, sieve):
# print(f'Merging {node.name} to {consumer.name}')
# print(consumer.params['key'])
# If the consumer is a Sieve, just register this Sieve with
# it, and short circuit this one
consumer.pre_sieves = node.pre_sieves + [node]
iout.disconnect(cons_pin)
iin.connect(cons_pin)
# print(f'Remaining consumers: {[p.node.name for p in node.consumers]}')
if not node.consumers:
# Finally, if there are no consumers left for this Sieve, remove
# this Sieve completely (with all its connections) from the
# SVGen tree
node.remove()
iout.remove()
class SVGenSievePlugin(SVGenInstPlugin, SVGenPlugin):
@classmethod
def bind(cls):
cls.registry['svgen']['module_namespace'][sieve] = SVGenSieve
cls.registry['svgen']['flow'].insert(
cls.registry['svgen']['flow'].index(svgen_inst),
CollapseSievesVisitor)
# cls.registry['SVGenFlow'].insert(
# cls.registry['SVGenFlow'].key(CollapseSievesVisitor),
# RemoveEqualReprSieveVisitor)
| 32.674603
| 83
| 0.589264
| 489
| 4,117
| 4.838446
| 0.323108
| 0.037194
| 0.033812
| 0.016906
| 0.130178
| 0.017751
| 0
| 0
| 0
| 0
| 0
| 0.004636
| 0.318922
| 4,117
| 125
| 84
| 32.936
| 0.839158
| 0.184115
| 0
| 0.097561
| 0
| 0
| 0.03053
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085366
| false
| 0.012195
| 0.121951
| 0.012195
| 0.292683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac2a36b9aed8734fe00d975f21caf0ecc7d8aef
| 5,461
|
py
|
Python
|
examples/my_model_test.py
|
gzpyy/qlib
|
56fdd028c8296c75f2a32bdb51869f010dd4f6d1
|
[
"MIT"
] | null | null | null |
examples/my_model_test.py
|
gzpyy/qlib
|
56fdd028c8296c75f2a32bdb51869f010dd4f6d1
|
[
"MIT"
] | null | null | null |
examples/my_model_test.py
|
gzpyy/qlib
|
56fdd028c8296c75f2a32bdb51869f010dd4f6d1
|
[
"MIT"
] | null | null | null |
#encoding=utf-8
import qlib
import pandas as pd
import pickle
import xgboost as xgb
import numpy as np
import re
from qlib.constant import REG_US
from qlib.utils import exists_qlib_data, init_instance_by_config
from qlib.workflow import R
from qlib.workflow.record_temp import SignalRecord, PortAnaRecord
from qlib.utils import flatten_dict
from qlib.data import LocalExpressionProvider
from qlib.data.ops import Operators, OpsList
from qlib.data.base import Feature
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Grid
from my_data_handler import MyAlphaHandler
# model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model'
model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model'
with open(model_file, 'rb') as fi:
model = pickle.load(fi)
exprs, columns = MyAlphaHandler.get_custom_config()
raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time'])
raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00")
raw_data.set_index('time', inplace=True)
raw_data["vwap"] = np.nan
raw_data.sort_index(inplace=True)
# print(raw_data)
class MyFeature(Feature):
def _load_internal(self, instrument, start_index, end_index, freq):
print("load", self._name, instrument, start_index, end_index, freq)
return raw_data.loc[start_index:end_index][self._name]
Operators.register(OpsList + [MyFeature])
def my_parse_field(field):
if not isinstance(field, str):
field = str(field)
for pattern, new in [(r"\$(\w+)", rf'MyFeature("\1")'), (r"(\w+\s*)\(", r"Operators.\1(")]: # Features # Operators
field = re.sub(pattern, new, field)
return field
obj = dict()
for field in exprs:
expression = eval(my_parse_field(field))
series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min")
series = series.astype(np.float32)
obj[field] = series
data = pd.DataFrame(obj)
data.columns = columns
view_time_start = '2022-02-11'
view_time_end = '2022-02-12'
pre_data = raw_data.loc[view_time_start:view_time_end].copy()
pred=model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end]))
pre_data['pred_score'] = pred
records = pre_data.to_dict("records")
cash = 50000
position = {}
hold_thresh = 5
score_thresh = 0.001
x_axises, y_axises, mark_points, money = [], [], [], []
for record in records:
x_axises.append(record['data_time'])
y_axises.append([
record['open'], record['close'], record['low'], record['high']
])
if 'hold_cnt' in position:
position['hold_cnt'] += 1
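# exit the position on take-profit (+1%), stop-loss (-0.5%), a negative prediction score, or when held too long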
if position and (record['open'] >= position['close'] * 1.01 or record['open'] < position['close'] * 0.995 or record['pred_score'] < -score_thresh or position['hold_cnt'] >= hold_thresh):
cash += position['amount'] * record['open']
position = {}
#print("sell")
mark_points.append(opts.MarkPointItem(
coord=[record['data_time'], record['high']],
symbol='triangle', symbol_size=7,
itemstyle_opts=opts.ItemStyleOpts(color="green")
))
elif record['pred_score'] > score_thresh and not position:
position = dict(record)
position['amount'] = int(cash / position['open'])
cash -= position['amount'] * position['open']
# buy
#print("buy")
position['hold_cnt'] = 0
mark_points.append(opts.MarkPointItem(
coord=[record['data_time'], record['high']],
symbol='arrow', symbol_size=7,
itemstyle_opts=opts.ItemStyleOpts(color="yellow")
))
cur_money = cash
if position:
cur_money += position['amount'] * record['close']
money.append(cur_money)
if position:
cash += position['amount'] * records[-1]['close']
print("cash:", cash)
kline_graph = (
Kline()
.add_xaxis(x_axises)
.add_yaxis(
"kline",
y_axises,
markpoint_opts=opts.MarkPointOpts(
data=mark_points
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(is_scale=True),
yaxis_opts=opts.AxisOpts(
is_scale=True,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
),
),
title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)),
datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)],
)
)
kline_line = (
Line()
.add_xaxis(xaxis_data=x_axises)
.add_yaxis(
series_name="cur_money",
y_axis=money,
is_smooth=True,
linestyle_opts=opts.LineStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
markline_opts=opts.MarkLineOpts(
data=[opts.MarkLineItem(y=50000)]
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=2,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
min_='dataMin'
)
)
)
grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px'))
grid_chart.add(
kline_graph,
grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"),
)
grid_chart.add(
kline_line,
grid_opts=opts.GridOpts(
pos_left="3%", pos_right="10%", pos_top="60%", height="30%"
),
)
grid_chart.render("kline_markline.html")
| 33.29878
| 190
| 0.655741
| 717
| 5,461
| 4.781032
| 0.324965
| 0.042007
| 0.015169
| 0.015753
| 0.204784
| 0.179697
| 0.126604
| 0.106768
| 0.061844
| 0.061844
| 0
| 0.029837
| 0.202161
| 5,461
| 164
| 191
| 33.29878
| 0.756943
| 0.030031
| 0
| 0.164384
| 0
| 0
| 0.101701
| 0.016824
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013699
| false
| 0
| 0.116438
| 0
| 0.150685
| 0.013699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac30fc95afe68d34f716111b4aac384fefa954a
| 2,291
|
py
|
Python
|
graphzoom/embed_methods/dgi/execute.py
|
junhoher/GraphZoom
|
5073b49a34badf7bc6c25bd2a6cc6c78b4ee7d5a
|
[
"MIT"
] | 16
|
2019-10-18T06:31:29.000Z
|
2021-09-23T12:46:19.000Z
|
graphzoom/embed_methods/dgi/execute.py
|
junhoher/GraphZoom
|
5073b49a34badf7bc6c25bd2a6cc6c78b4ee7d5a
|
[
"MIT"
] | 7
|
2019-10-18T06:36:32.000Z
|
2022-02-10T01:37:04.000Z
|
graphzoom/embed_methods/dgi/execute.py
|
junhoher/GraphZoom
|
5073b49a34badf7bc6c25bd2a6cc6c78b4ee7d5a
|
[
"MIT"
] | 4
|
2019-11-15T12:47:11.000Z
|
2021-02-15T07:26:24.000Z
|
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import networkx as nx
import time
from embed_methods.dgi.models import DGI, LogReg
from embed_methods.dgi.utils import process
def dgi(G, features):
batch_size = 1
nb_epochs = 10000
patience = 20
lr = 0.001
l2_coef = 0.0
drop_prob = 0.0
hid_units = 512
sparse = True
nonlinearity = 'prelu' # special name to separate parameters
adj = nx.to_scipy_sparse_matrix(G, weight='wgt')
features = sp.lil_matrix(np.matrix(features))
features, _ = process.preprocess_features(features)
nb_nodes = features.shape[0]
ft_size = features.shape[1]
adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
if sparse:
sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
else:
adj = (adj + sp.eye(adj.shape[0])).todense()
features = torch.FloatTensor(features[np.newaxis])
if not sparse:
adj = torch.FloatTensor(adj[np.newaxis])
model = DGI(ft_size, hid_units, nonlinearity)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)
if torch.cuda.is_available():
print('Using CUDA')
model.cuda()
features = features.cuda()
if sparse:
sp_adj = sp_adj.cuda()
else:
adj = adj.cuda()
b_xent = nn.BCEWithLogitsLoss()
xent = nn.CrossEntropyLoss()
cnt_wait = 0
best = 1e9
best_t = 0
for epoch in range(nb_epochs):
model.train()
optimiser.zero_grad()
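# build corrupted (negative) samples for DGI by permuting the node features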
idx = np.random.permutation(nb_nodes)
shuf_fts = features[:, idx, :]
lbl_1 = torch.ones(batch_size, nb_nodes)
lbl_2 = torch.zeros(batch_size, nb_nodes)
lbl = torch.cat((lbl_1, lbl_2), 1)
if torch.cuda.is_available():
shuf_fts = shuf_fts.cuda()
lbl = lbl.cuda()
logits = model(features, shuf_fts, sp_adj if sparse else adj, sparse, None, None, None)
loss = b_xent(logits, lbl)
print('Loss:', loss)
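# early stopping: track the best loss and stop after `patience` epochs without improvement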
if loss < best:
best = loss
best_t = epoch
cnt_wait = 0
else:
cnt_wait += 1
if cnt_wait == patience:
print("epochs: ", epoch)
print('Early stopping!')
break
loss.backward()
optimiser.step()
return (((model.embed(features, sp_adj if sparse else adj, sparse, None)[0]).squeeze()).data).cpu().numpy()
| 24.634409
| 109
| 0.656482
| 335
| 2,291
| 4.325373
| 0.346269
| 0.017253
| 0.022084
| 0.026225
| 0.125604
| 0.069013
| 0.069013
| 0.041408
| 0
| 0
| 0
| 0.020787
| 0.223047
| 2,291
| 92
| 110
| 24.902174
| 0.793258
| 0.015277
| 0
| 0.123288
| 0
| 0
| 0.020408
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013699
| false
| 0
| 0.109589
| 0
| 0.136986
| 0.054795
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac447e8327f451aa635702a06c66e0d74dc0eb1
| 1,668
|
py
|
Python
|
tools/ci/deploy_to_github_release.py
|
rodb70/RDMnet
|
94d17e1dfda2d1f56b120f6342231c43bf6862b0
|
[
"Apache-2.0"
] | 30
|
2018-07-16T15:54:19.000Z
|
2021-11-21T21:17:36.000Z
|
tools/ci/deploy_to_github_release.py
|
rodb70/RDMnet
|
94d17e1dfda2d1f56b120f6342231c43bf6862b0
|
[
"Apache-2.0"
] | 27
|
2019-04-12T22:45:25.000Z
|
2021-08-13T15:20:04.000Z
|
tools/ci/deploy_to_github_release.py
|
rodb70/RDMnet
|
94d17e1dfda2d1f56b120f6342231c43bf6862b0
|
[
"Apache-2.0"
] | 12
|
2019-06-28T19:28:58.000Z
|
2021-11-17T12:10:44.000Z
|
"""Deploys binaries to a GitHub release given the specified tag name."""
import argparse
import os
import time
from github import Github
THIS_FILE_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
GH_REPO_IDENT = "ETCLabs/RDMnet"
GH_USERNAME = "svc-etclabs"
GH_API_TOKEN = os.getenv("SVC_ETCLABS_REPO_TOKEN")
def deploy_binaries(version: str):
"""Deploys staged binaries to a new GitHub Release."""
g = Github(login_or_token=GH_USERNAME, password=GH_API_TOKEN)
repo = g.get_repo(GH_REPO_IDENT)
print(f"Waiting for the correct GitHub tag v{version} to become available...")
keep_trying = True
while keep_trying:
for tag in repo.get_tags():
if tag.name == f"v{version}":
keep_trying = False # Tag now exists
break
if keep_trying:
time.sleep(5)
print(f"Tag v{version} available. Creating release...")
new_release = repo.create_git_release(
tag=f"v{version}",
name=f"RDMnet v{version}",
message=f"Automated release of RDMnet for v{version}",
)
new_release.upload_asset("RDMnetSetup_x86.msi")
new_release.upload_asset("RDMnetSetup_x64.msi")
new_release.upload_asset("RDMnet.pkg")
def main():
parser = argparse.ArgumentParser(
description="Deploy RDMnet artifacts to GitHub Release"
)
parser.add_argument("version", help="Artifact version being deployed")
args = parser.parse_args()
# Make sure our cwd is the root of the repository
os.chdir(os.path.abspath(os.path.join(THIS_FILE_DIRECTORY, "..", "..")))
deploy_binaries(args.version)
if __name__ == "__main__":
main()
| 29.785714
| 82
| 0.682854
| 230
| 1,668
| 4.726087
| 0.434783
| 0.044158
| 0.044158
| 0.057958
| 0.083717
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003776
| 0.206235
| 1,668
| 55
| 83
| 30.327273
| 0.817221
| 0.107314
| 0
| 0
| 0
| 0
| 0.255924
| 0.014895
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0.025641
| 0.102564
| 0
| 0.153846
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac489649919e5a666b90d4e91cad4bcbdd5e983
| 1,513
|
py
|
Python
|
matchms/filtering/add_losses.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/filtering/add_losses.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/filtering/add_losses.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy
from ..Fragments import Fragments
from ..typing import SpectrumType
logger = logging.getLogger("matchms")
def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType:
"""Derive losses based on precursor mass.
Parameters
----------
spectrum_in:
Input spectrum.
loss_mz_from:
Minimum allowed m/z value for losses. Default is 0.0.
loss_mz_to:
Maximum allowed m/z value for losses. Default is 1000.0.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
precursor_mz = spectrum.get("precursor_mz", None)
if precursor_mz:
assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.",
"Consider applying 'add_precursor_mz' filter first.")
peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities
losses_mz = (precursor_mz - peaks_mz)[::-1]
losses_intensities = peaks_intensities[::-1]
# Add losses which are within given boundaries
mask = numpy.where((losses_mz >= loss_mz_from)
& (losses_mz <= loss_mz_to))
spectrum.losses = Fragments(mz=losses_mz[mask],
intensities=losses_intensities[mask])
else:
logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
return spectrum
| 35.186047
| 109
| 0.639128
| 183
| 1,513
| 5.076503
| 0.371585
| 0.106566
| 0.032293
| 0.017223
| 0.178687
| 0.157158
| 0.157158
| 0.068891
| 0
| 0
| 0
| 0.01444
| 0.26768
| 1,513
| 42
| 110
| 36.02381
| 0.824007
| 0.188367
| 0
| 0
| 0
| 0
| 0.15865
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.043478
| false
| 0
| 0.173913
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ac8388c155952144c99a47c3c6e38eeff168835
| 10,829
|
py
|
Python
|
cornflow_client/schema/dictSchema.py
|
baobabsoluciones/cornflow-client
|
f9996f0b841885d26639cb63c8ba6090387de57f
|
[
"MIT"
] | 3
|
2021-05-12T11:21:26.000Z
|
2022-02-22T19:23:46.000Z
|
cornflow_client/schema/dictSchema.py
|
baobabsoluciones/cornflow-client
|
f9996f0b841885d26639cb63c8ba6090387de57f
|
[
"MIT"
] | 17
|
2021-03-14T17:09:46.000Z
|
2022-02-28T19:12:37.000Z
|
cornflow_client/schema/dictSchema.py
|
baobabsoluciones/cornflow-client
|
f9996f0b841885d26639cb63c8ba6090387de57f
|
[
"MIT"
] | 2
|
2020-10-03T20:00:19.000Z
|
2022-03-24T11:52:22.000Z
|
import re
from .dict_functions import gen_schema, ParameterSchema, sort_dict
from cornflow_client.constants import JSON_TYPES, DATASCHEMA
class DictSchema:
"""
A json-schema to dict-schema parser
"""
def __init__(self, jsonschema):
"""
Class to manage internal dictionary schema
:param jsonschema: a json schema
"""
self.types = JSON_TYPES
schema_dict = self.get_empty_schema()
if "definitions" in jsonschema:
for item in jsonschema["definitions"].items():
self._get_element_dict(schema_dict=schema_dict, item=item)
if "properties" in jsonschema:
for item in jsonschema["properties"].items():
self._get_element_dict(schema_dict=schema_dict, item=item)
self._create_data_schema(
schema_dict=schema_dict,
item=item,
required_list=jsonschema.get("required"),
)
self.schema = schema_dict
def get_schema(self):
return self.schema
@staticmethod
def get_empty_schema():
"""
Create an empty schema dict
"""
return {DATASCHEMA: []}
def _create_data_schema(self, schema_dict, item, required_list=None):
"""
Add a schema to schema_dict[DATASCHEMA]
:param item: (key, value) of a dict. The key contains the name of the schema
and the value contains its content.
:return: the schema dict.
"""
name, content = item
if required_list is None:
required_list = []
schema = dict(
name=name,
type=self._get_type_or_new_schema(item),
many=("type" in content and content["type"] == "array"),
required=name in required_list,
)
schema_dict[DATASCHEMA].append(schema)
return schema
def _get_element_dict(self, schema_dict, item, required_list=None):
"""
Parse an item (key, value) from the jsonschema and return the corresponding dict.
:param item: An item from the jsonschema (key, value)
:param required_list: A list of names corresponding to the required fields in the parent object
:return: A dict element for a schema_dict.
"""
if required_list is None:
required_list = []
name, content = item
if "type" not in content:
if "$ref" in content:
return {
"name": name,
"type": self._get_ref(item),
"many": False,
"required": (name in required_list),
}
else:
print("\nType missing for item: {}".format(name))
raise TypeError("Type missing")
if content["type"] == "object":
return {
"name": name,
"type": self._get_object_schema(schema_dict=schema_dict, item=item),
"many": False,
"required": (name in required_list),
}
elif content["type"] == "array":
return {
"name": name,
"type": self._get_array_schema(schema_dict=schema_dict, item=item),
"many": True,
"required": (name in required_list),
}
else:
return self._get_field_dict(item, required_list)
def _get_object_schema(self, schema_dict, item):
"""
Transform an object item from the jsonschema into a dict for the schema_dict and update self.schema_dict.
In jsonschema, objects are similar to Python dicts.
The object in jsonschema is in the following format:
"object_name": {"type":"object", "properties":{"field1": {...}, "field2": {...}}, "required": ["field1"]}
The schema_dict object uses the format:
{"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)}, ...]}
:param item: The jsonschema item (key, value)
The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}}})
:return: The schema name
"""
name, content = item
schema_name = self._get_new_schema_name(schema_dict=schema_dict, name=name)
ell = {
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content["properties"].items()
]
}
schema_dict.update(ell)
return schema_name
def _get_array_schema(self, schema_dict, item):
"""
Transform an array item from the jsonschema into a dict for the schema_dict and update self.schema_dict.
In jsonschema, arrays are similar to Python lists.
The object in jsonschema is in the following format:
"object_name": {"type":"array", "items":{format_of_items}}
The schema_dict object uses the format:
{"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)}, ...]}
:param item: The jsonschema item (key, value)
The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}}})
:return: The schema name
"""
name, content = item
content = content["items"]
schema_name = self._get_new_schema_name(schema_dict=schema_dict, name=name)
if "type" in content and content["type"] == "object":
schema_dict.update(
{
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content["properties"].items()
]
}
)
elif "$ref" in content:
schema_name = self._get_ref((None, content))
elif "type" in content and content["type"] != "array":
return self._get_type(content["type"])
else:
schema_dict.update(
{
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content.items()
]
}
)
return schema_name
def _get_field_dict(self, item, required_list=None):
"""
Transform a "normal" item from the jsonschema in a dict for the schema_dict and return it.
This is used for items that will directly translate into fields.
:param item: The jsonschema item in format (key, value)
:param required_list: a list of the fields required in the parent object.
:return: the schema_dict for this item
"""
d = dict(
name=item[0],
type=self._get_type(item[1]["type"]),
required=(item[0] in required_list),
allow_none=("null" in item[1]["type"]),
many=False,
)
return d
def _get_ref(self, item):
"""
Get the name of the schema for a jsonschema reference.
jsonschema definitions are parsed first and the corresponding schemas are created, so a schema should exist
for the reference.
:param item: The jsonschema item in format (key, value)
The value should be in the following format: {"$ref": "#/definitions/object_name"}
:return: The schema name (_get_schema_name(object_name))
"""
content = item[1]
ref = re.search("definitions/(.+)", content["$ref"]).group(1)
return self._get_schema_name(ref)
def _get_type_or_new_schema(self, item):
"""
returns a new schema or a type depending on the json_type
"""
name, content = item
if "type" not in content or content["type"] == "object":
return self._get_schema_name(name)
elif content["type"] == "array":
return self._get_type_or_new_schema((name, content["items"]))
else:
return self._get_type(content["type"])
def _get_type(self, json_type):
"""
Translate the type between jsonschema and schema_dict.
:param json_type: the type in jsonschema
:return: the type in schema_dict.
"""
if type(json_type) is list:
not_null_type = [i for i in json_type if i != "null"]
if len(not_null_type) > 1:
raise Warning("Warning: more than one type given")
return self.types[not_null_type[0]]
else:
return self.types[json_type]
@staticmethod
def _get_schema_name(name, n=0):
"""
Transform an element name into a schema name in order to create a schema corresponding to an object or array.
The schema name use the following format:
[name][n]Schema (for example if name is "values" and n is 3: Values3Schema)
:param name: The name of the object or array.
:param n: if n is different from 0, it is added to the schema name.
:return: the corresponding schema name.
"""
if n == 0:
return name.capitalize() + "Schema"
else:
return name.capitalize() + str(n) + "Schema"
def _get_new_schema_name(self, schema_dict, name, n=0):
try_name = self._get_schema_name(name, n)
if try_name in schema_dict:
return self._get_new_schema_name(
schema_dict=schema_dict, name=name, n=n + 1
)
else:
return try_name
@staticmethod
def _get_required(content):
"""
Get the list of required names if it exists.
:param content: the dict which should have a "required" key.
:return: The required list, or an empty list.
"""
return content.get("required", [])
def to_marshmallow(self):
dict_params = self.schema
result_dict = {}
ordered = sort_dict(dict_params)
tuplist = sorted(dict_params.items(), key=lambda v: ordered[v[0]])
for key, params in tuplist:
schema = ParameterSchema()
# this line validates the list of parameters:
params1 = schema.load(params, many=True)
result_dict[key] = gen_schema(key, params1, result_dict)
return result_dict[DATASCHEMA]
| 35.739274
| 117
| 0.559793
| 1,265
| 10,829
| 4.605534
| 0.124901
| 0.084106
| 0.038448
| 0.037762
| 0.456402
| 0.401819
| 0.344147
| 0.296087
| 0.250086
| 0.235668
| 0
| 0.003512
| 0.342599
| 10,829
| 302
| 118
| 35.857616
| 0.814862
| 0.303629
| 0
| 0.346821
| 0
| 0
| 0.053468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086705
| false
| 0
| 0.017341
| 0.00578
| 0.242775
| 0.00578
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8acad105c230508195bd3af6419dc374a38241b0
| 6,670
|
py
|
Python
|
swift/common/ondisk.py
|
citrix-openstack-build/swift
|
34340ddf49a84f3b3398012c2b60be1215033559
|
[
"Apache-2.0"
] | 1
|
2016-03-14T23:38:37.000Z
|
2016-03-14T23:38:37.000Z
|
swift/common/ondisk.py
|
vimeo/swift
|
5eea524d3ea6d29c2b6f34927c0130090e7ed44d
|
[
"Apache-2.0"
] | null | null | null |
swift/common/ondisk.py
|
vimeo/swift
|
5eea524d3ea6d29c2b6f34927c0130090e7ed44d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods & Attributes for shared 'on-disk' data layouts."""
import os
import sys
import errno
from hashlib import md5
from random import shuffle
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift import gettext_ as _
from swift.common.utils import listdir, quote
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
_hash_conf = ConfigParser()
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''
if _hash_conf.read('/etc/swift/swift.conf'):
try:
HASH_PATH_SUFFIX = _hash_conf.get('swift-hash',
'swift_hash_path_suffix')
except (NoSectionError, NoOptionError):
pass
try:
HASH_PATH_PREFIX = _hash_conf.get('swift-hash',
'swift_hash_path_prefix')
except (NoSectionError, NoOptionError):
pass
def validate_configuration():
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
sys.exit("Error: [swift-hash]: both swift_hash_path_suffix "
"and swift_hash_path_prefix are missing "
"from /etc/swift/swift.conf")
def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account]
if container:
paths.append(container)
if object:
paths.append(object)
if raw_digest:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).digest()
else:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).hexdigest()
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return "%016.05f" % (float(timestamp))
def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises: ValueError if given an invalid device or partition
"""
invalid_device = False
invalid_partition = False
if not device or '/' in device or device in ['.', '..']:
invalid_device = True
if not partition or '/' in partition or partition in ['.', '..']:
invalid_partition = True
if invalid_device:
raise ValueError('Invalid device: %s' % quote(device or ''))
elif invalid_partition:
raise ValueError('Invalid partition: %s' % quote(partition or ''))
def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None):
'''
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
'''
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
for device in device_dir:
if mount_check and not \
os.path.ismount(os.path.join(devices, device)):
if logger:
logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
datadir_path = os.path.join(devices, device, datadir)
partitions = listdir(datadir_path)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
| 36.054054
| 79
| 0.625487
| 816
| 6,670
| 5.011029
| 0.311275
| 0.037173
| 0.023967
| 0.017119
| 0.089998
| 0.078748
| 0.078748
| 0.078748
| 0.062607
| 0.062607
| 0
| 0.007707
| 0.2997
| 6,670
| 184
| 80
| 36.25
| 0.867694
| 0.358921
| 0
| 0.255102
| 0
| 0
| 0.081899
| 0.031972
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0.020408
| 0.081633
| 0
| 0.183673
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8acb675f5ab5c65b02ffbf255720c5176625a170
| 1,923
|
py
|
Python
|
.OLD_FILES/dossiers2_old1/custom/cache.py
|
KIHestad/WoT-Dossier-Parser-Create-Struct
|
9eadeeead59b7b6cf78dc6a1e1e89fe2dffb260e
|
[
"MIT"
] | null | null | null |
.OLD_FILES/dossiers2_old1/custom/cache.py
|
KIHestad/WoT-Dossier-Parser-Create-Struct
|
9eadeeead59b7b6cf78dc6a1e1e89fe2dffb260e
|
[
"MIT"
] | null | null | null |
.OLD_FILES/dossiers2_old1/custom/cache.py
|
KIHestad/WoT-Dossier-Parser-Create-Struct
|
9eadeeead59b7b6cf78dc6a1e1e89fe2dffb260e
|
[
"MIT"
] | 2
|
2021-11-10T19:12:57.000Z
|
2022-03-13T10:04:48.000Z
|
# uncompyle6 version 2.11.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)]
# Embedded file name: scripts/common/dossiers2/custom/cache.py
import nations
from items import vehicles
def getCache():
global _g_cache
return _g_cache
def buildCache():
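# index vehicles by level, by special tag, and by whether they appear in a nation's research tree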
vehiclesByLevel = {}
vehiclesByTag = {'beast': set(),'sinai': set(),'patton': set()}
vehiclesInTreeByNation = {}
vehiclesInTree = set()
nationsWithVehiclesInTree = []
unlocksSources = vehicles.getUnlocksSources()
for nationIdx in xrange(len(nations.NAMES)):
nationList = vehicles.g_list.getList(nationIdx)
vehiclesInNationTree = set()
for vehDescr in nationList.itervalues():
vehiclesByLevel.setdefault(vehDescr.level, set()).add(vehDescr.compactDescr)
for tag in ('beast', 'sinai', 'patton'):
if tag in vehDescr.tags:
vehiclesByTag[tag].add(vehDescr.compactDescr)
if len(unlocksSources.get(vehDescr.compactDescr, set())) > 0 or len(vehicles.g_cache.vehicle(nationIdx, vehDescr.id).unlocksDescrs) > 0:
vehiclesInNationTree.add(vehDescr.compactDescr)
vehiclesInTree.update(vehiclesInNationTree)
vehiclesInTreeByNation[nationIdx] = vehiclesInNationTree
if bool(vehiclesInNationTree):
nationsWithVehiclesInTree.append(nationIdx)
vehicles8p = vehiclesByLevel[8] | vehiclesByLevel[9] | vehiclesByLevel[10]
_g_cache.update({'vehiclesByLevel': vehiclesByLevel,
'vehicles8+': vehicles8p,
'vehiclesByTag': vehiclesByTag,
'mausTypeCompDescr': vehicles.makeVehicleTypeCompDescrByName('germany:G42_Maus'),
'vehiclesInTreesByNation': vehiclesInTreeByNation,
'vehiclesInTrees': vehiclesInTree,
'nationsWithVehiclesInTree': nationsWithVehiclesInTree
})
_g_cache = {}
| 40.0625
| 148
| 0.693708
| 178
| 1,923
| 7.432584
| 0.522472
| 0.022676
| 0.052154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 0.198648
| 1,923
| 48
| 149
| 40.0625
| 0.828683
| 0.107124
| 0
| 0
| 0
| 0
| 0.096906
| 0.028021
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8acb8cd4dc2d6e35f38c30493bd708782f4c4cfd
| 3,400
|
py
|
Python
|
render_video.py
|
frostburn/branch-cut-mandelbrot
|
26c4d2db75a32b9190d40a09ebfb8a67fc4829e8
|
[
"MIT"
] | null | null | null |
render_video.py
|
frostburn/branch-cut-mandelbrot
|
26c4d2db75a32b9190d40a09ebfb8a67fc4829e8
|
[
"MIT"
] | null | null | null |
render_video.py
|
frostburn/branch-cut-mandelbrot
|
26c4d2db75a32b9190d40a09ebfb8a67fc4829e8
|
[
"MIT"
] | null | null | null |
import argparse
import imageio
import progressbar
from _routines import ffi, lib
from pylab import *
from random import Random
RESOLUTIONS = {
"2160p": (3840, 2160),
"1440p": (2560, 1440),
"1080p": (1920, 1080),
"720p": (1280, 720),
"480p": (854, 480),
"360p": (640, 360),
"240p": (426, 240),
"160p": (284, 160),
"80p": (142, 80),
"40p": (71, 40),
}
def make_video_frame(rgb, indexing='ij', dither=1.0/256.0):
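# optional dither hides banding; transpose for 'ij' indexing, clip to [0, 1], and convert to 8-bit RGB for the video writer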
if dither:
rgb = [channel + random(channel.shape)*dither for channel in rgb]
if indexing == 'ij':
rgb = [channel.T for channel in rgb]
frame = stack(rgb, axis=-1)
frame = clip(frame, 0.0, 1.0)
return (frame * 255).astype('uint8')
def do_render(args, writer):
max_iter = 32
im_buf = ffi.new("double[]", args.width * args.height)
cut_buf = ffi.new("double[]", max_iter)
fixed_seed = Random(1)
for i in range(max_iter):
cut_buf[i] = i*fixed_seed.random()
for n in progressbar.progressbar(range(args.num_frames)):
tg = n / (args.num_frames - 1)
t = tg
lib.mandelbrot(im_buf, args.width, args.height, 0.7, 0.8, 3.5, t-20, cut_buf, max_iter)
im = array(list(im_buf)).reshape(args.height, args.width)
# for i in range(max_iter):
# cut_buf[i] *= 0.05**args.dt
bg = (im < 0)
im /= im.max()
fg = 1 - bg
red = im
green = 1 - im
blue = 4*im*(1-im)
blue = blue + 0.2*green
red = 0.1 + 0.8*red + green**3
green = 0.2 + 0.21*green
frame = make_video_frame([red*fg + 0.15*bg, green*fg + 0.08*bg, blue*fg + 0.1*bg], indexing=None)
writer.append_data(frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Render audio samples')
parser.add_argument('outfile', type=str, help='Output file name')
parser.add_argument('--params', type=str, help='Parameter YAML file name')
parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
parser.add_argument('--framerate', type=int, help='Video frame rate')
parser.add_argument('--video-quality', type=int, help='Video quality factor')
parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
args = parser.parse_args()
if not args.framerate:
args.framerate = 24
if not args.video_quality:
args.video_quality = 10
writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)
# Compute derived parameters
if args.resolution:
width, height = RESOLUTIONS[args.resolution]
if not args.width:
args.width = width
if not args.height:
args.height = height
if (not args.width) or (not args.height):
raise ValueError("Invalid or missing resolution")
if not args.video_duration:
raise ValueError("Missing video duration")
args.aspect = args.width / args.height
args.num_frames = int(args.video_duration * args.framerate)
args.dt = 1.0 / args.num_frames
do_render(args, writer)
writer.close()
| 34.693878
| 113
| 0.627059
| 486
| 3,400
| 4.281893
| 0.343621
| 0.034599
| 0.065353
| 0.030754
| 0.092263
| 0.055742
| 0.055742
| 0.024027
| 0.024027
| 0
| 0
| 0.059901
| 0.229118
| 3,400
| 97
| 114
| 35.051546
| 0.734071
| 0.024706
| 0
| 0
| 0
| 0
| 0.134058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.075
| 0
| 0.1125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8accb038864b63aa2e837e9fa4c1312771a520cd
| 1,238
|
py
|
Python
|
tests/mqtt/test_subscribe.py
|
smurfix/hbmqtt
|
914440cd18b43fbe56496a73bb1259132811c539
|
[
"MIT"
] | null | null | null |
tests/mqtt/test_subscribe.py
|
smurfix/hbmqtt
|
914440cd18b43fbe56496a73bb1259132811c539
|
[
"MIT"
] | null | null | null |
tests/mqtt/test_subscribe.py
|
smurfix/hbmqtt
|
914440cd18b43fbe56496a73bb1259132811c539
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Nicolas JOUANIN
#
# See the file license.txt for copying permission.
import anyio
import unittest
from hbmqtt.mqtt.subscribe import SubscribePacket, SubscribePayload
from hbmqtt.mqtt.packet import PacketIdVariableHeader
from hbmqtt.mqtt.constants import QOS_1, QOS_2
from hbmqtt.adapters import BufferAdapter
class SubscribePacketTest(unittest.TestCase):
def test_from_stream(self):
data = b'\x80\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02'
stream = BufferAdapter(data)
message = anyio.run(SubscribePacket.from_stream, stream)
(topic, qos) = message.payload.topics[0]
self.assertEqual(topic, 'a/b')
self.assertEqual(qos, QOS_1)
(topic, qos) = message.payload.topics[1]
self.assertEqual(topic, 'c/d')
self.assertEqual(qos, QOS_2)
def test_to_stream(self):
variable_header = PacketIdVariableHeader(10)
payload = SubscribePayload(
[
('a/b', QOS_1),
('c/d', QOS_2)
])
publish = SubscribePacket(variable_header=variable_header, payload=payload)
out = publish.to_bytes()
self.assertEqual(out, b'\x82\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02')
| 35.371429
| 83
| 0.671244
| 156
| 1,238
| 5.230769
| 0.410256
| 0.091912
| 0.051471
| 0.029412
| 0.144608
| 0.07598
| 0.07598
| 0.07598
| 0.07598
| 0.07598
| 0
| 0.051653
| 0.218094
| 1,238
| 34
| 84
| 36.411765
| 0.791322
| 0.067044
| 0
| 0
| 0
| 0.074074
| 0.090356
| 0.079931
| 0
| 0
| 0
| 0
| 0.185185
| 1
| 0.074074
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ace9182901a299fe90834f06095914657f35b9c
| 8,392
|
py
|
Python
|
examples/cmrc2018_example/main.trainer.py
|
fangd123/TextBrewer
|
866f4363d9bd964f00aa60b0db5e9252a7905448
|
[
"Apache-2.0"
] | 1,121
|
2020-03-02T02:24:00.000Z
|
2022-03-31T06:33:49.000Z
|
examples/cmrc2018_example/main.trainer.py
|
fangd123/TextBrewer
|
866f4363d9bd964f00aa60b0db5e9252a7905448
|
[
"Apache-2.0"
] | 85
|
2020-03-04T09:46:17.000Z
|
2022-03-30T09:33:35.000Z
|
examples/cmrc2018_example/main.trainer.py
|
fangd123/TextBrewer
|
866f4363d9bd964f00aa60b0db5e9252a7905448
|
[
"Apache-2.0"
] | 200
|
2020-03-02T07:23:21.000Z
|
2022-03-30T08:26:24.000Z
|
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from processing import convert_examples_to_features, read_squad_examples
from processing import ChineseFullTokenizer
from pytorch_pretrained_bert.my_modeling import BertConfig
from optimization import BERTAdam
import config
from utils import read_and_convert, divide_parameters
from modeling import BertForQASimple, BertForQASimpleAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from functools import partial
from train_eval import predict
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
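# The optimizer is updated with train_batch_size examples per step; each individual
# forward/backward pass only sees train_batch_size / gradient_accumulation_steps examples.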
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load bert config
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#read data
train_examples = None
train_features = None
eval_examples = None
eval_features = None
num_train_steps = None
tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
convert_fn = partial(convert_examples_to_features,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length)
if args.do_train:
train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
if args.fake_file_1:
fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples1
train_features += fake_features1
if args.fake_file_2:
fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples2
train_features += fake_features2
num_train_steps = int(len(train_features)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
#Build Model and load checkpoint
model_S = BertForQASimple(bert_config_S,args)
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
distiller = BasicTrainer(train_config = train_config,
model = model_S,
adaptor = BertForQASimpleAdaptorTraining)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask,
all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict,
eval_examples=eval_examples,
eval_features=eval_features,
args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_examples,eval_features,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
| 45.362162
| 130
| 0.674094
| 1,101
| 8,392
| 4.825613
| 0.214351
| 0.013552
| 0.020704
| 0.012422
| 0.214944
| 0.153962
| 0.130247
| 0.125353
| 0.118389
| 0.095615
| 0
| 0.005165
| 0.23868
| 8,392
| 184
| 131
| 45.608696
| 0.82642
| 0.018232
| 0
| 0.074324
| 0
| 0
| 0.064335
| 0.005849
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.013514
| false
| 0
| 0.101351
| 0
| 0.121622
| 0.006757
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad1153bc4951b73c09bcd9a5a044f2aeefb38fb
| 13,832
|
py
|
Python
|
gym/gym/benchmarks/__init__.py
|
youngwoon/DnC-RL-Tensorflow
|
02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f
|
[
"MIT"
] | 9
|
2019-02-01T22:45:57.000Z
|
2022-01-08T16:13:24.000Z
|
gym/gym/benchmarks/__init__.py
|
youngwoon/DnC-RL-Tensorflow
|
02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f
|
[
"MIT"
] | null | null | null |
gym/gym/benchmarks/__init__.py
|
youngwoon/DnC-RL-Tensorflow
|
02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f
|
[
"MIT"
] | 1
|
2020-04-07T20:09:48.000Z
|
2020-04-07T20:09:48.000Z
|
# EXPERIMENTAL: all may be removed soon
from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere
register_benchmark(
id='Atari200M',
scorer=scoring.TotalReward(),
name='Atari200M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
},
])
register_benchmark(
id='Atari40M',
scorer=scoring.TotalReward(),
name='Atari40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
}
])
register_benchmark(
id='AtariExploration40M',
scorer=scoring.TotalReward(),
name='AtariExploration40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'FreewayNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.1,
'reward_ceiling': 31.0,
},
{
'env_id': 'GravitarNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 245.5,
'reward_ceiling': 1000.0,
},
{
'env_id': 'MontezumaRevengeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 25.0,
'reward_ceiling': 10000.0,
},
{
'env_id': 'PitfallNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -348.8,
'reward_ceiling': 1000.0,
},
{
'env_id': 'PrivateEyeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 662.8,
'reward_ceiling': 100.0,
},
{
'env_id': 'SolarisNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 2047.2,
'reward_ceiling': 5000.0,
},
{
'env_id': 'VentureNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 18.0,
'reward_ceiling': 100.0,
}
])
register_benchmark(
id='ClassicControl2-v0',
name='ClassicControl2',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v0',
'trials': 1,
'max_timesteps': 2000,
},
{'env_id': 'Pendulum-v0',
'trials': 1,
'max_timesteps': 1000,
},
])
register_benchmark(
id='ClassicControl-v0',
name='ClassicControl',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': 0.0,
'reward_ceiling': 500.0,
},
{'env_id': 'Acrobot-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MountainCar-v0',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -200.0,
'reward_ceiling': -100.0,
},
{'env_id': 'Pendulum-v0',
'trials': 3,
'max_timesteps': 200000,
'reward_floor': -1400.0,
'reward_ceiling': 0.0,
},
])
### Autogenerated by tinkerbell.benchmark.convert_benchmark.py
register_benchmark(
id='Mujoco10M-v0',
name='Mujoco10M',
view_group="Control",
description='Mujoco benchmark with 10M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'Ant-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Hopper-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Humanoid-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'HumanoidStandup-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Walker2d-v1',
'trials': 1,
'max_timesteps': 1000000,
}
])
register_benchmark(
id='Mujoco1M-v0',
name='Mujoco1M',
view_group="Control",
description='Mujoco benchmark with 1M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'HalfCheetah-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -280.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'Hopper-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 16.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'InvertedDoublePendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 53.0,
'reward_ceiling': 10000.0,
},
{'env_id': 'InvertedPendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 5.6,
'reward_ceiling': 1000.0,
},
{'env_id': 'Reacher-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -43.0,
'reward_ceiling': -0.5,
},
{'env_id': 'Swimmer-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 0.23,
'reward_ceiling': 500.0,
},
{'env_id': 'Walker2d-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 1.6,
'reward_ceiling': 5500.0,
}
])
register_benchmark(
id='MinecraftEasy-v0',
name='MinecraftEasy',
view_group="Minecraft",
description='Minecraft easy benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftBasic-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -2200.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftDefaultFlat1-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MinecraftTrickyArena1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -1000.0,
'reward_ceiling': 2800.0,
},
{'env_id': 'MinecraftEating1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -300.0,
'reward_ceiling': 300.0,
},
])
register_benchmark(
id='MinecraftMedium-v0',
name='MinecraftMedium',
view_group="Minecraft",
description='Minecraft medium benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftCliffWalking1-v0',
'trials': 2,
'max_timesteps': 400000,
'reward_floor': -100.0,
'reward_ceiling': 100.0,
},
{'env_id': 'MinecraftVertical-v0',
'trials': 2,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 8040.0,
},
{'env_id': 'MinecraftMaze1-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftMaze2-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftHard-v0',
name='MinecraftHard',
view_group="Minecraft",
description='Minecraft hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftObstacles-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 2080.0,
},
{'env_id': 'MinecraftSimpleRoomMaze-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 4160.0,
},
{'env_id': 'MinecraftAttic-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1040.0,
},
{'env_id': 'MinecraftComplexityUsage-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftVeryHard-v0',
name='MinecraftVeryHard',
view_group="Minecraft",
description='Minecraft very hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftMedium-v0',
'trials': 2,
'max_timesteps': 1800000,
'reward_floor': -10000.0,
'reward_ceiling': 16280.0,
},
{'env_id': 'MinecraftHard-v0',
'trials': 2,
'max_timesteps': 2400000,
'reward_floor': -10000.0,
'reward_ceiling': 32640.0,
},
])
register_benchmark(
id='MinecraftImpossible-v0',
name='MinecraftImpossible',
view_group="Minecraft",
description='Minecraft impossible benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftDefaultWorld1-v0',
'trials': 2,
'max_timesteps': 6000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
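# Build the Bernoulli bandit benchmark tasks programmatically: one task per
# (n_arms, n_episodes) combination, 3 x 3 = 9 tasks in total.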
bandit_tasks = []
for n_arms in [5, 10, 50]:
for n_episodes in [10, 100, 500]:
bandit_tasks.append({
'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': n_episodes,
})
register_benchmark(
id='BernoulliBandit-v0',
name='BernoulliBandit',
description='Multi-armed Bernoulli bandits',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=bandit_tasks
)
tabular_mdp_tasks = []
for n_states in [10]:
for n_actions in [5]:
for episode_length in [10]:
for n_episodes in [10, 25, 50, 75, 100]:
tabular_mdp_tasks.append({
'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format(
s=n_states, a=n_actions, t=episode_length, n=n_episodes,
),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': episode_length * n_episodes * 2,
})
register_benchmark(
id='RandomTabularMDP-v0',
name='RandomTabularMDP',
description='Random tabular MDPs',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=tabular_mdp_tasks
)
| 28.286299
| 135
| 0.510049
| 1,307
| 13,832
| 5.193573
| 0.159908
| 0.041249
| 0.031821
| 0.08957
| 0.681791
| 0.613583
| 0.574396
| 0.478344
| 0.36152
| 0.315262
| 0
| 0.097037
| 0.343623
| 13,832
| 488
| 136
| 28.344262
| 0.650622
| 0.008603
| 0
| 0.558887
| 0
| 0
| 0.34289
| 0.049318
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004283
| 0
| 0.004283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad19946c7489c1b3a99e589e195e1b73244786f
| 9,538
|
py
|
Python
|
hypnettorch/data/timeseries/preprocess_audioset.py
|
pennfranc/hypnettorch
|
69d4c455028289ebe3d040af0955d909a9fef3ae
|
[
"Apache-2.0"
] | 31
|
2021-10-20T19:38:41.000Z
|
2022-03-28T08:23:32.000Z
|
hypnettorch/data/timeseries/preprocess_audioset.py
|
pennfranc/hypnettorch
|
69d4c455028289ebe3d040af0955d909a9fef3ae
|
[
"Apache-2.0"
] | 2
|
2022-02-14T08:25:43.000Z
|
2022-03-26T18:10:52.000Z
|
hypnettorch/data/timeseries/preprocess_audioset.py
|
pennfranc/hypnettorch
|
69d4c455028289ebe3d040af0955d909a9fef3ae
|
[
"Apache-2.0"
] | 5
|
2021-11-04T10:10:29.000Z
|
2022-03-21T09:00:22.000Z
|
#!/usr/bin/env python3
# Copyright 2020 Benjamin Ehret
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title :data/timeseries/preprocess_audioset.py
# author :be
# contact :[email protected]
# created :31/03/2020
# version :1.0
# python_version :3.7
"""
Script to structure the audioset dataset, which can then be used via
:class:`data.timeseries.audioset_data.AudiosetData`.
The result of this script is available at
https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0
If you want to recreate or modify this dataset, download the audioset data from
https://research.google.com/audioset/download.html
and extract the tar.gz into the following folder:
``datasets/sequential/audioset/audioset_download``.
Subsequently executing this script will create a pickle file containing the 100
class subset of audioset used in this study.
The dataset is stored in tensorflow files. Since we work with pytorch and there
is no utility to read tensorflow files, we extract the data and save them as
numpy arrays in a pickle file.
Furthermore the data are preprocessed to fit our continual learning experiments.
The original dataset provides three subsets with different compositions of
samples and classes. Since we only work with a subset of classes and samples,
we load all available data and then filter and structure them according to our
criteria.
We use the same criteria as Kemker et al. Classes and samples are restricted in
the following way:
Classes:
- no restriction according to ontology file (parsed from ontology.json)
- no parent / child relationship (parsed from ontology.json)
- confidence level > 70% (data was copied from website into txt file)
- number of samples: we only take classes that have more samples than
a certain threshold
Samples:
- since samples can have multiple labels, we only use samples which
only belong to one of the classes we use
- we exclude samples that don't have the full length of 10 seconds
The chosen classes and samples are then split into train and test data and
saved to a pickle file.
"""
import numpy as np
import pickle
import tensorflow as tf
import os
import json
from warnings import warn
warn('The script was created for one time usage and has to be adapted when ' +
'reusing it. All paths specified here are absolute.')
# Tensorflow eager mode needs to be enabled for dataset mapping to work!
tf.enable_eager_execution()
# Set paths and parameters
data_dir = '../../datasets/sequential/audioset/'
download_dir = os.path.join(data_dir,'audioset_download')
fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv')
fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv')
fpath_ontology = os.path.join(data_dir, 'ontology.json')
target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle')
n_classes = 100
n_sample = 1000
test_frac = 0.20
### Load data by deserializing the TFRecord files and applying the decode function.
def decode(serialized_example):
"""Decode data from TFRecord files.
Args:
serialized_example: serialized_example as created by
tf.data.TFRecordDataset
Returns:
(tuple): Tuple containing:
- **audio** (numpy.ndarray): Array of shape (10,128) representing one
sample with 10 timesteps and 128 features
- **label** (numpy.ndarray): Array of shape (1,) containing the class
of the corresponding sample
"""
sequence_features = {
'audio_embedding': tf.FixedLenSequenceFeature([], tf.string),
}
context_features = {
'start_time_seconds': tf.FixedLenFeature([], tf.float32),
'labels': tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized_example,
sequence_features=sequence_features,
context_features=context_features
)
audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8)
label = tf.cast(context_parsed['labels'], tf.int64)
return audio, label
# Apply decode function to all dataset entries using map function.
# Take files from all three data sets since we repartition anyway.
fpaths = []
for path, subdirs, files in os.walk(download_dir):
for name in files:
if 'tfrecord' in name:
fpaths.append(os.path.join(path, name))
# Create dataset and decode
dataset = tf.data.TFRecordDataset(fpaths)
dataset = dataset.map(decode)
# Extract data to lists
x = []
y = []
for d in dataset:
x.append(d[0].numpy())
y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy())
### Filter classes as described above.
# Parse confidence values
conf_data = {}
with open(fpath_conf_data) as f:
for line in f:
tokens = line.split()
# parse confidence
c = 0
for t in tokens:
if t.find('%') != -1:
c = int(t[:-1])
# parse class name
n = ''
for t in tokens:
if t.find('%') == -1 and t != '-':
if n == '':
n = t
else:
n = n+' '+t
else:
break
conf_data.update({n:c})
# Parse class numbers from label csv file
l = -1
csv_data = {}
with open(fpath_label_inds) as f:
for line in f:
if l == -1:
l += 1
continue
tokens = line.split('"')
n = tokens[1]
csv_data.update({n:l})
l +=1
# Parse ontology info from json file
with open(fpath_ontology, 'r') as f:
json_data = json.load(f)
# Put all data into a single list.
all_data = []
for j in json_data:
if j['name'] in conf_data.keys():
class_info = {
'name' : j['name'],
'restricted' : j['restrictions'] != [],
'has_child' : j['child_ids'] != [],
'conf' : conf_data[j['name']],
'id' : csv_data[j['name']]
}
all_data.append(class_info)
# Filter classes
classes = []
for c in all_data:
if not c['restricted'] and not c['has_child'] and c['conf'] >= 70:
classes.append(c['id'])
### Filter the samples.
# Find samples that belong to only one of the potential classes.
# We also exclude some samples that don't have data for the full 10 seconds.
# First discard labels that are not in the set of potential classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],classes))
# Find samples with one label
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Count number of valid samples for potential classes
y_single = np.asarray([y_fil[i][0] for i in valid_idx])
num_samples = [len(np.where(y_single == i)[0]) for i in classes]
# Take the n classes with the highest number of samples
n_sample_cutoff = np.sort(num_samples)[-n_classes]
class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0]
our_classes = [classes[i] for i in class_idx]
### Filter the data again according to the chosen classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],our_classes))
# Find samples that belong to only one of the potential classes
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Restructure data and relabel the classes to be between 0 and n_classes
y_data = [y_fil[i][0] for i in valid_idx]
y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data]
y_data = np.asarray(y_data)
x_data = [x[i] for i in valid_idx]
x_data = np.stack(x_data)
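# At this point x_data is a (num_valid_samples, 10, 128) array of audio embeddings
# and y_data holds the relabeled class ids in the range [0, n_classes).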
### Split into test and train and restrict the number of samples per class
np.random.seed(42)
n_train = int(n_sample * (1-test_frac))
n_test = int(n_sample * test_frac)
train_ind = []
test_ind = []
for i in range(n_classes):
sample_idx = np.where(y_data == i)[0]
n_sample_class = len(sample_idx)
rand_idx = np.arange(n_sample_class)
np.random.shuffle(rand_idx)
train_ind.extend(sample_idx[rand_idx[0:n_train]])
test_ind.extend(sample_idx[rand_idx[n_train:n_sample]])
train_ind = np.asarray(train_ind)
test_ind = np.asarray(test_ind)
sub_sample_idx = np.hstack((train_ind,test_ind))
x_data_sub = x_data[sub_sample_idx,:,:]
y_data_sub = y_data[sub_sample_idx]
train_ind = np.arange(0,len(train_ind))
test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind))
### Save data
with open(target_path, 'wb') as f:
pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
| 32.889655
| 80
| 0.68463
| 1,478
| 9,538
| 4.29161
| 0.257781
| 0.014189
| 0.008513
| 0.011036
| 0.165379
| 0.12297
| 0.103106
| 0.097115
| 0.097115
| 0.090809
| 0
| 0.014708
| 0.215873
| 9,538
| 289
| 81
| 33.00346
| 0.8334
| 0.461732
| 0
| 0.145985
| 0
| 0
| 0.084313
| 0.01754
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007299
| false
| 0
| 0.043796
| 0
| 0.058394
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad1bc3d3021f0317b2b318ccf03355bd2585dd4
| 13,844
|
py
|
Python
|
Posts/viewsAPI.py
|
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
|
63c0ba2a03f0b462e3673ce7a4bf6bae7999440c
|
[
"Apache-2.0"
] | 3
|
2021-12-11T13:43:56.000Z
|
2022-03-31T02:36:05.000Z
|
Posts/viewsAPI.py
|
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
|
63c0ba2a03f0b462e3673ce7a4bf6bae7999440c
|
[
"Apache-2.0"
] | 9
|
2021-10-01T22:46:57.000Z
|
2021-12-16T18:01:31.000Z
|
Posts/viewsAPI.py
|
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
|
63c0ba2a03f0b462e3673ce7a4bf6bae7999440c
|
[
"Apache-2.0"
] | 2
|
2021-12-16T16:37:10.000Z
|
2021-12-16T20:30:12.000Z
|
from django.conf import settings
from django.core import serializers
from django.utils import timezone
import requests
from Posts.commentModel import Comments
#from Posts.commentView import add_Comment
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from django.shortcuts import HttpResponse, render
from requests import get
from .serializers import CommentSerializer, PostSerializer
from Author.serializers import LikeSerializer
from Author.models import Like
from Author.views import updateForeignAuthors, GetForeignAuthors
from .models import Post, Author
from .form import PostForm
from Posts.commentForm import CommentForm
import json
import uuid
import re
import base64
from django.db.models import Q
import django.core
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
import traceback
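# Helper used by the API views below: validates the submitted PostForm, builds the
# post id and comments URLs, and saves a new Post; returns True on success, False otherwise.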
def newPost(request, uid=None, auth_pk=None):
form = PostForm(request.POST, request.FILES)
if form.is_valid():
title = form.cleaned_data['title']
description = form.cleaned_data['description']
categories = form.cleaned_data['categories'].split(' ')
visibility = form.cleaned_data['visibility']
unlisted = form.cleaned_data['unlisted']
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
source = settings.SERVER_URL + "/"
origin = settings.SERVER_URL + "/"
author_id = Author.objects.get(pk=auth_pk)
id = author_id.url
author = json.loads(serializers.serialize('json', Author.objects.filter(pk=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
id = id + '/posts/' + uid + "/"
comments_id = id + "comments/"
published = timezone.now()
posts = Post(pk=uid, id=id, author_id=author_id, author=author, title=title, source=source, origin=origin, description=description, contentType=contentType, count=0, size=10, categories=categories,visibility=visibility, unlisted=unlisted, published=published, content=content, comments=comments_id)
posts.save()
return True
else:
print(request.data)
print(form.errors)
print(form.data)
return False
def add_Comment(request, post_pk, auth_pk, uid=None):
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
updateForeignAuthors()
published = timezone.now()
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
author_id = json.loads(serializers.serialize('json', Author.objects.filter(email=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
post = Post.objects.get(pk = post_pk)
post_pk_str = post_pk
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
comment_id = getattr(post, 'comments') + uid
comments = Comments(pk=uid, id=comment_id, Post_pk=post, Post_pk_str = post_pk_str, auth_pk_str = auth_pk, author=author_id, size=10, published=published, contentType=contentType, content=content)
comments.save()
return True
else:
print(request.data)
return False
@api_view(['GET',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostLikesView(request, post_pk, auth_pk):
post = Post.objects.get(post_pk = post_pk)
author = Author.objects.get(pk = auth_pk)
likeObjs = Like.objects.filter(~Q(auth_pk = author), object = post.id)
Likes = LikeSerializer(likeObjs, read_only=True, many=True)
likes = []
for l in Likes.data:
like = {}
for key in l:
if(key != "context"):
like[key] = l[key]
like["@context"] = l["context"]
like["author"] = json.loads(django.core.serializers.serialize('json', Author.objects.filter(id=l["author"]), fields=('type', 'id', 'displayName', 'host', 'url', 'github',)))[0]['fields']
likes.append(like)
response_dict = {
"type": "likes",
"items": likes
}
return Response(response_dict)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostsList(request, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
if auth_pk:
try:
author = Author.objects.get(auth_pk=auth_pk)
posts = Post.objects.filter(author_id=author, id__icontains = "linkedspace")
code = status.HTTP_200_OK
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
except Exception as e:
print(e)
data = {}
code = status.HTTP_400_BAD_REQUEST
else:
code = status.HTTP_200_OK
posts = Post.objects.filter(id__icontains = "linkedspace")
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
elif request.method == 'POST':
if newPost(request, auth_pk=request.data['auth_pk']):
code = status.HTTP_201_CREATED
post = Post.objects.latest("published")
data = PostSerializer(post).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentListView(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
comments = Comments.objects.filter(Post_pk_str=post_pk)
post = Post.objects.get(pk=post_pk)
post_id = getattr(post, 'id')
comment_id = getattr(post, 'comments')
paginator = Paginator(comments, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
response_dict = {
"type": "comments",
"page": page_number,
"size": page_size,
"post": post_id,
"id": comment_id,
"comments": serializer.data,
}
return Response(response_dict)
elif request.method == 'POST':
if add_Comment(request, post_pk=request.data['Post_pk'], auth_pk=request.data['auth_pk']):
code = status.HTTP_202_ACCEPTED
comment = Comments.objects.latest("published")
data = CommentSerializer(comment).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST', 'PUT', 'DELETE', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostDetail(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
if 'title' in request.data.keys():
post.title = request.data['title']
if 'description' in request.data.keys():
post.description = request.data['description']
if 'categories' in request.data.keys():
post.categories = request.data['categories'].split(' ')
if 'visibility' in request.data.keys():
post.visibility = request.data['visibility']
if 'unlisted' in request.data.keys():
post.unlisted = request.data['unlisted']
if 'contentType' in request.data.keys():
post.contentType = request.data['contentType']
if post.contentType == "application/app":
post.content = request.FILES['file'].read() #Inputfile
elif post.contentType in ["image/png", "image/jpeg",]:
post.content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
post.content = request.data["text"]
post.save()
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'PUT':
try:
code = status.HTTP_201_CREATED
assert newPost(request, post_pk, request.data['auth_pk'])==True
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'DELETE':
try:
post = Post.objects.get(post_pk=post_pk)
post.delete()
code = status.HTTP_200_OK
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET', 'POST', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentDetail(request, post_pk, comment_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
if 'contentType' in request.data.keys():
comment.contentType = request.data['contentType']
if 'text' in request.data.keys():
comment.content = request.data['text']
comment.save()
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
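# Aggregate public posts from three partner teams' servers; any endpoint that does
# not answer with HTTP 200 is silently skipped.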
@api_view(['GET',])
def connection(request, auth_id=None):
data = []
team3 = get('https://social-dis.herokuapp.com/posts', auth=('socialdistribution_t03','c404t03'))
if team3.status_code == 200:
data.append(team3.json())
team15 = get('https://unhindled.herokuapp.com/service/allposts/', auth=('connectionsuperuser','404connection'))
if team15.status_code == 200:
data.append(team15.json())
team17 = get('https://cmput404f21t17.herokuapp.com/service/connect/public/', auth=('4cbe2def-feaa-4bb7-bce5-09490ebfd71a','123456'))
if team17.status_code == 200:
data.append(team17.json())
return Response({'connection': data})
| 38.455556
| 306
| 0.621063
| 1,571
| 13,844
| 5.318269
| 0.126034
| 0.020108
| 0.031837
| 0.016158
| 0.598444
| 0.523639
| 0.495871
| 0.464512
| 0.429084
| 0.413645
| 0
| 0.014097
| 0.262135
| 13,844
| 359
| 307
| 38.562674
| 0.803818
| 0.006862
| 0
| 0.572816
| 0
| 0
| 0.075904
| 0.004221
| 0
| 0
| 0
| 0
| 0.003236
| 1
| 0.02589
| false
| 0
| 0.084142
| 0
| 0.145631
| 0.035599
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad1ee45a7daa21c8e394ff77552f61ad841514d
| 3,753
|
py
|
Python
|
workers/tests/test_array_element.py
|
Open-EO/openeo-sentinelhub-python-driver
|
92f990f098065ffb658eba6dca291dd1d5fc70f2
|
[
"Apache-2.0"
] | 2
|
2019-12-03T12:49:47.000Z
|
2020-10-25T20:14:39.000Z
|
workers/tests/test_array_element.py
|
Open-EO/openeo-sentinelhub-python-driver
|
92f990f098065ffb658eba6dca291dd1d5fc70f2
|
[
"Apache-2.0"
] | 5
|
2019-12-03T10:32:48.000Z
|
2020-10-09T13:07:39.000Z
|
workers/tests/test_array_element.py
|
Open-EO/openeo-sentinelhub-python-driver
|
92f990f098065ffb658eba6dca291dd1d5fc70f2
|
[
"Apache-2.0"
] | 4
|
2020-03-06T14:51:52.000Z
|
2020-11-24T10:30:18.000Z
|
import pytest
import sys, os
import xarray as xr
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import process
from process._common import ProcessArgumentInvalid, ProcessArgumentRequired
@pytest.fixture
def generate_data():
def _construct(
data = [[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]],
dims = ('t','y','x','band'),
reduce_by = "band",
as_list = False
):
if as_list:
return data
xrdata = xr.DataArray(
data,
dims=dims,
attrs={'reduce_by': [reduce_by]},
)
return xrdata
return _construct
@pytest.fixture
def execute_array_element_process(generate_data):
def wrapped(data_arguments={}, index=None, return_nodata=None):
arguments = {}
if data_arguments is not None: arguments["data"] = generate_data(**data_arguments)
if index is not None: arguments["index"] = index
if return_nodata is not None: arguments["return_nodata"] = return_nodata
return process.array_element.array_elementEOTask(None, "" , None, {}, "arrayel1").process(arguments)
return wrapped
###################################
# tests:
###################################
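# Each parametrize tuple is (data, return_nodata, index, expected_result),
# e.g. index 2 of [9, 8, 7, 6, 5] is 7.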
@pytest.mark.parametrize('data,return_nodata,index,expected_result', [
([9,8,7,6,5], None, 2, 7),
(["A","B","C"], None, 0, "A"),
([], True, 0, None)
])
def test_examples(execute_array_element_process, data, index, return_nodata, expected_result):
"""
Test array_element process with examples from https://open-eo.github.io/openeo-api/processreference/#array_element
"""
data_arguments = {"data": data, "as_list": True}
result = execute_array_element_process(data_arguments=data_arguments, index=index, return_nodata=return_nodata)
assert result == expected_result
@pytest.mark.parametrize('data,index,reduce_by,expected_data,expected_dims', [
([[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]], 0, "band", [[[0.1, 0.15], [0.05, -0.9]]], ('t','y','x')),
([[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]], 1, "y", [[[0.05, 0.1], [-0.9, 0.05]]], ('t','x','band')),
])
def test_with_xarray(execute_array_element_process, generate_data, data, index, reduce_by, expected_data, expected_dims):
"""
Test array_element process with xarray.DataArrays
"""
expected_result = generate_data(data=expected_data, dims=expected_dims, reduce_by=reduce_by)
result = execute_array_element_process(data_arguments={"data": data, "reduce_by": reduce_by}, index=index)
xr.testing.assert_allclose(result, expected_result)
def test_with_xarray_out_bounds(execute_array_element_process, generate_data):
"""
Test array_element process with xarray.DataArrays with out of bounds index
"""
with pytest.raises(ProcessArgumentInvalid) as ex:
result = execute_array_element_process(index=5)
assert ex.value.args[0] == "The argument 'index' in process 'array_element' is invalid: Index out of bounds."
@pytest.mark.parametrize('data_arguments,index,expected_data,expected_dims', [
({}, 5, [[[np.nan, np.nan], [np.nan, np.nan]]], ('t','y','x')),
])
def test_with_xarray_out_bounds_return_nodata(execute_array_element_process, generate_data, data_arguments, index, expected_data, expected_dims):
"""
Test array_element process with xarray.DataArrays with out of bounds index and return_no_data
"""
expected_result = generate_data(expected_data, dims=expected_dims)
result = execute_array_element_process(data_arguments=data_arguments, index=index, return_nodata=True)
xr.testing.assert_equal(result, expected_result)
| 40.354839
| 145
| 0.662137
| 509
| 3,753
| 4.650295
| 0.194499
| 0.081115
| 0.104352
| 0.098859
| 0.434728
| 0.381073
| 0.291508
| 0.234052
| 0.184622
| 0.184622
| 0
| 0.030547
| 0.17133
| 3,753
| 92
| 146
| 40.793478
| 0.730547
| 0.090861
| 0
| 0.081967
| 0
| 0
| 0.095253
| 0.041654
| 0
| 0
| 0
| 0
| 0.065574
| 1
| 0.131148
| false
| 0
| 0.098361
| 0
| 0.311475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad263d1cb0c4c04603f5f92c314ea18d8d73526
| 1,681
|
py
|
Python
|
python/ray/autoscaler/tags.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 21,382
|
2016-09-26T23:12:52.000Z
|
2022-03-31T21:47:45.000Z
|
python/ray/autoscaler/tags.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 19,689
|
2016-09-17T08:21:25.000Z
|
2022-03-31T23:59:30.000Z
|
python/ray/autoscaler/tags.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 4,114
|
2016-09-23T18:54:01.000Z
|
2022-03-31T15:07:32.000Z
|
"""The Ray autoscaler uses tags/labels to associate metadata with instances."""
# Tag for the name of the node
TAG_RAY_NODE_NAME = "ray-node-name"
# Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag
# value says 'type' instead of 'kind'.
TAG_RAY_NODE_KIND = "ray-node-type"
NODE_KIND_HEAD = "head"
NODE_KIND_WORKER = "worker"
NODE_KIND_UNMANAGED = "unmanaged"
# Tag for user defined node types (e.g., m4xl_spot). This is used for multi
# node type clusters.
TAG_RAY_USER_NODE_TYPE = "ray-user-node-type"
# Tag for autofilled node types for legacy cluster yamls without multi
# node type defined in the cluster configs.
NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type"
NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type"
# Tag that reports the current state of the node (e.g. Updating, Up-to-date)
TAG_RAY_NODE_STATUS = "ray-node-status"
STATUS_UNINITIALIZED = "uninitialized"
STATUS_WAITING_FOR_SSH = "waiting-for-ssh"
STATUS_SYNCING_FILES = "syncing-files"
STATUS_SETTING_UP = "setting-up"
STATUS_UPDATE_FAILED = "update-failed"
STATUS_UP_TO_DATE = "up-to-date"
# Tag uniquely identifying all nodes of a cluster
TAG_RAY_CLUSTER_NAME = "ray-cluster-name"
# Hash of the node launch config, used to identify out-of-date nodes
TAG_RAY_LAUNCH_CONFIG = "ray-launch-config"
# Hash of the node runtime config, used to determine if updates are needed
TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config"
# Hash of the contents of the directories specified by the file_mounts config
# if the node is a worker, this also hashes content of the directories
# specified by the cluster_synced_files config
TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
| 40.02381
| 79
| 0.781678
| 279
| 1,681
| 4.512545
| 0.308244
| 0.057188
| 0.028594
| 0.023828
| 0.047657
| 0.047657
| 0
| 0
| 0
| 0
| 0
| 0.000686
| 0.132659
| 1,681
| 41
| 80
| 41
| 0.862826
| 0.518739
| 0
| 0
| 0
| 0
| 0.353165
| 0.096203
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad69b4670dd35b6830ae32d5cbb71d9e32dff45
| 1,427
|
py
|
Python
|
tests/test_models/test_components/test_discriminators/test_light_cnn.py
|
ChenShuwei1001/mmediting
|
285e629fe9da8a13c7538a6bb3347e8870cd7201
|
[
"Apache-2.0"
] | null | null | null |
tests/test_models/test_components/test_discriminators/test_light_cnn.py
|
ChenShuwei1001/mmediting
|
285e629fe9da8a13c7538a6bb3347e8870cd7201
|
[
"Apache-2.0"
] | 1
|
2021-08-05T16:20:39.000Z
|
2021-08-05T16:20:39.000Z
|
tests/test_models/test_components/test_discriminators/test_light_cnn.py
|
ChenShuwei1001/mmediting
|
285e629fe9da8a13c7538a6bb3347e8870cd7201
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import torch
from mmedit.models.builder import build_component
from mmedit.models.components.discriminators.light_cnn import MaxFeature
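# MaxFeature is the max-feature-map (MFM) unit used by LightCNN; the assertions below
# check that its conv2d and linear variants preserve the requested output size on CPU and GPU.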
def test_max_feature():
# cpu
conv2d = MaxFeature(16, 16, filter_type='conv2d')
x1 = torch.rand(3, 16, 16, 16)
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
linear = MaxFeature(16, 16, filter_type='linear')
x2 = torch.rand(3, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# gpu
if torch.cuda.is_available():
x1 = x1.cuda()
x2 = x2.cuda()
conv2d = conv2d.cuda()
linear = linear.cuda()
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# filter_type should be conv2d or linear
with pytest.raises(ValueError):
MaxFeature(12, 12, filter_type='conv1d')
def test_light_cnn():
cfg = dict(type='LightCNN', in_channels=3)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
inputs = torch.rand((2, 3, 128, 128))
output = net(inputs)
assert output.shape == (2, 1)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(inputs.cuda())
assert output.shape == (2, 1)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
| 27.980392
| 72
| 0.613174
| 196
| 1,427
| 4.377551
| 0.316327
| 0.037296
| 0.037296
| 0.024476
| 0.356643
| 0.191142
| 0.132867
| 0.132867
| 0.132867
| 0.06993
| 0
| 0.077286
| 0.256482
| 1,427
| 50
| 73
| 28.54
| 0.731385
| 0.060967
| 0
| 0.368421
| 0
| 0
| 0.019505
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8ad728c2bc84ac4630b400804d13c8940597431e
| 4,727
|
py
|
Python
|
src/consumer.py
|
ssichynskyi/web_metrics_posting
|
26f104d2fdf31c2d029bac5a4d5337db42df86f5
|
[
"MIT"
] | null | null | null |
src/consumer.py
|
ssichynskyi/web_metrics_posting
|
26f104d2fdf31c2d029bac5a4d5337db42df86f5
|
[
"MIT"
] | null | null | null |
src/consumer.py
|
ssichynskyi/web_metrics_posting
|
26f104d2fdf31c2d029bac5a4d5337db42df86f5
|
[
"MIT"
] | null | null | null |
import json
import logging
from typing import Iterable
from kafka import KafkaConsumer
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# I've used this example:
# https://github.com/aiven/aiven-examples/blob/master/kafka/python/consumer_example.py
# as well as Aiven Kafka tutorials
class Consumer:
GROUP_ID = 'web_metrics_consumer'
CLIENT_ID = 'website-monitoring-consumer-service'
def __init__(
self,
*topics,
**connection_kwargs
):
"""Class for creating Kafka consumer.
Args:
*topics - topics to subscribe to. Could be changed during lifetime, str
**connection_kwargs - keyword arguments as taken by KafkaConsumer
below there are some useful kwargs and their default value:
'bootstrap_servers' - uri with port for the service
'security_protocol' - SSL, SASL_PLAINTEXT, etc
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None
Note:
although all params are optional, at least
'sasl_plain_username' and 'sasl_plain_password'
or
'ssl_cafile', 'ssl_certfile' and 'ssl_keyfile'
or other certificate-related inputs shall be defined
Usage:
Connection is activated not on object instantiation but
when entering with statement. e.g.:
consumer = Consumer(...)
with consumer:
consumer.fetch_latest()
"""
self._topics = topics
self._connection_data = connection_kwargs
# auto-determine security protocol if not provided
try:
self._connection_data['security_protocol']
except KeyError:
username_given = 'sasl_plain_username' in self._connection_data.keys()
password_given = 'sasl_plain_password' in self._connection_data.keys()
ca_file_given = 'ssl_cafile' in self._connection_data.keys()
service_cert_given = 'ssl_certfile' in self._connection_data.keys()
service_key_given = 'ssl_keyfile' in self._connection_data.keys()
if all((ca_file_given, service_cert_given, service_key_given)):
self._connection_data['security_protocol'] = 'SSL'
elif username_given and password_given:
self._connection_data['security_protocol'] = 'SASL_PLAINTEXT'
else:
msg = 'Security protocol not provided and cannot be determined automatically.'
msg = f'{msg} Check auth kwargs'
raise ValueError(msg)
self._client_id = f'{self.CLIENT_ID}:{id(self)}'
def __enter__(self):
"""Method which creates the connection. Activated inside with statement."""
self._consumer = KafkaConsumer(
*self._topics,
**self._connection_data,
auto_offset_reset='earliest',
enable_auto_commit=False,
client_id=self._client_id,
group_id=self.GROUP_ID,
consumer_timeout_ms=1000,
value_deserializer=lambda x: json.loads(x.decode("utf-8"))
)
log.info(f'Connected to kafka broker at: {self._consumer.config["bootstrap_servers"]}')
def fetch_latest(self):
"""Fetches only not read messages by members of this group.
Returns:
list of decoded message values
"""
self._consumer.poll()
messages = list()
for message in self._consumer:
messages.append(message.value)
log.info(
f'Fetched {len(messages)} messages from {self._consumer.config["bootstrap_servers"]}'
)
self._consumer.commit()
return messages
def change_topics(self, topics: Iterable) -> None:
"""Changes Kafka consumer topic statically or dynamically
Args:
topics: any iterable: set, list, tuple
Returns:
None
"""
topics = tuple(topics)
try:
self._consumer.unsubscribe()
self._consumer.subscribe(list(topics))
except AttributeError:
# when topics are changed in inactive consumer i.e. not inside `with` statement
self._topics = topics
def __exit__(self, exc_type, exc_value, traceback):
"""Actions to perform when exiting with statement."""
log.info(
f'Closed connection to kafka broker at: {self._consumer.config["bootstrap_servers"]}'
)
| 36.643411
| 97
| 0.610324
| 515
| 4,727
| 5.363107
| 0.401942
| 0.050688
| 0.06517
| 0.036206
| 0.135409
| 0.084721
| 0.034033
| 0.034033
| 0
| 0
| 0
| 0.001525
| 0.306325
| 4,727
| 128
| 98
| 36.929688
| 0.840805
| 0.338058
| 0
| 0.090909
| 0
| 0
| 0.199576
| 0.068527
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0.030303
| 0.060606
| 0
| 0.19697
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|