Schema (field | dtype):

| Field | Dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
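Rows in this shape can be loaded and filtered programmatically. A minimal sketch, assuming the records are stored as local Parquet files readable by the `datasets` library (the `data/*.parquet` path is hypothetical, not given by this table):

```python
from datasets import load_dataset

# Load the rows from Parquet shards (path is an assumption for illustration).
ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

row = ds[0]
print(row["hexsha"], row["lang"], row["size"])              # file identity and byte size
print(row["max_stars_repo_name"], row["max_stars_count"])   # most-starred repo carrying the file
print(row["content"][:200])                                 # first 200 chars of the source file

# Keep only Python files with a low repeated-n-gram character fraction.
filtered = ds.filter(
    lambda r: r["lang"] == "Python"
    and r["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2
)
```

The `max_stars_*`, `max_issues_*`, and `max_forks_*` column groups appear to carry, per file, the containing repository that maximizes each popularity metric, together with the time window of the corresponding events.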
**Record 1 · `superset_config.py`** · hexsha `4b9e62db340ea51b4cda5971027dcd23a1f17c3d` · size 3,704 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | superset_config.py | mikiec84/incubator-superset | 3a1c32ae2378902a26873113d98bd55d290233ca | ["Apache-2.0"] | 1 | 2020-08-07T16:30:54.000Z | 2020-08-07T16:30:54.000Z |
| max_issues | superset_config.py | mikiec84/incubator-superset | 3a1c32ae2378902a26873113d98bd55d290233ca | ["Apache-2.0"] | null | null | null |
| max_forks | superset_config.py | mikiec84/incubator-superset | 3a1c32ae2378902a26873113d98bd55d290233ca | ["Apache-2.0"] | 1 | 2020-08-07T16:30:58.000Z | 2020-08-07T16:30:58.000Z |

content:
```python
#---------------------------------------------------------
# Superset specific config
#---------------------------------------------------------
ROW_LIMIT = 5000
SUPERSET_WEBSERVER_PORT = 8088
#---------------------------------------------------------
#---------------------------------------------------------
# Flask App Builder configuration
#---------------------------------------------------------
# Your App secret key
SECRET_KEY = '\2\1ulan123456\1\2\e\y\y\h'
# The SQLAlchemy connection string to your database backend
# This connection defines the path to the database that stores your
# superset metadata (slices, connections, tables, dashboards, ...).
# Note that the connection information to connect to the datasources
# you want to explore are managed directly in the web UI
#SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://superset:superset@localhost:5432/superset'
# ------------------------------
# GLOBALS FOR APP Builder
# ------------------------------
# Uncomment to setup Your App name
APP_NAME = 'Insights'
# Uncomment to setup an App icon
APP_ICON = '/static/assets/images/qmatic_insights-logo.png'
# Extract and use X-Forwarded-For/X-Forwarded-Proto headers?
ENABLE_PROXY_FIX = True
ENABLE_JAVASCRIPT_CONTROLS = True
'''
import os
from flask_appbuilder.security.manager import AUTH_OID, AUTH_REMOTE_USER, AUTH_DB, AUTH_LDAP, AUTH_OAUTH
basedir = os.path.abspath(os.path.dirname(__file__))
SUPERSET_WORKERS = 8
CSRF_ENABLED = True
AUTH_TYPE = AUTH_OAUTH
AUTH_USER_REGISTRATION = False
AUTH_USER_REGISTRATION_ROLE = "Gamma"  # "Public"
OAUTH_PROVIDERS = [
    {
        'name': 'google',
        'icon': 'fa-google',
        'token_key': 'access_token',
        'remote_app': {
            'base_url': 'https://www.googleapis.com/oauth2/v2/',
            'request_token_params': {
                'scope': 'email profile'
            },
            'request_token_url': None,
            'access_token_url': 'https://accounts.google.com/o/oauth2/token',
            'authorize_url': 'https://accounts.google.com/o/oauth2/auth',
            'consumer_key': '996225546131-1qd2alfrrp1scf6gvkeg63mg2ku85lka.apps.googleusercontent.com',
            'consumer_secret': '3fxwT-a8YA1akyuUYFfakMCz'
        }
    },
    {
        'name': 'slatest.qmaticcloud.com',
        'icon': 'fa-google',
        'token_key': 'access_token',
        'remote_app': {
            #'base_url': 'https://slatest.qmaticcloud.com/oauth2server/oauth/',
            'base_url': None,
            'request_token_params': {
                'scope': 'user_info',
                'state': '123'
            },
            'request_token_url': None,
            'access_token_url': 'https://slatest.qmaticcloud.com/oauth2server/oauth/token',
            'authorize_url': 'https://slatest.qmaticcloud.com/oauth2server/oauth/authorize',
            'consumer_key': 'businessintelligence',
            'consumer_secret': 'fSmI0K1uSvnORBk3'
        }
    },
    {
        'name': 'msdemo.qmatic.cloud',
        'icon': 'fa-google',
        'token_key': 'access_token',
        'remote_app': {
            'base_url': None,
            'request_token_params': {
                'scope': 'user_info',
                'state': '123'
            },
            'request_token_url': None,
            'access_token_url': 'https://msdemo.qmatic.cloud/oauth2server/oauth/token',
            'authorize_url': 'https://msdemo.qmatic.cloud/oauth2server/oauth/authorize',
            'consumer_key': 'businessintelligence',
            'consumer_secret': 'fSmI0K1uSvnORBk3'
        }
    }
]
'''
```
- **File stats:** avg_line_length 35.615385 · max_line_length 107 · alphanum_fraction 0.551836
- **`qsc_code_*_quality_signal`:** num_words 352 · num_chars 3,704 · mean_word_length 5.590909 · frac_words_unique 0.426136 · frac_chars_top_{2,3,4}grams 0.03252/0.042683/0.025915 · frac_chars_dupe_{5..10}grams 0.396341/0.396341/0.382114/0.270325/0.251016/0.162602 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.024669 · frac_chars_whitespace 0.24487 · size_file_byte 3,704 · num_lines 103 · num_chars_line_max 108 · num_chars_line_mean 35.961165 · frac_chars_alphabet 0.678942 · frac_chars_comments 0.262959 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.307692 · frac_chars_long_word_length 0.276923 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0 · frac_lines_simplefunc 0 · score_lines_no_logic 0 · frac_lines_print 0
- **Duplicate `qsc_*` columns (no `_quality_signal` suffix):** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
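Most `qsc_code_*` values are simple text statistics over `content`. A rough sketch of plausible definitions for a few of them (assumed reconstructions; the pipeline's exact tokenization and thresholds are not given in this table):

```python
import re

def basic_quality_signals(content: str) -> dict:
    # Plausible reconstructions of a few qsc_code_* signals; the real
    # pipeline's tokenizer and edge-case handling are unknown.
    words = re.findall(r"\S+", content)
    lines = content.splitlines()
    n_chars = max(len(content), 1)
    return {
        "num_words": len(words),
        "num_chars": len(content),
        "mean_word_length": sum(map(len, words)) / len(words) if words else 0.0,
        "frac_words_unique": len(set(words)) / len(words) if words else 0.0,
        "frac_chars_whitespace": sum(c.isspace() for c in content) / n_chars,
        "frac_chars_digital": sum(c.isdigit() for c in content) / n_chars,
        "num_lines": len(lines),
        "num_chars_line_max": max(map(len, lines)) if lines else 0,
    }
```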
**Record 2 · `python_code/yolo/extract_car_num.py`** · hexsha `4b9ee2812f3c3d983291b0a7f5a83dcf6f853ee4` · size 5,038 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | python_code/yolo/extract_car_num.py | mukulbhave/tensorflow | 848b16fa32cd0f180ab80a98254edd2147ea3948 | ["CNRI-Python"] | null | null | null |
| max_issues | python_code/yolo/extract_car_num.py | mukulbhave/tensorflow | 848b16fa32cd0f180ab80a98254edd2147ea3948 | ["CNRI-Python"] | null | null | null |
| max_forks | python_code/yolo/extract_car_num.py | mukulbhave/tensorflow | 848b16fa32cd0f180ab80a98254edd2147ea3948 | ["CNRI-Python"] | null | null | null |

content:
```python
import argparse
import cv2
import re
import numpy as np
import string
import PIL
import os, glob
import ntpath
import time
import matplotlib.pyplot as plt
from PIL import Image
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
                                     yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
from retrain_yolo import (create_model, get_classes)
import keras.backend as K
from crnn.train_crnn import create_crnn_model
from crnn.crnn_data_gen import *

char_list = string.ascii_letters + string.digits
YOLO_ANCHORS = np.array(
    ((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
     (7.88282, 3.52778), (9.77052, 9.16828)))
class_names = ['plate', 'no-plate']


class CarNumberDetector:

    def __init__(self, viden_yolo_weights_path, viden_crnn_weights_path, classes_path, out_path):
        self.act_model = create_crnn_model(train=False)
        self.act_model.load_weights(viden_crnn_weights_path)  # 'viden_trained_models\\viden_crnn_14May2021.hdf5'
        class_names = get_classes(classes_path)
        #print(class_names)
        self.out_path = out_path
        #self.anchors = YOLO_ANCHORS
        self.yolo_model_body, self.yolo_model = create_model(YOLO_ANCHORS, class_names, load_pretrained=False, freeze_body=False)
        self.yolo_model_body.load_weights(viden_yolo_weights_path)  # 'viden_trained_models\\viden_yolo_14May2021.h5'
        self.yolo_outputs = yolo_head(self.yolo_model_body.output, YOLO_ANCHORS, len(class_names))
        self.yolo_input_image_shape = K.placeholder(shape=(2, ))
        self.boxes, self.scores, self.classes = yolo_eval(
            self.yolo_outputs, self.yolo_input_image_shape, max_boxes=1, score_threshold=.7, iou_threshold=0.5)

    def extract_number(self, orig_image_url=None, image_array=None, save=False):
        """
        This is the primary method to detect the number plate on a car and fetch the number.
        image_array is the numpy array representing the original car image.
        Returns the image with bounding box and the extracted car_number string.
        """
        if image_array is None and orig_image_url is None:
            raise ValueError("image array or url is required")
        if orig_image_url is not None:
            image = PIL.Image.open(orig_image_url)
        else:
            image = PIL.Image.fromarray(image_array)
        pred_boxes, pred_box_classes, img_with_boxes = self.get_bounding_boxes(image)
        pred_txt = ''
        for i, box in list(enumerate(pred_boxes)):
            box_class = class_names[pred_box_classes[i]]
            top, left, bottom, right = box
            pred_obj_img = image.crop((left, top, right, bottom))
            pred_txt = self.get_text(pred_obj_img)
        # Save the image:
        if save:
            time_param = int(round(time.time() * 1000))
            orig_img_name = self.out_path + pred_txt + "-" + str(time_param) + ".jpg"
            orig_image = PIL.Image.fromarray(img_with_boxes)
            orig_image.save(orig_img_name)
        # NOTE: orig_image is only bound when save=True; calling with save=False
        # raises UnboundLocalError on the next line in the original code.
        return (orig_image, pred_txt)

    def get_text(self, pil_image):
        img = pil_image.resize((128, 32), Image.BICUBIC)
        img = np.array(img) / 255
        img = np.sum(img, axis=2, keepdims=True)
        img = np.expand_dims(img, axis=0)
        prediction = self.act_model.predict(img)
        # use CTC decoder
        out = K.get_value(K.ctc_decode(prediction, input_length=np.ones(prediction.shape[0]) * prediction.shape[1], greedy=False)[0][0])
        x = out[0]
        le = min(10, out.shape[1])
        s = ''
        for x in out:
            for p in range(0, le):
                if int(x[p]) != -1:
                    s += char_list[int(x[p])]
        return s

    def get_bounding_boxes(self, image):
        image_shape = (416, 416)
        resized_image = image.resize(tuple(image_shape), PIL.Image.BICUBIC)
        image_data = np.array(resized_image, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)
        sess = K.get_session()
        out_boxes, out_scores, out_classes = sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model_body.input: image_data,
                self.yolo_input_image_shape: [image_data.shape[1], image_data.shape[2]],
                K.learning_phase(): 0
            })
        # Convert pred on 416 to actual image size
        resized_boxes = out_boxes / 416
        w, h = image.size
        box_resize_dim = [h, w, h, w]
        resized_boxes = resized_boxes * box_resize_dim
        orig_image_data = np.array(image, dtype='float32')
        orig_image_with_boxes = draw_boxes(orig_image_data, resized_boxes, out_classes, class_names, out_scores, "rand")
        return resized_boxes, out_classes, orig_image_with_boxes
```
- **File stats:** avg_line_length 39.984127 · max_line_length 133 · alphanum_fraction 0.639936
- **`qsc_code_*_quality_signal`:** num_words 699 · num_chars 5,038 · mean_word_length 4.336195 · frac_words_unique 0.288984 · frac_chars_top_{2,3,4}grams 0.032662/0.021445/0.022435 · frac_chars_dupe_{5..10}grams 0.076212/0.04223/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.035107 · frac_chars_whitespace 0.264986 · size_file_byte 5,038 · num_lines 126 · num_chars_line_max 134 · num_chars_line_mean 39.984127 · frac_chars_alphabet 0.783419 · frac_chars_comments 0.089123 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.014525 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.043956 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.186813 · frac_lines_simplefunc 0 · score_lines_no_logic 0.274725 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 3 · `examples/time_frequency/plot_tfr_topography.py`** · hexsha `4ba04308181ebd07871e89cce3a567b034f969f9` · size 2,881 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | examples/time_frequency/plot_tfr_topography.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | ["BSD-3-Clause"] | 2 | 2015-09-27T20:33:49.000Z | 2020-04-22T19:10:56.000Z |
| max_issues | examples/time_frequency/plot_tfr_topography.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | ["BSD-3-Clause"] | null | null | null |
| max_forks | examples/time_frequency/plot_tfr_topography.py | Anevar/mne-python | 15b19ed6b9364ae4787f0df2fd7e689b3c0a30bb | ["BSD-3-Clause"] | 1 | 2018-09-15T09:45:38.000Z | 2018-09-15T09:45:38.000Z |

content:
"""
===================================================================
Plot time-frequency representations on topographies for MEG sensors
===================================================================
Both induced power and phase locking values are displayed.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import fiff
from mne.time_frequency import induced_power
from mne.viz import plot_topo_power, plot_topo_phase_lock
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = fiff.Raw(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = fiff.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data() # as 3D matrix
layout = mne.find_layout(epochs.info, 'meg')
###############################################################################
# Calculate power and phase locking value
frequencies = np.arange(7, 30, 3) # define frequencies of interest
n_cycles = frequencies / float(7) # different number of cycle per frequency
Fs = raw.info['sfreq'] # sampling in Hz
decim = 3
power, phase_lock = induced_power(data, Fs=Fs, frequencies=frequencies,
n_cycles=n_cycles, n_jobs=1, use_fft=False,
decim=decim, zero_mean=True)
###############################################################################
# Prepare topography plots, set baseline correction parameters
baseline = (None, 0) # set the baseline for induced power
mode = 'ratio' # set mode for baseline rescaling
###############################################################################
# Show topography of power.
title = 'Induced power - MNE sample data'
plot_topo_power(epochs, power, frequencies, layout, baseline=baseline,
mode=mode, decim=decim, vmin=0., vmax=14, title=title)
plt.show()
###############################################################################
# Show topography of phase locking value (PLV)
mode = None # no baseline rescaling for PLV
title = 'Phase locking value - MNE sample data'
plot_topo_phase_lock(epochs, phase_lock, frequencies, layout,
baseline=baseline, mode=mode, decim=decim, title=title)
plt.show()
- **File stats:** avg_line_length 35.567901 · max_line_length 79 · alphanum_fraction 0.591461
- **`qsc_code_*_quality_signal`:** num_words 352 · num_chars 2,881 · mean_word_length 4.71875 · frac_words_unique 0.414773 · frac_chars_top_{2,3,4}grams 0.036123/0.030704/0.024082 · frac_chars_dupe_{5..10}grams 0.131246/0.10596/0.10596/0.10596/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.015365 · frac_chars_whitespace 0.164179 · size_file_byte 2,881 · num_lines 80 · num_chars_line_max 80 · num_chars_line_mean 36.0125 · frac_chars_alphabet 0.674419 · frac_chars_comments 0.289483 · cate_xml_start 0 · frac_lines_dupe_lines 0.05 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.104338 · frac_chars_long_word_length 0.041032 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.175 · frac_lines_simplefunc 0 · score_lines_no_logic 0.175 · frac_lines_print 0.025
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 4 · `A1/greenHouseBluetooth.py`** · hexsha `4ba39b6087c75616c2877cb61a2b0736b03e97e4` · size 2,201 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | A1/greenHouseBluetooth.py | rmit-s3559384-andrew-alvaro/IoT | ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f | ["MIT"] | null | null | null |
| max_issues | A1/greenHouseBluetooth.py | rmit-s3559384-andrew-alvaro/IoT | ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f | ["MIT"] | 1 | 2021-06-01T23:39:58.000Z | 2021-06-01T23:39:58.000Z |
| max_forks | A1/greenHouseBluetooth.py | AndrewAlvaro/IoT | ec444d0b037ddbd2e3aab01c34ea57fd2bd51d5f | ["MIT"] | null | null | null |

content:
```python
import bluetooth
import sys, os
import subprocess as sp
import datetime
from pushBulletForBluetooth import pushNotification
from makeReminderforBluetooth import Reminder
import csv


class blueDev:
    def findmyDevice(self):
        sendPushBullet = pushNotification()
        timestamp = datetime.datetime.now().strftime('%d/%m/%Y')
        nearby_devices = bluetooth.discover_devices(lookup_names = True)
        if nearby_devices is not None:
            print("Scanned device:")
            for addr, name in nearby_devices:
                devices = (addr.split("(")[-1])
                print(devices)
        else:
            print("No device available")
        print()
        paired = sp.Popen(["bt-device", "--list"], stdin = sp.PIPE, stdout = sp.PIPE, close_fds = True)
        (stdout, stdin) = (paired.stdout, paired.stdin)
        list_of_paired_devices = stdout.readlines()
        list_of_paired_devices.pop(0)
        print("Matching devices...")
        for paired_device in list_of_paired_devices:
            pairedString = paired_device.decode()
            pairedSplit = pairedString.split("(")[-1]
            pairedDevice = pairedSplit[0:-2]
            for devices, name in nearby_devices:
                if pairedDevice == devices:
                    print(devices, "=", pairedDevice)
                    with open('bluetoothReminder.csv', 'r') as csvfile:
                        readCSV = csv.reader(csvfile)
                        for row in readCSV:
                            if row[0] != timestamp:
                                print("Device matched!")
                                sendPushBullet.send()
                            else:
                                print("Device matched! Notification has already been sent today.")
                else:
                    print(devices, "!=", pairedDevice)
                    print("Device not matched...")


def main():
    bluetooth = blueDev()
    reminder = Reminder()
    reminder.makeReminder()
    bluetooth.findmyDevice()


if __name__ == "__main__":
    main()
```
- **File stats:** avg_line_length 31 · max_line_length 103 · alphanum_fraction 0.52567
- **`qsc_code_*_quality_signal`:** num_words 195 · num_chars 2,201 · mean_word_length 5.8 · frac_words_unique 0.435897 · frac_chars_top_{2,3,4}grams 0.045977/0.03183/0.050398 · frac_chars_dupe_{5..10}grams 0/0/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.004402 · frac_chars_whitespace 0.380736 · size_file_byte 2,201 · num_lines 70 · num_chars_line_max 104 · num_chars_line_mean 31.442857 · frac_chars_alphabet 0.825385 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0.06 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.092727 · frac_chars_long_word_length 0.009545 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.04 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.14 · frac_lines_simplefunc 0 · score_lines_no_logic 0.2 · frac_lines_print 0.2
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 5 · `myfitnesspal_to_sqlite/cli.py`** · hexsha `4ba509cf1a05cf33bf195b861b6306f41e7b81ea` · size 954 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | myfitnesspal_to_sqlite/cli.py | seeM/myfitnesspal-to-sqlite | ce4c133009cbeacd5fa5410016f81f5eb45e7a64 | ["Apache-2.0"] | 4 | 2021-07-14T17:31:40.000Z | 2021-12-03T21:50:09.000Z |
| max_issues | myfitnesspal_to_sqlite/cli.py | seeM/myfitnesspal-to-sqlite | ce4c133009cbeacd5fa5410016f81f5eb45e7a64 | ["Apache-2.0"] | null | null | null |
| max_forks | myfitnesspal_to_sqlite/cli.py | seeM/myfitnesspal-to-sqlite | ce4c133009cbeacd5fa5410016f81f5eb45e7a64 | ["Apache-2.0"] | null | null | null |

content:
```python
from datetime import datetime

import myfitnesspal
import sqlite_utils
import click

from . import utils


@click.group()
@click.version_option()
def cli():
    "Save data from MyFitnessPal to a SQLite database"


@cli.command()
@click.argument(
    "db_path",
    type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
    required=True,
)
@click.argument(
    "user",
    type=str,
    required=True,
)
@click.argument(
    "date",
    type=str,
    required=True,
)
@click.option(
    "--measurement",
    multiple=True,
    required=True,
)
def diary(db_path, user, date, measurement):
    "Save food, exercise, goal, and measurement entries for a given user and date"
    date = datetime.fromisoformat(date).date()
    db = sqlite_utils.Database(db_path)
    client = myfitnesspal.Client(user)
    diary_entry = utils.fetch_diary_entry(date, client, measurement)
    utils.save_diary_entry(db, diary_entry)
    utils.ensure_db_shape(db)
```
- **File stats:** avg_line_length 21.2 · max_line_length 82 · alphanum_fraction 0.705451
- **`qsc_code_*_quality_signal`:** num_words 126 · num_chars 954 · mean_word_length 5.206349 · frac_words_unique 0.388889 · frac_chars_top_{2,3,4}grams 0.073171/0.077744/0.07622 · frac_chars_dupe_{5..10}grams 0.073171/0/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0 · frac_chars_whitespace 0.178197 · size_file_byte 954 · num_lines 44 · num_chars_line_max 83 · num_chars_line_mean 21.681818 · frac_chars_alphabet 0.836735 · frac_chars_comments 0.131027 · cate_xml_start 0 · frac_lines_dupe_lines 0.236842 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.159329 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.052632 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.131579 · frac_lines_simplefunc 0 · score_lines_no_logic 0.184211 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 6 · `alipay/aop/api/domain/CircleRecommendItemDTO.py`** · hexsha `4ba90d216dd9521bb1b314598a55d371117b4821` · size 8,392 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | alipay/aop/api/domain/CircleRecommendItemDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z |
| max_issues | alipay/aop/api/domain/CircleRecommendItemDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z |
| max_forks | alipay/aop/api/domain/CircleRecommendItemDTO.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z |

content:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AoiInfoDTO import AoiInfoDTO
from alipay.aop.api.domain.ItemStoreDTO import ItemStoreDTO


class CircleRecommendItemDTO(object):

    def __init__(self):
        self._aoi_info = None
        self._discount = None
        self._item_cover = None
        self._item_detail_url = None
        self._item_id = None
        self._item_label = None
        self._item_name = None
        self._item_store = None
        self._original_price = None
        self._sales_info = None
        self._saved_money = None
        self._saved_money_info = None
        self._sell_price = None
        self._sold_quantity = None
        self._store_id = None

    @property
    def aoi_info(self):
        return self._aoi_info

    @aoi_info.setter
    def aoi_info(self, value):
        if isinstance(value, AoiInfoDTO):
            self._aoi_info = value
        else:
            self._aoi_info = AoiInfoDTO.from_alipay_dict(value)

    @property
    def discount(self):
        return self._discount

    @discount.setter
    def discount(self, value):
        self._discount = value

    @property
    def item_cover(self):
        return self._item_cover

    @item_cover.setter
    def item_cover(self, value):
        self._item_cover = value

    @property
    def item_detail_url(self):
        return self._item_detail_url

    @item_detail_url.setter
    def item_detail_url(self, value):
        self._item_detail_url = value

    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value

    @property
    def item_label(self):
        return self._item_label

    @item_label.setter
    def item_label(self, value):
        self._item_label = value

    @property
    def item_name(self):
        return self._item_name

    @item_name.setter
    def item_name(self, value):
        self._item_name = value

    @property
    def item_store(self):
        return self._item_store

    @item_store.setter
    def item_store(self, value):
        if isinstance(value, ItemStoreDTO):
            self._item_store = value
        else:
            self._item_store = ItemStoreDTO.from_alipay_dict(value)

    @property
    def original_price(self):
        return self._original_price

    @original_price.setter
    def original_price(self, value):
        self._original_price = value

    @property
    def sales_info(self):
        return self._sales_info

    @sales_info.setter
    def sales_info(self, value):
        self._sales_info = value

    @property
    def saved_money(self):
        return self._saved_money

    @saved_money.setter
    def saved_money(self, value):
        self._saved_money = value

    @property
    def saved_money_info(self):
        return self._saved_money_info

    @saved_money_info.setter
    def saved_money_info(self, value):
        self._saved_money_info = value

    @property
    def sell_price(self):
        return self._sell_price

    @sell_price.setter
    def sell_price(self, value):
        self._sell_price = value

    @property
    def sold_quantity(self):
        return self._sold_quantity

    @sold_quantity.setter
    def sold_quantity(self, value):
        self._sold_quantity = value

    @property
    def store_id(self):
        return self._store_id

    @store_id.setter
    def store_id(self, value):
        self._store_id = value

    def to_alipay_dict(self):
        params = dict()
        if self.aoi_info:
            if hasattr(self.aoi_info, 'to_alipay_dict'):
                params['aoi_info'] = self.aoi_info.to_alipay_dict()
            else:
                params['aoi_info'] = self.aoi_info
        if self.discount:
            if hasattr(self.discount, 'to_alipay_dict'):
                params['discount'] = self.discount.to_alipay_dict()
            else:
                params['discount'] = self.discount
        if self.item_cover:
            if hasattr(self.item_cover, 'to_alipay_dict'):
                params['item_cover'] = self.item_cover.to_alipay_dict()
            else:
                params['item_cover'] = self.item_cover
        if self.item_detail_url:
            if hasattr(self.item_detail_url, 'to_alipay_dict'):
                params['item_detail_url'] = self.item_detail_url.to_alipay_dict()
            else:
                params['item_detail_url'] = self.item_detail_url
        if self.item_id:
            if hasattr(self.item_id, 'to_alipay_dict'):
                params['item_id'] = self.item_id.to_alipay_dict()
            else:
                params['item_id'] = self.item_id
        if self.item_label:
            if hasattr(self.item_label, 'to_alipay_dict'):
                params['item_label'] = self.item_label.to_alipay_dict()
            else:
                params['item_label'] = self.item_label
        if self.item_name:
            if hasattr(self.item_name, 'to_alipay_dict'):
                params['item_name'] = self.item_name.to_alipay_dict()
            else:
                params['item_name'] = self.item_name
        if self.item_store:
            if hasattr(self.item_store, 'to_alipay_dict'):
                params['item_store'] = self.item_store.to_alipay_dict()
            else:
                params['item_store'] = self.item_store
        if self.original_price:
            if hasattr(self.original_price, 'to_alipay_dict'):
                params['original_price'] = self.original_price.to_alipay_dict()
            else:
                params['original_price'] = self.original_price
        if self.sales_info:
            if hasattr(self.sales_info, 'to_alipay_dict'):
                params['sales_info'] = self.sales_info.to_alipay_dict()
            else:
                params['sales_info'] = self.sales_info
        if self.saved_money:
            if hasattr(self.saved_money, 'to_alipay_dict'):
                params['saved_money'] = self.saved_money.to_alipay_dict()
            else:
                params['saved_money'] = self.saved_money
        if self.saved_money_info:
            if hasattr(self.saved_money_info, 'to_alipay_dict'):
                params['saved_money_info'] = self.saved_money_info.to_alipay_dict()
            else:
                params['saved_money_info'] = self.saved_money_info
        if self.sell_price:
            if hasattr(self.sell_price, 'to_alipay_dict'):
                params['sell_price'] = self.sell_price.to_alipay_dict()
            else:
                params['sell_price'] = self.sell_price
        if self.sold_quantity:
            if hasattr(self.sold_quantity, 'to_alipay_dict'):
                params['sold_quantity'] = self.sold_quantity.to_alipay_dict()
            else:
                params['sold_quantity'] = self.sold_quantity
        if self.store_id:
            if hasattr(self.store_id, 'to_alipay_dict'):
                params['store_id'] = self.store_id.to_alipay_dict()
            else:
                params['store_id'] = self.store_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = CircleRecommendItemDTO()
        if 'aoi_info' in d:
            o.aoi_info = d['aoi_info']
        if 'discount' in d:
            o.discount = d['discount']
        if 'item_cover' in d:
            o.item_cover = d['item_cover']
        if 'item_detail_url' in d:
            o.item_detail_url = d['item_detail_url']
        if 'item_id' in d:
            o.item_id = d['item_id']
        if 'item_label' in d:
            o.item_label = d['item_label']
        if 'item_name' in d:
            o.item_name = d['item_name']
        if 'item_store' in d:
            o.item_store = d['item_store']
        if 'original_price' in d:
            o.original_price = d['original_price']
        if 'sales_info' in d:
            o.sales_info = d['sales_info']
        if 'saved_money' in d:
            o.saved_money = d['saved_money']
        if 'saved_money_info' in d:
            o.saved_money_info = d['saved_money_info']
        if 'sell_price' in d:
            o.sell_price = d['sell_price']
        if 'sold_quantity' in d:
            o.sold_quantity = d['sold_quantity']
        if 'store_id' in d:
            o.store_id = d['store_id']
        return o
```
- **File stats:** avg_line_length 32.401544 · max_line_length 83 · alphanum_fraction 0.596282
- **`qsc_code_*_quality_signal`:** num_words 1,063 · num_chars 8,392 · mean_word_length 4.367827 · frac_words_unique 0.056444 · frac_chars_top_{2,3,4}grams 0.07409/0.080121/0.058152 · frac_chars_dupe_{5..10}grams 0.411587/0.314236/0.057506/0.031876/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.000172 · frac_chars_whitespace 0.308151 · size_file_byte 8,392 · num_lines 258 · num_chars_line_max 84 · num_chars_line_mean 32.527132 · frac_chars_alphabet 0.799518 · frac_chars_comments 0.005005 · cate_xml_start 0 · frac_lines_dupe_lines 0.138528 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.101366 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.142857 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.017316 · frac_lines_simplefunc 0.064935 · score_lines_no_logic 0.242424 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 7 · `vmcasterpub/uploader_dcap.py`** · hexsha `4bab64e23dd52a8b7e2e5474ebad268f962e7d94` · size 3,599 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | vmcasterpub/uploader_dcap.py | hepix-virtualisation/vmcaster | f4ef1c65bbb81b82aa72a0cd1afc1aa6cf13eb51 | ["Apache-2.0"] | null | null | null |
| max_issues | vmcasterpub/uploader_dcap.py | hepix-virtualisation/vmcaster | f4ef1c65bbb81b82aa72a0cd1afc1aa6cf13eb51 | ["Apache-2.0"] | null | null | null |
| max_forks | vmcasterpub/uploader_dcap.py | hepix-virtualisation/vmcaster | f4ef1c65bbb81b82aa72a0cd1afc1aa6cf13eb51 | ["Apache-2.0"] | null | null | null |

content:
```python
import subprocess
import time
import logging
import os
import signal

log = logging.getLogger(__name__)


def runpreloadcommand(cmd, timeout, preload):
    newenv = dict(os.environ)
    newenv["LD_PRELOAD"] = preload
    process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=newenv)
    processRc = None
    handleprocess = True
    counter = 0
    stdout = ''
    stderr = ''
    while handleprocess:
        counter += 1
        time.sleep(1)
        cout, cerr = process.communicate()
        stdout += cout
        stderr += cerr
        process.poll()
        processRc = process.returncode
        if processRc != None:
            break
        if counter == timeout:
            os.kill(process.pid, signal.SIGQUIT)
        if counter > timeout:
            os.kill(process.pid, signal.SIGKILL)
            processRc = -9
            break
    return (processRc, stdout, stderr)


def gsiDcapCopy(src, dest, timeout=60):
    cmd = "dccp -C 3000 -d 2 -A %s %s" % (src, dest)
    process = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    processRc = None
    handleprocess = True
    counter = 0
    stdout = ''
    stderr = ''
    while handleprocess:
        counter += 1
        time.sleep(1)
        cout, cerr = process.communicate()
        stdout += cout
        stderr += cerr
        process.poll()
        processRc = process.returncode
        if processRc != None:
            break
        if counter == timeout:
            os.kill(process.pid, signal.SIGQUIT)
        if counter > timeout:
            os.kill(process.pid, signal.SIGKILL)
            processRc = -9
            break
    if processRc != 0:
        log = logging.getLogger("gsiDcapCopy")
        log.error("failed to execute command '%s'" % (cmd))
    return (processRc, stdout, stderr)


class uploaderDcap:
    def __init__(self):
        self.remotePrefix = None
        self.log = logging.getLogger("uploaderGsiDcap")

    def _getfilepath(self, remotePath):
        if self.remotePrefix != None:
            return self.remotePrefix + remotePath
        else:
            return remotePath

    def exists(self, remotePath):
        cmd = "stat %s" % (self._getfilepath(remotePath))
        timeout = 10
        preload = "/usr/lib64/libpdcap.so.1"
        return runpreloadcommand(cmd, timeout, preload)

    def delete(self, remotePath):
        cmd = "unlink %s" % (self._getfilepath(remotePath))
        timeout = 10
        preload = "/usr/lib64/libpdcap.so.1"
        return runpreloadcommand(cmd, timeout, preload)

    def upload(self, localpath, remotePath):
        path = self._getfilepath(remotePath)
        return gsiDcapCopy(localpath, path)

    def replace(self, localpath, remotePath):
        path = self._getfilepath(remotePath)
        (rc, stdout, stderr) = self.exists(path)
        if rc == 0:
            (rc, stdout, stderr) = self.delete(path)
            if rc != 0:
                msg = "stderr={stderr}".format(stderr=stderr)
                log.error(msg)
                return (rc, stdout, stderr)
        rc, stdout, stderr = gsiDcapCopy(localpath, path)
        if rc != 0:
            msg = "stderr={stderr}".format(stderr=stderr)
            log.error(msg)
            return (rc, stdout, stderr)
        return (rc, stdout, stderr)

    def download(self, remotePath, localpath):
        rc, stdout, stderr = gsiDcapCopy(self._getfilepath(remotePath), localpath)
        if rc != 0:
            for errorLine in stderr.split('\n'):
                self.log.error("stderr:'%s'" % (errorLine))
        return rc, stdout, stderr
```
- **File stats:** avg_line_length 31.025862 · max_line_length 108 · alphanum_fraction 0.594054
- **`qsc_code_*_quality_signal`:** num_words 384 · num_chars 3,599 · mean_word_length 5.528646 · frac_words_unique 0.244792 · frac_chars_top_{2,3,4}grams 0.067829/0.052756/0.033914 · frac_chars_dupe_{5..10}grams 0.562412/0.562412/0.562412/0.513424/0.513424/0.513424 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.011806 · frac_chars_whitespace 0.293971 · size_file_byte 3,599 · num_lines 115 · num_chars_line_max 109 · num_chars_line_mean 31.295652 · frac_chars_alphabet 0.823691 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0.601942 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.055293 · frac_chars_long_word_length 0.013337 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.087379 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.048544 · frac_lines_simplefunc 0 · score_lines_no_logic 0.252427 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
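The dupe-n-gram fractions for this file (0.51 to 0.56) stand out, which matches the visible copy-paste overlap between `runpreloadcommand` and `gsiDcapCopy`. One plausible way such a signal could be computed, as an assumed definition rather than the pipeline's actual code:

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    # Fraction of word-character mass covered by word n-grams that occur
    # more than once. Assumed definition; the real pipeline may tokenize
    # and weight occurrences differently.
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    dupe_chars = sum(
        sum(len(w) for w in ng) for ng in ngrams if counts[ng] > 1
    )
    total_chars = sum(len(w) for w in words)
    return dupe_chars / total_chars if total_chars else 0.0
```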
**Record 8 · `drones/serializers.py`** · hexsha `4babaa82bca32126bf21a61b9966b1e6ecb0d62c` · size 3,923 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | drones/serializers.py | maprezdev/restfuldrones | 9448a63b148cdf7da8f46d65067ddbb8773e2fd2 | ["MIT"] | null | null | null |
| max_issues | drones/serializers.py | maprezdev/restfuldrones | 9448a63b148cdf7da8f46d65067ddbb8773e2fd2 | ["MIT"] | null | null | null |
| max_forks | drones/serializers.py | maprezdev/restfuldrones | 9448a63b148cdf7da8f46d65067ddbb8773e2fd2 | ["MIT"] | null | null | null |

content:
```python
# drones/serializers.py file
from rest_framework import serializers
from drones.models import DroneCategory, Drone, Pilot, Competition
from django.contrib.auth.models import User
import drones.views


class UserDroneSerializer(serializers.HyperlinkedModelSerializer):
    """serialize the drones related to a User"""

    class Meta:
        model: Drone  # note: this is an annotation; `model = Drone` was likely intended
        fields = (
            'url',
            'name')


class UserSerializer(serializers.HyperlinkedModelSerializer):
    """declare an instance of the UserDroneSerializer class"""
    drones = UserDroneSerializer(
        many=True,
        read_only=True)

    class Meta:
        model = User
        fields = (
            'url',
            'pk',
            'username',
            'drone')  # note: does not match the `drones` attribute declared above


class DroneCategorySerializer(serializers.HyperlinkedModelSerializer):
    """defines a one-to-many relationship that is read-only"""
    drones = serializers.HyperlinkedRelatedField(
        many=True,
        read_only=True,
        view_name='drone-detail',  # browsable API feature
    )

    class Meta:
        """model related to the serializer, and field names that we want
        to include in the serialization"""
        model = DroneCategory
        fields = (
            'url',
            'pk',
            'name',
            'drones')


class DroneSerializer(serializers.HyperlinkedModelSerializer):
    """display the drone category name"""
    drone_category = serializers.SlugRelatedField(
        queryset=DroneCategory.objects.all(),
        slug_field='name')
    # Display the owner's username (read-only)
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        """model related to the serializer, and field names that we want
        to include in the serialization"""
        model = Drone
        fields = (
            'url',
            'name',
            'drone_category',
            'owner',
            'manufacturing_date',
            'has_it_competed',
            'inserted_timestamp',
        )


class CompetitionSerializer(serializers.HyperlinkedModelSerializer):
    """display all the details for the related Drone"""
    drone = DroneSerializer()

    class Meta:
        """model related to the serializer, and field names that we want
        to include in the serialization"""
        model = Competition
        fields = (
            'url',
            'pk',
            'distance_in_feet',
            'distance_achievement_date',
            'drone')


class PilotSerializer(serializers.HyperlinkedModelSerializer):
    """serialize Pilot instances and serialize all the Competition instances related to the Pilot"""
    competitions = CompetitionSerializer(many=True, read_only=True)
    gender = serializers.ChoiceField(choices=Pilot.gender_choices)
    gender_description = serializers.CharField(source='get_gender_display', read_only=True)

    class Meta:
        """model related to the serializer, and field names that we want
        to include in the serialization"""
        model = Pilot
        fields = (
            'url',
            'name',
            'gender',
            'gender_description',
            'races_count',
            'inserted_timestamp',
            'competitions')


class PilotCompetitionSerializer(serializers.ModelSerializer):
    """display the related Pilot name and the related Drone name"""
    pilot = serializers.SlugRelatedField(
        queryset=Pilot.objects.all(),
        slug_field='name')
    drone = serializers.SlugRelatedField(
        queryset=Drone.objects.all(),
        slug_field='name')

    class Meta:
        """model related to the serializer, and field names that we want
        to include in the serialization"""
        model = Competition
        fields = (
            'url',
            'pk',
            'distance_in_feet',
            'distance_achievement_date',
            'pilot',
            'drone')
```
- **File stats:** avg_line_length 29.946565 · max_line_length 100 · alphanum_fraction 0.617894
- **`qsc_code_*_quality_signal`:** num_words 374 · num_chars 3,923 · mean_word_length 6.40107 · frac_words_unique 0.270053 · frac_chars_top_{2,3,4}grams 0.026316/0.040936/0.04386 · frac_chars_dupe_{5..10}grams 0.32122/0.255221/0.239348/0.239348/0.239348/0.239348 · frac_chars_replacement_symbols 0 · frac_chars_digital 0 · frac_chars_whitespace 0.293143 · size_file_byte 3,923 · num_lines 131 · num_chars_line_max 101 · num_chars_line_mean 29.946565 · frac_chars_alphabet 0.863325 · frac_chars_comments 0.237828 · cate_xml_start 0 · frac_lines_dupe_lines 0.488889 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.121884 · frac_chars_long_word_length 0.017313 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.044444 · frac_lines_simplefunc 0 · score_lines_no_logic 0.311111 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 9 · `scripts/utils.py`** · hexsha `4bb0265f943903e9ce05ffd83240a67916be1de6` · size 5,186 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | scripts/utils.py | alterapars/drought_classification | 585aaed3f00d5835059be1c80ad998189d9726f7 | ["MIT"] | 1 | 2022-02-19T11:42:24.000Z | 2022-02-19T11:42:24.000Z |
| max_issues | scripts/utils.py | alterapars/drought_classification | 585aaed3f00d5835059be1c80ad998189d9726f7 | ["MIT"] | null | null | null |
| max_forks | scripts/utils.py | alterapars/drought_classification | 585aaed3f00d5835059be1c80ad998189d9726f7 | ["MIT"] | 2 | 2022-02-02T08:24:37.000Z | 2022-02-03T12:27:05.000Z |

content:
```python
import random

import matplotlib.pyplot as plt
import numpy as np
from scipy import stats

############################ STATS input data ################################################


def return_nan_percentage(input_data):
    """
    prints percentage of nan values in max. 3D sized array

    Parameters
    ----------
    input_array : array
        max 3D array

    Returns
    -------
    None
    """
    total_size = input_data.size
    nan_sum = np.isnan(input_data).sum()
    perc = float(nan_sum / total_size)
    print("percentage of nan values inside dataset is: %.2f" % float(perc) + " %")


# #4D example:
# for i in Training_data:
#     return_nan_percentage(i)


def describe_with_stats(input_data):
    flat_array = input_data.flatten()
    # 'omit' performs the calculations ignoring nan values
    nobs, minmax, mean, variance, skewness, kurtosis = stats.describe(
        flat_array, nan_policy="omit"
    )
    print("Number of observations: " + str(nobs))
    print("min: " + str(minmax[0]))
    print("max: " + str(minmax[1]))
    print("the mean is: " + str(mean))
    print("the variance is: " + str(variance))
    print("Skewness is: " + str(skewness))
    print("Kurtosis: " + str(kurtosis))
    print("---")


# for i in Training_data_germany:
#     describe_with_stats(i)

############################ Derive Labels ###############################################


def mask_nan_values(input_array):
    array_with_masked_nans = input_array.fillna(value=10000.00)
    return array_with_masked_nans


# back to xarray with:
# label_xarray = xr.DataArray(output_3D_array, dims=['time', 'latitude', 'longitude'] )

# to turn list output into a 3D array use:
def list_to_array(output_list):
    output_3D_array = np.stack(output_list, axis=0)
    return output_3D_array


# TODO: returns list of 2D arrays now, try to return 3D xarray to save as netcdf - SEE BELOW
# TODO: write test

# #Example:
# #create data subset of 10 of a data xarray
# data_10 = data[0:10]  # first 10 items to test
# print(data.shape)
# #call function with a threshold of 10
# output_array = binary_image_classification(data_10, T=0.5)
# #show one image of the masked output images
# plt.imshow(output_array[0], origin='lower')
# #might need to change 'lower' to 'upper'


# TODO:
def list_of_2D_xarray_to_netcdf():
    x = 0
    netcdf = x
    return netcdf


def save_plots_from_3Darray(
    input_array, OUTPUT_PATH, title="drought mask figure Nr:", show_plots=True
):
    """
    saves pngs and/or prints images from 3Darrays as png files

    Parameters
    ----------
    input_xarray : array
        3-D input array in the format [num_samples, height, width]
    title: str
        title of the plots, number will be added according to iteration index
    show_plots: boolean
        determines if plots will be displayed as output or not

    Returns
    -------
    None
    """
    for k in range(len(input_array[0])):
        fig = input_array[k].plot()
        plt.title(title + str(k))
        plt.axis("equal")
        plt.title("drought mask for SMI, month " + str(k))
        if show_plots:
            plt.show()
        fig.figure.savefig(OUTPUT_PATH + title + str(k) + ".png", dpi=100)
        print(OUTPUT_PATH + "drought_mask_" + str(k) + ".png")


############################ class imbalance ######################################

# option 1, faster, combine these 2 fcts (recommended):
def hide_random_values(input_value, T=0.68):
    if input_value == 0:
        if np.random.rand(1) > T:
            return -1
    return input_value


# print(hide_random_values(0))


def reduce_class_size(input_array):
    output_array = np.copy(input_array)
    for t in range(0, 472):
        for xy in range(0, 7171):
            output_array[t, xy] = hide_random_values(output_array[t, xy])
    return output_array


# option 2, combine these 2 fcts:
def get_indices(dataset, value=0):
    """dataset = str(), 2D-array
    value = int(), value to print the indices for"""
    result = np.where(dataset == value)
    print("Tuple of arrays returned : ", result)
    # zip the 2 arrays (array 1: rows, array 2: columns) to get the exact coordinates
    listOfCoordinates = list(zip(result[0], result[1]))
    # iterate over the list of coordinates
    # for cord in listOfCoordinates:
    #     print(cord)
    print(len(listOfCoordinates))
    return listOfCoordinates


def reduce_class_size(input_array, indices_list, T=0.78, value=int(-1)):
    """set entries in array to value=x, randomly and within set percentage of array
    list = list, list of indices (2D)
    T = int(), percentage to be modified
    returns:
    """
    output_array = np.copy(input_array)
    # determine the percentage of the array that will be modified
    len_modifier = int(len(indices_list) * T)
    # select percentage T randomly from the list
    # (was `listOfCoordinates`, an undefined name in this scope; the parameter is `indices_list`)
    random_coords = random.sample(indices_list, len_modifier)
    # print(random_coords[:10])
    # set selected entries to value
    print("selected indices will be set to " + str(value))
    for i in random_coords:
        # print(labels_reshaped[i])
        output_array[i] = value  # was `==`, which compared instead of assigning
    return output_array
```
- **File stats:** avg_line_length 26.459184 · max_line_length 94 · alphanum_fraction 0.633822
- **`qsc_code_*_quality_signal`:** num_words 716 · num_chars 5,186 · mean_word_length 4.444134 · frac_words_unique 0.311453 · frac_chars_top_{2,3,4}grams 0.034569/0.005657/0.013199 · frac_chars_dupe_{5..10}grams 0.045883/0.034569/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.019525 · frac_chars_whitespace 0.219823 · size_file_byte 5,186 · num_lines 195 · num_chars_line_max 95 · num_chars_line_mean 26.594872 · frac_chars_alphabet 0.76693 · frac_chars_comments 0.387968 · cate_xml_start 0 · frac_lines_dupe_lines 0.057971 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.100719 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0.005128 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.144928 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.057971 · frac_lines_simplefunc 0 · score_lines_no_logic 0.318841 · frac_lines_print 0.188406
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 10 · `2021/day8/2.py`** · hexsha `4bb0f0499ca35cb26e70156806115a77ce9290c6` · size 1,382 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | 2021/day8/2.py | tomhel/AoC_2019 | c76c34235821864bc763f85d43cbcbfb9ed43469 | ["MIT"] | 1 | 2021-12-07T13:18:52.000Z | 2021-12-07T13:18:52.000Z |
| max_issues | 2021/day8/2.py | tomhel/AoC | c76c34235821864bc763f85d43cbcbfb9ed43469 | ["MIT"] | null | null | null |
| max_forks | 2021/day8/2.py | tomhel/AoC | c76c34235821864bc763f85d43cbcbfb9ed43469 | ["MIT"] | null | null | null |

content:
```python
def load():
    with open("input") as f:
        for x in f:
            a, b, = x.strip().split("|")
            yield {frozenset(x) for x in a.strip().split()}, [frozenset(x) for x in b.strip().split()]


def decode_signal(signal):
    num = {}
    while len(num) < 10:
        for x in signal.difference(num.values()):
            if len(x) == 2:
                num[1] = x
            elif len(x) == 3:
                num[7] = x
            elif len(x) == 4:
                num[4] = x
            elif len(x) == 7:
                num[8] = x
            elif len(x) == 6 and 4 in num and num[4].issubset(x):
                num[9] = x
            elif len(x) == 5 and 1 in num and num[1].issubset(x):
                num[3] = x
            elif len(x) == 6 and 7 in num and 9 in num and num[7].issubset(x) and num[9] != x:
                num[0] = x
            elif len(x) == 6 and 1 in num and not num[1].issubset(x):
                num[6] = x
            elif len(x) == 5 and 6 in num and x.issubset(num[6]):
                num[5] = x
            elif len(x) == 5 and 3 in num and 5 in num:
                num[2] = x
    return {v: k for k, v in num.items()}


def decode_output():
    result = 0
    for sig, out in load():
        mapping = decode_signal(sig)
        result += int("".join(str(mapping[x]) for x in out))
    return result


print(decode_output())
```
- **File stats:** avg_line_length 28.791667 · max_line_length 102 · alphanum_fraction 0.447902
- **`qsc_code_*_quality_signal`:** num_words 219 · num_chars 1,382 · mean_word_length 2.808219 · frac_words_unique 0.237443 · frac_chars_top_{2,3,4}grams 0.065041/0.117073/0.131707 · frac_chars_dupe_{5..10}grams 0.260163/0.126829/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.045012 · frac_chars_whitespace 0.40521 · size_file_byte 1,382 · num_lines 47 · num_chars_line_max 103 · num_chars_line_mean 29.404255 · frac_chars_alphabet 0.703163 · frac_chars_comments 0 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.004342 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.081081 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0 · frac_lines_simplefunc 0 · score_lines_no_logic 0.135135 · frac_lines_print 0.027027
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 11 · `Data Structures/Linked Lists/reverse-a-linked-list.py`** · hexsha `4bb3c9f13001ae9f4765556a61ae26a55cabde2c` · size 1,402 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | Data Structures/Linked Lists/reverse-a-linked-list.py | Owngithub10101/Hackerrank-Problem-Solving | 4e35b609c9f5b94c5bda292b9991baa054a944b6 | ["MIT"] | 23 | 2020-02-28T16:18:48.000Z | 2021-12-21T11:51:07.000Z |
| max_issues | Data Structures/Linked Lists/reverse-a-linked-list.py | ramanagali/Hackerrank-Problem-Solving | 98f654f984013140d52b9a344146e9e38e46fb81 | ["MIT"] | null | null | null |
| max_forks | Data Structures/Linked Lists/reverse-a-linked-list.py | ramanagali/Hackerrank-Problem-Solving | 98f654f984013140d52b9a344146e9e38e46fb81 | ["MIT"] | 16 | 2020-04-08T10:46:39.000Z | 2021-11-15T03:46:56.000Z |

content:
```python
# Reverse a linked list
# Developer: Murillo Grubler
# https://www.hackerrank.com/challenges/reverse-a-linked-list/problem
# Time complexity of reverse function: O(n)

class SinglyLinkedListNode:
    def __init__(self, node_data):
        self.data = node_data
        self.next = None


class SinglyLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def insert_node(self, node_data):
        node = SinglyLinkedListNode(node_data)
        if not self.head:
            self.head = node
        else:
            self.tail.next = node
        self.tail = node


# Complete the reverse function below.
#
# For your reference:
#
# SinglyLinkedListNode:
#     int data
#     SinglyLinkedListNode next
#
def reverse(head):
    ln = SinglyLinkedListNode(head.data)
    temp_node = head.next
    while temp_node:
        next_ln = ln
        ln = SinglyLinkedListNode(temp_node.data)
        ln.next = next_ln
        temp_node = temp_node.next
    return ln


if __name__ == '__main__':
    tests = int(input())
    for tests_itr in range(tests):
        llist_count = int(input())
        llist = SinglyLinkedList()
        for _ in range(llist_count):
            llist_item = int(input())
            llist.insert_node(llist_item)
        result = reverse(llist.head)
        while result:
            print(result.data, end=' ')
            result = result.next
```
- **File stats:** avg_line_length 24.596491 · max_line_length 69 · alphanum_fraction 0.622682
- **`qsc_code_*_quality_signal`:** num_words 165 · num_chars 1,402 · mean_word_length 5.078788 · frac_words_unique 0.357576 · frac_chars_top_{2,3,4}grams 0.047733/0.033413/0.042959 · frac_chars_dupe_{5..10}grams 0/0/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0 · frac_chars_whitespace 0.285307 · size_file_byte 1,402 · num_lines 56 · num_chars_line_max 70 · num_chars_line_mean 25.035714 · frac_chars_alphabet 0.836327 · frac_chars_comments 0.199715 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.008115 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.111111 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0 · frac_lines_simplefunc 0 · score_lines_no_logic 0.194444 · frac_lines_print 0.027778
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 12 · `web/api/user/utilities.py`** · hexsha `4bb64f1dd8e15adacfcfa40dd94e5cebe3d88bea` · size 1,737 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | web/api/user/utilities.py | cclrobotics/ARTBot | a0bffabebbc09361bf7748741fe3d30c78af8fbd | ["MIT"] | 5 | 2020-12-04T19:28:42.000Z | 2021-12-07T16:14:28.000Z |
| max_issues | web/api/user/utilities.py | cclrobotics/ARTBot | a0bffabebbc09361bf7748741fe3d30c78af8fbd | ["MIT"] | 50 | 2019-10-08T19:47:24.000Z | 2021-07-26T05:43:37.000Z |
| max_forks | web/api/user/utilities.py | cclrobotics/ARTBot | a0bffabebbc09361bf7748741fe3d30c78af8fbd | ["MIT"] | 4 | 2019-10-23T04:14:49.000Z | 2021-08-01T01:22:37.000Z |

content:
```python
import os
from PIL import Image
import random
from functools import wraps

from flask import jsonify
from flask_jwt_extended import get_current_user

from .artpiece import Artpiece
from .exceptions import InvalidUsage
from web.extensions import cache


# decorator to require admin_access for a route
def access_level_required(level):
    try:
        def outer(func):
            @wraps(func)
            def inner(*args, **kwargs):
                if get_current_user().role < level:
                    raise InvalidUsage.forbidden()
                return func(*args, **kwargs)
            return inner
    except TypeError:
        raise TypeError("Specify an access level to use access_level_required decorator")
    return outer


@cache.memoize(timeout=3600)
def get_image_description(image_path):
    with Image.open(image_path) as image:
        # Exif ID 270 = ImageDescription
        return image.getexif().get(270)


"""
Return a list of images in the 'gallery' folder and their descriptions
Output is list of tuples (image_location, image_description)
output list is in random order for random display order every time
"""
def get_gallery_images():
    internal_path_prefix = './web'
    public_gallery_path = '/static/img/gallery/'
    image_paths = [
        public_gallery_path + filename
        for filename in os.listdir(internal_path_prefix + public_gallery_path)
    ]
    image_descriptions = list()
    for image_path in image_paths:
        this_image_description = get_image_description(internal_path_prefix + image_path)
        image_descriptions.append(this_image_description)
    image_metadata = list(zip(image_paths, image_descriptions))
    random.shuffle(image_metadata)
    return image_metadata
```
- **File stats:** avg_line_length 31.017857 · max_line_length 89 · alphanum_fraction 0.716753
- **`qsc_code_*_quality_signal`:** num_words 222 · num_chars 1,737 · mean_word_length 5.391892 · frac_words_unique 0.418919 · frac_chars_top_{2,3,4}grams 0.066834/0.045113/0 · frac_chars_dupe_{5..10}grams 0/0/0/0/0/0 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.007358 · frac_chars_whitespace 0.217617 · size_file_byte 1,737 · num_lines 56 · num_chars_line_max 90 · num_chars_line_mean 31.017857 · frac_chars_alphabet 0.873436 · frac_chars_comments 0.044329 · cate_xml_start 0 · frac_lines_dupe_lines 0 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.060041 · frac_chars_long_word_length 0.014493 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.128205 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.230769 · frac_lines_simplefunc 0 · score_lines_no_logic 0.487179 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 13 · `network/utils.py`** · hexsha `4bb717792f0ab03afa44f642bc10364fd9b57993` · size 2,528 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | network/utils.py | Goochaozheng/ChunkFusion | 7458a8e08886cc76cfeb87881c51e23b1d0674c3 | ["MIT"] | 3 | 2022-03-15T08:34:15.000Z | 2022-03-15T08:40:06.000Z |
| max_issues | network/utils.py | Goochaozheng/ChunkFusion | 7458a8e08886cc76cfeb87881c51e23b1d0674c3 | ["MIT"] | null | null | null |
| max_forks | network/utils.py | Goochaozheng/ChunkFusion | 7458a8e08886cc76cfeb87881c51e23b1d0674c3 | ["MIT"] | null | null | null |

content:
```python
import spconv
import torch
from torch import nn


def residualBlock(channels, kernel_size=3):
    return spconv.SparseSequential(
        spconv.ConcatTable()
        .add(spconv.Identity())
        .add(spconv.SparseSequential(
            nn.BatchNorm1d(channels),
            spconv.SubMConv3d(channels, channels, kernel_size=kernel_size),
            nn.Sigmoid()
        )),
        spconv.JoinTable()
    )


def subMVonvBlock(inChannels, outChannels, kernel_size=3, indiceKey=None):
    return spconv.SparseSequential(
        nn.BatchNorm1d(inChannels),
        spconv.SubMConv3d(inChannels, outChannels, kernel_size=kernel_size, indice_key=indiceKey),
        nn.Sigmoid()
    )


def convBlock(inChannels, outChannels, kernel_size=3):
    return nn.Sequential(
        nn.BatchNorm3d(inChannels),
        nn.Conv3d(inChannels, outChannels, kernel_size=kernel_size, stride=1, padding=1),
        nn.LeakyReLU()
    )


def kaiming_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.kaiming_uniform_(m.weight)


def toSparseInput(inputTSDF):
    # Construct Sparse Tensor
    inputTSDF = inputTSDF.permute(0, 2, 3, 4, 1)
    sparseMask = torch.any(torch.abs(inputTSDF) < 1, dim=4)
    batchSize = len(inputTSDF)
    spatialShape = inputTSDF.shape[1:-1]
    sparseIndice = sparseMask.to_sparse(inputTSDF.ndim - 1).indices().permute(1, 0).contiguous().int()
    sparseValue = inputTSDF[sparseMask]
    inputData_sparse = spconv.SparseConvTensor(features=sparseValue, indices=sparseIndice, spatial_shape=spatialShape, batch_size=batchSize)
    return inputData_sparse


def sparseFuse(inputSparseTSDF, oldSparseTSDF, inputMask, oldMask):
    # fuseTSDF = torch.cat((self.toDense(inputSparseTSDF), self.toDense(oldSparseTSDF)), dim=1)
    oldTSDF = spconv.ToDense(oldSparseTSDF).permute(0, 2, 3, 4, 1)
    inputTSDF = spconv.ToDense(inputSparseTSDF).permute(0, 2, 3, 4, 1)
    # oldTSDF[inputMask] = (oldTSDF[inputMask] * oldWeight[inputMask] + inputTSDF[inputMask] * inputWeight[inputMask]) / (oldWeight[inputMask] + inputWeight[inputMask])
    batchSize = inputSparseTSDF.batch_size
    spatialShape = inputSparseTSDF.spatial_shape
    fuseMask = torch.logical_or(inputMask, oldMask)
    sparseIndice = fuseMask.to_sparse(oldTSDF.ndim - 1).indices().permute(1, 0).contiguous().int()
    sparseValue = oldTSDF[fuseMask]
    return spconv.SparseConvTensor(features=sparseValue, indices=sparseIndice, spatial_shape=spatialShape, batch_size=batchSize)
```
- **File stats:** avg_line_length 36.114286 · max_line_length 168 · alphanum_fraction 0.71163
- **`qsc_code_*_quality_signal`:** num_words 273 · num_chars 2,528 · mean_word_length 6.472527 · frac_words_unique 0.32967 · frac_chars_top_{2,3,4}grams 0.050934/0.061121/0.070175 · frac_chars_dupe_{5..10}grams 0.269383/0.233164/0.166384/0.166384/0.166384/0.11545 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.018191 · frac_chars_whitespace 0.173655 · size_file_byte 2,528 · num_lines 69 · num_chars_line_max 169 · num_chars_line_mean 36.637681 · frac_chars_alphabet 0.827669 · frac_chars_comments 0.109177 · cate_xml_start 0 · frac_lines_dupe_lines 0.083333 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.001779 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.125 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.0625 · frac_lines_simplefunc 0.0625 · score_lines_no_logic 0.291667 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
**Record 14 · `unn/models/initializer.py`** · hexsha `4bbaecaa33cf5b0c99d08e0e5f803ac656d6dabe` · size 2,659 · ext py · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | unn/models/initializer.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | ["Apache-2.0"] | 2 | 2021-08-08T11:23:14.000Z | 2021-09-16T04:05:23.000Z |
| max_issues | unn/models/initializer.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | ["Apache-2.0"] | 1 | 2021-08-08T11:25:47.000Z | 2021-08-08T11:26:15.000Z |
| max_forks | unn/models/initializer.py | zongdaoming/TinyTransformer | 8e64f8816117048c388b4b20e3a56760ce149fe3 | ["Apache-2.0"] | null | null | null |

content:
```python
import copy
import logging
import math

import torch
from torch import nn

logger = logging.getLogger('global')


def init_weights_normal(module, std=0.01):
    for m in module.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(
                m, nn.ConvTranspose2d):
            nn.init.normal_(m.weight.data, std=std)
            if m.bias is not None:
                m.bias.data.zero_()


def init_weights_xavier(module):
    for m in module.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(
                m, nn.ConvTranspose2d):
            nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()


def init_weights_msra(module):
    for m in module.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear) or isinstance(
                m, nn.ConvTranspose2d):
            nn.init.kaiming_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()


def init_bias_focal(module, cls_loss_type, init_prior, num_classes):
    if cls_loss_type == 'sigmoid':
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                # to keep the torch random state
                m.bias.data.normal_(-math.log(1.0 / init_prior - 1.0), init_prior)
                torch.nn.init.constant_(m.bias, -math.log(1.0 / init_prior - 1.0))
    elif cls_loss_type == 'softmax':
        for m in module.modules():
            if isinstance(m, nn.Conv2d):
                m.bias.data.normal_(0, 0.01)
                for i in range(0, m.bias.data.shape[0], num_classes):
                    fg = m.bias.data[i + 1:i + 1 + num_classes - 1]
                    mu = torch.exp(fg).sum()
                    m.bias.data[i] = math.log(mu * (1.0 - init_prior) / init_prior)
    else:
        raise NotImplementedError(f'{cls_loss_type} is not supported')


def initialize(model, method, **kwargs):
    # initialize BN
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
    # initialize Conv & FC
    if method == 'normal':
        init_weights_normal(model, **kwargs)
    elif method == 'msra':
        init_weights_msra(model)
    elif method == 'xavier':
        init_weights_xavier(model)
    else:
        raise NotImplementedError(f'{method} not supported')


def initialize_from_cfg(model, cfg):
    if cfg is None:
        initialize(model, 'normal', std=0.01)
        return
    cfg = copy.deepcopy(cfg)
    method = cfg.pop('method')
    initialize(model, method, **cfg)
```
- **File stats:** avg_line_length 32.036145 · max_line_length 83 · alphanum_fraction 0.588943
- **`qsc_code_*_quality_signal`:** num_words 367 · num_chars 2,659 · mean_word_length 4.141689 · frac_words_unique 0.220708 · frac_chars_top_{2,3,4}grams 0.042763/0.102632/0.078947 · frac_chars_dupe_{5..10}grams 0.395395/0.380921/0.380921/0.380921/0.354605/0.354605 · frac_chars_replacement_symbols 0 · frac_chars_digital 0.018577 · frac_chars_whitespace 0.291463 · size_file_byte 2,659 · num_lines 82 · num_chars_line_max 84 · num_chars_line_mean 32.426829 · frac_chars_alphabet 0.788217 · frac_chars_comments 0.024445 · cate_xml_start 0 · frac_lines_dupe_lines 0.349206 · cate_autogen 0 · frac_lines_long_string 0 · frac_chars_string_length 0.039382 · frac_chars_long_word_length 0 · frac_lines_string_concat 0 · cate_encoded_data 0 · frac_chars_hex_words 0 · frac_lines_prompt_comments 0 · frac_lines_assert 0
- **`qsc_codepython_*_quality_signal`:** cate_ast 1 · frac_lines_func_ratio 0.095238 · cate_var_zero false · frac_lines_pass 0 · frac_lines_import 0.079365 · frac_lines_simplefunc 0 · score_lines_no_logic 0.190476 · frac_lines_print 0
- **Duplicate `qsc_*` columns:** all 0 (null for frac_words_unique and frac_lines_string_concat) · effective 1 · hits 0
|
4bbcb3d3943aa14ce46dab08f6f7c37762566694
| 3,000
|
py
|
Python
|
AudioFile.py
|
ZZZlax/.Pyrate
|
42a85213e0557b2988bf62bb8eac540263e0ce30
|
[
"Unlicense"
] | null | null | null |
AudioFile.py
|
ZZZlax/.Pyrate
|
42a85213e0557b2988bf62bb8eac540263e0ce30
|
[
"Unlicense"
] | null | null | null |
AudioFile.py
|
ZZZlax/.Pyrate
|
42a85213e0557b2988bf62bb8eac540263e0ce30
|
[
"Unlicense"
] | null | null | null |
# This Python file uses the following encoding: utf-8
import os; import sys; import urllib.request; from bs4 import BeautifulSoup; import wikipedia
from PyQt5.QtWebEngineWidgets import *; from PyQt5.QtGui import QIcon; from PyQt5.QtWidgets import *; from PyQt5.QtNetwork import QNetworkProxy
class AudioFile(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
proxy = QNetworkProxy()
proxy.setType(1); proxy.setHostName("127.0.0.1"); proxy.setPort(9050); proxy.setApplicationProxy(proxy)
toolb = QToolBar("URL"); self.lineEdit = QLineEdit(self); self.lineEdit2 = QLineEdit(self); combobox2 = QComboBox(self); self.textEdit = QTextEdit(self)
def track_search():
try: soup = BeautifulSoup(urllib.request.urlopen(wikipedia.page(str(self.lineEdit.text()+" "+self.lineEdit2.text())).url).read())
except: soup = BeautifulSoup(urllib.request.urlopen(wikipedia.page(str(self.lineEdit.text()+" "+self.lineEdit2.text()+" album")).url).read())
for link in soup.find_all("td"):
w = link.get_text().strip()
if w[:1] == '"': self.textEdit.append(w.replace('"', ""))
else: pass
self.lineEdit2.clear()
def save():
bandName = self.lineEdit.text(); script = self.textEdit.toPlainText()
with open(os.getcwd()+"/.Pyrate/Set_List/"+bandName+".txt", "w", encoding = 'utf-8') as file:
file.write(script)
file.close()
self.textEdit.clear(); self.lineEdit.clear()
def text_changed():
nav = combobox2.currentText()
if nav == "Save": save()
if nav == "Album Search": track_search()
combobox2.setCurrentText("")
toolb.setOrientation(0x2); self.addToolBar(toolb); self.lineEdit.setObjectName(u"Artist Name"); self.lineEdit.setPlaceholderText("Artist Name"); toolb.addWidget(self.lineEdit); self.lineEdit2.setObjectName(u"Album Name"); self.lineEdit2.setPlaceholderText("Album Name"); toolb.addWidget(self.lineEdit2); combobox2.addItems(["", "Album Search", "Save"]); combobox2.currentTextChanged.connect(lambda: text_changed()); toolb.addWidget(combobox2); self.textEdit.setObjectName(u"Track List"); self.textEdit.setPlaceholderText("Track List"); self.textEdit.setAcceptRichText(False); self.setCentralWidget(self.textEdit)
self.setWindowIcon(QIcon(os.getcwd()+'/.Pyrate/.images/pp.png')); self.setWindowTitle("Shanties"); self.setStyleSheet("color: #fe2023;" "background-color: #000000;" "selection-color: #ffffff;" "selection-background-color: #e01b24;"); self.lineEdit.setStyleSheet("background-color: #ffffff;" "selection-color: #000000;");self.lineEdit2.setStyleSheet("background-color: #ffffff;" "selection-color: #000000;"); self.textEdit.setStyleSheet("background-color: #ffffff;" "selection-color: #000000;")
if __name__ == "__main__":
app = QApplication([])
AudioFile().show()
sys.exit(app.exec_())
| 69.767442
| 620
| 0.676667
| 334
| 3,000
| 6.005988
| 0.389222
| 0.053838
| 0.03988
| 0.050847
| 0.170489
| 0.170489
| 0.170489
| 0.143569
| 0.085743
| 0.085743
| 0
| 0.026736
| 0.164667
| 3,000
| 42
| 621
| 71.428571
| 0.773743
| 0.017
| 0
| 0
| 0
| 0
| 0.149304
| 0.016966
| 0
| 0
| 0.001018
| 0
| 0
| 1
| 0.121212
| false
| 0.030303
| 0.060606
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bbd5d337a02e7405c19f8ae7746f2dbce197b3b
| 4,189
|
py
|
Python
|
s09_files_and_random/solutions/random_walking_simple.py
|
silverfield/pythonsessions
|
bf5d82dded7616a5d6998da4eb445708c728794f
|
[
"MIT"
] | null | null | null |
s09_files_and_random/solutions/random_walking_simple.py
|
silverfield/pythonsessions
|
bf5d82dded7616a5d6998da4eb445708c728794f
|
[
"MIT"
] | null | null | null |
s09_files_and_random/solutions/random_walking_simple.py
|
silverfield/pythonsessions
|
bf5d82dded7616a5d6998da4eb445708c728794f
|
[
"MIT"
] | null | null | null |
__author__ = 'ferrard'
# ---------------------------------------------------------------
# Imports
# ---------------------------------------------------------------
import scipy as sp
import random
import time
# ---------------------------------------------------------------
# Class - Graph
# ---------------------------------------------------------------
class WalkableGraph:
"""Graph on which we can do random walking"""
# ---------------------------------------------------------------
# Initialisation
# ---------------------------------------------------------------
def __init__(self, file_path):
""" Loads a graph from file
The file should have format:
CityName i_1 i_2 ... i_k
....
where i_j are indices of neighbouring cities (index given by index of the row)
"""
self._graph = []
self._cities = []
with open(file_path, 'r') as f:
for line in f:
city = line.split(' ')[0]
neighs = [int(s) for s in line.split(' ')[1:]]
self._cities.append(city)
self._graph.append(neighs)
self.n = len(self._cities)
self._transition_matrix = self.__get_transition_matrix()
# ---------------------------------------------------------------
# Interface
# ---------------------------------------------------------------
def print(self):
""" Prints the neighbourhood table of the graph """
for i in range(self.n):
print(str(i) + " " + self._cities[i] + " " + str(len(self._graph[i])) + " " + str(self._graph[i]))
def probs_after_k_steps(self, k):
""" Prints the probability (for each city) we end up in the city after k steps """
probs = (1/self.n)*sp.ones(self.n)
for i in range(k):
probs = sp.dot(self._transition_matrix, probs)
print("Probabilities: ")
for i in range(self.n):
print("\t" + self._cities[i] + ": " + str(probs[i]))
return probs
def random_walk(self, start_city, steps=10, time_in_city=0):
""" Does a random walk through the graph, starting at given city, making "steps" random steps and waiting in
each city for time_in_city seconds
"""
# find the index of the start-city
current_city_index = None
for i in range(len(self._cities)):
if self._cities[i] == start_city:
current_city_index = i
if current_city_index is None:
raise Exception("Unknown city " + start_city)
# do the random walking
print("Random walk with " + str(steps) + " steps. Started in " + self._cities[current_city_index])
visits = [0]*self.n
for i in range(steps):
visits[current_city_index] += 1
current_city_index = random.choice(self._graph[current_city_index])
print("Moved to " + self._cities[current_city_index])
time.sleep(time_in_city)
visits[current_city_index] += 1
# print the statistics
print("Finished random walk in: " + self._cities[current_city_index])
print("Visits of cities: ")
for i in range(self.n):
print("\t%s: %s (%s)" % (self._cities[i], visits[i], visits[i]/steps))
# ---------------------------------------------------------------
# Implementation
# ---------------------------------------------------------------
def __get_transition_matrix(self):
""" Gets the transition matrix of the graph """
transition_matrix = sp.zeros((self.n, self.n))
for j in range(self.n):
for i in self._graph[j]:
transition_matrix[i][j] = 1/len(self._graph[j])
return transition_matrix
# ---------------------------------------------------------------
# Main
# ---------------------------------------------------------------
def main():
random.seed()
g = WalkableGraph('ghana.txt')
g.print()
print()
print("Let's do some walking")
k = 1000
g.random_walk("CapeCoast", k, 0)
# g.probs_after_k_steps(k)
if __name__ == '__main__':
main()
| 34.336066
| 116
| 0.467176
| 455
| 4,189
| 4.098901
| 0.274725
| 0.058981
| 0.085791
| 0.035389
| 0.153351
| 0.082038
| 0.034853
| 0.023592
| 0
| 0
| 0
| 0.005433
| 0.253044
| 4,189
| 121
| 117
| 34.619835
| 0.590604
| 0.344235
| 0
| 0.080645
| 0
| 0
| 0.073328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.048387
| 0
| 0.193548
| 0.193548
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4bc09df45d93aefe38be329327bcf363df1f3d3e
| 7,018
|
py
|
Python
|
bin/tdgwgeo2csv.py
|
Bauble/bauble.api
|
183c97fda076ea870e21e70ecf89a2a94a7f5722
|
[
"BSD-3-Clause"
] | null | null | null |
bin/tdgwgeo2csv.py
|
Bauble/bauble.api
|
183c97fda076ea870e21e70ecf89a2a94a7f5722
|
[
"BSD-3-Clause"
] | 1
|
2015-02-05T13:15:00.000Z
|
2015-02-05T13:15:00.000Z
|
bin/tdgwgeo2csv.py
|
Bauble/bauble.api
|
183c97fda076ea870e21e70ecf89a2a94a7f5722
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# tdwggeo2csv.py
#
# Description: convert TDWG plant distribution files out of the box to a single
# CSV file
#
# TODO: should create new id's for each entry and have a tdwg_code for
# each so we can maintain as much data as possbible
# TODO: we should probably include the original text files in bauble
# and run the conversion script on build
# TODO: add a notes column to geography so we carry over the extra
# geography data(kew regions, notes, etc.) and so that we can add
# notes to them in bauble
import codecs
import os
import re
from optparse import OptionParser
# l1 - Continent, tblLevel1.txt, UTF-8
# l2 - Region, tblLevel2.txt, UTF-8
# l3 - BotanicalCountry, tblLevel4, ISO-8859-15
# l4 - BaseUnit, tblLevel4.txt, ISO-8859-15
# gazette (places), tblGazette.txt, ISO-8859-15
parser = OptionParser()
parser.add_option('-d', '--directory', dest='directory',
help='directory of WGS txt files', metavar='DIR')
(options, args) = parser.parse_args()
if not options.directory:
parser.error('directory required')
cwd, _dummy = os.path.split(__file__)
src_dir = options.directory
class Reader(object):
def __init__(self, filename, encoding='utf8'):
self.file = codecs.open(filename, "r", encoding)
self.headers = self.file.next().strip().split('*')
s = ""
# sanitize the column headers
for h in self.headers:
h2 = h.replace(' ', '_')
s += '(?P<%s>.*?)\*' % h2
s = s[:-2] + '$'
self.line_rx = re.compile(s)
def group(self, line):
m = self.line_rx.match(line.strip())
if m is None:
raise ValueError("could not match:\n%s\n%s" %
(unicode(line), (unicode(s))))
return m.groupdict()
def __iter__(self):
return self
def next(self):
line = self.file.next()
# remove the stupid ,00 decimals at the end of the integers
#line = self.file.next().replace(',00','')
return self.group(line)
# converted rows organized by tdwg_code so we can resolve parents
converted_rows = {}
id_ctr = 1
class Row(dict):
def __init__(self, id=None, name=None, tdwg_code=None, iso_code=None,
parent_id=None):
super(Row, self).__init__(id=id, name=name, tdwg_code=tdwg_code,
iso_code=iso_code, parent_id=parent_id)
columns = ['id', 'name', 'tdwg_code', 'iso_code', 'parent_id']
def __getattr__(self, item):
if item in self:
return self[item]
else:
return getattr(self, item)
def __setattr__(self, key, value):
self[key] = value
def csv(self):
s = []
for c in self.columns:
if self[c] is None:
#s.append('None')
s.append('')
elif c is 'id' or c is 'parent_id':
s.append(self[c])
else:
s.append('"%s"' % self[c].encode('utf8'))
# s.append(quote(self[c]))
return ','.join(s)
def convert_level1():
global converted_data, id_ctr
reader = Reader(os.path.join(src_dir, 'tblLevel1.txt'), 'utf8')
for line in reader:
r = Row(id=str(id_ctr), name=line['L1_continent'],
tdwg_code=line['L1_code'])
converted_rows[line['L1_code']] = r
print(r.csv())
id_ctr += 1
def convert_level2():
global converted_data, id_ctr
reader = Reader(os.path.join(src_dir, 'tblLevel2.txt'), 'utf8')
for line in reader:
r = Row(id=str(id_ctr), name=line['L2_region'],
tdwg_code=line['L2_code'], iso_code=line['L2_ISOcode'])
r.parent_id = converted_rows[line['L1_code']]['id']
converted_rows[line['L2_code']] = r
print(r.csv())
id_ctr += 1
def convert_level3():
global converted_data, id_ctr
reader = Reader(os.path.join(src_dir, 'tblLevel3.txt'), 'iso-8859-15')
for line in reader:
r = Row(id=str(id_ctr), name=line['L3_area'],
tdwg_code=line['L3_code'], iso_code=line['L3_ISOcode'])
#r.parent_id = converted_rows[line['L2_code']]['id']
r['parent_id'] = converted_rows[line['L2_code']]['id']
converted_rows[line['L3_code']] = r
print(r.csv())
id_ctr += 1
def convert_level4():
global converted_data, id_ctr
reader = Reader(os.path.join(src_dir, 'tblLevel4.txt'), 'iso-8859-15')
for line in reader:
# skip redundant lines from level 3
if line['L4_code'].endswith('-OO'):
continue
r = Row(id=str(id_ctr), name=line['L4_country'],
tdwg_code=line['L4_code'], iso_code=line['L4_ISOcode'])
r.parent_id = converted_rows[line['L3_code']]['id']
converted_rows[line['L4_code']] = r
print(r.csv())
id_ctr += 1
def convert_gazetteer():
global converted_data, id_ctr
reader = Reader(os.path.join(src_dir, 'tblGazetteer.txt'), 'iso-8859-15')
for line in reader:
# try to only include those things that are unique to the gazeteer
if line['L3_code'] in converted_rows and \
converted_rows[line['L3_code']]['name'] == line['Gazetteer']:
continue
elif line['L4_code'] in converted_rows and \
converted_rows[line['L4_code']]['name'] == line['Gazetteer']:
continue
# TODO: create two rows, one for the gazetteer data and one for the
# kew data
r = Row(id=str(id_ctr), name=line['Gazetteer'],
tdwg_code=line['ID'])
# throw out anything that doesn't have a name, there seems
# to be at least one row that doesn't have a name and is really just
# a place holder for a kew region
if line['Synonym'] != '':
#print '%s == %s' % (line['Gazetteer'].encode('utf8'), line['Synonym'].encode('utf8'))
pass
if r.name == '' or line['Synonym'] != '':
continue
try:
r.parent_id = converted_rows[line['L4_code']]['id']
except KeyError as e:
try:
r.parent_id = converted_rows[line['L3_code']]['id']
except KeyError as e:
try:
r.parent_id = converted_rows[line['L2_code']]['id']
except KeyError as e:
try:
r.parent_id = converted_rows[line['L1_code']]['id']
except KeyError as e:
pass
# add the converted rows and print out the csv line
converted_rows[line['ID']] = r
print(r.csv())
id_ctr += 1
def main():
global id_ctr, converted_rows
print(','.join(['"%s"' % c for c in Row.columns]))
convert_level1()
convert_level2()
convert_level3()
convert_level4()
convert_gazetteer()
print(Row(id='%s' % id_ctr, name='Cultivated').csv())
id_ctr += 1
if __name__ == "__main__":
main()
| 31.470852
| 98
| 0.578655
| 963
| 7,018
| 4.050883
| 0.239875
| 0.069982
| 0.065368
| 0.053576
| 0.340682
| 0.302487
| 0.273263
| 0.260446
| 0.207383
| 0.169444
| 0
| 0.021535
| 0.285409
| 7,018
| 222
| 99
| 31.612613
| 0.756331
| 0.209176
| 0
| 0.286713
| 0
| 0
| 0.106618
| 0
| 0
| 0
| 0
| 0.004505
| 0
| 1
| 0.097902
| false
| 0.013986
| 0.027972
| 0.006993
| 0.188811
| 0.048951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
298a78605da6ac7b5a8526389d33bd97829a9e95
| 12,607
|
py
|
Python
|
tornado_sockets/views/timeseries.py
|
willjschmitt/joulia-webserver
|
712decb749c2d1bda71af49ecab245378bf30078
|
[
"FTL"
] | null | null | null |
tornado_sockets/views/timeseries.py
|
willjschmitt/joulia-webserver
|
712decb749c2d1bda71af49ecab245378bf30078
|
[
"FTL"
] | 95
|
2016-08-04T01:59:37.000Z
|
2021-06-10T18:41:46.000Z
|
tornado_sockets/views/timeseries.py
|
willjschmitt/joulia-webserver
|
712decb749c2d1bda71af49ecab245378bf30078
|
[
"FTL"
] | null | null | null |
"""Handles websockets and asynchronous endpoints provided by Tornado instead
of Django, but use the Django model framework for a database ORM.
"""
import datetime
import functools
import json
import logging
import tornado.escape
from tornado.ioloop import IOLoop
import tornado.web
import tornado.websocket
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
from rest_framework.authtoken.models import Token
from rest_framework.utils import model_meta
from brewery.models import AssetSensor
from brewery.models import RecipeInstance
from brewery.models import TimeSeriesDataPoint
from brewery.serializers import TimeSeriesDataPointSerializer
from joulia.random import random_string
from tornado_sockets.views.django import DjangoAuthenticatedWebSocketHandler
LOGGER = logging.getLogger(__name__)
class TimeSeriesSocketHandler(DjangoAuthenticatedWebSocketHandler):
"""A websocket request handler/connection used for a two-way connection
for streaming sensor data between a client and the webserver. Allows
for real-time streaming of sensor data as soon as it is posted.
Client posts a subscription request, which then triggers the handler
to send any updates to that sensor immediately to the client.
Attributes:
waiters: (class-level) - A set containing all of the current connection
handlers that are active.
subscriptions: (class-level) - A dictionary mapping to connection
handlers. Key is specified as a tuple of (recipe_instance_pk,
sensor_pk).
controller_requestmap: (class-level) - A dictionary mapping a websocket
connection to a brewhouse object. Used for indicating if a
connection exists with a brewhouse.
controller_controllermap: (class-level) A dictionary mapping a brewhouse
to the websocket connection to it. Used for indicating if a
connection exists with a brewhouse.
source_id: Identifies a unique connection with a short hash, which we
can use to compare new data points to, and see if the socket was the
one that originated it, and thusly should not
"""
waiters = set()
subscriptions = {}
controller_requestmap = {}
controller_controllermap = {}
def __init__(self, *args, **kwargs):
super(TimeSeriesSocketHandler, self).__init__(*args, **kwargs)
self.auth = None
self.recipe_instance_pk = None
self.source_id = random_string(4)
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
def _authenticate(self):
"""If the connection comes from authentication associating it with a
particular Brewhouse, make sure we store the connection in a mapping
between the websocket and the brewhouse.
Stores this request in a class-level map to indicate we have an
established connection with a Brewhouse controller.
"""
if self.auth is not None and isinstance(self.auth, Token):
if self.auth.brewhouse_pk:
self.controller_controllermap[self.auth.brewhouse_pk] = self
self.controller_requestmap[self] = self.auth.brewhouse_pk
def _unauthenticate(self):
"""Remove this request from the class-level maps to indicate we have
lost connection with the Brewhouse.
"""
if self.auth is not None and isinstance(self.auth, Token):
if self.auth.brewhouse_pk:
del self.controller_controllermap[self.auth.brewhouse_pk]
del self.controller_requestmap[self]
def open(self):
"""Handles the opening of a new websocket connection for streaming data.
"""
LOGGER.info("New websocket connection incoming from %s.",
self.get_current_user())
self.waiters.add(self)
self._authenticate()
def on_close(self):
"""Handles the closing of the websocket connection, removing any
subscriptions.
"""
LOGGER.info("Websocket connection from %s ended.",
self.get_current_user())
self.waiters.remove(self)
self.unsubscribe()
self._unauthenticate()
def on_message(self, message):
"""Handles an incoming message in a websocket. Determines what subaction
to route it to, and calls that sub action.
Args:
message: the incoming raw message from the websocket.
"""
parsed_message = tornado.escape.json_decode(message)
self.recipe_instance_pk = parsed_message['recipe_instance']
if not self.check_permission():
return
# Subscription to a signal.
if 'subscribe' in parsed_message:
self.subscribe(parsed_message)
# Submission of a new datapoint.
else:
self.new_data(parsed_message)
def check_permission(self):
"""Checks if the user has access to the ``recipe_instance``."""
permitted = True
recipe_instance = RecipeInstance.objects.get(pk=self.recipe_instance_pk)
if not permitted:
LOGGER.error("Forbidden request from %s for %d.",
self.get_current_user(), recipe_instance)
return permitted
# TODO(willjschmitt): Get this working again.
# user = get_current_user(self)
# brewhouse = recipe_instance.brewhouse
# brewery = brewhouse.brewery
# company = brewery.company
# has_permission = is_member_of_brewing_company(user,company)
#
# if not has_permission:
# LOGGER.error("User %s attempted to access brewhouse they do not"
# " have access to (%s)",
# user, recipe_instance.brewhouse)
#
# return has_permission
def subscribe(self, parsed_message):
"""Handles a subscription request.
Args:
parsed_message: Data received from websocket.
"""
LOGGER.info('New subscription received from %s: %s.',
self.get_current_user(), parsed_message)
recipe_instance_pk = parsed_message['recipe_instance']
sensor_pk = parsed_message['sensor']
history_time = parsed_message.get('history_time', None)
self._add_subscription(recipe_instance_pk, sensor_pk)
historical_timedelta = \
datetime.timedelta(seconds=history_time) if history_time else None
self._write_historical_data(sensor_pk, recipe_instance_pk,
timedelta=historical_timedelta)
def unsubscribe(self):
for subscription in self.subscriptions.values():
if self in subscription:
subscription.remove(self)
def _add_subscription(self, recipe_instance_pk, sensor_pk):
key = (recipe_instance_pk, sensor_pk)
if key not in self.subscriptions:
self.subscriptions[key] = set()
self.subscriptions[key].add(self)
@classmethod
def _write_data_response_chunked(cls, websocket, data_points,
chunk_size=1000):
"""Writes serialized datas in chunks asynchronously.
Args:
websocket: The websocket to write messages on.
data_points: The data to write.
chunk_size: The number of data points to write as a maximum.
"""
assert chunk_size > 0
lower_bound = 0
total_points = len(data_points)
while lower_bound < total_points:
upper_bound = min(lower_bound + chunk_size, total_points)
chunk = data_points[lower_bound:upper_bound]
IOLoop.current().add_callback(
functools.partial(cls._write_data_response, websocket, chunk))
lower_bound += chunk_size
@staticmethod
def _write_data_response(websocket, data_points):
"""Generates a serialized data message with headers for deserialization.
Writes output to websocket.
"""
assert data_points
model_info = model_meta.get_field_info(TimeSeriesDataPoint)
field_names = TimeSeriesDataPointSerializer(data_points[0])\
.get_field_names({}, model_info)
response = {
'headers': list(field_names),
'data': [],
}
LOGGER.debug("Writing %d datapoints out.", len(data_points))
for data_point in data_points:
serializer = TimeSeriesDataPointSerializer(data_point)
data = serializer.data
data_entry = []
for field_name in field_names:
data_entry.append(data[field_name])
response['data'].append(data_entry)
websocket.write_message(json.dumps(response))
def _write_historical_data(self, sensor_pk, recipe_instance_pk,
timedelta=None):
"""Sends all the data that already exists, limited to now + timedelta.
If data exists, but is older than the timedelta, returns the last point
observed.
Args:
sensor_pk: The primary key for the sensor to send data.
recipe_instance_pk: The primary key for the recipe instance to send
data.
timedelta: The amount of time + now to filter against for sending to
client. Negative indicates data in the past. Positive indicates
data in the future, which will be none. If unset (set to None),
no time filter will be applied and all historical data will be
written.
"""
data_points = TimeSeriesDataPoint.objects.filter(
sensor=sensor_pk, recipe_instance=recipe_instance_pk)
if timedelta is not None:
now = timezone.now()
filter_start_time = now + timedelta
data_points = data_points.filter(time__gt=filter_start_time)
data_points = data_points.order_by("time")
if data_points.exists():
self._write_data_response_chunked(self, data_points)
else:
try:
latest_point = TimeSeriesDataPoint.objects.filter(
sensor=sensor_pk, recipe_instance=recipe_instance_pk)\
.latest()
self._write_data_response_chunked(self, [latest_point])
except TimeSeriesDataPoint.DoesNotExist:
pass
def new_data(self, parsed_message):
"""Handles a new data point request.
Args:
parsed_message: Data received from websocket.
"""
LOGGER.debug('New data received from %s: %s.', self.get_current_user(),
parsed_message)
data = parsed_message
data["source"] = self.source_id
serializer = TimeSeriesDataPointSerializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
@classmethod
def send_updates(cls, new_data_point):
"""Sends a new data point to all of the waiters watching the sensor it
is associated with.
Args:
new_data_point: An instance of a TimeSeriesDataPoint to be streamed
to any subscribers.
"""
key = (new_data_point.recipe_instance.pk, new_data_point.sensor.pk)
if key not in cls.subscriptions:
LOGGER.debug("No subscribers for %s.", new_data_point.sensor.name)
return
subscriptions = cls.subscriptions[key]
LOGGER.info("Sending value %s for sensor %s to %d waiters.",
new_data_point.value, new_data_point.sensor,
len(subscriptions))
for waiter in subscriptions:
# Skip sending data points to the subscriber that sent it.
source = new_data_point.source
if source is not None and source == waiter.source_id:
continue
LOGGER.debug("Writing value %s for sensor %s for %s.",
new_data_point.value, new_data_point.sensor,
waiter.get_current_user())
cls._write_data_response_chunked(waiter, [new_data_point])
@receiver(post_save, sender=TimeSeriesDataPoint)
def time_series_watcher(sender, instance, **kwargs):
"""A django receiver watching for any saves on a datapoint to send
to waiters
"""
LOGGER.debug("Observed newly saved datapoint: %s.", instance)
TimeSeriesSocketHandler.send_updates(instance)
| 39.396875
| 80
| 0.649005
| 1,468
| 12,607
| 5.398501
| 0.223433
| 0.042397
| 0.028265
| 0.011987
| 0.17653
| 0.148391
| 0.107508
| 0.082524
| 0.073691
| 0.059811
| 0
| 0.000885
| 0.282938
| 12,607
| 319
| 81
| 39.520376
| 0.875774
| 0.324264
| 0
| 0.08284
| 0
| 0
| 0.05337
| 0
| 0
| 0
| 0
| 0.003135
| 0.011834
| 1
| 0.100592
| false
| 0.005917
| 0.112426
| 0.005917
| 0.266272
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
298ede4e030cbedbbcf9ef9a22b8209288395ba1
| 1,751
|
py
|
Python
|
plugins/train/model/dfaker.py
|
aaman123/faceswap
|
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
|
[
"MIT"
] | 2
|
2021-11-11T08:29:01.000Z
|
2021-11-11T08:34:50.000Z
|
plugins/train/model/dfaker.py
|
aaman123/faceswap
|
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
|
[
"MIT"
] | null | null | null |
plugins/train/model/dfaker.py
|
aaman123/faceswap
|
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
""" DFaker Model
Based on the dfaker model: https://github.com/dfaker """
from keras.initializers import RandomNormal
from keras.layers import Input
from lib.model.nn_blocks import Conv2DOutput, UpscaleBlock, ResidualBlock
from .original import Model as OriginalModel, KerasModel
class Model(OriginalModel):
""" Dfaker Model """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.input_shape = (64, 64, 3)
self.encoder_dim = 1024
self.kernel_initializer = RandomNormal(0, 0.02)
def decoder(self, side):
""" Decoder Network """
input_ = Input(shape=(8, 8, 512))
var_x = input_
var_x = UpscaleBlock(512, res_block_follows=True)(var_x)
var_x = ResidualBlock(512, kernel_initializer=self.kernel_initializer)(var_x)
var_x = UpscaleBlock(256, res_block_follows=True)(var_x)
var_x = ResidualBlock(256, kernel_initializer=self.kernel_initializer)(var_x)
var_x = UpscaleBlock(128, res_block_follows=True)(var_x)
var_x = ResidualBlock(128, kernel_initializer=self.kernel_initializer)(var_x)
var_x = UpscaleBlock(64)(var_x)
var_x = Conv2DOutput(3, 5, name="face_out_{}".format(side))(var_x)
outputs = [var_x]
if self.config.get("learn_mask", False):
var_y = input_
var_y = UpscaleBlock(512)(var_y)
var_y = UpscaleBlock(256)(var_y)
var_y = UpscaleBlock(128)(var_y)
var_y = UpscaleBlock(64)(var_y)
var_y = Conv2DOutput(1, 5, name="mask_out_{}".format(side))(var_y)
outputs.append(var_y)
return KerasModel([input_], outputs=outputs, name="decoder_{}".format(side))
| 38.911111
| 85
| 0.656768
| 230
| 1,751
| 4.721739
| 0.326087
| 0.066298
| 0.04512
| 0.051565
| 0.325967
| 0.270718
| 0.270718
| 0.270718
| 0.270718
| 0.160221
| 0
| 0.041789
| 0.221017
| 1,751
| 44
| 86
| 39.795455
| 0.754399
| 0.067961
| 0
| 0
| 0
| 0
| 0.026103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.129032
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29902382e677a01b98fcb79346e95e0a9cada7e6
| 2,459
|
py
|
Python
|
classify/train.py
|
gallupliu/QA
|
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
|
[
"Apache-2.0"
] | 3
|
2017-09-06T07:10:05.000Z
|
2019-08-01T03:27:39.000Z
|
classify/train.py
|
gallupliu/QA
|
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
|
[
"Apache-2.0"
] | 2
|
2018-01-25T14:46:40.000Z
|
2018-01-25T14:53:13.000Z
|
classify/train.py
|
gallupliu/QA
|
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@author: gallupliu
@contact: [email protected]
@version: 1.0
@license: Apache Licence
@file: train.py
@time: 2018/3/5 22:58
"""
import tensorflow as tf
from classify.dataset import data_utils
from sklearn.model_selection import train_test_split
from classify.model import TextCNN
def dataset_input_fn(ids,labels,batch_size):
dataset = tf.data.Dataset.from_tensor_slices((ids, labels))
dataset = dataset.shuffle(buffer_size=10000)
dataset = dataset.batch(batch_size)
return dataset
if __name__ == "__main__":
text,labels = data_utils.loadfile('./data_with_label.csv')
word2idx, vocab = data_utils.load_embedding('./dataset/test_cut.txt', './dataset/wiki_50.model')
ids = data_utils.get_sentence_ids(text, word2idx)
train_ids,test_ids,train_labels,test_labels = train_test_split(ids,labels,test_size=0.1)
# print(len(text),type(text))
# max_length = count_length(text)
# print(max_length)
# train_word2vec()
# print(type(text))
# print(list(word2idx.keys()))
# dataset = tf.data.Dataset.from_tensor_slices((ids, train_labels))
# iterator = dataset.make_initializable_iterator()
# next_element = iterator.get_next()
train_dataset = dataset_input_fn(train_ids, train_labels, 100)
val_dataset = dataset_input_fn(train_ids, train_labels, 100)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,train_dataset.output_shapes)
next_element,labels = iterator.get_next()
train_iterator_init_op = iterator.make_initializer(train_dataset)
val_iterator_init_op = iterator.make_initializer(val_dataset)
with tf.Session() as sess:
# sess.run(iterator.initializer)
# print(sess.run(next_element))
model = TextCNN(next_element,labels,vocab,120,3,[1,2,3,5],512)
sess.run(tf.global_variables_initializer())
# _,acc,loss = sess.run([model.train_op,model.accuracy,model.loss])
# print(acc,loss)
for _ in range(10):
#训练
sess.run(train_iterator_init_op)
feed_dict = {model.dropout_keep_prob:1.0}
while True:
try:
_, acc, loss = sess.run([model.train_op, model.accuracy, model.loss],feed_dict=feed_dict)
print(acc,loss)
# print(sess.run(next_element),sess.run(labels))
except tf.errors.OutOfRangeError:
break
| 33.684932
| 109
| 0.684831
| 329
| 2,459
| 4.838906
| 0.370821
| 0.035176
| 0.035176
| 0.025126
| 0.238693
| 0.209799
| 0.163317
| 0.163317
| 0.114322
| 0.060302
| 0
| 0.02387
| 0.199268
| 2,459
| 72
| 110
| 34.152778
| 0.784662
| 0.256608
| 0
| 0
| 0
| 0
| 0.041065
| 0.036626
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.1875
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2992bbf3c6e1e4c6fcb24c568c080fff0f59e86b
| 2,299
|
py
|
Python
|
src/cone/app/tests/test_browser_login.py
|
lenadax/cone.app
|
b25c55aedb85e45a962003d2767a22a927cc61c0
|
[
"BSD-3-Clause"
] | 1
|
2022-03-13T17:51:09.000Z
|
2022-03-13T17:51:09.000Z
|
src/cone/app/tests/test_browser_login.py
|
lenadax/cone.app
|
b25c55aedb85e45a962003d2767a22a927cc61c0
|
[
"BSD-3-Clause"
] | 1
|
2021-08-06T08:12:00.000Z
|
2021-08-06T08:12:00.000Z
|
src/cone/app/tests/test_browser_login.py
|
lenadax/cone.app
|
b25c55aedb85e45a962003d2767a22a927cc61c0
|
[
"BSD-3-Clause"
] | null | null | null |
from cone.app import get_root
from cone.app import security
from cone.app import testing
from cone.app.browser.login import login_view
from cone.app.browser.login import logout_view
from cone.tile import render_tile
from cone.tile.tests import TileTestCase
from webob.response import Response
from webob.exc import HTTPFound
class TestBrowserLogin(TileTestCase):
layer = testing.security
def test_login_view(self):
root = get_root()
request = self.layer.new_request()
response = login_view(root, request)
self.assertTrue(isinstance(response, Response))
def test_logout_view(self):
root = get_root()
request = self.layer.new_request()
response = logout_view(root, request)
self.assertTrue(isinstance(response, HTTPFound))
def test_logout_tile(self):
root = get_root()
request = self.layer.new_request()
with self.layer.authenticated('admin'):
render_tile(root, request, 'logout')
self.checkOutput("""
ResponseHeaders([('Set-Cookie', 'auth_tkt=; Max-Age=0; Path=/; expires=...'),
('Set-Cookie', 'auth_tkt=; Domain=example.com; Max-Age=0; Path=/; expires=...'),
('Set-Cookie', 'auth_tkt=; Domain=.example.com; Max-Age=0; Path=/; expires=...')])
""", str(request.response.headers))
def test_login_form(self):
root = get_root()
request = self.layer.new_request()
res = render_tile(root, request, 'loginform')
self.assertTrue(res.find('<form action="http://example.com/login"') > -1)
# Authenticate with wrong credentials
request.params['loginform.user'] = 'foo'
request.params['loginform.password'] = 'bar'
request.params['action.loginform.login'] = '1'
res = render_tile(root, request, 'loginform')
self.assertTrue(res.find('class="errormessage">Invalid Credentials') > -1)
# Authenticate with correct credentials
request.params['loginform.user'] = security.ADMIN_USER
request.params['loginform.password'] = security.ADMIN_PASSWORD
request.params['action.loginform.login'] = '1'
render_tile(root, request, 'loginform')
self.assertTrue(isinstance(request.environ['redirect'], HTTPFound))
| 40.333333
| 94
| 0.660722
| 270
| 2,299
| 5.511111
| 0.255556
| 0.073925
| 0.060484
| 0.040323
| 0.505376
| 0.455645
| 0.370968
| 0.278226
| 0.278226
| 0.223118
| 0
| 0.003842
| 0.207482
| 2,299
| 56
| 95
| 41.053571
| 0.812843
| 0.031753
| 0
| 0.26087
| 0
| 0.043478
| 0.239316
| 0.046334
| 0
| 0
| 0
| 0
| 0.108696
| 1
| 0.086957
| false
| 0.043478
| 0.195652
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29934fcb2bb4b9dd5b0dcf07accd0d89e7187b95
| 752
|
py
|
Python
|
View/telaEditarControle.py
|
IuriBritoDev/TKINO
|
3c689788324bd5badc84c7969f331b076046c211
|
[
"MIT"
] | null | null | null |
View/telaEditarControle.py
|
IuriBritoDev/TKINO
|
3c689788324bd5badc84c7969f331b076046c211
|
[
"MIT"
] | null | null | null |
View/telaEditarControle.py
|
IuriBritoDev/TKINO
|
3c689788324bd5badc84c7969f331b076046c211
|
[
"MIT"
] | null | null | null |
from tkinter import *
def TelaEditarControle(tela, controle):
# Cria a tela de configuração
telaEditar = Toplevel(tela)
telaEditar.title('EDITA CONTROLE')
telaEditar.geometry('280x180+620+120')
telaEditar['bg'] = 'gray'
telaEditar.resizable(False,False)
telaEditar.focus_force()
telaEditar.grab_set()
# Lables de porta e arduino
lblPorta = Label(telaEditar,text='VALOR CONTROLADOR',foreground='black',bg='gray',anchor=W,)
lblPorta.place(x=50,y=20)
slide = Scale(telaEditar,from_=10,to=90,orient=HORIZONTAL)
slide.place(x=95,y=70,width=100,height=50)
# Botão de conexão
btnCnt = Button(telaEditar,text='SALVAR',command = '',foreground='white',bg='black')
btnCnt.place(x=210,y=140)
| 32.695652
| 96
| 0.694149
| 98
| 752
| 5.295918
| 0.673469
| 0.034682
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.162234
| 752
| 23
| 97
| 32.695652
| 0.768254
| 0.093085
| 0
| 0
| 0
| 0
| 0.113402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29964b779c4f66694fdf10686261f2a4a69976ee
| 4,531
|
py
|
Python
|
src/multiuserpad/twitchutil.py
|
codingwithsomeguy/multiuserpad
|
caca02bb3f98e855a0980b8ac9947c05d5b89463
|
[
"MIT"
] | 4
|
2020-04-14T03:25:06.000Z
|
2020-11-03T14:30:20.000Z
|
src/multiuserpad/twitchutil.py
|
codingwithsomeguy/multiuserpad
|
caca02bb3f98e855a0980b8ac9947c05d5b89463
|
[
"MIT"
] | null | null | null |
src/multiuserpad/twitchutil.py
|
codingwithsomeguy/multiuserpad
|
caca02bb3f98e855a0980b8ac9947c05d5b89463
|
[
"MIT"
] | null | null | null |
# TODO: Generalize this with the discordutil module, factor out oauth
import logging
from urllib.parse import urlencode
import requests
import json
from flask import request, redirect, session
from creds import get_creds
from config import config
from sessionutil import invalidate_session
def twitch_login():
ss = get_creds()
params = {
"client_id": ss["twitch_client_id"],
"redirect_uri": "%s/api/ident/twitchcb" % config.MY_URL,
"state": True,
"response_type": "code",
"scope": "openid", # not user_read, id_token, or user_subscriptions
# need to request objects for the later userinfo request
"claims": json.dumps({
"id_token": {},
"userinfo": {
"picture": "null",
"preferred_username": "null",
}
})
}
redirect_url = "%s/oauth2/authorize?%s" % (
config.TWITCH_API_URL, urlencode(params))
return redirect(redirect_url)
def twitch_login_cb():
user_authenticated = False
result = "Missing code"
code = request.args.get("code")
scope = request.args.get("scope")
if code is not None and scope == "openid":
# fetch a token
user_authenticated = fetch_twitch_token(code)
if user_authenticated == True:
user_fetch_worked = fetch_twitch_user()
if not user_fetch_worked:
invalidate_session()
return redirect("/")
return redirect("/user")
return result
# TODO: this should be cached until expiration
# The server can reuse this for API requests
# TODO: factor this out with discord auth to oauth..auth
def fetch_twitch_token(code):
# on success, session has the token to use
ss = get_creds()
result = False
body_payload = {
"client_id": ss["twitch_client_id"],
"client_secret": ss["twitch_client_secret"],
"grant_type": "authorization_code",
"code": code,
"redirect_uri": "%s/api/ident/twitchcb" % config.MY_URL,
}
# redirect_uri may need to match original cb URI (twitch_login_cb)
extra_headers = {
"Content-Type": "application/x-www-form-urlencoded",
}
response = requests.post(
"%s/oauth2/token" % config.TWITCH_API_URL,
data=urlencode(body_payload),
headers=extra_headers)
logging.debug("fetch_twitch_token: headers: %s\n\traw response: %s" % (
response.headers, response.text))
try:
token_response = json.loads(response.text)
if "access_token" in token_response and "refresh_token" in token_response:
session["token_response"] = token_response
result = True
else:
logging.warn("NO refresh_token AVAILABLE, BAD AUTH!")
except ValueError as e:
logging.error("ValueError: " + e)
result = False
return result
def fetch_twitch_user():
if "token_response" not in session:
return False
token = session["token_response"]
if "token_type" not in token or "access_token" not in token:
return False
auth_header_token_type = ""
# token request returns "bearer", not "Bearer" sometimes
if token["token_type"] in ["bearer", "Bearer"]:
auth_header_token_type = "Bearer"
# this are attached to session in fetch_twitch_token
extra_headers = {
"Authorization": "%s %s" % (
auth_header_token_type,
token["access_token"]
),
}
response = requests.get(
"%s/oauth2/userinfo" % config.TWITCH_API_URL,
headers=extra_headers)
logging.debug("fetch_twitch_user: headers: %s\n\traw response: %s" % (
response.headers, response.text))
twitch_avatar_url = None
twitch_username = None
twitch_id = None
try:
logging.debug("fetch_twitch_user response: %s" % response.text)
parsed_response = json.loads(response.text)
twitch_id = parsed_response["sub"]
twitch_username = parsed_response["preferred_username"]
twitch_avatar_url = parsed_response["picture"]
except ValueError as e:
logging.error("ValueError: " + e)
return False
ss = get_creds()
# TODO: move away from "discord" to a generic auth provider
session["discord"] = {
"full_username": twitch_username,
# TODO: get the right avatar from picture
"avatar_url": twitch_avatar_url,
"id": twitch_id,
"authorized": twitch_id in ss["authorized_twitch_ids"]
}
return True
| 31.685315
| 82
| 0.63827
| 546
| 4,531
| 5.086081
| 0.274725
| 0.031689
| 0.023046
| 0.019445
| 0.190493
| 0.14116
| 0.123875
| 0.093626
| 0.063378
| 0.03529
| 0
| 0.000894
| 0.259766
| 4,531
| 142
| 83
| 31.908451
| 0.827072
| 0.140366
| 0
| 0.234234
| 0
| 0
| 0.214378
| 0.030405
| 0
| 0
| 0
| 0.007042
| 0
| 1
| 0.036036
| false
| 0
| 0.072072
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2996df27209f1d350199a6a54bcf14fae9ad1a1a
| 6,173
|
py
|
Python
|
src/pixel_sorting.py
|
in3rtial/imgsrt
|
2dec237b7d797d9964ed874c4e4d72f7eb23eaf0
|
[
"CC0-1.0"
] | 2
|
2015-11-08T09:22:30.000Z
|
2020-10-15T03:42:24.000Z
|
src/pixel_sorting.py
|
in3rtial/imgsrt
|
2dec237b7d797d9964ed874c4e4d72f7eb23eaf0
|
[
"CC0-1.0"
] | null | null | null |
src/pixel_sorting.py
|
in3rtial/imgsrt
|
2dec237b7d797d9964ed874c4e4d72f7eb23eaf0
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/python3
"""transliteration of Kim Asendorf's pixel sorting script"""
from copy import copy
from random import random, gauss
from PIL import Image
from numpy import int32
from argparse import ArgumentParser
# PROGRAM CONSTANTS
# rgb(103, 105, 128)
BLACK_VALUE = int32(-10000000)
# rgb(164, 114, 128)
WHITE_VALUE = int32((255 << 24) + (230 << 16) + (230 << 8) + 230)
BRIGHTNESS_VALUE = int32(30)
# PIXEL CONVERSION FUNCTIONS
def get_pixel_value(pixel):
"""rgb pixel to int32 processing representation"""
return(int32((((255 << 8) | pixel[0]) << 8 | pixel[1]) << 8 | pixel[2]))
def get_pixel_brightness(pixel):
"""rgb pixel to brightness value"""
return(max((pixel[0], pixel[1], pixel[2])) / 255 * 100)
# PIXEL FINDING FUNCTIONS
def get_next_satisfying(vector, starting_position, condition_fun):
"""find next pixel in the vector after starting position
that satisfies the condition (boolean)
return -1 if not found"""
position = starting_position
while(position < len(vector) and
not(condition_fun(vector[position]))):
position += 1
if(position == (len(vector) - 1) and
not(condition_fun(vector[position]))):
position = - 1
return(position)
# black mode
def get_next_black(vector, starting_position):
"""next black pixel"""
condition = lambda x: int32(get_pixel_value(x)) > BLACK_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_not_black(vector, starting_position):
"""next non black pixel"""
condition = lambda x: int32(get_pixel_value(x)) < BLACK_VALUE
return get_next_satisfying(vector, starting_position, condition)
# bright mode
def get_next_bright(vector, starting_position):
"""next bright pixel"""
condition = lambda x: int32(get_pixel_brightness(x)) < BRIGHTNESS_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_dark(vector, starting_position):
"""next dark pixel"""
condition = lambda x: int32(get_pixel_brightness(x)) > BRIGHTNESS_VALUE
return get_next_satisfying(vector, starting_position, condition)
# white mode
def get_next_white(vector, starting_position):
"""next white pixel"""
condition = lambda x: int32(get_pixel_value(x)) < WHITE_VALUE
return get_next_satisfying(vector, starting_position, condition)
def get_next_not_white(vector, starting_position):
"""next not white pixel"""
condition = lambda x: int32(get_pixel_value(x)) > WHITE_VALUE
return get_next_satisfying(vector, starting_position, condition)
FIND_FUNCTIONS = ((get_next_black, get_next_not_black), # black
(get_next_bright, get_next_dark), # bright
(get_next_white, get_next_not_white)) # white
# PIXEL SORTING FUNCTIONS
def sort_pixels(vector, mode=0, find=FIND_FUNCTIONS):
"""sort pixel in the given vector"""
assert(mode in (0, 1, 2)), "invalid use case"
vector = copy(vector)
position = 0
pos_end = None
while(position < len(vector)):
if((position == -1) or (pos_end == -1)):
break
position = find[mode][0](vector, position)
pos_end = find[mode][1](vector, position)
vector[position:pos_end] = sorted(vector[position:pos_end],
key=lambda x: get_pixel_value(x))
position = pos_end + 1
return(vector)
# IMAGE TRANSFORMATIONS
def to_vectors(rgb_image, row_or_col):
"""rgb image -> list of lists of RGB tuples"""
assert(rgb_image.mode == "RGB"), "must be a RGB image"""
assert(row_or_col in (0, 1)), "row = 0, col = 1"
vectors = []
x_size, y_size = rgb_image.size
if(row_or_col == 0):
for y_coord in range(0, y_size):
row = []
for x_coord in range(0, x_size):
row.append(rgb_image.getpixel((x_coord, y_coord)))
vectors.append(row)
else:
for x_coord in range(0, x_size):
col = []
for y_coord in range(0, y_size):
col.append(rgb_image.getpixel((x_coord, y_coord)))
vectors.append(col)
return(vectors)
# COMPLETE FUNCTIONS
def sort_image(image, row_or_col, mode=0, prob=1, avg_band_size=1):
"""input: (rgb image, row or column, sort mode, probability of sorting,
average band size for sorting)
output: sorted out image)"""
x_size, y_size = image.size
sigma = avg_band_size / 4
vectors = to_vectors(image, row_or_col)
new_vectors = []
position = 0
while(position < len(vectors)):
if(random() < prob):
# calculate the indices of the rows to sort
to_sort = []
coarseness = int(gauss(avg_band_size, sigma))
for index in range(position, position + coarseness):
if(index >= len(vectors)):
break
else:
to_sort.append(index)
for index in to_sort:
new_vectors.append(sort_pixels(vectors[index], mode))
position += coarseness
else:
new_vectors.append(vectors[position])
position += 1
new_image = []
if(row_or_col == 0):
for vector in new_vectors:
for (red, green, blue) in vector:
new_image.append(int(red))
new_image.append(int(green))
new_image.append(int(blue))
else:
for i in range(0, y_size):
for vector in new_vectors:
(red, green, blue) = vector[i]
new_image.append(int(red))
new_image.append(int(green))
new_image.append(int(blue))
return(Image.fromstring('RGB', (x_size, y_size), bytes(new_image)))
__all__ = ["sort_image"]
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-i", dest="input_image_file", required=True, type=str, help="input")
parser.add_argument("-o", dest="output_image_file", required=True, type=str, help="output")
args = parser.parse_args()
image = Image.open(args.input_image_file)
sort_image(image, 0).save(args.output_image_file)
| 32.661376
| 95
| 0.638749
| 821
| 6,173
| 4.580999
| 0.190012
| 0.035363
| 0.076044
| 0.042808
| 0.373039
| 0.326243
| 0.319596
| 0.289817
| 0.244616
| 0.244616
| 0
| 0.025768
| 0.245586
| 6,173
| 188
| 96
| 32.835106
| 0.781834
| 0.135752
| 0
| 0.271186
| 0
| 0
| 0.023482
| 0
| 0
| 0
| 0
| 0
| 0.025424
| 1
| 0.101695
| false
| 0
| 0.042373
| 0
| 0.194915
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
299c02cae606323659e0dd5bb1c799eaccfa8b0a
| 2,109
|
py
|
Python
|
setup.py
|
tilakpatidar/pytest-snowflake_bdd
|
db49f0a6d844828b607a2717b96bba517995cf72
|
[
"MIT"
] | null | null | null |
setup.py
|
tilakpatidar/pytest-snowflake_bdd
|
db49f0a6d844828b607a2717b96bba517995cf72
|
[
"MIT"
] | null | null | null |
setup.py
|
tilakpatidar/pytest-snowflake_bdd
|
db49f0a6d844828b607a2717b96bba517995cf72
|
[
"MIT"
] | 1
|
2022-01-24T08:26:08.000Z
|
2022-01-24T08:26:08.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os
from setuptools import setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding='utf-8').read()
gh_run_number = os.environ.get("BUILD_NUMBER", None)
build_number = None if gh_run_number is None or gh_run_number == "" else gh_run_number
version = '0.2.2'
setup(
name='pytest-snowflake_bdd',
version=f"{version}-{build_number}" if build_number else version,
author='Tilak Patidar',
author_email='[email protected]',
maintainer='Tilak Patidar',
maintainer_email='[email protected]',
license='MIT',
url='https://github.com/tilakpatidar/pytest-snowflake_bdd',
description='Setup test data and run tests on snowflake in BDD style!',
long_description=read('README.rst'),
py_modules=['pytest_snowflake_bdd'],
python_requires='>=3.6.7',
install_requires=['pytest>=6.2.0', 'pytest-bdd>=3.2.1', 'snowflake-sqlalchemy>=1.3.2', 'SQLAlchemy>=1.4.27', \
'pandas>=0.25.3', 'python-dateutil>=2.8.2'],
tests_require=[
'tox',
],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Database',
'Topic :: Software Development :: Testing :: BDD',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Pytest',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
packages=["pytest_snowflake_bdd"],
entry_points={
'pytest11': [
'pytest-snowflake-bdd = pytest_snowflake_bdd.plugin',
],
},
)
| 33.47619
| 114
| 0.625889
| 246
| 2,109
| 5.227642
| 0.447154
| 0.103421
| 0.136081
| 0.080871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023708
| 0.220009
| 2,109
| 62
| 115
| 34.016129
| 0.758055
| 0.019915
| 0
| 0.096154
| 0
| 0
| 0.505569
| 0.069734
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019231
| false
| 0
| 0.057692
| 0
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29a35e6f75f695c4d26d13d7a9c5d6dff08f119d
| 6,228
|
py
|
Python
|
aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | null | null | null |
aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | null | null | null |
aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py
|
CloudGround/aptitudetech_private
|
d4d150226bd33ea0c76086264286ae7cae52457f
|
[
"MIT"
] | 1
|
2019-05-17T00:04:05.000Z
|
2019-05-17T00:04:05.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Aptitudetech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class SimplifiedTimeReporting(Document):
def onload_post_render(self):
import json
#https://stackoverflow.com/a/610923/8291000
issues = self.load_closed_issues()
remaining_issues = []
try:
if self.timesheet_detail:
td_issues=[td.issue for td in self.get('timesheet_detail')]
for issue in issues: #Load newest issues - not in the timesheet table yet
if issue['name'] not in td_issues:
remaining_issues.append(issue)
self.add_timesheet_rows(remaining_issues)
except AttributeError:
self.employee = frappe.db.get_value("Employee", {"user_id" : frappe.session.user}, "name")
self.add_timesheet_rows(issues)
def add_timesheet_rows(self, issues = []):
import datetime
from datetime import datetime
if issues:
for issue in issues:
end_time_obj = datetime.strptime(issue['reported_work_end_time'].split('.')[0], '%Y-%m-%d %H:%M:%S')
start_time_obj = datetime.strptime(issue['reported_work_start_time'].split('.')[0], '%Y-%m-%d %H:%M:%S')
diff_time = self.get_diff_time(start_time_obj, end_time_obj)
detail = {
'issue': issue['name'],
'from_time': issue['reported_work_start_time'],
'to_time': issue['reported_work_end_time'],
'note' : issue['description'],
'project' : issue['project'] if issue['project'] else None,
'hours' : diff_time
}
self.append("timesheet_detail", detail)
def before_save(self):
import json, datetime
from frappe.utils import now_datetime
from datetime import datetime
_now = now_datetime()
self.posting_date = datetime.strptime(str(_now).split('.')[:-1][0], '%Y-%m-%d %H:%M:%S')
self.total_reported_time = self.get_total_reported_time()
self.total_captured_time = self.get_total_captured_time()
def on_submit(self):
import json
import datetime
from frappe.utils import now_datetime, getdate
if self.workflow_state == 'Approved' or self.workflow_state == 'To Approve':
_now = now_datetime()
expenses_list = []
if self.expenses:
data = json.loads(str(frappe.as_json(self.expenses))) #need to be as_json, otherwhise the json won't load because of the datetime attribute
for expense in data:
try:
description = expense["description"]
except:
description = ""
exp = {
'expense_date' : expense['date'],
'expense_type' : expense['reason'],
'description' : description,
'claim_amount' : expense['claim_amount'],
'sanctioned_amount' : expense['claim_amount']
}
expenses_list.append(exp)
frappe.new_doc('Expense Claim').update({
"employee": self.employee,
"approval_status" : "Draft",
"posting_date" : datetime.datetime.now().date(),
"expenses" : expenses_list,
"company" : "Aptitude Technologies"
}).save()
for detail in self.get('timesheet_detail'):
if not detail.billable:
continue
service = frappe.db.get_value('Dynamic Link', {'parenttype': 'Issue', 'parent': detail.issue, 'link_doctype': 'Service'}, 'link_name')
if not service:
continue
service_plan = frappe.db.get_value('Service', service, 'service_plan')
metered_feature = frappe.db.get_value('Metered Feature', {'service_plan': service_plan})
if not metered_feature:
continue
args = {
'service': service,
'customer': frappe.db.get_value('Service', service, 'customer'),
'metered_feature': metered_feature,
'consumed_units': detail.hours,
'start_date': getdate(detail.from_time),
'end_date': getdate(detail.to_date),
'item_group': frappe.db.get_value('Employee', self.employee, 'employee_name'),
'item_code': detail.name,
'item_type': detail.activity_type,
'unit': 'Hours'
}
frappe.new_doc('Metered Feature Units Log').update(args).insert()
def get_total_reported_time(self):
import json
total_reported_time = 0
issues = json.loads(str(frappe.as_json(self.timesheet_detail)))
for issue in issues:
total_reported_time = total_reported_time + issue['hours']
return total_reported_time
def get_total_captured_time(self):
import datetime
from datetime import datetime
total_captured_time = 0
issues = self.load_closed_issues()
for issue in issues:
end_time_obj = datetime.strptime(issue['captured_end_working_time'].split('.')[0], '%Y-%m-%d %H:%M:%S')
start_time_obj = datetime.strptime(issue['captured_start_working_time'].split('.')[0], '%Y-%m-%d %H:%M:%S')
diff_time = self.get_diff_time(start_time_obj, end_time_obj)
# diff_time = self.get_diff_time(issue['captured_start_working_time'], issue['captured_end_working_time'])
total_captured_time = total_captured_time + diff_time
return total_captured_time
def get_diff_time(self, start_time, end_time):
import datetime
return round(self.round_number_quarter((end_time - start_time).total_seconds()/3600), 2)
def round_number_quarter(self, number):
import math
return math.ceil(number*4)/4
def load_closed_issues(self):
import datetime, json
cur_month = datetime.datetime.now().strftime("%m")
cur_year = datetime.datetime.now().strftime("%Y")
next_month = int(cur_month) + 1
next_year = cur_year
if next_month == 13:
next_month = 1
next_year = int(next_year) + 1
start_date = "{0}-{1}-01".format(cur_year, cur_month)
end_date = "{0}-{1}-01".format(next_year, next_month)
closed_issues = frappe.db.get_all("Issue", {"kanban_status" : "Completed", "reported_work_start_time" : [ ">=", start_date ], "reported_work_end_time" : [ "<=", end_date ]},['_assign, name, reported_work_start_time, reported_work_end_time, description, captured_start_working_time, captured_end_working_time'])
self_issues = []
for issue in closed_issues:
if issue['_assign'] and frappe.session.user in issue['_assign']:
issue.project = frappe.db.get_value('Task', {'issue' : issue['name']}, 'project')
self_issues.append(issue)
return json.loads(str(frappe.as_json(self_issues)))
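The quarter-hour rounding is the subtle part of the captured-time total; a standalone sketch (not part of the source file) shows that durations are always rounded up to the next 0.25 h:

import math

# Same arithmetic as round_number_quarter / get_diff_time above.
def round_number_quarter(number):
    return math.ceil(number * 4) / 4  # round UP to the next quarter hour

print(round_number_quarter(61 / 60))  # 1.25 -- 1h01m bills as 1.25h
print(round_number_quarter(2.0))      # 2.0  -- exact hours are unchanged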
| 34.793296
| 312
| 0.701028
| 845
| 6,228
| 4.895858
| 0.216568
| 0.021271
| 0.021271
| 0.027073
| 0.27242
| 0.187576
| 0.129321
| 0.082185
| 0.082185
| 0.082185
| 0
| 0.008831
| 0.163616
| 6,228
| 178
| 313
| 34.988764
| 0.785371
| 0.064708
| 0
| 0.181159
| 0
| 0
| 0.193604
| 0.050034
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.137681
| 0
| 0.246377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29a405435385e49ddae23458da015f3ba0c567e1
| 442
|
py
|
Python
|
6 - Python/Collections/7 - Piling Up!.py
|
Terence-Guan/Python.HackerRank
|
165a5f0e739c7678dfac7eae95443018e2167c3d
|
[
"MIT"
] | 88
|
2016-10-23T16:41:14.000Z
|
2019-12-30T23:51:47.000Z
|
HackerRank/6 - Python/Collections/7 - Piling Up!.py
|
natalie-o-perret/coding-challenges
|
9a242e0ec54488f59be82592822b31ff51af1633
|
[
"MIT"
] | 1
|
2018-10-13T14:31:54.000Z
|
2018-10-13T14:31:54.000Z
|
HackerRank/6 - Python/Collections/7 - Piling Up!.py
|
natalie-o-perret/coding-challenges
|
9a242e0ec54488f59be82592822b31ff51af1633
|
[
"MIT"
] | 82
|
2017-02-01T17:02:56.000Z
|
2020-02-01T11:45:58.000Z
|
from collections import deque
T = int(input())
for t in range(T):
n = int(input())
lengths = deque(map(int, input().split()))
top = max(lengths)
while len(lengths) > 0:
left = lengths[0]
right = lengths[-1]
if (right >= left) and (right <= top):
top = right
lengths.pop()
elif (left >= right) and (left <= top):
top = left
lengths.popleft()
else:
break
if len(lengths) == 0:
print("YES")
else:
print("NO")
| 19.217391
| 43
| 0.595023
| 65
| 442
| 4.046154
| 0.492308
| 0.091255
| 0.08365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011696
| 0.226244
| 442
| 22
| 44
| 20.090909
| 0.75731
| 0
| 0
| 0.095238
| 0
| 0
| 0.011312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29a7cf6d7a2997edf3ae4f28829f450e4f401145
| 1,225
|
py
|
Python
|
tests/__main__.py
|
nickswebsite/nickswebsite-serializer
|
2c131a04a4105afae439670f96b5b72bdfe65854
|
[
"Unlicense"
] | 2
|
2017-09-26T16:38:36.000Z
|
2018-08-09T15:09:51.000Z
|
tests/__main__.py
|
nickswebsite/nickswebsite-serializer
|
2c131a04a4105afae439670f96b5b72bdfe65854
|
[
"Unlicense"
] | 8
|
2015-02-20T13:16:11.000Z
|
2016-12-20T14:55:43.000Z
|
tests/__main__.py
|
nickswebsite/nickswebsite-serializer
|
2c131a04a4105afae439670f96b5b72bdfe65854
|
[
"Unlicense"
] | 6
|
2015-05-20T21:26:40.000Z
|
2018-08-08T10:33:04.000Z
|
import doctest
import sys
import unittest
import r2dto
from tests.test_acceptance import AcceptanceTests
from tests.test_base_serializer import BaseSerializerTests
__all__ = ["doctest", "sys", "unittest", "r2dto", "AcceptanceTests", "BaseSerializerTests"]
try:
import pep8
except ImportError:
print("WARNING: pep8 not installed. Style will not be checked and therefore your build may fail when integrated"
"with the main branch.")
pep8 = None
PEP8_SOURCES = [
"r2dto/__init__.py",
"r2dto/base.py",
"r2dto/fields.py",
"r2dto/validators.py",
"tests/__init__.py",
"tests/__main__.py",
"tests/test_acceptance.py",
"tests/test_base_serializer.py",
]
if __name__ == "__main__":
if pep8 is not None:
sg = pep8.StyleGuide(max_line_length=120)
res = sg.check_files(PEP8_SOURCES)
if res.total_errors != 0:
print("pep8 failed")
sys.exit(1)
doctest_ctx = {
"Serializer": r2dto.Serializer,
"fields": r2dto.fields,
"ValidationError": r2dto.ValidationError,
}
results = doctest.testfile("../README.md", globs=doctest_ctx)
if results.failed != 0:
sys.exit(1)
unittest.main()
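Since this file is tests/__main__.py, the whole gate is presumably meant to run as a package entry point (the repository layout is assumed):

# From the repository root:
#     python -m tests
# pep8 runs first and exits non-zero on style errors, then the README's
# doctests run, and finally unittest.main() picks up the imported TestCases.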
| 26.06383
| 117
| 0.660408
| 146
| 1,225
| 5.280822
| 0.472603
| 0.046693
| 0.033722
| 0.059663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02521
| 0.222857
| 1,225
| 46
| 118
| 26.630435
| 0.784664
| 0
| 0
| 0.051282
| 0
| 0
| 0.323265
| 0.043265
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.205128
| 0
| 0.205128
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29a7fecfec58a37e5770387c0619949240d50800
| 10,697
|
py
|
Python
|
manager/jobs/jobs.py
|
jlbrewe/hub
|
c737669e6493ad17536eaa240bed3394b20c6b7d
|
[
"Apache-2.0"
] | 30
|
2016-03-26T12:08:04.000Z
|
2021-12-24T14:48:32.000Z
|
manager/jobs/jobs.py
|
jlbrewe/hub
|
c737669e6493ad17536eaa240bed3394b20c6b7d
|
[
"Apache-2.0"
] | 1,250
|
2016-03-23T04:56:50.000Z
|
2022-03-28T02:27:58.000Z
|
manager/jobs/jobs.py
|
jlbrewe/hub
|
c737669e6493ad17536eaa240bed3394b20c6b7d
|
[
"Apache-2.0"
] | 11
|
2016-07-14T17:04:20.000Z
|
2021-07-01T16:19:09.000Z
|
"""
Module that defines the interface between the `manager` (i.e. Django) and the `broker` (i.e. RabbitMQ).
Defines the functions involved in a job's lifecycle:
- `dispatch_job` - send a job to a queue
- `update_job` - update the status of a job by checking its (intermediate) result
- `check_job` - for a parent job, trigger any child jobs, and / or update its status
- `cancel_job` - remove the job from the queue, or terminate it if already started
"""
import datetime
import logging
import time
from celery import Celery, signature
from celery.result import AsyncResult
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from jobs.models import Job, JobMethod, JobStatus, Queue, Worker
logger = logging.getLogger(__name__)
# Setup the Celery app
app = Celery("manager", broker=settings.BROKER_URL, backend=settings.CACHE_URL)
app.conf.update(
# By default Celery will keep on trying to connect to the broker forever
# This overrides that. Initially try again immediately, then add 0.5 seconds for each
# subsequent try (with a maximum of 3 seconds).
# See https://github.com/celery/celery/issues/4296
broker_transport_options={
"max_retries": 10,
"interval_start": 0,
"interval_step": 0.5,
"interval_max": 3,
},
# Needed to ensure STARTED state is emitted
task_track_started=True,
)
def dispatch_job(job: Job) -> Job:
"""
Send a job to a queue.
Decides which queue a job should be sent to and sends it.
The queue can depend upon both the project and the account (either the
account that the project is linked to, or the default account of the job
creator).
"""
if not JobMethod.is_member(job.method):
raise ValueError("Unknown job method '{}'".format(job.method))
if job.method in settings.JOB_METHODS_STAFF_ONLY and (
not job.creator or not job.creator.is_staff
):
raise PermissionDenied
if JobMethod.is_compound(job.method):
children = job.children.all().order_by("id")
if len(children) == 0:
# If there are no children (e.g. a pull job for a project with no sources)
# then the job is immediately finished
job.runtime = 0
job.is_active = False
job.status = JobStatus.SUCCESS.value
else:
if job.method == JobMethod.parallel.value:
# Dispatch all child jobs simultaneously
for child in children:
dispatch_job(child)
else:
# Dispatch the first child; subsequent children
# will be status WAITING and will get dispatched later
# on update of the parent.
for index, child in enumerate(children):
if index == 0:
dispatch_job(child)
else:
child.is_active = True
child.status = JobStatus.WAITING.value
child.save()
job.is_active = True
job.status = JobStatus.DISPATCHED.value
else:
# Find queues that have active workers on them
# order by descending priority
queues = list(
Queue.objects.filter(
workers__in=Worker.objects.filter(
# Has not finished
finished__isnull=True,
# Has been updated in the last x minutes
updated__gte=timezone.now() - datetime.timedelta(minutes=15),
),
).order_by("priority")
)
# Fallback to the default Stencila queue
# Apart from anything else, having this fallback is useful in development
# because it means that the `overseer` service does not need to be running
# in order to keep track of the number of workers listening on each queue
# (during development `worker`s listen to the default queue)
if len(queues) == 0:
logger.warning("No queues found with active workers")
queue, _ = Queue.get_or_create(
account_name="stencila", queue_name="default"
)
else:
if job.creator is None or job.project is None:
# Jobs created by anonymous users go on the lowest
# priority queue
priority = 1
else:
# The priority of other jobs is determined by the
# account tier of the project
priority = job.project.account.tier.id
queue = queues[min(len(queues), priority) - 1]
# Add the job's project id, key and secrets to its kwargs.
# Doing this here ensures it is done for all jobs
# and avoids putting the secrets in the job's `params` field.
kwargs = dict(**job.params) if job.params else {}
kwargs["project"] = job.project.id if job.project else None
kwargs["key"] = job.key
kwargs["secrets"] = job.secrets
# Send the job to the queue
task = signature(
job.method, kwargs=kwargs, queue=queue.name, task_id=str(job.id), app=app,
)
task.apply_async()
job.queue = queue
job.is_active = True
job.status = JobStatus.DISPATCHED.value
job.save()
return job
def update_job(job: Job, data=None, force: bool = False) -> Job:
"""
Update a job.
This method is triggered by a PATCH request from the
`overseer` service. It updates the status, and other fields of
the job, and if the job has a parent, updates its status too.
See https://stackoverflow.com/a/38267978 for important considerations
in using AsyncResult.
"""
data = data or {}
# Avoid unnecessary update
if not job.is_active and not force:
return job
was_active = job.is_active
if JobMethod.is_compound(job.method):
# Update the status of compound jobs based on children
status = job.status
is_active = False
all_previous_succeeded = True
any_previous_failed = False
for child in job.get_children():
# If the child has a 'higher' status then update the
# status of the compound job
status = JobStatus.highest([status, child.status])
# If the child is still waiting then...
if child.status == JobStatus.WAITING.value:
# If all previous have succeeded, dispatch it
if all_previous_succeeded:
dispatch_job(child)
# If any previous have failed, cancel it
elif any_previous_failed:
cancel_job(child)
if child.status != JobStatus.SUCCESS.value:
all_previous_succeeded = False
if child.status == JobStatus.FAILURE.value:
any_previous_failed = True
# If the child is still active then the compound job is active
if child.is_active:
is_active = True
job.is_active = is_active
job.status = JobStatus.RUNNING.value if is_active else status
else:
status = data.get("status")
assert status
# Do not do anything if the new status is lower rank than the
# existing status. This can exist for example when a job is
# terminated (the SUCCESS state is sent after TERMINATED)
if JobStatus.rank(status) < JobStatus.rank(job.status):
return job
# Update fields sent by `overseer` service, including `status`
for key, value in data.items():
setattr(job, key, value)
def async_result():
return AsyncResult(str(job.id), app=app)
# If job succeeded then get the result if we haven't already
if status == JobStatus.SUCCESS.value and job.result is None:
response = None
attempts = 0
while not response and attempts < 5:
try:
response = async_result().get(timeout=30)
except Exception:
# Catch all errors, but log them. Occasional
# errors encountered in prod include ResponseError and TimeoutError
logger.warning(
"Error getting async result",
exc_info=True,
extra=dict(id=job.id, method=job.method, attempts=attempts),
)
time.sleep(1)
attempts += 1
if response:
job.result = response.get("result")
job.log = response.get("log")
else:
logger.error(
"Unable to get async result",
extra=dict(id=job.id, method=job.method, attempts=attempts),
)
job.status = JobStatus.FAILURE.value
job.error = dict(
type="RuntimeError", message="Unable to get result of job"
)
# If job failed then get the error
# For FAILURE, `info` is the raised Exception
elif status == JobStatus.FAILURE.value:
info = async_result().info
if info:
job.error = dict(type=type(info).__name__, message=str(info))
# If the job has just ended then mark it as inactive
if JobStatus.has_ended(status):
job.is_active = False
# If the job is no longer active clear its secrets and run its callback
if was_active and not job.is_active:
job.secrets = None
job.run_callback()
# Save before updating parent (and then this again)
job.save()
# If the job has a parent then update it too
if job.parent:
update_job(job.parent)
return job
def cancel_job(job: Job) -> Job:
"""
Cancel a job.
This uses Celery's terminate options which will kill the worker child process.
This is not normally recommended but in this case is OK because there is only
one task per process.
See `worker/worker.py` for the reasoning for using `SIGUSR1`.
See https://docs.celeryproject.org/en/stable/userguide/workers.html#revoke-revoking-tasks
"""
if job.is_active:
if JobMethod.is_compound(job.method):
for child in job.children.all():
cancel_job(child)
else:
app.control.revoke(str(job.id), terminate=True, signal="SIGUSR1")
job.status = JobStatus.CANCELLED.value
job.is_active = False
job.secrets = None
job.save()
return job
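A hedged sketch of how the three functions defined here chain together at runtime; the Job row and payloads below are hypothetical, only the function names come from this module:

# job = Job.objects.create(method="pull", params={})  # created elsewhere in the manager
# dispatch_job(job)                        # -> DISPATCHED; Celery task sent to a queue
# update_job(job, {"status": "RUNNING"})   # PATCHed in by the `overseer` service
# update_job(job, {"status": "SUCCESS"})   # fetches the AsyncResult, clears secrets, runs callback
# cancel_job(job)                          # revokes the task; presumably a no-op once inactive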
| 37.013841
| 102
| 0.598299
| 1,351
| 10,697
| 4.669874
| 0.260548
| 0.021557
| 0.019179
| 0.008084
| 0.085592
| 0.059597
| 0.049136
| 0.043113
| 0.043113
| 0.014899
| 0
| 0.00515
| 0.32841
| 10,697
| 288
| 103
| 37.142361
| 0.873051
| 0.346452
| 0
| 0.219512
| 0
| 0
| 0.03952
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 1
| 0.02439
| false
| 0
| 0.054878
| 0.006098
| 0.115854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29aa089f836846e2e53f80e15d88b7aa8aa740d4
| 12,785
|
py
|
Python
|
assignment2/ptb-lm-loss-compute.py
|
adijo/ift6135-rnn
|
88ebcd621cea4042f5ada688f2452ce25d02b761
|
[
"Apache-2.0"
] | null | null | null |
assignment2/ptb-lm-loss-compute.py
|
adijo/ift6135-rnn
|
88ebcd621cea4042f5ada688f2452ce25d02b761
|
[
"Apache-2.0"
] | null | null | null |
assignment2/ptb-lm-loss-compute.py
|
adijo/ift6135-rnn
|
88ebcd621cea4042f5ada688f2452ce25d02b761
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python
# coding: utf-8
import argparse
import time
import collections
import os
import sys
import torch
import torch.nn
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
from models_grad import RNN, GRU
from models_grad import make_model as TRANSFORMER
parser = argparse.ArgumentParser(description='PyTorch Penn Treebank Language Modeling')
# Arguments you may need to set to run different experiments in 4.1 & 4.2.
parser.add_argument('--data', type=str, default='data',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='TRANSFORMER',
help='type of recurrent net (RNN, GRU, TRANSFORMER)')
parser.add_argument('--optimizer', type=str, default='SGD_LR_SCHEDULE',
help='optimization algo to use; SGD, SGD_LR_SCHEDULE, ADAM')
parser.add_argument('--seq_len', type=int, default=35,
help='number of timesteps over which BPTT is performed')
parser.add_argument('--batch_size', type=int, default=20,
help='size of one minibatch')
parser.add_argument('--initial_lr', type=float, default=20.0,
help='initial learning rate')
parser.add_argument('--hidden_size', type=int, default=512,
help='size of hidden layers. IMPORTANT: for the transformer\
this must be a multiple of 16.')
parser.add_argument('--save_best', action='store_true',
help='save the model for the best validation performance')
parser.add_argument('--num_layers', type=int, default=2,
help='number of hidden layers in RNN/GRU, or number of transformer blocks in TRANSFORMER')
# Other hyperparameters you may want to tune in your exploration
parser.add_argument('--emb_size', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--num_epochs', type=int, default=40,
help='number of epochs to stop after')
parser.add_argument('--dp_keep_prob', type=float, default=0.35,
help='dropout *keep* probability. drop_prob = 1-dp_keep_prob \
(dp_keep_prob=1 means no dropout)')
# Arguments that you may want to make use of / implement more code for
parser.add_argument('--debug', action='store_true')
parser.add_argument('--save_dir', type=str, default='',
help='path to save the experimental config, logs, model \
This is automatically generated based on the command line \
arguments you pass and only needs to be set if you want a \
custom dir name')
parser.add_argument('--evaluate', action='store_true',
help="use this flag to run on the test set. Only do this \
ONCE for each model setting, and only after you've \
completed ALL hyperparameter tuning on the validation set.\
Note we are not requiring you to do this.")
# DO NOT CHANGE THIS (setting the random seed makes experiments deterministic,
# which helps for reproducibility)
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
args = parser.parse_args()
argsdict = args.__dict__
argsdict['code_file'] = sys.argv[0]
# Use the model, optimizer, and the flags passed to the script to make the
# name for the experimental dir
print("\n########## Setting Up Experiment ######################")
flags = [flag.lstrip('--') for flag in sys.argv[1:]]
current_script_path = os.path.dirname(os.path.realpath(__file__))
experiment_path = os.path.join(os.path.sep, current_script_path, args.save_dir, '_'.join([argsdict['model'], argsdict['optimizer']] + flags))
# Increment a counter so that previous results with the same args will not
# be overwritten. Comment out the next four lines if you only want to keep
# the most recent results.
i = 0
while os.path.exists(experiment_path + "_" + str(i)):
i += 1
experiment_path = experiment_path + "_" + str(i)
# Creates an experimental directory and dumps all the args to a text file
os.makedirs(experiment_path)
print("\nPutting log in %s" % experiment_path)
argsdict['save_dir'] = experiment_path
with open(os.path.join(experiment_path, 'exp_config.txt'), 'w') as f:
for key in sorted(argsdict):
f.write(key+' '+str(argsdict[key])+'\n')
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
# Use the GPU if you have one
if torch.cuda.is_available():
print("Using the GPU")
device = torch.device("cuda")
else:
print("WARNING: You are about to run on cpu, and this will likely run out \
of memory. \n You can try setting batch_size=1 to reduce memory usage")
device = torch.device("cpu")
###############################################################################
#
# DATA LOADING & PROCESSING
#
###############################################################################
# HELPER FUNCTIONS
def _read_words(filename):
with open(filename, "r") as f:
return f.read().replace("\n", "<eos>").split()
def _build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
id_to_word = dict((v, k) for k, v in word_to_id.items())
return word_to_id, id_to_word
def _file_to_word_ids(filename, word_to_id):
data = _read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
# Processes the raw data from text files
def ptb_raw_data(data_path=None, prefix="ptb"):
train_path = os.path.join(data_path, prefix + ".train.txt")
valid_path = os.path.join(data_path, prefix + ".valid.txt")
test_path = os.path.join(data_path, prefix + ".test.txt")
word_to_id, id_2_word = _build_vocab(train_path)
train_data = _file_to_word_ids(train_path, word_to_id)
valid_data = _file_to_word_ids(valid_path, word_to_id)
test_data = _file_to_word_ids(test_path, word_to_id)
return train_data, valid_data, test_data, word_to_id, id_2_word
# Yields minibatches of data
def ptb_iterator(raw_data, batch_size, num_steps):
raw_data = np.array(raw_data, dtype=np.int32)
data_len = len(raw_data)
batch_len = data_len // batch_size
data = np.zeros([batch_size, batch_len], dtype=np.int32)
for i in range(batch_size):
data[i] = raw_data[batch_len * i:batch_len * (i + 1)]
epoch_size = (batch_len - 1) // num_steps
if epoch_size == 0:
raise ValueError("epoch_size == 0, decrease batch_size or num_steps")
for i in range(epoch_size):
x = data[:, i*num_steps:(i+1)*num_steps]
y = data[:, i*num_steps+1:(i+1)*num_steps+1]
yield (x, y)
class Batch:
"""
Data processing for the transformer. This class adds a mask to the data.
"""
def __init__(self, x, pad=-1):
self.data = x
self.mask = self.make_mask(self.data, pad)
@staticmethod
def make_mask(data, pad):
"""
Create a mask to hide future words.
"""
def subsequent_mask(size):
""" helper function for creating the masks. """
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
mask = (data != pad).unsqueeze(-2)
mask = mask & Variable(
subsequent_mask(data.size(-1)).type_as(mask.data))
return mask
# LOAD DATA
print('Loading data from '+args.data)
raw_data = ptb_raw_data(data_path=args.data)
train_data, valid_data, test_data, word_to_id, id_2_word = raw_data
vocab_size = len(word_to_id)
print(' vocabulary size: {}'.format(vocab_size))
###############################################################################
#
# MODEL SETUP
#
###############################################################################
# NOTE ==============================================
# This is where your model code will be called. You may modify this code
# if required for your implementation, but it should not typically be necessary,
# and you must let the TAs know if you do so.
if args.model == 'RNN':
print("seq_length", args.seq_len)
print("batch_size", args.batch_size)
model = RNN(emb_size=args.emb_size, hidden_size=args.hidden_size,
seq_len=args.seq_len, batch_size=args.batch_size,
vocab_size=vocab_size, num_layers=args.num_layers,
dp_keep_prob=args.dp_keep_prob)
elif args.model == 'GRU':
model = GRU(emb_size=args.emb_size, hidden_size=args.hidden_size,
seq_len=args.seq_len, batch_size=args.batch_size,
vocab_size=vocab_size, num_layers=args.num_layers,
dp_keep_prob=args.dp_keep_prob)
elif args.model == 'TRANSFORMER':
if args.debug: # use a very small model
model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2)
else:
# Note that we're using num_layers and hidden_size to mean slightly
# different things here than in the RNNs.
# Also, the Transformer also has other hyper-parameters
# (such as the number of attention heads) which can change its behavior.
model = TRANSFORMER(vocab_size=vocab_size, n_units=args.hidden_size,
n_blocks=args.num_layers, dropout=1.-args.dp_keep_prob)
# these 3 attributes don't affect the Transformer's computations;
# they are only used in run_epoch
model.batch_size = args.batch_size
model.seq_len = args.seq_len
model.vocab_size = vocab_size
else:
print("Model type not recognized.")
model = model.to(device)
# LOSS FUNCTION
loss_fn = nn.CrossEntropyLoss()
if args.optimizer == 'ADAM':
optimizer = torch.optim.Adam(model.parameters(), lr=args.initial_lr)
# LEARNING RATE SCHEDULE
lr = args.initial_lr
lr_decay_base = 1 / 1.15
m_flat_lr = 14.0 # we will not touch lr for the first m_flat_lr epochs
###############################################################################
#
# DEFINE COMPUTATIONS FOR PROCESSING ONE EPOCH
#
###############################################################################
def repackage_hidden(h):
"""
Wraps hidden states in new Tensors, to detach them from their history.
This prevents Pytorch from trying to backpropagate into previous input
sequences when we use the final hidden states from one mini-batch as the
initial hidden states for the next mini-batch.
Using the final hidden states in this way makes sense when the elements of
the mini-batches are actually successive subsequences in a set of longer sequences.
This is the case with the way we've processed the Penn Treebank dataset.
"""
if isinstance(h, Variable):
return h.detach_()
else:
return tuple(repackage_hidden(v) for v in h)
def run_epoch(model, data):
"""
One evaluation pass over the data, accumulating the per-timestep loss.
"""
model.eval()
state_dict = torch.load('saved_model.pt', map_location="cpu")
model.load_state_dict(state_dict)
total_loss = np.zeros(model.seq_len)
steps = 0
# LOOP THROUGH MINI BATCHES
for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.seq_len)):
steps += 1
if args.model != 'TRANSFORMER':
hidden = model.init_hidden()
hidden = hidden.to(device)
if args.model == 'TRANSFORMER':
batch = Batch(torch.from_numpy(x).long().to(device))
model.zero_grad()
outputs = model.forward(batch.data, batch.mask).transpose(1, 0)
# print ("outputs.shape", outputs.shape)
else:
inputs = torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()
model.zero_grad()
hidden = repackage_hidden(hidden)
outputs, hidden = model(inputs, hidden)
targets = torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()
total_loss += np.array([loss_fn(outputs[i], targets[i]).item() for i in range(len(outputs))])
total_loss /= float(steps)
print(total_loss)
###############################################################################
#
# RUN MAIN LOOP (TRAIN AND VAL)
#
###############################################################################
print("\n########## Running Main Loop ##########################")
# Gradient compute
num_epochs = 1
# MAIN LOOP
for epoch in range(num_epochs):
# RUN MODEL ON VALID DATA
run_epoch(model, valid_data)
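For readers unfamiliar with the masking in Batch.make_mask, a tiny standalone check (assuming only numpy and torch) shows the triangular structure: position i may attend to positions <= i only.

import numpy as np
import torch

attn_shape = (1, 3, 3)
mask = torch.from_numpy(np.triu(np.ones(attn_shape), k=1).astype('uint8')) == 0
print(mask.int())
# tensor([[[1, 0, 0],
#          [1, 1, 0],
#          [1, 1, 1]]], dtype=torch.int32)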
| 38.509036
| 141
| 0.630504
| 1,782
| 12,785
| 4.351291
| 0.248036
| 0.018571
| 0.035079
| 0.011607
| 0.108847
| 0.092984
| 0.084086
| 0.073253
| 0.062935
| 0.062935
| 0
| 0.008516
| 0.21009
| 12,785
| 331
| 142
| 38.625378
| 0.759283
| 0.196246
| 0
| 0.076531
| 0
| 0
| 0.118081
| 0.005061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05102
| false
| 0.005102
| 0.066327
| 0
| 0.163265
| 0.056122
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29aa65c529d8ece9233ccff13d236d4bc2a7ac6d
| 4,892
|
py
|
Python
|
python-3.4.4.amd64/Lib/site-packages/idlexlib/extensions/ClearWindow.py
|
CSnap/photogate
|
208272ef39f4e86f40d431da2ca523e21701f789
|
[
"CC0-1.0"
] | 2
|
2018-12-29T13:47:40.000Z
|
2018-12-29T13:47:49.000Z
|
Build/External/WPy3710/python-3.7.1/Lib/site-packages/idlexlib/extensions/ClearWindow.py
|
Heono/Turtle-IDE
|
aa42dd8f658284601b1a8d3ffb92f157de5022e2
|
[
"MIT"
] | 1
|
2022-03-17T16:46:04.000Z
|
2022-03-17T16:46:04.000Z
|
Lib/site-packages/idlexlib/extensions/ClearWindow.py
|
JWerbrouck/RWTH_M1_Projekt
|
7ae63a2277361fa3273cf0677b297379482b8240
|
[
"bzip2-1.0.6"
] | null | null | null |
# IDLEX EXTENSION
## """
## Copyright(C) 2011-2012 The Board of Trustees of the University of Illinois.
## All rights reserved.
##
## Developed by: Roger D. Serwy
## University of Illinois
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal with the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## + Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimers.
## + Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimers in the
## documentation and/or other materials provided with the distribution.
## + Neither the names of Roger D. Serwy, the University of Illinois, nor
## the names of its contributors may be used to endorse or promote
## products derived from this Software without specific prior written
## permission.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR
## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
##
##
##
##
## Clear Window Extension
##
## About:
##
## It provides "Clear Shell Window" under "Options"
## with the ability to undo.
##
## Part of Issue 6143
##
## """
config_extension_def = """
[ClearWindow]
enable=1
enable_editor=0
enable_shell=1
[ClearWindow_cfgBindings]
clear-window=<Control-Key-l>
"""
jn = lambda x,y: '%i.%i' % (x,y) # join integers to text coordinates
sp = lambda x: tuple(map(int, x.split('.'))) # convert tkinter Text coordinate to a line and column tuple
import sys
import re
from idlelib.UndoDelegator import DeleteCommand
ansi_re = re.compile(r'\x01?\x1b\[(.*?)m\x02?')
def strip_ansi(s):
return ansi_re.sub("", s)
class ClearWindow:
menudefs = [
('options', [
('Clear Shell Window', '<<clear-window>>'),
]),]
def __init__(self, editwin):
self.editwin = editwin
self.text = self.editwin.text
self.text.bind("<<clear-window>>", self.clear_window)
def clear_window_event(self, ev=None):
self.clear_window(ev)
return "break"
def clear_window(self, event):
per = self.editwin.per
text = per.bottom
iomark_orig = text.index('iomark')
line_io, col_io = sp(iomark_orig)
# if cursor is at the prompt, preserve the prompt (multiline)
prompt = strip_ansi(sys.ps1)
backlines = prompt.count('\n')
prompt_start = jn(line_io-backlines, 0)
maybe_prompt = text.get(prompt_start, prompt_start + '+%ic' % len(prompt))
at_prompt = maybe_prompt == prompt
if at_prompt:
endpos = text.index(prompt_start)
else:
endpos = text.index('iomark linestart')
dump = text.dump('1.0', endpos, all=True)
# Add a command to the undo delegator
undo = self.editwin.undo
if undo:
dc = ClearWindowDeleteCommand('1.0', endpos, dump)
undo.addcmd(dc)
text.edit_reset() # clear out Tkinter's undo history
class ClearWindowDeleteCommand(DeleteCommand):
def __init__(self, index1, index2, dump):
DeleteCommand.__init__(self, index1, index2)
self.dump = dump
def do(self, text):
text.delete(self.index1, self.index2)
text.see('insert')
def redo(self, text):
text.delete(self.index1, self.index2)
text.see('insert')
def undo(self, text):
# inspired by "Serializing a text widget" at http://wiki.tcl.tk/9167
dump = self.dump
tag = {} # remember the index where a tag was activated
for key, value, index in dump:
if key == 'text':
text.insert(index, value, '')
elif key == 'tagon':
tag[value] = index
elif key == 'tagoff':
text.tag_add(value, tag[value], index)
del tag[value]
# extend existing tags to the end position
for value in tag:
text.tag_add(value, tag[value], self.index2)
text.see('insert')
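The undo path above replays a Tk text dump. For reference (illustrative values, not from the source), a dump is a flat list of (key, value, index) tuples:

# [('text', '>>> print(1)\n', '1.0'),
#  ('tagon', 'stdout', '2.0'),
#  ('text', '1\n', '2.0'),
#  ('tagoff', 'stdout', '3.0')]
#
# 'text' entries are re-inserted verbatim; each tagon/tagoff pair becomes a
# text.tag_add() call, and tags still open at the end of the dump are
# extended to the end position of the restored range.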
| 33.737931
| 107
| 0.634914
| 632
| 4,892
| 4.848101
| 0.414557
| 0.028721
| 0.019582
| 0.016645
| 0.102807
| 0.0953
| 0.080287
| 0.080287
| 0.080287
| 0.080287
| 0
| 0.010803
| 0.262061
| 4,892
| 144
| 108
| 33.972222
| 0.83795
| 0.46157
| 0
| 0.069444
| 0
| 0
| 0.10545
| 0.029621
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.041667
| 0.013889
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29ad5c4ad4e9d3f8e84eb705d16ecf7d414f2aac
| 4,025
|
py
|
Python
|
tests/test_preprocessing_evaluation_pipelines.py
|
CLARIN-PL/embeddings
|
49fb59b796475ca92bc262ec2bc6def1d89a10e0
|
[
"MIT"
] | 33
|
2021-06-15T12:09:29.000Z
|
2022-03-26T14:34:16.000Z
|
tests/test_preprocessing_evaluation_pipelines.py
|
CLARIN-PL/embeddings
|
49fb59b796475ca92bc262ec2bc6def1d89a10e0
|
[
"MIT"
] | 201
|
2021-03-23T05:50:23.000Z
|
2022-03-31T22:56:04.000Z
|
tests/test_preprocessing_evaluation_pipelines.py
|
CLARIN-PL/embeddings
|
49fb59b796475ca92bc262ec2bc6def1d89a10e0
|
[
"MIT"
] | null | null | null |
from tempfile import TemporaryDirectory
from typing import Any, Dict, Tuple
import datasets
import flair
import numpy as np
import pytest
import torch
from flair.data import Corpus
from numpy import typing as nptyping
from embeddings.data.data_loader import HuggingFaceDataLoader
from embeddings.data.dataset import HuggingFaceDataset
from embeddings.pipeline.evaluation_pipeline import (
FlairSequenceLabelingEvaluationPipeline,
ModelEvaluationPipeline,
)
from embeddings.pipeline.preprocessing_pipeline import PreprocessingPipeline
from embeddings.transformation.flair_transformation.column_corpus_transformation import (
ColumnCorpusTransformation,
)
from embeddings.transformation.flair_transformation.downsample_corpus_transformation import (
DownsampleFlairCorpusTransformation,
)
from embeddings.transformation.flair_transformation.split_sample_corpus_transformation import (
SampleSplitsFlairCorpusTransformation,
)
from embeddings.utils.flair_corpus_persister import FlairConllPersister
@pytest.fixture
def result_path() -> "TemporaryDirectory[str]":
return TemporaryDirectory()
@pytest.fixture
def embedding_name() -> str:
return "allegro/herbert-base-cased"
@pytest.fixture
def ner_dataset_name() -> str:
return "clarin-pl/kpwr-ner"
@pytest.fixture
def hidden_size() -> int:
return 256
@pytest.fixture
def task_train_kwargs() -> Dict[str, int]:
return {"max_epochs": 1, "mini_batch_size": 256}
@pytest.fixture
def sequence_labeling_preprocessing_pipeline(
result_path: "TemporaryDirectory[str]",
embedding_name: str,
ner_dataset_name: str,
) -> Tuple[PreprocessingPipeline[str, datasets.DatasetDict, Corpus], "TemporaryDirectory[str]"]:
dataset = HuggingFaceDataset(ner_dataset_name)
data_loader = HuggingFaceDataLoader()
transformation = (
ColumnCorpusTransformation("tokens", "ner")
.then(SampleSplitsFlairCorpusTransformation(dev_fraction=0.1, test_fraction=0.1, seed=441))
.then(DownsampleFlairCorpusTransformation(percentage=0.005))
.persisting(FlairConllPersister(result_path.name))
)
pipeline = PreprocessingPipeline(
dataset=dataset, data_loader=data_loader, transformation=transformation
)
return pipeline, result_path
@pytest.fixture
def sequence_labeling_evaluation_pipeline(
result_path: "TemporaryDirectory[str]",
embedding_name: str,
ner_dataset_name: str,
hidden_size: int,
task_train_kwargs: Dict[str, int],
) -> Tuple[
ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]],
"TemporaryDirectory[str]",
]:
pipeline = FlairSequenceLabelingEvaluationPipeline(
dataset_path=result_path.name,
embedding_name=embedding_name,
output_path=result_path.name,
hidden_size=hidden_size,
persist_path=None,
task_train_kwargs=task_train_kwargs,
)
return pipeline, result_path
def test_sequence_labeling_preprocessing_pipeline(
result_path: "TemporaryDirectory[str]",
embedding_name: str,
ner_dataset_name: str,
hidden_size: int,
task_train_kwargs: Dict[str, int],
sequence_labeling_preprocessing_pipeline: Tuple[
PreprocessingPipeline[str, datasets.DatasetDict, Corpus], "TemporaryDirectory[str]"
],
sequence_labeling_evaluation_pipeline: Tuple[
ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]],
"TemporaryDirectory[str]",
],
) -> None:
flair.set_seed(441)
flair.device = torch.device("cpu")
preprocessing_pipeline, path = sequence_labeling_preprocessing_pipeline
preprocessing_pipeline.run()
evaluation_pipeline, _ = sequence_labeling_evaluation_pipeline
result = evaluation_pipeline.run()
np.testing.assert_almost_equal(
result["seqeval__mode_None__scheme_None"]["overall_accuracy"], 0.7881773
)
np.testing.assert_almost_equal(result["seqeval__mode_None__scheme_None"]["overall_f1"], 0)
path.cleanup()
| 31.692913
| 99
| 0.766957
| 422
| 4,025
| 7.049763
| 0.241706
| 0.030252
| 0.037647
| 0.041681
| 0.362353
| 0.282017
| 0.273613
| 0.273613
| 0.223193
| 0.223193
| 0
| 0.009014
| 0.14559
| 4,025
| 126
| 100
| 31.944444
| 0.856063
| 0
| 0
| 0.269231
| 0
| 0
| 0.087702
| 0.067578
| 0
| 0
| 0
| 0
| 0.019231
| 1
| 0.076923
| false
| 0
| 0.163462
| 0.048077
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29adb65f2ba3f76e7586b891107a612d5e21f5e3
| 672
|
py
|
Python
|
Exercises/Exercises_01/06_exercise.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | 7
|
2021-12-28T23:38:42.000Z
|
2022-03-29T16:36:16.000Z
|
Exercises/Exercises_01/06_exercise.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | null | null | null |
Exercises/Exercises_01/06_exercise.py
|
Szymon-Budziak/ASD_exercises_solutions
|
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
|
[
"MIT"
] | 4
|
2021-06-29T20:21:52.000Z
|
2022-03-12T10:04:17.000Z
|
# Implement a function that receives a non-decreasingly sorted array A of size n
# and a number x, and checks whether x occurs in A. If it does, the function
# returns the smallest index at which x occurs.
def binary_search(T, i, j, x):
if i > j:
return None
c = (i + j) // 2
if T[c] == x:
value = binary_search(T, i, c - 1, x)
if value is None:
return c
return value
if T[c] > x:
return binary_search(T, i, c - 1, x)
else:
return binary_search(T, c + 1, j, x)
T = [0, 1, 2, 3, 4, 5, 5, 5, 6]
for i in range(len(T)):
print(i, binary_search(T, 0, len(T) - 1, T[i]))
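The recursion keeps searching the left half even after a hit, which is exactly the leftmost-match semantics of the standard library's bisect_left; an equivalent iterative version (a sketch, not part of the exercise solution):

from bisect import bisect_left

def leftmost_index(T, x):
    i = bisect_left(T, x)  # first position where x could be inserted
    return i if i < len(T) and T[i] == x else None

T = [0, 1, 2, 3, 4, 5, 5, 5, 6]
print(leftmost_index(T, 5))  # 5 -- the first of the three 5s
print(leftmost_index(T, 7))  # None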
| 28
| 103
| 0.574405
| 118
| 672
| 3.228814
| 0.457627
| 0.15748
| 0.170604
| 0.110236
| 0.089239
| 0.089239
| 0.089239
| 0
| 0
| 0
| 0
| 0.032051
| 0.303571
| 672
| 23
| 104
| 29.217391
| 0.782051
| 0.324405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.375
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29ae59f7491eb508b08d30811e2ad409b6a63558
| 4,508
|
py
|
Python
|
lib/sentencers/RuleBasedSentencer.py
|
gucorpling/GumDrop
|
06e705adc5b78b048f199a3d6f50d911fed398e2
|
[
"Apache-2.0"
] | null | null | null |
lib/sentencers/RuleBasedSentencer.py
|
gucorpling/GumDrop
|
06e705adc5b78b048f199a3d6f50d911fed398e2
|
[
"Apache-2.0"
] | null | null | null |
lib/sentencers/RuleBasedSentencer.py
|
gucorpling/GumDrop
|
06e705adc5b78b048f199a3d6f50d911fed398e2
|
[
"Apache-2.0"
] | null | null | null |
import re, io, os, sys
from nltk import word_tokenize
from argparse import ArgumentParser
# Allow package level imports in module
script_dir = os.path.dirname(os.path.realpath(__file__))
lib = os.path.abspath(script_dir + os.sep + "..")
sys.path.append(lib)
from conll_reader import space_join, text2conllu
class RuleBasedSplitter:
def __init__(self,lang="eng"):
lang_map = {"deu":"german","eng":"english","spa":"spanish","fra":"french","nld":"dutch","rus":"russian",
"eus":"basque","por":"portuguese", "zho": "chinese", "tur":"turkish"}
self.lang = lang
self.name = "RuleBasedSplitter"
self.long_lang = lang_map[lang] if lang in lang_map else lang
def predict(self,conllu):
if "\t" not in conllu: # this is a token list, not conllu string
conllu = text2conllu(" ".join(conllu))
tokens = space_join(conllu)
tokens = tokens.split()
# Run RuleBased sentence tokenize
with open(script_dir + os.sep + "frequency", 'r', encoding='utf-8') as f:
data = [line.strip().split() for line in f.readlines()]
sent_inital = {d[0]: d[1:] for d in data}
ratios = {}
for word in sent_inital[self.lang]:
if word.count("|") == 2:
w, r, f = word.split("|")
r = float(r)
f = int(f)
ratios[w] = r
processed = []
for token in tokens:
if token in ratios:
token = "//<->//" + token
processed.append(token)
# Reconstruct text with heuristics
text = " ".join(processed)
text = re.sub(r" ([.,,!?;;::!?。)\]}%])", r'\1', text)
text = re.sub(r"([$([{]) ", r'\1', text)
endpunct = "[!?。.!?]"
text = re.sub("(" + endpunct + ")", r'\1//<->//', text)
sents = re.split('(?://<->// ?)+', text)
sents = [s for s in sents if len(s.strip()) > 0]
# Realign to input tokens
tabbed = "\t".join(sents)
tabbed = "\t" + tabbed.replace(" ","")
output = []
for tok in tokens:
ratio = ratios[tok] if tok in ratios else -1.0
if tabbed.startswith("\t"): # This is a split point
output.append((1, ratio)) # Prediction is 1 (='segment'); confidence is the word's sentence-initial ratio, or -1.0 if unknown
tabbed = tabbed[1:]
else:
output.append((0,0.0)) # Prediction is 0 (='non segment') probability is 0.0
if tabbed.startswith(tok):
tabbed = tabbed[len(tok):]
# Verify we are returning as many predictions as we received input tokens
assert len(tokens) == len(output)
return output
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("-f", "--file", default=None, help="file to tokenize")
p.add_argument("-l", "--lang", default="eng", help="language 3 letter code",
choices=["eng", "spa", "fra", "deu", "eus", "nld", "rus", "por", "zho", "tur"])
opts = p.parse_args()
infile = opts.file
lang = opts.lang
# Run test
sentencer = RuleBasedSplitter(lang=lang)
if infile is None:
# Some default test tokens if no file provided
if lang == "zho":
tokens = ['闽', '台', '经贸', '合作', '的', '深入', '发展', '为', '福建', '汽车', '工业', '注入', '了', '生机', '。',
'去年', '初', '以来', ',', '台湾', '最', '具', '实力', '的', '汽车', '公司', '——', '裕隆', '集团', '中华',
'汽车', '公司', '多', '次', '组', '团', '访', '闽', ',', '就', '合作', '发展', '汽车', '工业', '进行',
'了', '积极', '的', '蹉商', ';', "新华社", '福建', '方面', '则', '成立', '了', '由', '省委', '书记', '贾庆林', '、',
'省长', '陈明义', '任', '正', '、', '副', '组长', '的', '省', '汽车', '工业', '领导', '小组', ',', '将',
'发展', '本', '省', '汽车', '工业', '摆上', '重要', '议事', '日程', '。']
elif lang == "nld":
tokens = ['Een', 'ieder', 'heeft', 'recht', 'op', 'onderwijs', ';', 'het', 'onderwijs', 'zal', 'kosteloos',
'zijn,', 'althans', 'wat', 'het', 'lager', 'en', 'basisonderwijs', 'betreft', '.', 'Het', 'lager',
'onderwijs', 'zal', 'verplicht', 'zijn', '.', 'Ambachtsonderwijs', 'en', 'beroepsopleiding',
'zullen', 'algemeen', 'beschikbaar', 'worden', 'gesteld', '.', 'Hoger', 'onderwijs', 'zal',
'openstaan', 'voor', 'een', 'ieder,', 'die', 'daartoe', 'de', 'begaafdheid', 'bezit', '.',
'Het', 'onderwijs', 'zal', 'gericht', 'zijn', 'op', 'de', 'volle', 'ontwikkeling', 'van', 'de',
'menselijke', 'persoonlijkheid', 'en', 'op', 'de', 'versterking', 'van', 'de', 'eerbied', 'voor',
'de', 'rechten', 'van', 'de', 'mens', 'en', 'de', 'fundamentele', 'vrijheden', '.']
else:
tokens = ['Introduction', 'Research', 'has', 'shown', 'examples', '.', 'But', 'we', 'need', 'more', '.']
else:
text = io.open(infile, encoding="utf8").read()
tokens = word_tokenize(text)
sent_starts = sentencer.predict(tokens)
print([(tok, boundary) for tok, boundary in (zip(tokens, sent_starts))])
| 37.882353
| 110
| 0.562999
| 603
| 4,508
| 4.154229
| 0.454395
| 0.006387
| 0.013174
| 0.011178
| 0.012774
| 0.012774
| 0.012774
| 0
| 0
| 0
| 0
| 0.006821
| 0.187001
| 4,508
| 118
| 111
| 38.20339
| 0.676126
| 0.091837
| 0
| 0.034091
| 0
| 0
| 0.232239
| 0.005145
| 0
| 0
| 0
| 0
| 0.011364
| 1
| 0.022727
| false
| 0
| 0.045455
| 0
| 0.090909
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29b0e35636d971fec8136ffc141e0dd2c3c239b5
| 2,878
|
py
|
Python
|
pyogp/lib/client/tests/test_appearance.py
|
grobertson/PyOGP.lib.Client
|
681492d95b9a901a79071b70c77bfdd55cdb02db
|
[
"Apache-2.0"
] | null | null | null |
pyogp/lib/client/tests/test_appearance.py
|
grobertson/PyOGP.lib.Client
|
681492d95b9a901a79071b70c77bfdd55cdb02db
|
[
"Apache-2.0"
] | null | null | null |
pyogp/lib/client/tests/test_appearance.py
|
grobertson/PyOGP.lib.Client
|
681492d95b9a901a79071b70c77bfdd55cdb02db
|
[
"Apache-2.0"
] | null | null | null |
"""
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/trunk/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2009, Linden Research, Inc.
Licensed under the Apache License, Version 2.0.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/LICENSE.txt
$/LicenseInfo$
"""
# standard python libs
import unittest
from binascii import unhexlify
#related
# pyogp
from pyogp.lib.client.appearance import *
from pyogp.lib.client.settings import Settings
from pyogp.lib.client.agent import Agent
from pyogp.lib.client.region import Region
from pyogp.lib.base.datatypes import *
# pyogp messaging
from pyogp.lib.base.message.udpdeserializer import UDPMessageDeserializer
# pyogp tests
import pyogp.lib.base.tests.config
class TestAppearance(unittest.TestCase):
def setUp(self):
self.settings = Settings()
self.agent = Agent()
self.appearance = AppearanceManager(self.agent, settings = self.settings)
self.agent.agent_id = UUID("01234567-89ab-cdef-0123-456789abcdef")
self.agent.session_id = UUID("fedcba98-7654-3210-fedc-ba9876543210")
self.agent.region = DummyRegion()
def tearDown(self):
pass
def test_request_agent_wearables(self):
self.agent.appearance.request_agent_wearables()
packet_list = self.agent.region.dummy_packet_holder
self.assertEquals(len(packet_list), 1)
packet = packet_list.pop()
self.assertEquals(self.agent.agent_id, packet["AgentData"][0]['AgentID'])
self.assertEquals(self.agent.session_id, packet["AgentData"][0]['SessionID'])
def test_request_agent_noAgentIDorSessionID(self):
packet_list = self.agent.region.dummy_packet_holder
self.agent.agent_id = None
self.agent.appearance.request_agent_wearables()
self.assertEquals(len(packet_list), 0)
self.agent.agent_id = UUID()
self.agent.appearance.request_agent_wearables()
self.assertEquals(len(packet_list), 0)
self.agent.agent_id = UUID("01234567-89ab-cdef-0123-456789abcdef")
self.agent.session_id = None
self.agent.appearance.request_agent_wearables()
self.assertEquals(len(packet_list), 0)
self.agent.session_id = UUID()
self.agent.appearance.request_agent_wearables()
self.assertEquals(len(packet_list), 0)
def test_send_AgentIsNowWearing(self):
pass
class DummyRegion(Region):
dummy_packet_holder = []
def enqueue_message(self, packet, reliable = False):
self.dummy_packet_holder.append(packet)
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestAppearance))
return suite
| 32.704545
| 89
| 0.723767
| 364
| 2,878
| 5.598901
| 0.313187
| 0.083906
| 0.035329
| 0.039254
| 0.36261
| 0.344946
| 0.325319
| 0.325319
| 0.325319
| 0.280177
| 0
| 0.03682
| 0.169562
| 2,878
| 87
| 90
| 33.08046
| 0.8159
| 0.177554
| 0
| 0.288462
| 0
| 0
| 0.060323
| 0.045879
| 0
| 0
| 0
| 0
| 0.134615
| 1
| 0.134615
| false
| 0.038462
| 0.192308
| 0
| 0.403846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29b134fd22e0ec5acfe0ea6bb8fddd3eb700cbd7
| 1,018
|
py
|
Python
|
tests/validators/test_symbol_required.py
|
Ennkua/wtforms
|
c08ec7840c5a78ae8784139f7ee70f9627cf1ab8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/validators/test_symbol_required.py
|
Ennkua/wtforms
|
c08ec7840c5a78ae8784139f7ee70f9627cf1ab8
|
[
"BSD-3-Clause"
] | null | null | null |
tests/validators/test_symbol_required.py
|
Ennkua/wtforms
|
c08ec7840c5a78ae8784139f7ee70f9627cf1ab8
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from wtforms.validators import symbol_required
from wtforms.validators import ValidationError
@pytest.mark.parametrize("min_v", [2, 3, 4, 5, 6])
def test_correct_symbol_required(min_v, dummy_form, dummy_field):
"""
It should pass for the string with correct count of required symbol.
"""
dummy_field.data = "-A%s^D*f(G87KJ@hg8J.&"
validator = symbol_required(min_v)
validator(dummy_form, dummy_field)
@pytest.mark.parametrize(
("validator", "message"),
(
(
symbol_required(2, "at least 2 symbol"),
"at least 2 symbol letter",
),
(symbol_required(2), "at least 2 symbol"),
),
)
def test_symbol_required_messages(dummy_form, dummy_field, validator, message):
"""
It should raise ValidationError for a string with too few of the required symbols.
"""
dummy_field.data = "foo123Bar"
with pytest.raises(ValidationError) as e:
validator(dummy_form, dummy_field)
assert str(e.value) == message
| 28.277778
| 79
| 0.674853
| 130
| 1,018
| 5.1
| 0.415385
| 0.147813
| 0.084465
| 0.11463
| 0.171946
| 0.087481
| 0.087481
| 0
| 0
| 0
| 0
| 0.02005
| 0.21611
| 1,018
| 35
| 80
| 29.085714
| 0.810777
| 0.139489
| 0
| 0.173913
| 0
| 0
| 0.128994
| 0.024852
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29b2e2e2b5e0b11ab0a21e7a356d8c2fabd4abe1
| 1,028
|
py
|
Python
|
src/Nodes/WhileOp.py
|
gabrielzezze/z-lang
|
89be471fd5618a9d1c9e3eb955608cdc888511c2
|
[
"MIT"
] | null | null | null |
src/Nodes/WhileOp.py
|
gabrielzezze/z-lang
|
89be471fd5618a9d1c9e3eb955608cdc888511c2
|
[
"MIT"
] | null | null | null |
src/Nodes/WhileOp.py
|
gabrielzezze/z-lang
|
89be471fd5618a9d1c9e3eb955608cdc888511c2
|
[
"MIT"
] | null | null | null |
from src.Node import Node
from src.Nodes import Block
from src.SymbolTable import SymbolTable
class WhileOp(Node):
def __init__(self, child: Block, condition: Node):
self.condition = condition
self.child = child
super().__init__(
value=condition,
children=[child, condition],
node_type='WhileOp'
)
def Evaluate(self, symbol_table: SymbolTable):
while_entry = self.builder.append_basic_block(name=f'while_{self.id}')
while_exit = self.builder.append_basic_block(name=f'exit_while_{self.id}')
condition_i = self.condition.Evaluate(symbol_table=symbol_table)
self.builder.cbranch(condition_i, while_entry, while_exit)
self.builder.position_at_start(while_entry)
self.child.Evaluate(symbol_table=symbol_table)
condition_i = self.condition.Evaluate(symbol_table)
self.builder.cbranch(condition_i, while_entry, while_exit)
self.builder.position_at_start(while_exit)
return
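Evaluate leans on llvmlite's builder state: it tests the condition once before entering, re-tests at the end of the body, and leaves the builder positioned in the exit block. A self-contained sketch of the same cbranch pattern (assuming llvmlite; the count_down function is hypothetical):

from llvmlite import ir

i32 = ir.IntType(32)
module = ir.Module(name="while_sketch")
fn = ir.Function(module, ir.FunctionType(i32, [i32]), name="count_down")
builder = ir.IRBuilder(fn.append_basic_block("entry"))

n_ptr = builder.alloca(i32, name="n")  # stack slot so the loop body can mutate n
builder.store(fn.args[0], n_ptr)
body = fn.append_basic_block("while_0")
done = fn.append_basic_block("exit_while_0")

cond = builder.icmp_signed(">", builder.load(n_ptr), i32(0))
builder.cbranch(cond, body, done)      # first test, before entering the loop

builder.position_at_start(body)
builder.store(builder.sub(builder.load(n_ptr), i32(1)), n_ptr)  # the "child" block
cond = builder.icmp_signed(">", builder.load(n_ptr), i32(0))
builder.cbranch(cond, body, done)      # re-test at the end of the body

builder.position_at_start(done)
builder.ret(builder.load(n_ptr))
print(module)                          # emits valid LLVM IR text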
| 35.448276
| 82
| 0.694553
| 128
| 1,028
| 5.28125
| 0.28125
| 0.097633
| 0.057692
| 0.088757
| 0.510355
| 0.465976
| 0.465976
| 0.263314
| 0.263314
| 0.263314
| 0
| 0
| 0.213035
| 1,028
| 28
| 83
| 36.714286
| 0.8356
| 0
| 0
| 0.086957
| 0
| 0
| 0.040856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29b61776c27c79d1d7092a2b9bd2ee11a295186e
| 251
|
py
|
Python
|
Check_if_subarray_with_0_sum_is_exists_or_not.py
|
KiranPesarlanka9/data-structures-and-algorithms-Problems
|
557e3ca7f04b37fa5a709295f455b6338815486e
|
[
"MIT"
] | 1
|
2019-11-28T12:21:51.000Z
|
2019-11-28T12:21:51.000Z
|
Check_if_subarray_with_0_sum_is_exists_or_not.py
|
KiranPesarlanka9/data-structures-and-algorithms-Problems
|
557e3ca7f04b37fa5a709295f455b6338815486e
|
[
"MIT"
] | null | null | null |
Check_if_subarray_with_0_sum_is_exists_or_not.py
|
KiranPesarlanka9/data-structures-and-algorithms-Problems
|
557e3ca7f04b37fa5a709295f455b6338815486e
|
[
"MIT"
] | 1
|
2019-12-06T09:18:41.000Z
|
2019-12-06T09:18:41.000Z
|
def check(arr):
# Prefix-sum trick: a zero-sum subarray exists iff some prefix sum repeats.
# Seeding the set with 0 catches subarrays that start at index 0.
sum_log = {0}
_sum = 0
for value in arr:
_sum += value
if _sum in sum_log:
return True
sum_log.add(_sum)
return False
arr = [1, 0, -2, 5, -4, 1, 9, -2]
print(check(arr))
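Tracing the prefix sums of the sample array shows why seeding the set with 0 matters:

# arr          :  1   0  -2   5  -4   1   9  -2
# prefix sums  :  1   1  -1   4   0   1  10   8
#                     ^ repeat of 1 -> arr[1:2] == [0] sums to 0
#                                  ^ 0 -> arr[0:5] == [1, 0, -2, 5, -4] sums to 0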
| 15.6875
| 34
| 0.49004
| 41
| 251
| 2.829268
| 0.560976
| 0.155172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.063291
| 0.370518
| 251
| 15
| 35
| 16.733333
| 0.670886
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29b90065070b5025868557255475b9c600fb78b4
| 1,588
|
py
|
Python
|
scripts/join_completed.py
|
shannonfenn/data-tools
|
c730c2f88b8443f3c84a41467a40b2cc59dd8e87
|
[
"MIT"
] | null | null | null |
scripts/join_completed.py
|
shannonfenn/data-tools
|
c730c2f88b8443f3c84a41467a40b2cc59dd8e87
|
[
"MIT"
] | null | null | null |
scripts/join_completed.py
|
shannonfenn/data-tools
|
c730c2f88b8443f3c84a41467a40b2cc59dd8e87
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import argparse
import pandas as pd
import numpy as np
def check_dataframe(filename, data_frame, key_columns):
if any(col not in data_frame for col in key_columns):
raise ValueError('Key columns not in {}.'.format(filename))
nonzero = np.count_nonzero(data_frame['trg_error'])
if nonzero:
print('Warning, some failed runs in {}.'.format(filename))
def join_completed(filenames, key_columns=None):
completed = None
# build single dataframe of successful runs
for filename in filenames:
df = pd.read_json(filename)
check_dataframe(filename, df, key_columns)
if completed is None:
completed = df[df['trg_error'] == 0]
else:
completed = pd.concat([completed, df[df['trg_error'] == 0]],
ignore_index=True)
# check if rows are unique on given columns
if key_columns:
completed = completed.sort_values(key_columns)
duplicated = completed.duplicated(key_columns).sum()
if duplicated > 0:
raise ValueError('Duplicate rows: {}'.format(duplicated))
return completed
def main():
parser = argparse.ArgumentParser(
description='Join results with zero training error.')
parser.add_argument('-i', type=str, nargs='+', required=True,
help='list of input files')
parser.add_argument('-o', type=str, required=True,
help='file to store result')
args = parser.parse_args()
join_completed(args.i).to_json(args.o)
if __name__ == '__main__':
main()
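A minimal pandas check (with hypothetical shard data) of what the duplicate guard counts after the concat:

import pandas as pd

a = pd.DataFrame({"seed": [1, 2], "trial": [0, 0], "trg_error": [0, 0]})
b = pd.DataFrame({"seed": [2, 3], "trial": [0, 0], "trg_error": [0, 1]})

merged = pd.concat([a, b[b["trg_error"] == 0]], ignore_index=True)
print(merged.duplicated(["seed", "trial"]).sum())  # 1 -> join_completed would raise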
| 31.137255
| 72
| 0.632872
| 199
| 1,588
| 4.889447
| 0.462312
| 0.08222
| 0.024666
| 0.032888
| 0.045221
| 0.045221
| 0
| 0
| 0
| 0
| 0
| 0.002549
| 0.258816
| 1,588
| 50
| 73
| 31.76
| 0.824129
| 0.066121
| 0
| 0
| 0
| 0
| 0.127703
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.194444
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29b95a7e7b6ab6d04a7196faa187fadcabb8c0e4
| 9,859
|
py
|
Python
|
pet/preprocessor.py
|
YerongLi/pet
|
8323080e9033c38c234431aecacad154ed477472
|
[
"Apache-2.0"
] | null | null | null |
pet/preprocessor.py
|
YerongLi/pet
|
8323080e9033c38c234431aecacad154ed477472
|
[
"Apache-2.0"
] | null | null | null |
pet/preprocessor.py
|
YerongLi/pet
|
8323080e9033c38c234431aecacad154ed477472
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List, Optional
import numpy as np
from pet.utils import InputFeatures, InputExample, PLMInputFeatures, GenerativeInputFeatures, GenerativeInputExample
from pet.pvp import PVPS, PVP
class Preprocessor(ABC):
"""
A preprocessor that transforms an :class:`InputExample` into a :class:`InputFeatures` object so that it can be
processed by the model being used.
"""
def __init__(self, wrapper, task_name: str, pattern_ids: Optional[List[int]] = None, verbalizer_file: str = None):
"""
Create a new preprocessor.
:param wrapper: the wrapper for the language model to use
:param task_name: the name of the task
:param pattern_ids: the ids of the PVPs to be used
:param verbalizer_file: path to a file containing a verbalizer that overrides the default verbalizer
"""
self.wrapper = wrapper
if pattern_ids is not None:
self.pvps = {pid: PVPS[task_name](self.wrapper, pid, verbalizer_file) for pid in pattern_ids}
self.label_map = {label: i for i, label in enumerate(self.wrapper.config.label_list)}
@abstractmethod
def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False,
**kwargs) -> InputFeatures:
"""Convert the given example into a set of input features"""
pass
class MLMPreprocessor(Preprocessor):
"""Preprocessor for models pretrained using a masked language modeling objective (e.g., BERT)."""
def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False,
**kwargs) -> InputFeatures:
pvp = self.pvps[pattern_id] # type: PVP
if priming:
input_ids, token_type_ids = pvp.encode(example, priming=True)
priming_data = example.meta['priming_data'] # type: List[InputExample]
priming_input_ids = []
for priming_example in priming_data:
pe_input_ids, _ = pvp.encode(priming_example, priming=True, labeled=True)
priming_input_ids += pe_input_ids
input_ids = priming_input_ids + input_ids
token_type_ids = self.wrapper.tokenizer.create_token_type_ids_from_sequences(input_ids)
input_ids = self.wrapper.tokenizer.build_inputs_with_special_tokens(input_ids)
else:
input_ids, token_type_ids = pvp.encode(example)
if self.wrapper.config.model_type == 'pegasus':
# bugfix: Transformers' create_token_type_ids_from_sequences seems to ignore the final </s> token in Pegasus
token_type_ids += [0]
attention_mask = [1] * len(input_ids)
padding_length = self.wrapper.config.max_seq_length - len(input_ids)
if padding_length < 0:
raise ValueError(f"Maximum sequence length is too small, got {len(input_ids)} input ids")
input_ids = input_ids + ([self.wrapper.tokenizer.pad_token_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
assert len(input_ids) == self.wrapper.config.max_seq_length
assert len(attention_mask) == self.wrapper.config.max_seq_length
assert len(token_type_ids) == self.wrapper.config.max_seq_length
label = self.label_map[example.label] if example.label is not None else -100
logits = example.logits if example.logits else [-1]
if labelled:
mlm_labels = pvp.get_mask_positions(input_ids)
if self.wrapper.config.model_type == 'gpt2':
# shift labels to the left by one
mlm_labels.append(mlm_labels.pop(0))
else:
mlm_labels = [-1] * self.wrapper.config.max_seq_length
return InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
label=label, mlm_labels=mlm_labels, logits=logits, idx=example.idx, pattern_id=pattern_id)
class PLMPreprocessor(MLMPreprocessor):
"""Preprocessor for models pretrained using a permuted language modeling objective (e.g., XLNet)."""
def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False,
**kwargs) -> PLMInputFeatures:
input_features = super().get_input_features(example, pattern_id, labelled=labelled, priming=priming, **kwargs)
input_ids = input_features.input_ids
pvp = self.pvps[pattern_id] # type: PVP
num_masks = 1 # currently, PLMPreprocessor supports only replacements that require exactly one mask
        perm_mask = np.zeros((len(input_ids), len(input_ids)), dtype=float)  # np.float was removed from NumPy; use the builtin
label_idx = input_ids.index(pvp.mask_id)
perm_mask[:, label_idx] = 1 # the masked token is not seen by any other token
        target_mapping = np.zeros((num_masks, len(input_ids)), dtype=float)
target_mapping[0, label_idx] = 1.0
return PLMInputFeatures(perm_mask=perm_mask, target_mapping=target_mapping, **input_features.__dict__)
class GenerativePreprocessor(MLMPreprocessor):
"""Preprocessor for a generative language model and generative task."""
def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False,
**kwargs) -> GenerativeInputFeatures:
input_features = super().get_input_features(example, pattern_id, labelled=False, priming=False, **kwargs)
assert isinstance(example, GenerativeInputExample)
if example.output_text is not None:
generative_prefix = self.pvps[pattern_id].generative_prefix_ids()
max_length = self.wrapper.config.output_max_seq_length - len(generative_prefix)
output_ids = self.wrapper.tokenizer.encode(example.output_text, add_special_tokens=True,
max_length=max_length, padding='max_length',
truncation='only_first')
pad_token = self.wrapper.tokenizer.pad_token_id
output_loss_mask = [0] * len(generative_prefix) + [0 if tok_id == pad_token else 1 for tok_id in output_ids]
output_ids = generative_prefix + output_ids
else:
output_ids = [self.wrapper.tokenizer.pad_token_id]
output_loss_mask = [0]
if 'token_ids' in example.meta:
token_ids = example.meta['token_ids']
token_probabilities = example.meta['token_probabilities']
len_output_ids = sum(1 for x in output_ids if x != self.wrapper.tokenizer.pad_token_id)
assert len(token_ids) == len_output_ids, \
f"If given, there should be as many token ids as there are output ids. Got {len(token_ids)} token " \
f"ids and {len_output_ids} output ids."
padding_entry = [0] * len(token_ids[0])
padding = [padding_entry] * (self.wrapper.config.output_max_seq_length - len(token_ids))
input_features.meta['token_ids'] = token_ids + padding
input_features.meta['token_probabilities'] = token_probabilities + padding
return GenerativeInputFeatures(output_ids=output_ids, output_loss_mask=output_loss_mask,
**input_features.__dict__)
class SequenceClassifierPreprocessor(Preprocessor):
"""Preprocessor for a regular sequence classification model."""
def get_input_features(self, example: InputExample, **kwargs) -> InputFeatures:
inputs = self.wrapper.task_helper.get_sequence_classifier_inputs(example) if self.wrapper.task_helper else None
if inputs is None:
inputs = self.wrapper.tokenizer.encode_plus(
example.text_a if example.text_a else None,
example.text_b if example.text_b else None,
add_special_tokens=True,
max_length=self.wrapper.config.max_seq_length,
)
input_ids, token_type_ids = inputs["input_ids"], inputs.get("token_type_ids")
attention_mask = [1] * len(input_ids)
padding_length = self.wrapper.config.max_seq_length - len(input_ids)
input_ids = input_ids + ([self.wrapper.tokenizer.pad_token_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
if not token_type_ids:
token_type_ids = [0] * self.wrapper.config.max_seq_length
else:
token_type_ids = token_type_ids + ([0] * padding_length)
mlm_labels = [-1] * len(input_ids)
assert len(input_ids) == self.wrapper.config.max_seq_length
assert len(attention_mask) == self.wrapper.config.max_seq_length
assert len(token_type_ids) == self.wrapper.config.max_seq_length
label = self.label_map[example.label] if example.label is not None else -100
logits = example.logits if example.logits else [-1]
return InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
label=label, mlm_labels=mlm_labels, logits=logits, idx=example.idx)
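# Usage sketch (illustrative; `wrapper` and `example` are assumed to come from
# pet's model wrapper and data utilities, they are not defined in this module):
#
#     preprocessor = MLMPreprocessor(wrapper, task_name='rte', pattern_ids=[0])
#     features = preprocessor.get_input_features(example, pattern_id=0, labelled=True)
#     assert len(features.input_ids) == wrapper.config.max_seq_length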
| 48.566502
| 120
| 0.674713
| 1,266
| 9,859
| 5.008689
| 0.191153
| 0.047942
| 0.037849
| 0.034695
| 0.420912
| 0.397414
| 0.337802
| 0.305315
| 0.281974
| 0.269989
| 0
| 0.004935
| 0.239578
| 9,859
| 202
| 121
| 48.806931
| 0.84087
| 0.165027
| 0
| 0.276423
| 0
| 0.00813
| 0.040789
| 0
| 0
| 0
| 0
| 0
| 0.065041
| 1
| 0.04878
| false
| 0.00813
| 0.04065
| 0
| 0.162602
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29bcfd631b01019c349e3bbedaeeb2cbda9283d5
| 2,832
|
py
|
Python
|
src/cogs/xpevent.py
|
nsde/lhxp
|
ef6d1004c704c1156b9b01172e4748634b31b541
|
[
"MIT"
] | 2
|
2021-12-18T11:44:31.000Z
|
2022-01-07T23:27:00.000Z
|
src/cogs/xpevent.py
|
nsde/lhxp
|
ef6d1004c704c1156b9b01172e4748634b31b541
|
[
"MIT"
] | null | null | null |
src/cogs/xpevent.py
|
nsde/lhxp
|
ef6d1004c704c1156b9b01172e4748634b31b541
|
[
"MIT"
] | null | null | null |
try:
from .helpers import config, management, xp, spam
except ImportError:
import helpers.config, helpers.management, helpers.xp, helpers.spam
import time
import discord
from discord.ext import commands
from discord.commands import slash_command
class XPEvent(commands.Cog):
def __init__(self, client):
self.client = client
async def antispam(self, message):
message_is_spam = False
message_content = message.content
if spam.is_spam(message_content):
message_is_spam = True
await message.delete()
last_message = await message.channel.history().get(author__name=message.author.name)
if spam.is_spam(message_content + last_message.content):
message_is_spam = True
messages = []
async for msg in message.channel.history(limit=2):
messages.append(msg)
if message_is_spam or messages[0].content == messages[1].content:
try:
await message.delete()
await last_message.delete()
            except Exception:  # avoid a bare except that would also swallow KeyboardInterrupt
                pass  # the message may already be deleted or last_message may be unset
return message_is_spam
    async def give_xp(self, message):
        words = message.content.split()  # split on any whitespace so repeated spaces cannot inflate the count
        xp_gain = len(words) * config.load()['word-reward-xp']  # reward proportional to word count
        if xp_gain < 2:  # always grant at least the base reward
            xp_gain = config.load()['word-reward-xp']
        xp.add(message.author, xp_gain)
async def daily_check(self, message):
is_empty = message.author.id not in list(config.load('dailystep').keys())
if is_empty:
config.set('dailystep', message.author.id, 0)
if config.load('dailystep')[message.author.id] > 31:
config.set('dailystep', message.author.id, 0)
        # read the previous message time before overwriting it; otherwise the
        # comparison below would always see the current message's own timestamp
        penultimate_message_time = config.load('lastmessage').get(message.author.id) or -1
        config.set('lastmessage', message.author.id, time.time())
today_begin = (time.time()//86400)*86400
if today_begin > penultimate_message_time:
config.change('dailystep', message.author.id, 1)
daily_reward = config.load()['daily-rewards'][int(time.strftime('%d'))-1]
xp.add(message.author, daily_reward*config.load()['daily-reward-multiplier'])
@commands.Cog.listener()
async def on_message(self, message):
if message.author.bot:
return
was_spam = await self.antispam(message)
if was_spam:
return
await self.give_xp(message)
await self.daily_check(message)
await self.client.process_commands(message)
def setup(client):
client.add_cog(XPEvent(client))
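# Worked example for daily_check (illustrative numbers, not from the source):
# (time.time() // 86400) * 86400 floors a UNIX timestamp to 00:00 UTC, e.g.
# 1700003000 // 86400 * 86400 == 1699920000. The daily reward fires only when
# the previous message predates that midnight boundary.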
| 32.551724
| 92
| 0.628884
| 347
| 2,832
| 4.994236
| 0.273775
| 0.097519
| 0.0779
| 0.055395
| 0.195038
| 0.139642
| 0.041546
| 0.041546
| 0
| 0
| 0
| 0.009995
| 0.258121
| 2,832
| 87
| 93
| 32.551724
| 0.81485
| 0.019774
| 0
| 0.15625
| 0
| 0
| 0.056978
| 0.008294
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0.015625
| 0.109375
| 0
| 0.203125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29be043b68e9b14821af31619772ea7a817c2a7b
| 2,199
|
py
|
Python
|
utilities/utils.py
|
jluech/PGAcloud_Manager
|
9008fac26f9d762b2ab527034e46d467b5b0c26f
|
[
"MIT"
] | null | null | null |
utilities/utils.py
|
jluech/PGAcloud_Manager
|
9008fac26f9d762b2ab527034e46d467b5b0c26f
|
[
"MIT"
] | null | null | null |
utilities/utils.py
|
jluech/PGAcloud_Manager
|
9008fac26f9d762b2ab527034e46d467b5b0c26f
|
[
"MIT"
] | null | null | null |
import logging
import os
import subprocess
import sys
import yaml
files_dir = ""
# --- General util commands ---
def execute_command(
command,
working_directory,
environment_variables,
executor,
logger=logging,
livestream=False
):
logger_prefix = ""
if executor:
logger_prefix = executor + ": "
process = subprocess.Popen(
command,
cwd=working_directory,
env=environment_variables,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
)
logger.debug(logger_prefix + "command: " + command)
stdout = ""
for line in iter(process.stdout.readline, b''):
line = str(line, "utf-8")
stdout += line
if livestream:
sys.stdout.write(line)
else:
logger.debug(logger_prefix + "command output: " + line.rstrip())
return_code = process.wait()
stdout = stdout.rstrip()
return stdout, return_code
def merge_dict(dict1, dict2):
res = {**dict1, **dict2}
return res
def parse_yaml(yaml_file_path):
with open(yaml_file_path, mode="r", encoding="utf-8") as yaml_file:
content = yaml.safe_load(yaml_file) or {}
return content
# --- File and path handling commands ---
def get_uploaded_files_path(pga_id):
return os.path.join(files_dir, str(pga_id))
def get_uploaded_files_dict(pga_id):
files_dict = {}
directory = get_uploaded_files_path(pga_id)
files = os.listdir(directory)
for filename in files:
name = filename.split(".")[0]
yaml_dict = parse_yaml(os.path.join(directory, filename))
yaml_dict["_filename"] = filename
files_dict[name] = yaml_dict
return files_dict
def get_filename_from_path(file_path):
if file_path.__contains__("\\"):
filename = file_path.split("\\")[-1].split(".")[0]
else:
filename = file_path.split("/")[-1].split(".")[0]
return filename
def create_pga_subdir(pga_id):
os.makedirs(os.path.join(files_dir, str(pga_id)))
def __set_files_dir(path):
global files_dir
files_dir = os.path.join(path, 'files')
os.makedirs(files_dir, exist_ok=True)
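# Minimal usage sketch (command and paths are illustrative, not part of the
# original module):
if __name__ == "__main__":
    out, rc = execute_command(
        command="echo hello",
        working_directory=".",
        environment_variables=None,
        executor="demo",
    )
    print(rc, out)  # expected: 0 hello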
| 23.393617
| 76
| 0.637108
| 275
| 2,199
| 4.850909
| 0.327273
| 0.041979
| 0.029985
| 0.034483
| 0.167916
| 0.122939
| 0.085457
| 0.043478
| 0.043478
| 0
| 0
| 0.006595
| 0.241473
| 2,199
| 93
| 77
| 23.645161
| 0.793165
| 0.031378
| 0
| 0.058824
| 0
| 0
| 0.028209
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.073529
| 0.014706
| 0.279412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29bfe3374dd25a06358a1da66a585cb725eee7be
| 578
|
py
|
Python
|
pyscf/nao/m_color.py
|
KMCzajkowski/pyscf
|
e8af41d910cc0d3963655120c0b689590ad978e7
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/nao/m_color.py
|
KMCzajkowski/pyscf
|
e8af41d910cc0d3963655120c0b689590ad978e7
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/nao/m_color.py
|
KMCzajkowski/pyscf
|
e8af41d910cc0d3963655120c0b689590ad978e7
|
[
"BSD-2-Clause"
] | null | null | null |
class color:
import os
T = os.getenv('TERM')
    if T in ('cygwin', 'mingw'):
HEADER = '\033[01;35m'
BLUE = '\033[01;34m'
GREEN = '\033[01;32m'
WARNING = '\033[01;33m'
FAIL = '\033[01;31m'
RED = FAIL
ENDC = '\033[0m'
else :
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
RED = FAIL
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.RED = ''
self.ENDC = ''
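# Usage sketch: wrap text in an attribute and reset with ENDC, e.g.
#
#     print(color.WARNING + 'low memory' + color.ENDC)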
| 19.931034
| 36
| 0.49308
| 77
| 578
| 3.701299
| 0.480519
| 0.087719
| 0.077193
| 0.098246
| 0.112281
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169154
| 0.304498
| 578
| 28
| 37
| 20.642857
| 0.539801
| 0
| 0
| 0.148148
| 0
| 0
| 0.214533
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29c079a0baef167378f06f75800a84013625dfce
| 7,958
|
py
|
Python
|
Scripts Daily/재무정보수집.py
|
oms1226/msbot
|
4c141502ef6899f9e4bb3fe8e03c7eb866487d5e
|
[
"MIT"
] | 1
|
2020-05-01T07:50:49.000Z
|
2020-05-01T07:50:49.000Z
|
Scripts Daily/재무정보수집.py
|
oms1226/msbot
|
4c141502ef6899f9e4bb3fe8e03c7eb866487d5e
|
[
"MIT"
] | 1
|
2021-06-01T22:36:14.000Z
|
2021-06-01T22:36:14.000Z
|
Scripts Daily/재무정보수집.py
|
oms1226/msbot
|
4c141502ef6899f9e4bb3fe8e03c7eb866487d5e
|
[
"MIT"
] | 8
|
2019-10-26T03:30:53.000Z
|
2022-03-26T08:06:25.000Z
|
# -*- coding: utf-8 -*-
import re
import calendar
import datetime, time
from datetime import timedelta
import urllib.request
import requests, json
from http.cookiejar import CookieJar
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.io.sql as pdsql
from matplotlib import dates
import sqlite3
DATABASE = '..\\DATA\\mymoneybot.sqlite'
def sqliteconn():
conn = sqlite3.connect(DATABASE)
return conn
def get_webpage(url, encoding=""):
cj = CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')]
respstr = ""
try:
op = opener.open(url)
sourcecode = op.read()
except Exception as e:
time.sleep(1)
op = opener.open(url)
sourcecode = op.read()
encodingmethod = op.info().get_param('charset')
    # an explicit encoding argument always overrides the detected charset
    if encoding != "":
        encodingmethod = encoding
try:
respstr = sourcecode.decode(encoding=encodingmethod, errors='ignore')
except Exception as e:
respstr = sourcecode.decode(encoding="cp949", errors='ignore')
opener.close()
return respstr
def get_company_fundamental_fnguide(code):
def g(x):
if type(x) == str:
return datetime.datetime.strptime(x, '%Y-%m-%d')
else:
return x
# url = "http://comp.fnguide.com/SVO2/ASP/SVD_main.asp?pGB=1&gicode=A%s&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=&strResearchYN=" % (code)
url = "http://asp01.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A%s&NewMenuID=11&cID=50&MenuYn=N" % (code)
respstr = get_webpage(url, encoding="utf8")
# soup = BeautifulSoup(respstr)
soup = BeautifulSoup(respstr, "lxml")
    # <!--IFRS separate / annual -->
target_table = soup.find("div", class_="um_table", id="highlight_B_Y")
# print(target_table)
result = []
try:
target_table.find_all('tr')
except Exception as e:
return (DataFrame(), DataFrame())
for tr in target_table.find_all('tr'):
# print("[%s]" % tr)
for th in tr.find_all('th'):
value = "%s" % th.text.replace('(P) : Provisional','').replace('(E) : Estimate','').replace('잠정실적','').replace('컨센서스, 추정치','').replace('(E)','').replace('(P)','').replace('/','-').strip()
if ('-02' in value):
value = value + '-28'
elif ('-04' in value) or ('-06' in value) or ('-09' in value) or ('-11' in value):
value = value + '-30'
elif ('-01' in value) or ('-03' in value) or ('-05' in value) or ('-07' in value) or ('-08' in value) or ('-10' in value) or ('-12' in value):
value = value + '-31'
result.append(value)
# print("[%s]" % th.text.replace('(E) : Estimate','').replace('컨센서스, 추정치','').strip())
for td in tr.find_all('td'):
value = td.text.strip().replace(',','')
try:
value = float(value)
except Exception as e:
value = 0
result.append(value)
# print(td.text.strip())
# print(result[1:])
result = result[1:]
dfdata = []
for x in range(0, len(result), 9):
dfdata.append(result[x:x+9])
df = DataFrame(data=dfdata, columns = [str(x) for x in range(1,10)]).T
df.columns = ['날짜', '매출액', '영업이익', '당기순이익', '자산총계', '부채총계', '자본총계', '자본금', '부채비율', '유보율', '영업이익률', '순이익률', 'ROA', 'ROE', 'EPS', 'BPS', 'DPS', 'PER', 'PBR', '발행주식수', '배당수익률']
df.drop(df.index[[0]], inplace=True)
# df['날짜'] = df['date'].apply(g)
# df.drop(['date'], axis=1, inplace=True)
    df = df.apply(pd.to_numeric, errors='ignore')  # convert_objects was removed from pandas
# df.set_index('날짜', inplace=True)
df_year = df
    # <!--IFRS separate / quarterly -->
target_table = soup.find("div", class_="um_table", id="highlight_B_Q")
# print(target_table)
result = []
for tr in target_table.find_all('tr'):
# print("[%s]" % tr)
for th in tr.find_all('th'):
value = "%s" % th.text.replace('(P) : Provisional','').replace('(E) : Estimate','').replace('잠정실적','').replace('컨센서스, 추정치','').replace('(E)','').replace('(P)','').replace('/','-').strip()
if ('-02' in value):
value = value + '-28'
elif ('-04' in value) or ('-06' in value) or ('-09' in value) or ('-11' in value):
value = value + '-30'
elif ('-01' in value) or ('-03' in value) or ('-05' in value) or ('-07' in value) or ('-08' in value) or ('-10' in value) or ('-12' in value):
value = value + '-31'
result.append(value)
# print("[%s]" % th.text.replace('(E) : Estimate','').replace('컨센서스, 추정치','').strip())
for td in tr.find_all('td'):
value = td.text.strip().replace(',','')
try:
value = float(value)
except Exception as e:
value = 0
result.append(value)
# print(td.text.strip())
# print(result[1:])
result = result[1:]
dfdata = []
for x in range(0, len(result), 9):
dfdata.append(result[x:x+9])
df = DataFrame(data=dfdata, columns = [str(x) for x in range(1,10)]).T
df.columns = ['날짜', '매출액', '영업이익', '당기순이익', '자산총계', '부채총계', '자본총계', '자본금', '부채비율', '유보율', '영업이익률', '순이익률', 'ROA', 'ROE', 'EPS', 'BPS', 'DPS', 'PER', 'PBR', '발행주식수', '배당수익률']
df.drop(df.index[[0]], inplace=True)
# df['날짜'] = df['date'].apply(g)
# df.drop(['date'], axis=1, inplace=True)
    df = df.apply(pd.to_numeric, errors='ignore')  # convert_objects was removed from pandas
# df.set_index('날짜', inplace=True)
df_qtr = df
return (df_year, df_qtr)
def build_fundamental_data():
with sqlite3.connect(DATABASE) as conn:
cursor = conn.cursor()
replace_sqlite = (
"replace into 재무정보( 날짜,종목코드,기간구분,매출액,영업이익,당기순이익,자산총계,부채총계,자본총계,자본금,부채비율,유보율,영업이익률,순이익률,ROA,ROE,EPS,BPS,DPS,PER,PBR,발행주식수,배당수익률 ) "
"values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) "
)
df = pdsql.read_sql_query('select 단축코드, 종목명 from 종목코드 ', con=conn)
CODES = list(df.values)
for code, name in CODES:
print('FnGuide - %s %s' % (code, name))
try:
(df_year, df_qtr) = get_company_fundamental_fnguide(code)
except Exception as e:
continue
if len(df_year.index) > 0 or len(df_qtr.index) > 0:
if len(df_year.index) > 0:
기간구분 = '년간'
for idx, row in df_year.iterrows():
날짜, 매출액, 영업이익, 당기순이익, 자산총계, 부채총계, 자본총계, 자본금, 부채비율, 유보율, 영업이익률, 순이익률, ROA, ROE, EPS, BPS, DPS, PER, PBR, 발행주식수, 배당수익률 = row
종목코드 = code
d = (날짜,종목코드,기간구분,매출액,영업이익,당기순이익,자산총계,부채총계,자본총계,자본금,부채비율,유보율,영업이익률,순이익률,ROA,ROE,EPS,BPS,DPS,PER,PBR,발행주식수,배당수익률)
cursor.execute(replace_sqlite, d)
conn.commit()
if len(df_qtr.index) > 0:
기간구분 = '분기'
for idx, row in df_qtr.iterrows():
날짜, 매출액, 영업이익, 당기순이익, 자산총계, 부채총계, 자본총계, 자본금, 부채비율, 유보율, 영업이익률, 순이익률, ROA, ROE, EPS, BPS, DPS, PER, PBR, 발행주식수, 배당수익률 = row
종목코드 = code
d = (날짜,종목코드,기간구분,매출액,영업이익,당기순이익,자산총계,부채총계,자본총계,자본금,부채비율,유보율,영업이익률,순이익률,ROA,ROE,EPS,BPS,DPS,PER,PBR,발행주식수,배당수익률)
cursor.execute(replace_sqlite, d)
conn.commit()
# time.sleep(2)
# except Exception as e:
# print(code, name, str(e))
if __name__ == "__main__":
    # Fetch financial data - running this once per quarter is enough
build_fundamental_data()
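# A minimal alternative sketch (not in the original script): the hard-coded
# month-end suffix chain above could use the already-imported calendar module,
# which also handles leap-year February. Assumes `value` looks like 'YYYY-MM'.
#
#     def month_end(value):
#         year, month = int(value[:4]), int(value[5:7])
#         return '%s-%02d' % (value, calendar.monthrange(year, month)[1])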
| 37.895238
| 199
| 0.539834
| 1,034
| 7,958
| 4.088008
| 0.236944
| 0.039745
| 0.038325
| 0.029808
| 0.615093
| 0.582446
| 0.574403
| 0.559735
| 0.559735
| 0.559735
| 0
| 0.023418
| 0.280975
| 7,958
| 209
| 200
| 38.076555
| 0.715309
| 0.112842
| 0
| 0.534247
| 0
| 0.027397
| 0.135058
| 0.018908
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034247
| false
| 0
| 0.09589
| 0
| 0.171233
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29c3750914f24305e5c021af40b18b30bd0ff4d0
| 5,387
|
py
|
Python
|
information_extraction/Preprocessor.py
|
shatha2014/Fashion_Rec
|
5f4dd4f1c7c2d18a9364b02f1798125c259e6598
|
[
"BSD-2-Clause"
] | 11
|
2018-08-30T10:52:35.000Z
|
2021-11-08T06:04:22.000Z
|
information_extraction/Preprocessor.py
|
shatha2014/Fashion_Rec
|
5f4dd4f1c7c2d18a9364b02f1798125c259e6598
|
[
"BSD-2-Clause"
] | 1
|
2020-09-08T19:53:48.000Z
|
2021-11-08T13:29:42.000Z
|
information_extraction/Preprocessor.py
|
shatha2014/Fashion_Rec
|
5f4dd4f1c7c2d18a9364b02f1798125c259e6598
|
[
"BSD-2-Clause"
] | 8
|
2018-08-30T10:52:37.000Z
|
2022-02-20T09:13:40.000Z
|
# Author: Kim Hammar <[email protected]> KTH 2018
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from nltk.tag.perceptron import PerceptronTagger
import nltk
import emoji
nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
nltk.download('wordnet')
class PreProcessor(object):
"""
Preprocessor module in the Information Extraction Process of Fashion Related Properties of Instagram posts.
Performs text normalization and parsing.
"""
# Class variables shared by all instances
tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
wordnet_lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}'])
tagger = PerceptronTagger()
def __init__(self, ids, comments, captions, tags):
""" Class Constructor"""
# Raw text
self.raw_id = ids
self.raw_comments = comments
self.raw_captions = captions
self.raw_tags = tags
print("Read in Raw Text")
# Preprocess raw text
self.remove_non_unicode()
self.lower_case()
self.to_unicode()
print("Normalized Raw Text")
# Tokenize and preprocess tokens
self.tokenize()
print("Tokenized the text")
self.remove_stopwords()
#self.remove_urls()
print("Normalized tokens")
# Extract specific tokens
self.lemmatize()
print("Extracted lemmas")
self.extract_emojis()
print("Extracted emojis")
self.extract_hashtags()
print("Extracted hashtags")
#self.pos_tag()
#print("Extracted POS")
def remove_non_unicode(self):
""" Remove non-unicode tokens"""
self.raw_comments = map(lambda x: x.decode('utf-8','ignore').encode("utf-8"), self.raw_comments)
self.raw_captions = map(lambda x: x.decode('utf-8', 'ignore').encode("utf-8"), self.raw_captions)
self.raw_tags = map(lambda x: x.decode('utf-8','ignore').encode("utf-8"), self.raw_tags)
def to_unicode(self):
""" Convert text to unicode """
self.raw_comments = map(lambda x: x.decode('utf-8'), self.raw_comments)
self.raw_captions = map(lambda x: x.decode('utf-8'), self.raw_captions)
self.raw_tags = map(lambda x: x.decode('utf-8'), self.raw_tags)
def tokenize(self):
""" Tokenize text with TweetTokenizer, preserve emojis, hashtags etc """
self.tokens_captions = [self.tknzr.tokenize(caption) for caption in self.raw_captions]
self.tokens_comments = [self.tknzr.tokenize(comment) for comment in self.raw_comments]
self.tokens_tags = [self.tknzr.tokenize(tag) for tag in self.raw_tags]
self.tokens_all = []
for i in range(len(self.raw_id)):
self.tokens_all.append(self.tokens_captions[i] + self.tokens_comments[i] + self.tokens_tags[i])
def lower_case(self):
""" Convert raw text into lowercase"""
self.raw_captions = [caption.lower() for caption in self.raw_captions]
self.raw_comments = [comments.lower() for comments in self.raw_comments]
self.raw_tags = [tags.lower() for tags in self.raw_tags]
def lemmatize(self):
""" Lemmatize tokens"""
self.lemma_caption = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), caption) for caption in self.tokens_captions]
self.lemma_comments = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), comments) for comments in self.tokens_comments]
self.lemma_tags = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), tags) for tags in self.tokens_tags]
self.lemma_all = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), tokens) for tokens in self.tokens_all]
def remove_urls(self):
""" Remove urls from tokens """
self.tokens_captions = [filter(lambda x: "http" not in x, caption) for caption in self.tokens_captions]
self.tokens_comments = [filter(lambda x: "http" not in x, comments) for comments in self.tokens_comments]
self.tokens_tags = [filter(lambda x: "http" not in x, tags) for tags in self.tokens_tags]
self.tokens_all = [filter(lambda x: "http" not in x, tokens) for tokens in self.tokens_all]
def remove_stopwords(self):
""" Remove stopwords from tokens """
self.tokens_captions = [[token for token in caption if token not in self.stop_words] for caption in self.tokens_captions]
self.tokens_comments = [[token for token in comments if token not in self.stop_words] for comments in self.tokens_comments]
self.tokens_tags = [[token for token in tags if token not in self.stop_words] for tags in self.tokens_tags]
self.tokens_all = [[token for token in tokens if token not in self.stop_words] for tokens in self.tokens_all]
def extract_emojis(self):
""" Extract emojis """
self.emojis = [[c for c in tokens if c in emoji.UNICODE_EMOJI] for tokens in self.tokens_all]
def extract_hashtags(self):
""" Extract hashtags """
self.hashtags = [[x for x in tokens if x.startswith("#")] for tokens in self.tokens_all]
def pos_tag(self):
""" Extract POS tags """
self.pos_tokens = [self.tagger.tag(tokens) for tokens in self.tokens_all]
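# Usage sketch (hypothetical data; assumes Python 2, as the str.decode calls
# above imply):
#
#     p = PreProcessor(ids=[1],
#                      comments=["love this #dress"],
#                      captions=["summer look"],
#                      tags=["#ootd"])
#     print(p.hashtags)  # expected to contain '#dress' and '#ootd'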
| 45.652542
| 131
| 0.667347
| 726
| 5,387
| 4.81405
| 0.165289
| 0.088698
| 0.051502
| 0.018884
| 0.420029
| 0.388269
| 0.384263
| 0.325036
| 0.231187
| 0.115308
| 0
| 0.00307
| 0.213848
| 5,387
| 117
| 132
| 46.042735
| 0.822196
| 0.122888
| 0
| 0
| 0
| 0
| 0.056791
| 0.005614
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146667
| false
| 0
| 0.08
| 0
| 0.293333
| 0.093333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29c79f364e1d41c68e19d472b3c1d55bd0b5b9e5
| 1,070
|
py
|
Python
|
afterglow_core/schemas/api/v1/jobs/field_cal_job.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 2
|
2021-05-24T15:12:07.000Z
|
2022-02-17T19:58:16.000Z
|
afterglow_core/schemas/api/v1/jobs/field_cal_job.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 1
|
2022-02-27T03:01:06.000Z
|
2022-02-27T03:01:06.000Z
|
afterglow_core/schemas/api/v1/jobs/field_cal_job.py
|
SkynetRTN/afterglow-access-server
|
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
|
[
"Apache-2.0"
] | 2
|
2021-06-08T18:16:40.000Z
|
2021-07-09T14:19:49.000Z
|
"""
Afterglow Core: photometric calibration job schemas
"""
from typing import List as TList
from marshmallow.fields import Integer, List, Nested
from ..job import JobSchema, JobResultSchema
from ..field_cal import FieldCalSchema, FieldCalResultSchema
from ..photometry import PhotSettingsSchema
from .source_extraction_job import SourceExtractionSettingsSchema
__all__ = ['FieldCalJobResultSchema', 'FieldCalJobSchema']
class FieldCalJobResultSchema(JobResultSchema):
data: TList[FieldCalResultSchema] = List(
Nested(FieldCalResultSchema), default=[])
class FieldCalJobSchema(JobSchema):
type = 'field_cal'
result: FieldCalJobResultSchema = Nested(
FieldCalJobResultSchema, default={})
file_ids: TList[int] = List(Integer(), default=[])
field_cal: FieldCalSchema = Nested(FieldCalSchema, default={})
source_extraction_settings: SourceExtractionSettingsSchema = Nested(
SourceExtractionSettingsSchema, default=None)
photometry_settings: PhotSettingsSchema = Nested(
PhotSettingsSchema, default=None)
| 31.470588
| 72
| 0.774766
| 92
| 1,070
| 8.869565
| 0.434783
| 0.029412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142991
| 1,070
| 33
| 73
| 32.424242
| 0.889858
| 0.047664
| 0
| 0
| 0
| 0
| 0.048467
| 0.02275
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29c7ff7b0f45d2d5b8a537d89fbcc9e55ee8907c
| 2,692
|
py
|
Python
|
Python/addRow.py
|
alexwu2021/practice
|
ff786d4d16afdef3e031002d22b58a976c8ed16b
|
[
"MIT"
] | null | null | null |
Python/addRow.py
|
alexwu2021/practice
|
ff786d4d16afdef3e031002d22b58a976c8ed16b
|
[
"MIT"
] | 1
|
2021-11-22T05:54:33.000Z
|
2021-11-22T05:54:33.000Z
|
Python/addRow.py
|
alexwu2021/practice
|
ff786d4d16afdef3e031002d22b58a976c8ed16b
|
[
"MIT"
] | null | null | null |
#import unittest
def addRow(r, d, v):
dmo = []
getHeightAndMore(r, 0, dmo, d)
    if len(dmo) <= 0:
        print('cannot add a row: no nodes found at the given depth')
        return
print('dmo has %d' % len(dmo))
print('dmo: %s' % ','.join([str(x.val) for x in dmo]))
for n in dmo:
left, right = Node(v), Node(v)
left.left = n.left
n.left = left
right.right = n.right
n.right = right
def getHeightAndMore(r, h, dmo, d):
h += 1
if d == h:
dmo.append(r)
    if r.left is not None:
        getHeightAndMore(r.left, h, dmo, d)
    if r.right is not None:
        getHeightAndMore(r.right, h, dmo, d)
class Node:
def __init__(self, val):
self.left = None
self.right = None
self.val = val
def __expr__(self):
msg = 'Node({self.val})'.format(self=self)
return msg
def insertIntoBinaryTreeWithAGivenIntArray(root, intArray):
n = len(intArray)
if n <= 0: return
root = Node(intArray[0])
if n == 1: return
nodeArray = [root]
i = 1
while i < n:
temp = Node(intArray[i])
if i % 2 == 0:
parentIndex = (i - 2) // 2
nodeArray[parentIndex].right = temp
else:
parentIndex = (i - 1) // 2
nodeArray[parentIndex].left = temp
nodeArray.append(temp)
i += 1
for n in nodeArray:
print('content: %s' % (n))
def binary_insert(root, node):
if root is None:
root = node
else:
if root.val > node.val:
if root.left is None:
root.left = node
else:
binary_insert(root.left, node)
else:
if root.right is None:
root.right = node
else:
binary_insert(root.right, node)
def in_order_print(root):
if not root:
return
in_order_print(root.left)
    print(root.val)
in_order_print(root.right)
def pre_order_print(root):
if not root:
return
    print('%s left:%s right:%s' % (str(root.val), str(root.left.val if root.left is not None else ''), str(root.right.val if root.right is not None else '')))
pre_order_print(root.left)
pre_order_print(root.right)
#case 1
t = Node(4)
#binary_insert(t, Node(2))
#binary_insert(t, Node(7))
#binary_insert(t, Node(3))
#binary_insert(t, Node(6))
#binary_insert(t, Node(2))
#binary_insert(t, Node(5))
#insertIntoBinaryTreeWithAGivenIntArray(t, [4, 2, 6, 3, 1, 5])
t.left = Node(2)
t.right = Node(6)
t.left.left = Node(3)
t.left.right = Node(1)
t.right.left = Node(5)
pre_order_print(t)
d = 2
v = 99
addRow(t, d, v)
pre_order_print(t)
#in_order_print(t)
#case 2
| 23.206897
| 154
| 0.556092
| 398
| 2,692
| 3.673367
| 0.170854
| 0.073871
| 0.057456
| 0.069767
| 0.120383
| 0.087551
| 0.087551
| 0.04788
| 0.04788
| 0
| 0
| 0.020321
| 0.305349
| 2,692
| 115
| 155
| 23.408696
| 0.761497
| 0.094725
| 0
| 0.139535
| 0
| 0
| 0.043299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0
| 0
| 0.151163
| 0.162791
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29cb4ed39bb073f7561e68074f27a72bbc5b7c7c
| 7,167
|
py
|
Python
|
tests/test_editor_common.py
|
jpfxgood/ped
|
f753ca27e4462c321ed28f00e1ef47fbde62990e
|
[
"MIT"
] | null | null | null |
tests/test_editor_common.py
|
jpfxgood/ped
|
f753ca27e4462c321ed28f00e1ef47fbde62990e
|
[
"MIT"
] | 21
|
2020-07-03T13:14:15.000Z
|
2020-07-14T14:27:43.000Z
|
tests/test_editor_common.py
|
jpfxgood/ped
|
f753ca27e4462c321ed28f00e1ef47fbde62990e
|
[
"MIT"
] | null | null | null |
from ped_core import editor_common
import io
import pprint
import os
import curses
import curses.ascii
import time
import re
from ped_core import keymap
from ped_core import keytab
from ped_core import clipboard
from ped_test_util import read_str, match_attr, undo_all, window_pos, play_macro, validate_mark, validate_screen, editor_test_suite
import subprocess
def test_memline():
m = editor_common.MemLine( "01234567890123456789" )
assert( m.length() == 20 )
assert( m.getContent() == "01234567890123456789" )
def test_EditFile(testdir):
lines_to_test = ["This is the first line","This is the second line","This is the third line","This is the last line"]
testfile = testdir.makefile(".txt",lines_to_test[0],lines_to_test[1],lines_to_test[2],lines_to_test[3])
fn = str(testfile)
ef = editor_common.EditFile( fn )
assert(ef.get_tabs() == [ 4, 8 ] )
ef.set_tabs( [ 8, 16] )
assert(ef.get_tabs() == [ 8, 16 ] )
w = ef.getWorking()
assert( not w.closed )
assert( ef.getModref() == 0)
assert( isinstance(ef.getUndoMgr(), editor_common.undo.UndoManager ))
assert( not ef.isChanged() )
assert( not ef.isReadOnly() )
ef.setReadOnly( True )
assert( ef.isReadOnly() )
ef.setReadOnly( False )
assert( not ef.isReadOnly() )
assert( ef.getFilename() == fn )
ef.setFilename( "bogus.txt" )
assert( ef.getFilename() == "bogus.txt" )
ef.setFilename( fn )
assert( ef.getFilename() == fn )
fls = ef.getLines()
assert( ef.numLines() == 4 )
ef.close()
assert( ef.getWorking() == None )
ef.load()
w = ef.getWorking()
assert( not w.closed )
for line in range(0,len(lines_to_test)):
assert(ef.length(line) == len(lines_to_test[line]))
fl = ef.getLine(line)
assert(fl.rstrip() == lines_to_test[line])
assert(fls[line].rstrip() == lines_to_test[line])
fls = ef.getLines(1,3)
assert(len(fls) == 2 )
assert(fls[0].rstrip() == lines_to_test[1] and fls[1].rstrip() == lines_to_test[2])
ef.deleteLine(1)
fls = ef.getLines()
assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[2] and fls[2].rstrip() == lines_to_test[3] )
assert(ef.numLines() == 3 )
assert(ef.getModref() == 1 )
assert(ef.isChanged() )
um = ef.getUndoMgr()
um.undo_transaction()
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(ef.numLines() == 4)
assert(ef.getModref() == 2)
assert(not ef.isChanged() )
new_test_line = "This is the line for insert"
ef.insertLine(2,new_test_line)
fls = ef.getLines()
assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[1] and fls[2].rstrip() == new_test_line and fls[3].rstrip() == lines_to_test[2] and fls[4].rstrip() == lines_to_test[3] )
assert(ef.numLines() == 5 )
assert(ef.getModref() == 3 )
assert(ef.isChanged() )
um = ef.getUndoMgr()
um.undo_transaction()
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(ef.numLines() == 4)
assert(ef.getModref() == 4)
assert(not ef.isChanged() )
ef.replaceLine(3,new_test_line)
fls = ef.getLines()
assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[1] and fls[2].rstrip() == lines_to_test[2] and fls[3].rstrip() == new_test_line )
assert(ef.numLines() == 4 )
assert(ef.getModref() == 5 )
assert(ef.isChanged() )
um = ef.getUndoMgr()
um.undo_transaction()
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(ef.numLines() == 4)
assert(ef.getModref() == 6)
assert(not ef.isChanged() )
fd = str(testdir.tmpdir)
backup_filepath = ef.make_backup_dir( fn, fd )
assert(os.path.exists(os.path.dirname(backup_filepath)))
ef.insertLine(10,new_test_line)
ef.backuproot = fd
ef.save()
assert(os.path.exists(backup_filepath))
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(fls[10].rstrip() == new_test_line)
newname = os.path.join(fd,"1_"+os.path.basename(fn))
ef.save(newname)
assert(os.path.exists(newname))
ef.close()
ef.load()
assert(ef.getFilename() == newname)
fls = ef.getLines()
for line in range(0,len(lines_to_test)):
assert(fls[line].rstrip() == lines_to_test[line])
assert(fls[10].rstrip() == new_test_line)
assert(ef.get_tab_stop(4) == 8)
assert(ef.get_tab_stop(10) == 16 )
assert(ef.get_tab_stop(10,True) == 8)
tabby_string = "01234\t56789012\t3456789"
expanded_string = "01234 56789012 3456789"
assert(ef.expand_tabs(tabby_string) == expanded_string)
def test_Editor_unwrapped(testdir,capsys):
with capsys.disabled():
curses.wrapper(editor_test_suite,testdir,False,None)
def test_Editor_wrapped(testdir,capsys):
with capsys.disabled():
curses.wrapper(editor_test_suite,testdir,True,None)
def test_StreamEditor(testdir,capsys):
with capsys.disabled():
def main(stdscr,testdir):
max_y,max_x = stdscr.getmaxyx()
generator_lines = [
"for i in range(0,1000000):",
" print('Line %d of test file'%i)",
]
generator_script = testdir.makepyfile(**{ "generator": "\n".join(generator_lines)})
cmd = 'python3 %s'%str(generator_script)
se = editor_common.StreamEditor(stdscr,stdscr.subwin(max_y,max_x,0,0),"Test Stream",subprocess.Popen(cmd,
shell=True,
bufsize=1024,
encoding="utf-8",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).stdout)
starting_num_lines = se.numLines()
time.sleep(1)
for i in range(0,100):
se.main(False)
assert(se.getContent(i) == 'Line %d of test file'%i)
current_line = se.getLine()
se.main(False,6) # ctrl-f
for i in range(0,200):
se.main(False)
assert(se.follow == True and se.getLine() > current_line)
se.main(False,6) # ctrl-f
current_line = se.getLine()
for i in range(0,200):
se.main(False)
assert(se.follow == False and se.getLine() == current_line)
play_macro(se, [keytab.KEYTAB_ALTO,keytab.KEYTAB_TAB,keytab.KEYTAB_DOWN]+list("testout.out")+[keytab.KEYTAB_CR,keytab.KEYTAB_CR])
assert(se.getFilename().endswith("testout.out") and os.path.exists(se.getFilename()))
se.close()
curses.wrapper(main,testdir)
| 40.954286
| 205
| 0.599972
| 960
| 7,167
| 4.320833
| 0.194792
| 0.052314
| 0.082208
| 0.077869
| 0.425747
| 0.391273
| 0.36162
| 0.324012
| 0.293153
| 0.283028
| 0
| 0.036176
| 0.255616
| 7,167
| 174
| 206
| 41.189655
| 0.741331
| 0.001814
| 0
| 0.391566
| 0
| 0
| 0.052161
| 0.003356
| 0
| 0
| 0
| 0
| 0.373494
| 1
| 0.036145
| false
| 0
| 0.078313
| 0
| 0.114458
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29cf80f6c6965927720d1b295a0c8b626681599d
| 254
|
py
|
Python
|
Store/robot-test/say.py
|
Quanta-Robotics/Robot-Blueberry
|
7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
|
[
"MIT"
] | 25
|
2021-06-08T07:09:30.000Z
|
2021-12-30T06:28:35.000Z
|
Store/robot-test/say.py
|
ICT-CoU/Robot-Blueberry
|
d19fd1be037df9d67de64df57a87006d74cd6c43
|
[
"MIT"
] | 2
|
2021-05-23T12:54:51.000Z
|
2021-06-07T17:47:56.000Z
|
Store/robot-test/say.py
|
ICT-CoU/Robot-Blueberry
|
d19fd1be037df9d67de64df57a87006d74cd6c43
|
[
"MIT"
] | 14
|
2021-06-08T13:02:28.000Z
|
2021-12-30T20:07:18.000Z
|
import pyttsx3
engine = pyttsx3.init()
engine.setProperty('rate', 150)
voices = engine.getProperty('voices')
engine.setProperty("voice", 'english_rp+f4')
def talk(text):
engine.say(text)
engine.runAndWait()
talk("My name is robot leena")
| 15.875
| 44
| 0.704724
| 33
| 254
| 5.393939
| 0.69697
| 0.191011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02765
| 0.145669
| 254
| 15
| 45
| 16.933333
| 0.792627
| 0
| 0
| 0
| 0
| 0
| 0.198413
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29d3aa7dc92ae861ca049b62d573cabdb669506d
| 1,579
|
py
|
Python
|
tests/integration/test_dug_utils.py
|
helxplatform/roger
|
60c1c1198c41949804692217c74848e2aa8b9ea2
|
[
"MIT"
] | null | null | null |
tests/integration/test_dug_utils.py
|
helxplatform/roger
|
60c1c1198c41949804692217c74848e2aa8b9ea2
|
[
"MIT"
] | 7
|
2021-04-08T12:17:27.000Z
|
2022-02-08T23:12:32.000Z
|
tests/integration/test_dug_utils.py
|
helxplatform/roger
|
60c1c1198c41949804692217c74848e2aa8b9ea2
|
[
"MIT"
] | 3
|
2020-12-07T20:49:43.000Z
|
2021-06-12T19:49:43.000Z
|
import tempfile
from pathlib import Path
import pytest
from dug_helpers.dug_utils import FileFetcher, get_topmed_files, get_dbgap_files
from roger.Config import config
def test_fetch_network_file():
filename = "README.md"
with tempfile.TemporaryDirectory() as tmp_dir:
fetch1 = FileFetcher(
"https://github.com",
"/helxplatform/roger/blob/main/",
tmp_dir,
)
expected_path = Path(tmp_dir) / filename
assert not expected_path.exists()
fetch1(filename)
assert expected_path.exists()
with tempfile.TemporaryDirectory() as tmp_dir:
fetch2 = FileFetcher(
"https://github.com",
Path("/helxplatform/roger/blob/main/"),
Path(tmp_dir),
)
expected_path = Path(tmp_dir) / filename
assert not expected_path.exists()
fetch2(filename)
assert expected_path.exists()
def test_fetcher_errors():
filename = "DOES NOT EXIST.md"
with tempfile.TemporaryDirectory() as tmp_dir:
fetch = FileFetcher(
"https://github.com",
Path("/helxplatform/roger/blob/main/"),
Path(tmp_dir),
)
with pytest.raises(RuntimeError):
fetch(filename)
def test_get_topmed_files():
file_names = get_topmed_files(config=config)
for file_name in file_names:
assert Path(file_name).exists()
def test_get_dbgap_files():
file_names = get_dbgap_files(config=config)
for file_name in file_names:
assert Path(file_name).exists()
| 26.316667
| 80
| 0.640912
| 184
| 1,579
| 5.266304
| 0.277174
| 0.049536
| 0.04128
| 0.099071
| 0.573787
| 0.50774
| 0.468524
| 0.385965
| 0.385965
| 0.385965
| 0
| 0.003439
| 0.263458
| 1,579
| 60
| 81
| 26.316667
| 0.829751
| 0
| 0
| 0.444444
| 0
| 0
| 0.107595
| 0.056962
| 0
| 0
| 0
| 0
| 0.133333
| 1
| 0.088889
| false
| 0
| 0.111111
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29d584e58250f68d3fe99344f92ca1d026fcfaa6
| 6,915
|
py
|
Python
|
tests/bean_test.py
|
samuelchen/truepy
|
f1fd86ffccf7c3b2eee4cd4ced9436ff832d257e
|
[
"OpenSSL"
] | 40
|
2015-08-04T11:01:33.000Z
|
2022-01-17T10:45:18.000Z
|
tests/bean_test.py
|
samuelchen/truepy
|
f1fd86ffccf7c3b2eee4cd4ced9436ff832d257e
|
[
"OpenSSL"
] | 9
|
2016-09-14T04:40:58.000Z
|
2021-07-22T09:07:51.000Z
|
tests/bean_test.py
|
samuelchen/truepy
|
f1fd86ffccf7c3b2eee4cd4ced9436ff832d257e
|
[
"OpenSSL"
] | 13
|
2015-02-24T05:39:10.000Z
|
2022-02-03T00:41:53.000Z
|
# coding: utf-8
# truepy
# Copyright (C) 2014-2015 Moses Palmér
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from datetime import datetime
from truepy import fromstring, tostring
from truepy._bean import snake_to_camel, camel_to_snake
from truepy._bean import value_to_xml
from truepy._bean import deserialize, serialize, to_document
from truepy._bean_serializers import _DESERIALIZER_CLASSES, bean_class
class BeanTest(unittest.TestCase):
def test_snake_to_camel(self):
"""Tests that snake_to_camel works as expected"""
self.assertEqual(
'camelCase',
snake_to_camel('camel_case'))
self.assertEqual(
'camelCase',
snake_to_camel('camel__case'))
self.assertEqual(
'camelCase',
snake_to_camel('camel_case_'))
self.assertEqual(
'CamelCase',
snake_to_camel('_camel_case'))
def test_camel_to_snake(self):
"""Tests that camel_to_snake works as expected"""
self.assertEqual('snake_case', camel_to_snake('snakeCase'))
self.assertEqual('_snake_case', camel_to_snake('SnakeCase'))
self.assertEqual('_s_n_a_k_e', camel_to_snake('SNAKE'))
def test_value_to_xml_no_class(self):
"""Tests value_to_xml for no class name"""
self.assertEqual(
'<test>value</test>',
tostring(value_to_xml('value', 'test')))
def test_value_to_xml_with_class(self):
"""Tests value_to_xml for a class name"""
self.assertEqual(
'<object class="test">'
'<tag>value</tag>'
'</object>',
tostring(value_to_xml('value', 'tag', 'test')))
def test_serialize_unknown(self):
"""Serialises an unknown value"""
class unknown(object):
pass
with self.assertRaises(ValueError):
serialize(unknown())
def test_serialize_empty_class(self):
"""Serialises an empty class"""
class empty(object):
bean_class = 'test.class'
self.assertEqual(
'<object class="test.class" />',
tostring(serialize(empty())))
def test_serialize_unknown_property(self):
"""Serialises a class with an unknown property"""
class unknown(object):
pass
class has_unknown(object):
bean_class = 'test.class'
@property
def test_a(self):
return unknown()
with self.assertRaises(ValueError):
serialize(has_unknown())
def test_serialize_string(self):
"""Serialises a string"""
self.assertEqual(
'<string>hello world</string>',
tostring(serialize('hello world')))
def test_serialize_object(self):
"""Serialises an object"""
class test(object):
bean_class = 'test.class'
@property
def test_property(self):
return True
self.assertEqual(
'<object class="test.class">'
'<void property="testProperty">'
'<boolean>true</boolean>'
'</void>'
'</object>',
tostring(serialize(test())))
def test_serialize_datetime(self):
"""Serialises datetime instances"""
self.assertEqual(
'<object class="java.util.Date">'
'<long>0</long>'
'</object>',
tostring(serialize(
datetime.strptime('1970-01-01 UTC', '%Y-%m-%d %Z'))))
self.assertEqual(
'<object class="java.util.Date">'
'<long>86400000</long>'
'</object>',
tostring(serialize(
datetime.strptime('1970-01-02 UTC', '%Y-%m-%d %Z'))))
def test_deserialize_unknown_fragment(self):
"""Deserialises an unknown fragment"""
with self.assertRaises(ValueError):
deserialize(fromstring(
'<object class="unknown">'
'<void property="a">'
'<int>42</int>'
'</void>'
'</object>'))
def test_deserialize(self):
"""Deserialises invalid fragments"""
with self.assertRaises(ValueError):
deserialize(fromstring(
'<boolean>invalid</boolean>'))
with self.assertRaises(ValueError):
deserialize(fromstring(
'<int>invalid</int>'))
def test_deserialize_known_fragment(self):
"""Deserialises known fragments"""
self.assertEqual(
True,
deserialize(fromstring(
'<boolean>true</boolean>')))
self.assertEqual(
42,
deserialize(fromstring(
'<int>42</int>')))
self.assertEqual(
'hello world',
deserialize(fromstring(
'<string>hello world</string>')))
def test_deserialize_with_constructor(self):
"""Deserialises an object using constructor"""
global _DESERIALIZER_CLASSES
class_name = 'test.class'
try:
@bean_class(class_name)
class test(object):
@property
                def test_a(self):
return self._a
def test___init__(self, a):
self._a = a
o = deserialize(fromstring(
'<object class="test.class">'
'<void property="a">'
'<string>hello world</string>'
'</void>'
'</object>'))
self.assertEqual('hello world', o.a)
self.assertEqual(test, o.__class__)
finally:
del _DESERIALIZER_CLASSES[class_name]
def test_deserialize_datetime(self):
"""Deserialises datetime objects"""
expected = datetime.strptime('2014-01-01 UTC', '%Y-%m-%d %Z')
self.assertEqual(
expected,
deserialize(serialize(expected)))
def test_to_document(self):
"""Tests that todocument creates a valid XML document"""
expected = 'hello world'
self.assertEqual(
expected,
deserialize(
fromstring(
to_document(
serialize(expected)))
[0]))
| 32.013889
| 79
| 0.572523
| 717
| 6,915
| 5.343096
| 0.232915
| 0.082224
| 0.021926
| 0.022187
| 0.321587
| 0.249804
| 0.165492
| 0.151397
| 0.087967
| 0.074393
| 0
| 0.010553
| 0.314823
| 6,915
| 215
| 80
| 32.162791
| 0.798016
| 0.175127
| 0
| 0.409396
| 0
| 0
| 0.16316
| 0.029066
| 0
| 0
| 0
| 0.004651
| 0.174497
| 1
| 0.134228
| false
| 0.013423
| 0.04698
| 0.020134
| 0.248322
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29d6295eb61db2d065b900e834740080a6c5d3ff
| 3,679
|
py
|
Python
|
normal_version/export_es_data.py
|
Logistic98/es-data-transfer
|
6ed916201e8ab701e258e156e2c71468a3c509e5
|
[
"Apache-2.0"
] | 1
|
2022-03-23T05:22:41.000Z
|
2022-03-23T05:22:41.000Z
|
normal_version/export_es_data.py
|
Logistic98/es-data-transfer
|
6ed916201e8ab701e258e156e2c71468a3c509e5
|
[
"Apache-2.0"
] | null | null | null |
normal_version/export_es_data.py
|
Logistic98/es-data-transfer
|
6ed916201e8ab701e258e156e2c71468a3c509e5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch
from datetime import timedelta
import datetime
import os
import json
import logging
from configparser import ConfigParser
# Set up the log file
logging.basicConfig(filename='logging_es.log', level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def read_config():
cfg = ConfigParser()
cfg.read('./config.ini', encoding='utf-8')
host = cfg.get('SOURCE_ES', 'host')
port = cfg.get('SOURCE_ES', 'port')
user = cfg.get('SOURCE_ES', 'user')
password = cfg.get('SOURCE_ES', 'password')
timeout = cfg.get('SOURCE_ES', 'timeout')
index_list = cfg.get('SOURCE_ES', 'index_list')
es_dict = {}
es_dict['host'] = host
es_dict['port'] = port
es_dict['user'] = user
es_dict['password'] = password
es_dict['timeout'] = timeout
es_dict['index_list'] = index_list
return es_dict
def write_list_to_json(list, json_file_name, json_file_save_path):
"""
将list写入到json文件
:param list:
:param json_file_name: 写入的json文件名字
:param json_file_save_path: json文件存储路径
:return:
"""
if not os.path.exists(json_file_save_path):
os.makedirs(json_file_save_path)
os.chdir(json_file_save_path)
with open(json_file_name, 'w', encoding='utf-8') as f:
json.dump(list, f, ensure_ascii=False)
def es_json(es_dict, start_time, end_time):
str_separate = "==============================================================="
try:
BASE_DIR = os.getcwd()
Es = Elasticsearch(
hosts=[str(es_dict['host']) + ":" + str(es_dict['port'])],
http_auth=(str(es_dict['user']), str(es_dict['password'])),
timeout=int(es_dict['timeout'])
)
except Exception as e:
logging.error(e)
index_list = ''.join(es_dict['index_list'].split()).split(",")
for i in index_list:
print(f"保存索引{i}的数据\r")
print_info1 = "保存索引" + i + "的数据"
logging.info(print_info1)
query = {
"range": {
"@timestamp": {
                    # greater than the previous read's end time, less than or equal to this read's start time
"gt": start_time,
"lte": end_time
}
}
}
try:
data = Es.search(index=i, query=query, size=10000)
source_list = []
for hit in data['hits']['hits']:
source_data = hit['_source']
source_data['_id'] = hit['_id']
source_list.append(source_data)
print(f"保存的时间为{start_time}到{end_time}\r")
print_info2 = "保存的时间为" + start_time + "到" + end_time + ""
logging.info(print_info2)
file_path = BASE_DIR + "/json_file"
file_name = str(i) + ".json"
if len(source_list) != 0:
write_list_to_json(source_list, file_name, file_path)
else:
print('无更新')
logging.info(str(i) + '无更新')
print(str_separate)
logging.info(str_separate)
except Exception as e:
print(e)
logging.info("es数据库到json文件的读写error" % e)
logging.info(str_separate)
if __name__ == '__main__':
start_date_time = datetime.datetime.now() + timedelta(days=-1)
end_date_time = datetime.datetime.now()
start_time = start_date_time.strftime("%Y-%m-%dT%H:00:00.000Z")
end_time = end_date_time.strftime("%Y-%m-%dT%H:00:00.000Z")
    # Read the configuration
es_dict = read_config()
    # Get the current working directory
BASE_DIR = os.getcwd()
    # Read data from the ES database and write it out as JSON files
es_json(es_dict, start_time, end_time)
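# Expected config.ini layout, reconstructed from the keys read in read_config()
# (all values are placeholders):
#
#     [SOURCE_ES]
#     host = http://127.0.0.1
#     port = 9200
#     user = elastic
#     password = changeme
#     timeout = 60
#     index_list = index-a, index-b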
| 32.27193
| 84
| 0.57271
| 455
| 3,679
| 4.362637
| 0.292308
| 0.051385
| 0.036272
| 0.042317
| 0.125945
| 0.080605
| 0.057431
| 0.057431
| 0.029219
| 0.029219
| 0
| 0.010491
| 0.274531
| 3,679
| 113
| 85
| 32.557522
| 0.733233
| 0.055178
| 0
| 0.090909
| 0
| 0
| 0.148342
| 0.04014
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034091
| false
| 0.034091
| 0.079545
| 0
| 0.125
| 0.102273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29dae29f89683a7db968db7356c874c048160ba7
| 2,645
|
py
|
Python
|
cnmodel/util/expfitting.py
|
pbmanis/cnmodel
|
eee593c673752c19137658d5b9a381ea9ad4580f
|
[
"BSD-3-Clause"
] | 5
|
2017-07-26T21:46:14.000Z
|
2020-11-27T07:53:14.000Z
|
cnmodel/util/expfitting.py
|
pbmanis/cnmodel
|
eee593c673752c19137658d5b9a381ea9ad4580f
|
[
"BSD-3-Clause"
] | 12
|
2017-07-26T07:16:16.000Z
|
2021-07-14T13:41:37.000Z
|
cnmodel/util/expfitting.py
|
pbmanis/cnmodel
|
eee593c673752c19137658d5b9a381ea9ad4580f
|
[
"BSD-3-Clause"
] | 10
|
2017-07-26T07:03:29.000Z
|
2021-06-23T15:52:37.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
expfitting.py
Provide single or double exponential fits to data.
"""
import lmfit
import numpy as np
import scipy.optimize
class ExpFitting:
"""
Parameters
----------
nexp : int
1 or 2 for single or double exponential fit
initpars : dict
dict of initial parameters. For example: {'dc': 0.,
'a1': 1., 't1': 3, 'a2' : 0.5, 'delta': 3.}, where
delta determines the ratio between the time constants.
bounds : dict
dictionary of bounds for each parameter, with a list of lower and upper values.
"""
def __init__(self, nexp=1, initpars=None, bounds=None):
self.fitpars = lmfit.Parameters()
if nexp == 1:
# (Name, Value, Vary, Min, Max, Expr)
self.fitpars.add_many(('dc', 0, True, -100., 0., None),
('a1', 1., True, -25., 25., None),
('t1', 10., True, 0.1, 50, None))
self.efunc = self.exp1_err
elif nexp == 2:
self.fitpars.add_many(('dc', 0, True, -100., 0., None),
('a1', 1., True, 0., 25., None),
('t1', 10., True, 0.1, 50, None),
('a2', 1., True, 0., 25., None),
('delta', 3., True, 3., 100., None))
if initpars is not None:
assert len(initpars) == 5
                for k, v in initpars.items():  # dict.iteritems is Python 2 only
self.fitpars[k].value = v
if bounds is not None:
assert len(bounds) == 5
                for k, v in bounds.items():
self.fitpars[k].min = v[0]
self.fitpars[k].max = v[1]
self.efunc = self.exp2_err
else:
raise ValueError
def fit(self, x, y, p, verbose=False):
kws={'maxfev': 5000}
mim = lmfit.minimize(self.efunc, p, method='least_squares', args=(x, y)) #, kws=kws)
if verbose:
lmfit.printfuncs.report_fit(mim.params)
fitpars = mim.params
return fitpars
@staticmethod
def exp1(x, dc, t1, a1):
return dc + a1*np.exp(-x/t1)
def exp1_err(self, p, x, y):
return np.fabs(y-self.exp1(x, **dict([(k,p.value) for k,p in p.items()])))
@staticmethod
def exp2(x, dc, t1, a1, a2, delta):
return dc + a1 * np.exp(-x/t1) + a2 * np.exp(-x/(t1*delta))
def exp2_err(self, p, x, y):
return np.fabs(y-self.exp2(x, **dict([(k,p.value) for k,p in p.items()])))
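# Usage sketch on synthetic data (values chosen to sit inside the default
# parameter bounds; not part of the original module):
if __name__ == '__main__':
    x = np.linspace(0., 25., 200)
    y = -5. + 2.5 * np.exp(-x / 8.)
    fitter = ExpFitting(nexp=1)
    result = fitter.fit(x, y, fitter.fitpars, verbose=True)
    print({k: result[k].value for k in ('dc', 'a1', 't1')})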
| 33.910256
| 92
| 0.485444
| 349
| 2,645
| 3.644699
| 0.34384
| 0.051887
| 0.028302
| 0.018868
| 0.258648
| 0.207547
| 0.207547
| 0.179245
| 0.179245
| 0.144654
| 0
| 0.052033
| 0.367864
| 2,645
| 77
| 93
| 34.350649
| 0.708732
| 0.196219
| 0
| 0.086957
| 0
| 0
| 0.018474
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.130435
| false
| 0
| 0.065217
| 0.086957
| 0.326087
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29db5cdc597125eaa323b36fcd83763a78a5f8f9
| 4,338
|
py
|
Python
|
django_cloud_tasks/models.py
|
joaodaher/django-cloud-tasks
|
bc8ff94a281bda8b49ee73229d5ed5cacdd7a388
|
[
"Apache-2.0"
] | null | null | null |
django_cloud_tasks/models.py
|
joaodaher/django-cloud-tasks
|
bc8ff94a281bda8b49ee73229d5ed5cacdd7a388
|
[
"Apache-2.0"
] | 1
|
2020-07-09T17:48:19.000Z
|
2020-07-09T17:53:33.000Z
|
django_cloud_tasks/models.py
|
joaodaher/django-cloud-tasks
|
bc8ff94a281bda8b49ee73229d5ed5cacdd7a388
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=no-member
from datetime import datetime
from typing import Optional, Dict
from django.db import transaction, models
from django.apps import apps
from django_cloud_tasks import tasks, serializers
class Pipeline(models.Model):
name = models.CharField(max_length=100)
def start(self):
routines = self.routines.filter(
models.Q(dependent_routines__id__isnull=True) & models.Q(status=Routine.Statuses.PENDING)
)
for routine in routines:
routine.enqueue()
def revert(self):
# TODO: We do not yet handle reverting a routine that is in RUNNING status when it is
# triggered to revert. We assume this is acceptable for now, but it should be supported soon.
routines = self.routines.filter(
models.Q(next_routines__id__isnull=True) & ~models.Q(status=Routine.Statuses.REVERTED)
)
for routine in routines:
routine.revert()
def add_routine(self, routine: Dict) -> "Routine":
return self.routines.create(**routine)
class Routine(models.Model):
class Statuses(models.TextChoices):
PENDING = ("pending", "Pending")
SCHEDULED = ("scheduled", "Scheduled")
RUNNING = ("running", "Running")
COMPLETED = ("completed", "Completed")
FAILED = ("failed", "Failed")
REVERTING = ("reverting", "Reverting")
REVERTED = ("reverted", "Reverted")
# TODO: We have a signal that checks whether the defined task_name exists.
# This could also be done with Django field validators.
task_name = models.CharField(max_length=100)
pipeline = models.ForeignKey(
to="django_cloud_tasks.Pipeline",
related_name="routines",
on_delete=models.PROTECT,
)
body = models.JSONField(
default=dict,
encoder=serializers.JSONEncoder,
)
attempt_count = models.PositiveIntegerField(default=0)
max_retries = models.PositiveIntegerField(null=True)
output = models.JSONField(
null=True,
blank=True,
encoder=serializers.JSONEncoder,
)
starts_at = models.DateTimeField(null=True, blank=True)
ends_at = models.DateTimeField(null=True, blank=True)
status = models.CharField(
max_length=20,
choices=Statuses.choices,
default=Statuses.PENDING,
)
created_at = models.DateTimeField(
auto_now_add=True,
)
updated_at = models.DateTimeField(
auto_now=True,
)
next_routines = models.ManyToManyField(
to="Routine",
through="RoutineVertex",
through_fields=("routine", "next_routine"),
related_name="dependent_routines",
)
def fail(self, output: Dict):
self.output = output
self.status = self.Statuses.FAILED
self.ends_at = datetime.now()
self.save()
def complete(self, output: Dict):
self.output = output
self.status = self.Statuses.COMPLETED
self.ends_at = datetime.now()
self.save()
def enqueue(self):
with transaction.atomic():
self.status = self.Statuses.SCHEDULED
self.starts_at = datetime.now()
self.save()
def revert(self):
with transaction.atomic():
if self.status not in [self.Statuses.REVERTED, self.Statuses.REVERTING]:
self.status = self.Statuses.REVERTING
self.save()
def add_next(self, routine: Dict) -> "Routine":
routine["pipeline_id"] = self.pipeline_id
return self.next_routines.create(**routine)
@property
def task(self) -> Optional[tasks.Task]:
app = apps.get_app_config("django_cloud_tasks")
return app.get_task(name=self.task_name)
class RoutineVertex(models.Model):
next_routine = models.ForeignKey(
to="django_cloud_tasks.Routine",
on_delete=models.PROTECT,
related_name="required_routine_vertices",
)
routine = models.ForeignKey(
to="django_cloud_tasks.Routine",
related_name="next_routine_vertices",
on_delete=models.PROTECT,
)
class Meta:
constraints = [
models.UniqueConstraint(name="unique_routine_next_routine", fields=("next_routine", "routine")),
]
__all__ = (
"Routine",
"RoutineVertex",
"Pipeline",
)
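# [Editor's sketch, not part of the original file] Hypothetical usage, assuming
# a task named "my_task" is registered with django_cloud_tasks:
#
#     pipeline = Pipeline.objects.create(name="example")
#     first = pipeline.add_routine({"task_name": "my_task", "body": {"n": 1}})
#     first.add_next({"task_name": "my_task", "body": {"n": 2}})
#     pipeline.start()  # enqueues the routines that have no pending dependencies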
| 31.434783
| 109
| 0.642692
| 487
| 4,338
| 5.581109
| 0.289528
| 0.02649
| 0.029433
| 0.032377
| 0.269316
| 0.228845
| 0.160412
| 0.13245
| 0.073584
| 0.038263
| 0
| 0.002774
| 0.251959
| 4,338
| 137
| 110
| 31.664234
| 0.834823
| 0.074228
| 0
| 0.20354
| 0
| 0
| 0.10399
| 0.037905
| 0
| 0
| 0
| 0.007299
| 0
| 1
| 0.079646
| false
| 0
| 0.044248
| 0.00885
| 0.327434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29db5eb5db45a035903363004257142de128c253
| 2,913
|
py
|
Python
|
src/ifood/model/order/event.py
|
micael95/sdk-ifood-python
|
27462d8127b62a29b5c89624e79accbea9563a80
|
[
"MIT"
] | 2
|
2021-05-06T18:50:43.000Z
|
2021-06-05T21:54:04.000Z
|
src/ifood/model/order/event.py
|
micael95/sdk-ifood-python
|
27462d8127b62a29b5c89624e79accbea9563a80
|
[
"MIT"
] | null | null | null |
src/ifood/model/order/event.py
|
micael95/sdk-ifood-python
|
27462d8127b62a29b5c89624e79accbea9563a80
|
[
"MIT"
] | 1
|
2021-05-06T18:50:54.000Z
|
2021-05-06T18:50:54.000Z
|
from datetime import datetime
from uuid import UUID
from ...serializer import IfoodSerializable
from ...utils import auto_str
@auto_str
class Consumer(IfoodSerializable):
financial_occurrence: str
payment_type: str
@staticmethod
def unserialize(dict=None):
if dict is None:
dict = {}
instance = Consumer()
for k, v in dict.items():
setattr(instance, IfoodSerializable.camel_to_snake(k), v)
return instance
@auto_str
class CancellationOccurrence(IfoodSerializable):
restaurant: Consumer
consumer: Consumer
logistic: Consumer
@staticmethod
def unserialize(dict=None):
if dict is None:
dict = {}
instance = CancellationOccurrence()
for k, v in dict.items():
if k == "RESTAURANT":
instance.restaurant = Consumer.unserialize(v)
continue
if k == "CONSUMER":
instance.consumer = Consumer.unserialize(v)
continue
if k == "LOGISTIC":
instance.logistic = Consumer.unserialize(v)
continue
setattr(instance, IfoodSerializable.camel_to_snake(k), v)
return instance
@auto_str
class Metadata(IfoodSerializable):
cancel_stage: str
cancel_code: int
cancellation_occurrence: CancellationOccurrence
timeout_event: bool
cancel_origin: str
cancel_user: str
cancel_reason: str
cancellation_requested_event_id: UUID
def __init__(self) -> None:
pass
@staticmethod
def unserialize(dict=None):
if dict is None:
dict = {}
instance = Metadata()
for k, v in dict.items():
if k == "CANCELLATION_OCCURRENCE":
instance.cancellation_occurrence = CancellationOccurrence.unserialize(v)
continue
setattr(instance, IfoodSerializable.camel_to_snake(k), v)
return instance
@auto_str
class OrderEvent(IfoodSerializable):
created_at: datetime
full_code: str
metadata: Metadata
code: str
order_id: UUID
id: UUID
def __init__(self, created_at: datetime = None, full_code: str = None, metadata: Metadata = None, code: str = None,
order_id: UUID = None, id: UUID = None) -> None:
self.created_at = created_at
self.full_code = full_code
self.metadata = metadata
self.code = code
self.order_id = order_id
self.id = id
@staticmethod
def unserialize(dict=None):
if dict is None:
dict = {}
instance = OrderEvent()
for k, v in dict.items():
if k == "metadata":
instance.metadata = Metadata.unserialize(v)
continue
setattr(instance, IfoodSerializable.camel_to_snake(k), v)
return instance
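# [Editor's sketch, not part of the original file] Example use of the
# unserialize() helpers above, assuming camel_to_snake() maps camelCase keys to
# snake_case attributes (the module's relative imports make this illustrative
# rather than directly runnable as a script):
#
#     event = OrderEvent.unserialize({"fullCode": "CANCELLED", "code": "CAN"})
#     assert event.full_code == "CANCELLED" and event.code == "CAN"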
| 26.243243
| 119
| 0.610711
| 307
| 2,913
| 5.641694
| 0.192182
| 0.009238
| 0.057737
| 0.069284
| 0.42552
| 0.405889
| 0.360855
| 0.360855
| 0.327945
| 0.327945
| 0
| 0
| 0.311363
| 2,913
| 110
| 120
| 26.481818
| 0.86341
| 0
| 0
| 0.438202
| 0
| 0
| 0.019567
| 0.007896
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067416
| false
| 0.011236
| 0.05618
| 0
| 0.426966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29dc92373ea8f436e4e33eb083ad67d7e28abdae
| 2,599
|
py
|
Python
|
scripts/pm/set_sla_kpis.py
|
supsi-dacd-isaac/parity-sidechain-interface
|
b64a5fb724955332afb4998344081d1b93ac216a
|
[
"MIT"
] | null | null | null |
scripts/pm/set_sla_kpis.py
|
supsi-dacd-isaac/parity-sidechain-interface
|
b64a5fb724955332afb4998344081d1b93ac216a
|
[
"MIT"
] | null | null | null |
scripts/pm/set_sla_kpis.py
|
supsi-dacd-isaac/parity-sidechain-interface
|
b64a5fb724955332afb4998344081d1b93ac216a
|
[
"MIT"
] | null | null | null |
# Importing section
import json
import requests
import argparse
import logging
import time
import datetime
from classes.time_utils import TimeUtils
import utilities as u
# Main
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-c', help='config file')
arg_parser.add_argument('-l', help='log file')
args = arg_parser.parse_args()
cfg = json.loads(open(args.c).read())
# Get the configuration for connections to InfluxDB and the remote service used for data retrieval
tmp_config = json.loads(open(cfg['connectionsFile']).read())
cfg.update(tmp_config)
# set logging object
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(threadName)s::%(levelname)s::%(funcName)s::%(message)s',
level=logging.INFO, filename=log_file)
url_prefix = cfg['sidechainRestApi']
logger.info('Starting program')
# Get the aggregator
res = requests.get('%s/aggregator' % cfg['sidechainRestApi'])
aggregator_id = json.loads(res.text)['Aggregator']['idx']
# Cycle over the configured SLAs
for sla in cfg['slas']:
dt_start, dt_end, _ = TimeUtils.get_start_end(sla['duration'], cfg['utils']['timeZone'])
dt_start = dt_start - datetime.timedelta(minutes=cfg['shiftBackMinutes']['kpiSetting'])
dt_end = dt_end - datetime.timedelta(minutes=cfg['shiftBackMinutes']['kpiSetting'])
sla_idx = '%s_%i-%i' % (sla['idPrefix'], int(dt_start.timestamp()), int(dt_end.timestamp()))
params = {
'idx': sla_idx,
'start': int(dt_start.timestamp()),
'end': int(dt_end.timestamp()),
}
u.send_post('%s/createSla' % url_prefix, params, logger)
time.sleep(cfg['utils']['sleepBetweenTransactions'])
# Cycle over the configured KPIs
for kpi in sla['kpis']:
params = {
'idx': '%s_%i-%i' % (kpi['idPrefix'], int(dt_start.timestamp()), int(dt_end.timestamp())),
'idxSla': sla_idx,
'rule': kpi['rule'],
'limit': kpi['limit'],
'measureUnit': kpi['mu'],
'penalty': kpi['penalty'],
'players': kpi['players'],
}
u.send_post('%s/createKpiFeatures' % url_prefix, params, logger)
time.sleep(cfg['utils']['sleepBetweenTransactions'])
logger.info('Ending program')
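# [Editor's sketch, not part of the original file] Shape of the JSON config
# file this script expects, reconstructed from the keys read above; every
# value here is a placeholder. 'sidechainRestApi' is merged in from the file
# named by 'connectionsFile'.
#
# {
#   "connectionsFile": "conf/connections.json",
#   "shiftBackMinutes": {"kpiSetting": 15},
#   "utils": {"timeZone": "Europe/Zurich", "sleepBetweenTransactions": 1},
#   "slas": [
#     {"idPrefix": "sla1", "duration": "...",
#      "kpis": [{"idPrefix": "kpi1", "rule": "...", "limit": 10,
#                "mu": "kWh", "penalty": 1, "players": ["player1"]}]}
#   ]
# }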
| 33.320513
| 106
| 0.614082
| 302
| 2,599
| 5.129139
| 0.39404
| 0.027114
| 0.019367
| 0.036798
| 0.205294
| 0.205294
| 0.136862
| 0.136862
| 0.136862
| 0
| 0
| 0.001009
| 0.237014
| 2,599
| 77
| 107
| 33.753247
| 0.780131
| 0.083109
| 0
| 0.111111
| 0
| 0
| 0.205474
| 0.050526
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29dd6adf13db2f5c89c5474bb138c114f67d7138
| 4,506
|
py
|
Python
|
mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/CIBevelView.py
|
albertz/music-player
|
d23586f5bf657cbaea8147223be7814d117ae73d
|
[
"BSD-2-Clause"
] | 132
|
2015-01-01T10:02:42.000Z
|
2022-03-09T12:51:01.000Z
|
mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/CIBevelView.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 6
|
2015-01-06T08:23:19.000Z
|
2019-03-14T12:22:06.000Z
|
mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/CIBevelView.py
|
mba811/music-player
|
7998986b34cfda2244ef622adefb839331b81a81
|
[
"BSD-2-Clause"
] | 27
|
2015-02-23T11:51:43.000Z
|
2022-03-07T02:34:18.000Z
|
from Cocoa import *
from Quartz import *
from SampleCIView import SampleCIView
from math import sin
import objc
NUM_POINTS=4
class CIBevelView (SampleCIView):
currentPoint = objc.ivar(type=objc._C_INT)
points = objc.ivar()
angleTime = objc.ivar(type=objc._C_FLT)
lineImage = objc.ivar()
twirlFilter = objc.ivar()
heightFieldFilter = objc.ivar()
shadedFilter = objc.ivar()
def initWithFrame_(self, frameRect):
self = super(CIBevelView, self).initWithFrame_(frameRect)
if self is None:
return None
self.points = [ None ] * NUM_POINTS
self.points[0] = CGPointMake(0.5 * frameRect.size.width, frameRect.size.height - 100.0)
self.points[1] = CGPointMake(150.0, 100.0)
self.points[2] = CGPointMake(frameRect.size.width - 150.0, 100.0)
self.points[3] = CGPointMake(0.7*self.points[0].x + 0.3*self.points[2].x, 0.7*self.points[0].y + 0.3*self.points[2].y)
url = NSURL.fileURLWithPath_(
NSBundle.mainBundle().pathForResource_ofType_("lightball", "tiff"))
self.lightball = CIImage.imageWithContentsOfURL_(url)
self.heightFieldFilter = CIFilter.filterWithName_("CIHeightFieldFromMask")
self.heightFieldFilter.setDefaults()
self.heightFieldFilter.setValue_forKey_(15.0, "inputRadius")
self.twirlFilter = CIFilter.filterWithName_("CITwirlDistortion")
self.twirlFilter.setDefaults()
self.twirlFilter.setValue_forKey_(
CIVector.vectorWithX_Y_(
0.5*frameRect.size.width,
0.5*frameRect.size.height),
"inputCenter")
self.twirlFilter.setValue_forKey_(300.0, "inputRadius")
self.twirlFilter.setValue_forKey_(0.0, "inputAngle")
self.shadedFilter = CIFilter.filterWithName_("CIShadedMaterial")
self.shadedFilter.setDefaults()
self.shadedFilter.setValue_forKey_(self.lightball, "inputShadingImage")
self.shadedFilter.setValue_forKey_(20.0, "inputScale")
# 1/30 second should give us decent animation
NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
1.0/30.0, self, 'changeTwirlAngle:', None, True)
return self
def changeTwirlAngle_(self, timer):
self.angleTime += timer.timeInterval()
self.twirlFilter.setValue_forKey_(
-0.2 * sin(self.angleTime*5.0), 'inputAngle')
self.updateImage()
def mouseDragged_(self, event):
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
self.points[self.currentPoint].x = loc.x
self.points[self.currentPoint].y = loc.y
self.lineImage = None
# normally we'd want this, but the timer will cause us to
# redisplay anyway
#self.setNeedsDisplay_(True)
def mouseDown_(self, event):
d = 1e4
loc = self.convertPoint_fromView_(event.locationInWindow(), None)
for i in range(NUM_POINTS):
x = self.points[i].x - loc.x
y = self.points[i].y - loc.y
t = x*x + y*y
if t < d:
self.currentPoint = i
d = t
self.mouseDragged_(event)
def updateImage(self):
context = NSGraphicsContext.currentContext().CIContext()
if self.lineImage is None:
bounds = self.bounds()
layer = context.createCGLayerWithSize_info_(
CGSizeMake(NSWidth(bounds), NSHeight(bounds)), None)
cg = CGLayerGetContext(layer)
CGContextSetRGBStrokeColor(cg, 1,1,1,1)
CGContextSetLineCap(cg, kCGLineCapRound)
CGContextSetLineWidth(cg, 60.0)
CGContextMoveToPoint(cg, self.points[0].x, self.points[0].y)
for i in range(1, NUM_POINTS):
CGContextAddLineToPoint(cg, self.points[i].x, self.points[i].y)
CGContextStrokePath(cg)
self.lineImage = CIImage.alloc().initWithCGLayer_(layer)
self.heightFieldFilter.setValue_forKey_(self.lineImage, "inputImage")
self.twirlFilter.setValue_forKey_(
self.heightFieldFilter.valueForKey_("outputImage"),
"inputImage")
self.shadedFilter.setValue_forKey_(
self.twirlFilter.valueForKey_("outputImage"),
"inputImage")
self.setImage_(self.shadedFilter.valueForKey_("outputImage"))
| 36.634146
| 126
| 0.630715
| 471
| 4,506
| 5.906582
| 0.312102
| 0.061107
| 0.01977
| 0.052121
| 0.141625
| 0.050324
| 0.037383
| 0
| 0
| 0
| 0
| 0.023767
| 0.262317
| 4,506
| 122
| 127
| 36.934426
| 0.813177
| 0.031735
| 0
| 0.078652
| 0
| 0
| 0.052088
| 0.004819
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.05618
| 0
| 0.224719
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29e24314b4b43a27db5d5e7fb35c4c927a75f669
| 4,226
|
py
|
Python
|
oops_fhir/r4/code_system/request_intent.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/request_intent.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/code_system/request_intent.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["RequestIntent"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class RequestIntent:
"""
RequestIntent
Codes indicating the degree of authority/intentionality associated with
a request.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/request-intent
"""
proposal = CodeSystemConcept(
{
"code": "proposal",
"definition": "The request is a suggestion made by someone/something that does not have an intention to ensure it occurs and without providing an authorization to act.",
"display": "Proposal",
}
)
"""
Proposal
The request is a suggestion made by someone/something that does not have an intention to ensure it occurs and without providing an authorization to act.
"""
plan = CodeSystemConcept(
{
"code": "plan",
"definition": "The request represents an intention to ensure something occurs without providing an authorization for others to act.",
"display": "Plan",
}
)
"""
Plan
The request represents an intention to ensure something occurs without providing an authorization for others to act.
"""
directive = CodeSystemConcept(
{
"code": "directive",
"definition": "The request represents a legally binding instruction authored by a Patient or RelatedPerson.",
"display": "Directive",
}
)
"""
Directive
The request represents a legally binding instruction authored by a Patient or RelatedPerson.
"""
order = CodeSystemConcept(
{
"code": "order",
"concept": [
{
"code": "original-order",
"definition": "The request represents an original authorization for action.",
"display": "Original Order",
},
{
"code": "reflex-order",
"definition": "The request represents an automatically generated supplemental authorization for action based on a parent authorization together with initial results of the action taken against that parent authorization.",
"display": "Reflex Order",
},
{
"code": "filler-order",
"concept": [
{
"code": "instance-order",
"definition": "An order created in fulfillment of a broader order that represents the authorization for a single activity occurrence. E.g. The administration of a single dose of a drug.",
"display": "Instance Order",
}
],
"definition": "The request represents the view of an authorization instantiated by a fulfilling system representing the details of the fulfiller's intention to act upon a submitted order.",
"display": "Filler Order",
},
],
"definition": "The request represents a request/demand and authorization for action by a Practitioner.",
"display": "Order",
}
)
"""
Order
The request represents a request/demand and authorization for action by a Practitioner.
"""
option = CodeSystemConcept(
{
"code": "option",
"definition": "The request represents a component or option for a RequestGroup that establishes timing, conditionality and/or other constraints among a set of requests. Refer to [[[RequestGroup]]] for additional information on how this status is used.",
"display": "Option",
}
)
"""
Option
The request represents a component or option for a RequestGroup that establishes timing, conditionality and/or other constraints among a set of requests. Refer to [[[RequestGroup]]] for additional information on how this status is used.
"""
class Meta:
resource = _resource
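# [Editor's sketch, not part of the original file] The concepts above are
# class attributes, so lookups need no instantiation; assuming
# CodeSystemConcept exposes its dict keys as attributes:
#
#     RequestIntent.proposal.code      # "proposal"
#     RequestIntent.order.display      # "Order"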
| 35.813559
| 266
| 0.600331
| 428
| 4,226
| 5.897196
| 0.32243
| 0.051506
| 0.087163
| 0.083201
| 0.513471
| 0.481775
| 0.452456
| 0.452456
| 0.452456
| 0.452456
| 0
| 0.001397
| 0.322291
| 4,226
| 117
| 267
| 36.119658
| 0.879888
| 0.046616
| 0
| 0.059701
| 0
| 0.074627
| 0.527142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044776
| 0
| 0.149254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29e35c162bb13bbac4bbfa70c3c033b9eb162d1c
| 266
|
py
|
Python
|
ABC/202/b.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/202/b.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/202/b.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
def main():
# input
S = list(input())
# compute
for i,s in enumerate(S):
if s == '6':
S[i] = '9'
elif s == '9':
S[i] = '6'
# output
print(''.join(reversed(S)))
if __name__ == '__main__':
main()
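# [Editor's note] Worked example: input "1696" prints "9691" -- each 6/9 is
# flipped and the string is reversed, i.e. the number rotated 180 degrees.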
| 14.777778
| 31
| 0.406015
| 34
| 266
| 2.941176
| 0.558824
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025157
| 0.402256
| 266
| 17
| 32
| 15.647059
| 0.603774
| 0.075188
| 0
| 0
| 0
| 0
| 0.049587
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.1
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29e60f021e4805e18f02c579cb9365d85a32c49b
| 371
|
py
|
Python
|
test_game.py
|
thom1555/euchre
|
f2fa54fcecb5deeaad2e750e8cda04c94eb1e1e9
|
[
"Apache-2.0"
] | null | null | null |
test_game.py
|
thom1555/euchre
|
f2fa54fcecb5deeaad2e750e8cda04c94eb1e1e9
|
[
"Apache-2.0"
] | null | null | null |
test_game.py
|
thom1555/euchre
|
f2fa54fcecb5deeaad2e750e8cda04c94eb1e1e9
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from game import Game
from suit import Suit
class TestGame(unittest.TestCase):
def test_setup(self):
g = Game('tim', 'rick', 'bob', 'james', 'ballers', 'scrubs')
self.assertEqual(len(g.players), 4)
self.assertEqual(g.dealer, 0)
self.assertEqual(g.trump, Suit.spades)
if __name__ == '__main__':
unittest.main()
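# [Editor's note] Runnable directly (python test_game.py) or discovered via
# "python -m unittest", assuming game.py and suit.py are on the import path.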
| 21.823529
| 68
| 0.6469
| 48
| 371
| 4.8125
| 0.625
| 0.194805
| 0.138528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006826
| 0.210243
| 371
| 16
| 69
| 23.1875
| 0.78157
| 0
| 0
| 0
| 0
| 0
| 0.097035
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29ea1aef1c82bd772907c42e68df319791525947
| 6,824
|
py
|
Python
|
Render2018/lib/create_config_bodyflow.py
|
BigOto2/BlenderRenderDNS
|
a8ff239ecffef5217f0db35d579227a0a444c32d
|
[
"MIT"
] | 1
|
2021-07-28T00:42:39.000Z
|
2021-07-28T00:42:39.000Z
|
Render2018/lib/create_config_bodyflow.py
|
BigOto2/BlenderRenderDNS
|
a8ff239ecffef5217f0db35d579227a0a444c32d
|
[
"MIT"
] | null | null | null |
Render2018/lib/create_config_bodyflow.py
|
BigOto2/BlenderRenderDNS
|
a8ff239ecffef5217f0db35d579227a0a444c32d
|
[
"MIT"
] | 1
|
2019-05-13T17:38:05.000Z
|
2019-05-13T17:38:05.000Z
|
import os.path
import configparser
from dircheck import get_yesno_input
import create_jobscripts
from create_dirname_config import config_dirname_cfg
from create_all_dirs import create_all
import socket
import cgns_load_data
# Script that creates the two configuration files (case and render files) necessary to run the scripts, with a data file from Abhiram's body flow simulation as input.
# Check whether scripts being run on Mox
if socket.gethostname()[0:3] == "mox":
mox = True
blender_dir = "/gscratch/ferrante/blender/blender-2.78c-linux-glibc219-x86_64/./"
else:
mox = False
blender_dir = ""
# Check if dirname.cfg, which contains directory paths used throughout the scripts, exists - otherwise, create it
if not os.path.exists("dirname.cfg"):
config_dirname_cfg()
# Load important directories
dirname_config = configparser.ConfigParser()
dirname_config.read("dirname.cfg")
# Get case name. This corresponds to a specific .h5dns file and is specified by the user. A case config file will be created with its name.
case_name = input("Enter case name. This can be any string that refers to a particular VIZ.cgns file. ")
create_all(case_name)
case_config_path = dirname_config["DIRECTORIES"]["RenderConfig"] + case_name + "-case.cfg"
# If existing case config file exists, the user is specifying a particular .h5dns file that is already associated with
# this case name, so move on to render settings config. Otherwise, create case config file from user input.
if os.path.exists(case_config_path):
print("Found existing case configuration: " + case_config_path)
existing_case_config = configparser.ConfigParser()
existing_case_config.read(case_config_path)
print("data file: " + existing_case_config["STRING"]["h5dns_path"])
else:
# Create new case config file
new_case_config = configparser.ConfigParser()
# There are different sections for each datatype (this is how the scripts know what data types to load, when they are all saved as strings)
new_case_config["STRING"] = {}
new_case_config["FLOAT"] = {}
new_case_config["INT"] = {}
# Save important strings
new_case_config["STRING"]["case_name"] = case_name
new_case_config["STRING"]["data_file_type"] = "bodyflow"
h5dns_path = input("Enter absolute path to data file: ")
new_case_config["STRING"]["h5dns_path"] = h5dns_path
# Load data file and save important params
params = cgns_load_data.get_important_data(h5dns_path)
new_case_config["INT"]["tres"] = str(params["tres"])
new_case_config["INT"]["ires"] = str(params["ires"])
new_case_config["INT"]["jres"] = str(params["jres"])
new_case_config["INT"]["kres"] = str(params["kres"])
# Write case config file
with open(case_config_path, "w") as case_config_file:
new_case_config.write(case_config_file)
# Get render-specific config settings from user. This specifies what type of render to perform (photorealistic, surface
# temperature, ...), and other render settings (scale of droplet to render, etc.)
render_type = int(input("Select type of render to perform (enter number).\n 1 Streamline render\n 2 Vortex line render\n"))
render_name = input("Enter render profile name. This can be any string that refers to specific rendering settings for a data case. ")
# Initialize categories based on data types
new_render_config = configparser.ConfigParser()
new_render_config["STRING"] = {}
new_render_config["INT"] = {}
new_render_config["FLOAT"] = {}
new_render_config["BOOL"] = {}
new_render_config["STRING"]["render_name"] = render_name
# Determine settings from user that are specific to each type.
if (render_type == 1): # Streamline
# Name render config file based on the type of render being performed
render_config_path = dirname_config["DIRECTORIES"]["RenderConfig"] + render_name + "-render-streamline.cfg"
# Get some other settings
elif (render_type == 2): # Vortex line
render_config_path = dirname_config["DIRECTORIES"]["RenderConfig"] + render_name + "-render-vortexline.cfg"
# General inputs
new_render_config["INT"]["num_streamlines"] = input("Specify number of streamlines: ")
new_render_config["INT"]["streamline_seed"] = "777" #input("Specify random seed number to determine streamline start positions from: ")
new_render_config["FLOAT"]["view_fraction"] = input("Specify desired render frame width as multiple of domain length: ")
new_render_config["FLOAT"]["camera_azimuth_angle"] = input("Specify camera azimuth angle from the x-axis (deg): ")
new_render_config["FLOAT"]["camera_elevation_angle"] = input("Specify camera elevation angle from the horizontal (deg): ")
bg_image_enabled = get_yesno_input("Use custom background image? ")
if bg_image_enabled:
new_render_config["STRING"]["bg_image_filepath"] = dirname_config["DIRECTORIES"]["background_images"] + input("Specify background image name (in \"Render2018/BackgroundImages\"): ")
new_render_config["STRING"]["bg_color_1"] = ""
new_render_config["STRING"]["bg_color_2"] = ""
else:
new_render_config["STRING"]["bg_image_filepath"] = ""
new_render_config["STRING"]["bg_color_1"] = input("Specify R,G,B value of lower background color (separate floats by commas, values range from 0 to 1): ")
new_render_config["STRING"]["bg_color_2"] = input("Specify R,G,B value of upper background color (separate floats by commas, values range from 0 to 1): ")
new_render_config["FLOAT"]["resolution_percentage"] = input("Specify resolution percentage out of 100, as a percentage of 4K: ")
# Write render config file
with open(render_config_path, "w") as render_config_file:
new_render_config.write(render_config_file)
# Create slurm jobscript to run on Mox
slurm_name = case_name + "_" + render_name + ".slurm"
create_jobscripts.create_mox_slurm(slurm_dir=dirname_config["DIRECTORIES"]["RenderJobscripts"], slurm_name=slurm_name, job_name=case_name+"_"+render_name, lib_dir=os.getcwd(), python_file_to_run="render_init.py", case_config_path=case_config_path, render_config_path=render_config_path)
local_py_name = case_name + "_" + render_name + ".py"
create_jobscripts.create_local_py(python_dir=dirname_config["DIRECTORIES"]["RenderJobscripts"], python_filename=local_py_name, lib_dir=dirname_config["DIRECTORIES"]["lib"], python_file_to_run="render_init.py", case_config_path=case_config_path, render_config_path=render_config_path)
# Run jobscript if user desires
if mox:
if get_yesno_input("Run " + slurm_name + " to launch this rendering job?"):
os.system("sbatch -p ferrante -A ferrante " + dirname_config["DIRECTORIES"]["RenderJobscripts"] + "/" + slurm_name)
else:
if get_yesno_input("Run " + local_py_name + " to launch this rendering job?"):
os.system("python3 " + dirname_config["DIRECTORIES"]["RenderJobscripts"] + local_py_name)
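# [Editor's sketch, not part of the original file] Shape of the dirname.cfg
# file read above (paths are placeholders; the real file is written by
# config_dirname_cfg()). Trailing slashes matter because the script
# concatenates these paths with file names directly:
#
# [DIRECTORIES]
# RenderConfig = /path/to/render_configs/
# RenderJobscripts = /path/to/jobscripts/
# lib = /path/to/lib/
# background_images = /path/to/background_images/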
| 56.396694
| 286
| 0.75381
| 980
| 6,824
| 5.010204
| 0.242857
| 0.063136
| 0.058045
| 0.034216
| 0.275153
| 0.205092
| 0.164766
| 0.129532
| 0.105906
| 0.092057
| 0
| 0.00728
| 0.134379
| 6,824
| 120
| 287
| 56.866667
| 0.823938
| 0.228751
| 0
| 0.049383
| 0
| 0.049383
| 0.350391
| 0.029024
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29eb3307185eaf4daecd050d3551f86ee4f012bf
| 1,004
|
py
|
Python
|
util/replicate.py
|
ZvonimirSun/janusgraph-utils
|
c10e7b3ccb7c56c7662053d9d8b1d0bcb0a20bb8
|
[
"Apache-2.0"
] | 204
|
2017-08-10T02:36:53.000Z
|
2022-03-11T12:21:18.000Z
|
util/replicate.py
|
HsbcJone/Jaunsgraph-LoadBulkData-Utils-
|
9c4e3b0c0b9f9966ab43422929ae5ea4993b3bb8
|
[
"Apache-2.0"
] | 37
|
2017-08-16T01:06:02.000Z
|
2020-08-05T02:30:18.000Z
|
util/replicate.py
|
HsbcJone/Jaunsgraph-LoadBulkData-Utils-
|
9c4e3b0c0b9f9966ab43422929ae5ea4993b3bb8
|
[
"Apache-2.0"
] | 103
|
2017-08-29T14:17:32.000Z
|
2022-03-07T14:30:48.000Z
|
#!/usr/bin/python
import sys
import simplejson as json
def replicate_vertex(conf,pos, i):
p = conf["VertexTypes"][pos]["columns"]["T{}-P1".format(pos+1)]
for x in range(2, i+1):
new_key = "T{}-P{}".format(pos+1, str(x))
conf["VertexTypes"][pos]["columns"][new_key] = p
return conf
def replicate_edge(conf, pos, i):
p = conf["EdgeTypes"][pos]["columns"]["E{}-P1".format(pos+1)]
for x in range(2, i+1):
new_key ='E{}-P{}'.format(pos+1,str(x))
conf["EdgeTypes"][pos]["columns"][new_key] = p
return conf
def main():
f = open(sys.argv[1], "r")
j = json.load(f)
json.dump(replicate_vertex(j,0, int(sys.argv[3])), open(sys.argv[2], "w"))
json.dump(replicate_vertex(j,1, 2*int(sys.argv[3])), open(sys.argv[2], "w"))
json.dump(replicate_vertex(j,2, 2*int(sys.argv[3])), open(sys.argv[2], "w"))
json.dump(replicate_edge(j, 0, int(sys.argv[3])), open(sys.argv[2], "w"))
json.dump(replicate_edge(j, 1, int(sys.argv[3])), open(sys.argv[2], "w"))
if __name__ == "__main__":
main()
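# [Editor's note] Usage sketch:
#     python replicate.py <input_conf.json> <output_conf.json> <n>
# replicates the "T{i}-P1" / "E{i}-P1" column definitions n (or 2*n) times.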
| 32.387097
| 77
| 0.62749
| 184
| 1,004
| 3.320652
| 0.255435
| 0.126023
| 0.10802
| 0.090016
| 0.700491
| 0.620295
| 0.620295
| 0.558101
| 0.459902
| 0.420622
| 0
| 0.031638
| 0.118526
| 1,004
| 30
| 78
| 33.466667
| 0.658757
| 0.015936
| 0
| 0.166667
| 0
| 0
| 0.109422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.083333
| 0
| 0.291667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29eb4a3f8932c013c8f2635314e11c22d12e4148
| 1,602
|
py
|
Python
|
commands/fight.py
|
AlexMog/IRCPokemonBot
|
0a735f262ce06ecd4c3b702094cf4b78e3cd7c45
|
[
"MIT"
] | 2
|
2015-06-10T12:16:53.000Z
|
2016-03-09T22:43:43.000Z
|
commands/fight.py
|
AlexMog/IRCPokemonBot
|
0a735f262ce06ecd4c3b702094cf4b78e3cd7c45
|
[
"MIT"
] | null | null | null |
commands/fight.py
|
AlexMog/IRCPokemonBot
|
0a735f262ce06ecd4c3b702094cf4b78e3cd7c45
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import copy
import random
from classes.Pokemons import *
from classes.Battle import *
def fight(connection, canal, auteur, cmds, canalobj, mogbot):
user = mogbot.getUserManager().findUser(auteur)
if user == False:
connection.privmsg(canal, auteur + " je ne te trouve pas dans la base de donnees :( - erreur 500")
return
if user.battle is not None:
connection.privmsg(canal, auteur + " tu es deja en combat avec quelqu'un!")
return
if len(cmds) > 2:
if cmds[2] == "nature":
u = pokemonsManager.getRandom()
mini = user.getActivePokemon().level - 5
if mini <= 0:
mini = 1
maxi = mini + 5
u.level = random.randint(mini, maxi)
battle = Battle(user, u)
battle.auto = True
user.battle = u.battle = battle
user.battle.accepted = True
connection.privmsg(canal, user.username + " tu es tombe sur un " + u.username + " sauvage (lvl: " + str(u.level) + " )! Attention!")
else:
u = mogbot.getUserManager().findUser(cmds[2])
if u == False:
connection.privmsg(canal, user.username + " adversaire introuvable.")
return
user.battle = u.battle = Battle(user, u)
connection.privmsg(canal, user.username + " a defie " + u.username + " en duel! Va-t-il accepter? (utilise accept pour accepter le duel et refuse pour refuser)")
else:
connection.privmsg(canal, auteur + " usage: fight <nature ou pseudo>")
| 40.05
| 173
| 0.585518
| 192
| 1,602
| 4.885417
| 0.484375
| 0.108742
| 0.140725
| 0.089552
| 0.166311
| 0.057569
| 0
| 0
| 0
| 0
| 0
| 0.009857
| 0.303371
| 1,602
| 39
| 174
| 41.076923
| 0.830645
| 0.013109
| 0
| 0.147059
| 0
| 0.029412
| 0.193794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29ebeb3c0f6aa2c670636976b54b4c234e9cc858
| 3,356
|
py
|
Python
|
horizon_hpe_storage/storage_panel/config/software_tests/forms.py
|
hp-storage/horizon-ssmc-link
|
f419ecf2a545a79f1ff6628dc26f31dfb7c84996
|
[
"Apache-2.0"
] | 1
|
2017-01-07T13:45:57.000Z
|
2017-01-07T13:45:57.000Z
|
horizon_hpe_storage/storage_panel/config/software_tests/forms.py
|
hp-storage/horizon-ssmc-link
|
f419ecf2a545a79f1ff6628dc26f31dfb7c84996
|
[
"Apache-2.0"
] | null | null | null |
horizon_hpe_storage/storage_panel/config/software_tests/forms.py
|
hp-storage/horizon-ssmc-link
|
f419ecf2a545a79f1ff6628dc26f31dfb7c84996
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import horizon_hpe_storage.api.keystone_api as keystone
import horizon_hpe_storage.api.barbican_api as barbican
from horizon import exceptions
from horizon import forms
from horizon import messages
class AddSoftwareTest(forms.SelfHandlingForm):
sw_package = forms.CharField(max_length=255, label=_("Software Package"))
min_version = forms.CharField(max_length=255, label=_("Minimum Version"))
description = forms.CharField(max_length=255,
required=False,
label=_("Description"))
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
def handle(self, request, data):
node_type = self.initial['node_type']
try:
self.keystone_api.do_setup(self.request)
self.barbican_api.do_setup(self.keystone_api.get_session())
self.barbican_api.add_software_test(node_type, data['sw_package'],
data['min_version'],
data['description'])
msg = _('Added software package "%s".') % data['sw_package']
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:hpe_storage:index")
exceptions.handle(request,
_("Unable to add softare package."),
redirect=redirect)
class EditSoftwareTest(forms.SelfHandlingForm):
min_version = forms.CharField(max_length=255, label=_("Minimum Version"))
description = forms.CharField(max_length=255,
required=False,
label=_("Description"))
keystone_api = keystone.KeystoneAPI()
barbican_api = barbican.BarbicanAPI()
def handle(self, request, data):
sw_package = self.initial['sw_package']
node_type = self.initial['node_type']
try:
self.keystone_api.do_setup(self.request)
self.barbican_api.do_setup(self.keystone_api.get_session())
self.barbican_api.update_software_test(node_type, sw_package,
data['min_version'],
data['description'])
msg = _('Saved software package "%s".') % sw_package
messages.success(request, msg)
return True
except Exception:
redirect = reverse("horizon:admin:hpe_storage:index")
exceptions.handle(request,
_("Unable to save softare package."),
redirect=redirect)
| 44.157895
| 78
| 0.616508
| 364
| 3,356
| 5.510989
| 0.343407
| 0.038385
| 0.042373
| 0.057328
| 0.542871
| 0.516949
| 0.501496
| 0.501496
| 0.460618
| 0.460618
| 0
| 0.008061
| 0.297676
| 3,356
| 75
| 79
| 44.746667
| 0.843021
| 0.155542
| 0
| 0.690909
| 0
| 0
| 0.119419
| 0.02197
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036364
| false
| 0
| 0.127273
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29ecd056c8357be81181e47ac71a968400c85cc9
| 1,396
|
py
|
Python
|
tests/test_liquidity_provider_factory.py
|
diem/liquidity-emulator
|
255cccd06c0949750e42e93906b083e915ddf505
|
[
"Apache-2.0"
] | 2
|
2021-11-29T06:00:59.000Z
|
2022-01-27T18:42:29.000Z
|
tests/test_liquidity_provider_factory.py
|
hengkyherdianto/liquidity-emulator
|
255cccd06c0949750e42e93906b083e915ddf505
|
[
"Apache-2.0"
] | 1
|
2021-01-31T09:14:05.000Z
|
2021-02-01T07:43:41.000Z
|
tests/test_liquidity_provider_factory.py
|
hengkyherdianto/liquidity-emulator
|
255cccd06c0949750e42e93906b083e915ddf505
|
[
"Apache-2.0"
] | 4
|
2021-02-15T14:45:04.000Z
|
2022-03-03T02:32:45.000Z
|
from diem import chain_ids
from liquidity import create_liquidity_provider, init_liquidity_provider
from liquidity.liquidity import FaucetLiquidityProvider, DDLiquidityProvider
CUSTODY_PRIVATE_KEYS = (
'{"liquidity":"c6537e56d844fa4a15f3bf5eacd41c9123a19ef19a1026f2325a6b2dd33a13f1"}'
)
def test_faucet_liquidity_provider_factory_for_testnet_without_custody_private_keys(
patch_liquidity, monkeypatch
) -> None:
monkeypatch.setenv("CHAIN_ID", str(chain_ids.TESTNET.value))
monkeypatch.delenv("CUSTODY_PRIVATE_KEYS", raising=False)
init_liquidity_provider()
lp = create_liquidity_provider()
assert isinstance(lp, FaucetLiquidityProvider)
def test_dd_liquidity_provider_factory_for_testnet_with_custody_private_keys(
patch_liquidity, monkeypatch
) -> None:
monkeypatch.setenv("CHAIN_ID", str(chain_ids.TESTNET.value))
monkeypatch.setenv("CUSTODY_PRIVATE_KEYS", CUSTODY_PRIVATE_KEYS)
init_liquidity_provider()
lp = create_liquidity_provider()
assert isinstance(lp, DDLiquidityProvider)
def test_dd_liquidity_provider_factory_for_premainnet(
patch_liquidity, monkeypatch
) -> None:
monkeypatch.setenv("CHAIN_ID", str(chain_ids.PREMAINNET.value))
monkeypatch.setenv("CUSTODY_PRIVATE_KEYS", CUSTODY_PRIVATE_KEYS)
init_liquidity_provider()
lp = create_liquidity_provider()
assert isinstance(lp, DDLiquidityProvider)
| 33.238095
| 86
| 0.809456
| 152
| 1,396
| 7.013158
| 0.25
| 0.175422
| 0.135084
| 0.075985
| 0.672608
| 0.634146
| 0.634146
| 0.566604
| 0.566604
| 0.566604
| 0
| 0.031553
| 0.114613
| 1,396
| 41
| 87
| 34.04878
| 0.830906
| 0
| 0
| 0.6
| 0
| 0
| 0.117479
| 0.057307
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f10336d5ea889a3a24c9c3648237cbdaee7b65
| 5,586
|
py
|
Python
|
tools/remote_debugger.py
|
budelius/openstreetmap-heatmap
|
f7376671eecda68955b8edc016c63218c5ebc6a2
|
[
"Apache-2.0"
] | null | null | null |
tools/remote_debugger.py
|
budelius/openstreetmap-heatmap
|
f7376671eecda68955b8edc016c63218c5ebc6a2
|
[
"Apache-2.0"
] | null | null | null |
tools/remote_debugger.py
|
budelius/openstreetmap-heatmap
|
f7376671eecda68955b8edc016c63218c5ebc6a2
|
[
"Apache-2.0"
] | null | null | null |
"""
Remote debugging support.
This addon allows you to use a remote Python debugger with PyCharm, PyDev and
possibly other IDEs. As it is, without modification, it only supports PyCharm,
but it may work by pointing it at a similar egg file shipped with PyDev.
Before using, point the addon to your pycharm-debug-py3k.egg file in the
addon preferences screen.
For more information on how to use this addon, please read my article at
http://code.blender.org/2015/10/debugging-python-code-with-pycharm/
"""
bl_info = {
'name': 'Remote debugger',
'author': 'Sybren A. Stüvel',
'version': (0, 4),
'blender': (2, 80, 0),
'location': 'Press [Space], search for "debugger"',
'category': 'Development',
}
import bpy
import os.path
from bpy.types import AddonPreferences
from bpy.props import StringProperty
# Get references to all property definition functions in bpy.props,
# so that they can be used to replace 'x = IntProperty()' to 'x: IntProperty()'
# dynamically when working on Blender 2.80+
__all_prop_funcs = {
getattr(bpy.props, propname)
for propname in dir(bpy.props)
if propname.endswith('Property')
}
def convert_properties(class_):
"""Class decorator to avoid warnings in Blender 2.80+
This decorator replaces property definitions like this:
someprop = bpy.props.IntProperty()
to annotations, as introduced in Blender 2.80:
someprop: bpy.props.IntProperty()
No-op if running on Blender 2.79 or older.
"""
if bpy.app.version < (2, 80):
return class_
if not hasattr(class_, '__annotations__'):
class_.__annotations__ = {}
attrs_to_delete = []
for name, value in class_.__dict__.items():
if not isinstance(value, tuple) or len(value) != 2:
continue
prop_func, kwargs = value
if prop_func not in __all_prop_funcs:
continue
# This is a property definition, replace it with annotation.
attrs_to_delete.append(name)
class_.__annotations__[name] = value
for attr_name in attrs_to_delete:
delattr(class_, attr_name)
return class_
def addon_preferences(context):
try:
preferences = context.preferences
except AttributeError:
# Old (<2.80) location of user preferences
preferences = context.user_preferences
return preferences.addons[__name__].preferences
@convert_properties
class DebuggerAddonPreferences(AddonPreferences):
# this must match the addon name, use '__package__'
# when defining this in a submodule of a python package.
bl_idname = __name__
eggpath = StringProperty(
name='Path of the PyCharm egg file',
description='Make sure you select the py3k egg',
subtype='FILE_PATH',
default='pycharm-debug-py3k.egg'
)
pydevpath = StringProperty(
name='Path of the PyDev pydevd.py file',
subtype='FILE_PATH',
default='pydevd.py'
)
def draw(self, context):
layout = self.layout
layout.prop(self, 'pydevpath')
layout.prop(self, 'eggpath')
layout.label(text='Make sure you select the egg for Python 3.x: pycharm-debug-py3k.egg ')
class DEBUG_OT_connect_debugger_pycharm(bpy.types.Operator):
bl_idname = 'debug.connect_debugger_pycharm'
bl_label = 'Connect to remote PyCharm debugger'
bl_description = 'Connects to a PyCharm debugger on localhost:1090'
def execute(self, context):
import sys
addon_prefs = addon_preferences(context)
eggpath = os.path.abspath(addon_prefs.eggpath)
if not os.path.exists(eggpath):
self.report({'ERROR'}, 'Unable to find debug egg at %r. Configure the addon properties '
'in the User Preferences menu.' % eggpath)
return {'CANCELLED'}
if not any('pycharm-debug' in p for p in sys.path):
sys.path.append(eggpath)
import pydevd
pydevd.settrace('localhost', port=1090, stdoutToServer=True, stderrToServer=True,
suspend=False)
return {'FINISHED'}
class DEBUG_OT_connect_debugger_pydev(bpy.types.Operator):
bl_idname = 'debug.connect_debugger_pydev'
bl_label = 'Connect to remote PyDev debugger'
bl_description = 'Connects to a PyDev debugger on localhost:5678'
def execute(self, context):
import sys
addon_prefs = addon_preferences(context)
pydevpath = os.path.abspath(addon_prefs.pydevpath)
if not os.path.exists(pydevpath):
self.report({'ERROR'}, 'Unable to find pydevd.py at %r. Configure the addon properties '
'in the User Preferences menu.' % pydevpath)
return {'CANCELLED'}
dirname = os.path.dirname(pydevpath)
basename = os.path.basename(dirname)
if not any(basename in p for p in sys.path):
sys.path.append(dirname)
import pydevd
pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True,
suspend=False)
return {'FINISHED'}
def register():
bpy.utils.register_class(DEBUG_OT_connect_debugger_pycharm)
bpy.utils.register_class(DEBUG_OT_connect_debugger_pydev)
bpy.utils.register_class(DebuggerAddonPreferences)
def unregister():
bpy.utils.unregister_class(DEBUG_OT_connect_debugger_pycharm)
bpy.utils.unregister_class(DEBUG_OT_connect_debugger_pydev)
bpy.utils.unregister_class(DebuggerAddonPreferences)
if __name__ == '__main__':
register()
| 30.692308
| 100
| 0.674544
| 703
| 5,586
| 5.194879
| 0.308677
| 0.032859
| 0.019715
| 0.031216
| 0.328587
| 0.268894
| 0.215225
| 0.197152
| 0.079956
| 0.079956
| 0
| 0.012189
| 0.236305
| 5,586
| 181
| 101
| 30.861878
| 0.843882
| 0.209273
| 0
| 0.186916
| 0
| 0
| 0.194832
| 0.023325
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065421
| false
| 0
| 0.074766
| 0
| 0.317757
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f1f4d867171d615c82f43e02801e5ac479dcd4
| 2,247
|
py
|
Python
|
todo/views/accepted_petitions.py
|
josalhor/WebModels
|
6b9cde3141c53562f40b129e6e1c87448ce9853a
|
[
"BSD-3-Clause"
] | null | null | null |
todo/views/accepted_petitions.py
|
josalhor/WebModels
|
6b9cde3141c53562f40b129e6e1c87448ce9853a
|
[
"BSD-3-Clause"
] | 41
|
2021-03-23T12:58:25.000Z
|
2021-05-25T11:38:42.000Z
|
todo/views/accepted_petitions.py
|
josalhor/WebModels
|
6b9cde3141c53562f40b129e6e1c87448ce9853a
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse
from django.shortcuts import render
from .book_assign import send_email_reject_book
from todo.forms import SearchForm
from todo.models import Task, Book, Editor, Writer
@login_required
def accepted_petitions(request) -> HttpResponse:
deleted, editor_view = False, False
thedate = datetime.datetime.now()
searchform = SearchForm(auto_id=False)
editor = Editor.objects.filter(user=request.user).first()
is_chief = False
all_lists = None
if editor:
lists = Book.objects.filter(completed=False)
all_lists = Book.objects.filter(completed=False)
if editor.chief:
is_chief = True
lists = lists.filter(editor=editor)
all_lists = all_lists.exclude(editor=None)
else:
lists = lists.filter(editor=editor)
lists = lists.exclude(rejected=True).order_by("name")
editor_view = True
else:
author = Writer.objects.filter(user=request.user)
lists = Book.objects.filter(completed=False, rejected=False, author__in=author).exclude(editor=None).order_by("name")
list_count = lists.count()
task_count = 0
for book in lists:
tasks = Task.objects.filter(book=book, completed=False).count()
task_count += tasks
if request.method == "POST":
book = Book.objects.filter(name=request.POST['delete-book']).first()
deleted = True
book.editor = None
book.rejected = True
book.save()
send_email_reject_book(book, reasons=request.POST['reasons'])
messages.success(request, "La petición correspondiente al libro '{}' ha sido eliminada de su lista de peticiones aceptadas.".format(book.name))
context = {
"editor_view": editor_view,
"deleted": deleted,
"lists": lists,
"thedate": thedate,
"searchform": searchform,
"list_count": list_count,
"task_count": task_count,
"all_lists": all_lists,
"is_chief": is_chief
}
return render(request, "todo/accepted_petitions.html", context)
| 31.208333
| 151
| 0.668002
| 272
| 2,247
| 5.375
| 0.319853
| 0.062244
| 0.046512
| 0.045144
| 0.150479
| 0.073871
| 0
| 0
| 0
| 0
| 0
| 0.000577
| 0.229194
| 2,247
| 71
| 152
| 31.647887
| 0.843533
| 0
| 0
| 0.072727
| 0
| 0
| 0.102804
| 0.012461
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0.018182
| 0.145455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f6bfc61051a4c8d3929a3bb610dca313e55859
| 7,696
|
py
|
Python
|
ajustes_UM/tesis/tesis/settings.py
|
abelgonzalez/ajustes
|
f6f99aea18cfb82750805321abfc822d8a6ec5ed
|
[
"MIT"
] | 1
|
2015-03-04T13:04:33.000Z
|
2015-03-04T13:04:33.000Z
|
ajustes_UM/tesis/tesis/settings.py
|
abelgonzalez/ajustes
|
f6f99aea18cfb82750805321abfc822d8a6ec5ed
|
[
"MIT"
] | null | null | null |
ajustes_UM/tesis/tesis/settings.py
|
abelgonzalez/ajustes
|
f6f99aea18cfb82750805321abfc822d8a6ec5ed
|
[
"MIT"
] | null | null | null |
"""
Django settings for tesis project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# -*- coding: utf-8 -*-
# A tuple that lists people who get code error notifications.
ADMINS = (
('Abel González Mondéjar', '[email protected]'),
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf import global_settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a6c$xd0y%_#%&ucf!uzu0cuc)6-+b+t5(63u#a__!^3cnhk)#l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# A boolean that turns on/off template debug mode.
TEMPLATE_DEBUG = True
# A list of strings representing the host/domain names that this Django site can serve.
ALLOWED_HOSTS = []
# Application definition
# A tuple of strings designating all applications that are enabled in this Django installation
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# Third-party apps
'pure_pagination',
'watson',
# My apps
'ajustes',
'persona',
'planEstudio',
# imported and modified
'main',
)
PAGINATION_SETTINGS = {
'PAGE_RANGE_DISPLAYED': 10,
'MARGIN_PAGES_DISPLAYED': 1,
}
# Middleware is a framework of hooks into Django’s request/response processing.
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# A string representing the full Python import path to your root URLconf.
ROOT_URLCONF = 'tesis.urls'
# The full Python path of the WSGI application object that Django’s built-in servers (e.g. runserver) will use.
WSGI_APPLICATION = 'tesis.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# A dictionary containing the settings for all databases to be used with Django.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': {
'read_default_file': os.path.join(BASE_DIR, 'my.cnf'),
'init_command': 'SET storage_engine=INNODB',
},
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
# Language code for this installation.
LANGUAGE_CODE = 'es-CU'
# A boolean that specifies whether Django’s translation system should be enabled.
# This provides an easy way to turn it off, for performance. If this is set to False,
# Django will make some optimizations so as not to load the translation machinery.
USE_I18N = True
# A boolean that specifies if localized formatting of data will be enabled by default or not.
# If this is set to True, e.g. Django will display numbers and dates using the format of the current locale.
USE_L10N = True
# A boolean that specifies if datetimes will be timezone-aware by default or not.
# If this is set to True, Django will use timezone-aware datetimes internally.
# Otherwise, Django will use naive datetimes in local time.
USE_TZ = True
# Number representing the first day of the week.
FIRST_DAY_OF_WEEK = 1
from django.utils.translation import ugettext_lazy as _
# A tuple of all available languages.
LANGUAGES = (
('es', _('Español')),
('en', _('English')),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# URL to use when referring to static files located in STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
# This must be configured similarly to the media settings so that static files can be served.
# It could look something like this commented line:
# STATIC_URL = 'http://localhost:90/static/'
STATIC_URL = '/static/'
# Local time zone for this installation.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Havana'
# List of locations of the template source files searched by django.template.loaders.filesystem.Loader, in search order.
# Note that these paths should use Unix-style forward slashes, even on Windows.
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), '..', 'templates').replace('\\', '/'),)
# This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder is
# enabled, e.g. if you use the collectstatic or findstatic management command or use the static file serving view.
STATICFILES_DIRS = ((os.path.join(BASE_DIR, 'assets')),
(os.path.join(BASE_DIR, 'media')))
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = (os.path.join(BASE_DIR, 'static')) # URL prefix for static files.
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
# MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'assets/upload') # COMENTADO
PROJECT_PATH = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join("../", PROJECT_PATH)
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
# Configuring this line is important; it could look something like this:
# MEDIA_URL = 'http://localhost:90/media/'
# MEDIA_URL = 'http://127.0.0.1:8000/media/' # COMENTADO
# these were imported as well
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# The URL where requests are redirected after login when the contrib.auth.login view gets no next parameter.
LOGIN_REDIRECT_URL = '/'
# The URL where requests are redirected for login, especially when using the login_required() decorator.
LOGIN_URL = '/'
# LOGIN_URL counterpart.
LOGOUT_URL = '/logoutUser'
# TEMPLATE_CONTEXT_PROCESSORS = (
# 'django.contrib.auth.context_processors.auth',
# 'django.core.context_processors.request',
# )
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
"django.core.context_processors.request",
)
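# Illustrative note (not part of the original settings): with the "request"
# context processor enabled above, templates rendered with a RequestContext
# can read the current request directly, e.g. {{ request.path }} or
# {{ request.user }}.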
| 34.204444
| 120
| 0.721674
| 1,045
| 7,696
| 5.226794
| 0.394258
| 0.015379
| 0.016477
| 0.024167
| 0.145002
| 0.100513
| 0.067192
| 0.039912
| 0.025998
| 0.011351
| 0
| 0.007677
| 0.170608
| 7,696
| 225
| 121
| 34.204444
| 0.848034
| 0.594335
| 0
| 0.020408
| 0
| 0.010204
| 0.401448
| 0.246134
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030612
| 0
| 0.030612
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f709dd701c60c4489620b7e5b46e5aca1a0daf
| 7,468
|
py
|
Python
|
code/lib/models/FCRN_depth.py
|
santomon/taskonomy
|
4b22087a2686172b21b61589831061e7a386fe36
|
[
"MIT"
] | 789
|
2018-03-21T05:28:38.000Z
|
2022-03-29T19:32:47.000Z
|
code/lib/models/FCRN_depth.py
|
santomon/taskonomy
|
4b22087a2686172b21b61589831061e7a386fe36
|
[
"MIT"
] | 46
|
2018-05-03T07:11:10.000Z
|
2022-03-11T23:26:03.000Z
|
code/lib/models/FCRN_depth.py
|
santomon/taskonomy
|
4b22087a2686172b21b61589831061e7a386fe36
|
[
"MIT"
] | 152
|
2018-03-24T10:20:44.000Z
|
2022-02-09T02:38:10.000Z
|
from __future__ import absolute_import, division, print_function
from models.base_net import BaseNet
import losses.all as losses_lib
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import pdb
import optimizers.train_steps as train_steps
import optimizers.ops as optimize
from functools import partial
import models.fcrn
from models.fcrn import ResNet50UpProj
class FCRN_depth(BaseNet):
'''Standard encoder decoder model
Encodes an input into a low-dimensional representation and reconstructs
the input from the low-dimensional representation. Uses l2 loss.
Assumes inputs are scaled to [0, 1] (which will be rescaled to [-1, 1]).
'''
def __init__(self, global_step, cfg):
'''
Args:
cfg: Configuration.
'''
super(FCRN_depth, self).__init__(global_step, cfg)
if 'hidden_size' not in cfg:
raise ValueError( "config.py for encoder-decoder must specify 'hidden_size'" )
#self.ones_mask = self.build_ones_mask()
def build_ones_mask(self):
'''Build a mask of ones which has the same size as the input.
'''
cfg = self.cfg
H, W = cfg['target_dim']
C = cfg['target_num_channels']
batch_size = cfg['batch_size']
mask = tf.constant(1.0, dtype=cfg['target_dtype'], shape=[batch_size, H, W, C],
name='identity_mask')
return mask
def _compute_nnz_mask(self, mask):
'''Compute the number of nonzero elements in a tensor which only
contains elements of 0 or 1 (such as a mask).
'''
return tf.reduce_sum(mask)
def build_model(self, input_imgs, is_training, targets=None, masks=None, privileged_input=None):
'''Builds the model. Assumes that the input is from range [0, 1].
Args:
input_imgs: list of input images (scaled between -1 and 1) with the
dimensions specified in the cfg
is_training: flag for whether the model is in training mode or not
mask: mask used for computing sum of squares loss. If None, we assume
it is np.ones.
'''
print('building model')
cfg = self.cfg
self.is_training = is_training
if masks is None:
masks = tf.constant( 1, dtype=tf.float32, shape=[], name='constant_mask' )
net = ResNet50UpProj({'data': input_imgs}, cfg['batch_size'], 1, False)
decoder_output = net.get_output()
decoder_output = decoder_output * 128.
decoder_output = tf.log(decoder_output + 1.) / 11.090354888959125
# if self.decoder_only:
# encoder_output = input_imgs  # assume the input is already the representation
# else:
# encoder_output = self.build_encoder(input_imgs, is_training)
# print("enc:", encoder_output.shape)
# decoder_output = self.build_decoder(encoder_output, is_training)
# print("tar:", targets.shape)
# set up losses
if targets is None:
losses = self.get_losses( decoder_output, input_imgs, masks )
else:
losses = self.get_losses( decoder_output, targets, masks )
# use weight regularization
if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']:
add_reg = False
else:
add_reg = True
# get losses
#regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' )
#total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg,
# name='losses/total_loss')
self.input_images = input_imgs
self.target_images = targets
self.targets = targets
self.masks = masks
self.decoder_output = decoder_output
self.losses = losses
self.total_loss = losses[0]
# self.init_op = tf.global_variables_initializer()
# add summaries
if self.extended_summaries:
slim.summarize_variables()
slim.summarize_weights()
slim.summarize_biases()
slim.summarize_activations()
slim.summarize_collection(tf.GraphKeys.LOSSES)
#slim.summarize_tensor( regularization_loss )
#slim.summarize_tensor( total_loss )
self.model_built = True
def get_losses( self, output_imgs, desired_imgs, masks ):
'''Returns the loss. May be overridden.
Args:
output_imgs: Tensor of images output by the decoder.
desired_imgs: Tensor of target images to be output by the decoder.
masks: Tensor of masks to be applied when computing sum of squares
loss.
Returns:
losses: list of tensors representing each loss component
'''
print('setting up losses...')
self.output_images = output_imgs
self.target_images = desired_imgs
self.masks = masks
with tf.variable_scope('losses'):
l1_loss = losses_lib.get_l1_loss_with_mask(
self.output_images,
self.target_images,
self.masks,
scope='d1')
losses = [l1_loss]
return losses
def get_classification_loss(self, logits, labels):
with tf.variable_scope('losses'):
classification_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='softmax_loss'))
slim.losses.add_loss(classification_loss)
losses = [classification_loss]
return losses
def get_train_step_fn( self ):
'''
Returns:
A train_step function which takes args:
(sess, train_ops, global_step)
'''
return partial( train_steps.discriminative_train_step_fn,
return_accuracy=False )
def build_train_op( self, global_step ):
'''
Builds train ops for discriminative task
Args:
global_step: A Tensor to be incremented
Returns:
[ loss_op, accuracy ]
'''
if not self.model_built or self.total_loss is None :
raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format(
self.model_built, self.losses_built ) )
self.global_step = global_step
t_vars = tf.trainable_variables()
# Create the optimizer train_op for the generator
self.optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg )
if 'clip_norm' in self.cfg:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True, clip_gradient_norm=self.cfg['clip_norm'])
else:
if self.is_training:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True )
else:
self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, is_training=False, update_global_step=True )
# Create a train_op for the discriminator
self.train_op = [ self.loss_op, 0 ]
self.train_op_built = True
return self.train_op
| 38.494845
| 152
| 0.622657
| 920
| 7,468
| 4.827174
| 0.245652
| 0.024769
| 0.014636
| 0.012159
| 0.094348
| 0.061923
| 0.047512
| 0.047512
| 0.047512
| 0.047512
| 0
| 0.009542
| 0.29834
| 7,468
| 193
| 153
| 38.694301
| 0.837977
| 0.301285
| 0
| 0.121212
| 0
| 0
| 0.071488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080808
| false
| 0
| 0.121212
| 0
| 0.272727
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f82c973044d39870f0f41f75666b3782377f54
| 13,459
|
py
|
Python
|
tests/test_handler.py
|
Tas-Kit/platform
|
34e1abb3f85b9649cbf18496333bf35f74aa6e3d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_handler.py
|
Tas-Kit/platform
|
34e1abb3f85b9649cbf18496333bf35f74aa6e3d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_handler.py
|
Tas-Kit/platform
|
34e1abb3f85b9649cbf18496333bf35f74aa6e3d
|
[
"Apache-2.0"
] | null | null | null |
# trigger build
import json
import uuid
import pytest
from mock import MagicMock, patch
from src import handler, db
from src.models import User, MiniApp, TObject
from src.constants import ROLE
from werkzeug.exceptions import BadRequest
@patch('src.db.push', side_effect=Exception)
def test_execute_obj_post_exception(mock_push):
children = [{
'labels': ['Person', 'Worker'],
'properties': {'age': 10, 'name': 'Owen'}
}, {
'labels': ['Car', 'Tesla'],
'properties': {'age': 3, 'model': 'S'}
}]
user = MagicMock()
obj = MagicMock()
with pytest.raises(Exception):
handler.execute_obj_post(user, obj, ROLE.OWNER, children)
@patch('src.db.push', side_effect=TypeError)
def test_execute_obj_post_error(mock_push):
children = [{
'labels': ['Person', 'Worker'],
'properties': {'age': 10, 'name': 'Owen'}
}, {
'labels': ['Car', 'Tesla'],
'properties': {'age': 3, 'model': 'S'}
}]
user = MagicMock()
obj = MagicMock()
with pytest.raises(BadRequest):
handler.execute_obj_post(user, obj, ROLE.OWNER, children)
@patch('src.handler.serialize_objs')
@patch('src.db.push')
def test_execute_obj_post_success(mock_push, mock_serialize_objs):
children = [{
'labels': ['Person', 'Worker'],
'properties': {'age': 10, 'name': 'Owen'}
}, {
'labels': ['Car', 'Tesla'],
'properties': {'age': 3, 'model': 'S'}
}]
user = MagicMock()
obj = MagicMock()
mock_serialize_objs.return_value = 'result'
assert 'result' == handler.execute_obj_post(user, obj, ROLE.OWNER, children)
mock_serialize_objs.assert_called_once()
args = mock_serialize_objs.call_args_list[0][0]
assert args[0] == user
person = args[1][0]
person_labels = list(person.__node__.labels)
person_labels.remove('TObject')
person_properties = dict(person.__node__)
del person_properties['oid']
assert sorted(person_labels) == sorted(['Person', 'Worker'])
assert person_properties == {'age': 10, 'name': 'Owen'}
car = args[1][1]
car_labels = list(car.__node__.labels)
car_labels.remove('TObject')
car_properties = dict(car.__node__)
del car_properties['oid']
assert sorted(car_labels) == sorted(['Car', 'Tesla'])
assert car_properties == {'age': 3, 'model': 'S'}
assert args[2] == ROLE.OWNER
def test_execute_obj_post_no_permission():
with pytest.raises(BadRequest):
handler.execute_obj_post(MagicMock(), MagicMock(), ROLE.STANDARD, MagicMock())
@patch('src.db.pull')
@patch('src.handler.execute_obj_post')
@patch('src.handler.execute_obj_delete')
def test_execute_obj_replace(mock_execute_obj_delete, mock_execute_obj_post, mock_pull):
user = MagicMock()
obj = MagicMock()
role = ROLE.ADMIN
oid_list = MagicMock()
children = MagicMock()
result = MagicMock()
mock_execute_obj_post.return_value = result
assert result == handler.execute_obj_replace(user, obj, role, oid_list, children)
mock_execute_obj_delete.assert_called_once_with(obj, role, oid_list)
mock_execute_obj_post.assert_called_once_with(user, obj, role, children)
@patch('src.handler.Subgraph')
@patch('src.db.run', side_effect=Exception)
def test_execute_obj_delete_error(mock_run, mock_subgraph):
obj = MagicMock()
child1 = MagicMock()
child2 = MagicMock()
child3 = MagicMock()
child1.oid = 'oid1'
child2.oid = 'oid2'
child3.oid = 'oid3'
child1.__node__ = 'child1'
child2.__node__ = 'child2'
child3.__node__ = 'child3'
node1 = MagicMock()
node2 = MagicMock()
node3 = MagicMock()
node4 = MagicMock()
node5 = MagicMock()
node6 = MagicMock()
node1.__node__ = 'node1'
node2.__node__ = 'node2'
node3.__node__ = 'node3'
node4.__node__ = 'node4'
node5.__node__ = 'node5'
node6.__node__ = 'node6'
child1.get_all_children.return_value = [node1, node2]
child2.get_all_children.return_value = [node3, node4]
child3.get_all_children.return_value = [node5, node6]
obj.children = [child1, child2, child3]
oid_list = ['oid0', 'oid1', 'oid3', 'oid4']
subgraph = MagicMock()
mock_subgraph.return_value = subgraph
with pytest.raises(BadRequest):
handler.execute_obj_delete(obj, ROLE.ADMIN, oid_list)
@patch('src.handler.Subgraph')
@patch('src.db.run')
def test_execute_obj_delete_success(mock_run, mock_subgraph):
obj = MagicMock()
child1 = MagicMock()
child2 = MagicMock()
child3 = MagicMock()
child1.oid = 'oid1'
child2.oid = 'oid2'
child3.oid = 'oid3'
child1.__node__ = 'child1'
child2.__node__ = 'child2'
child3.__node__ = 'child3'
node1 = MagicMock()
node2 = MagicMock()
node3 = MagicMock()
node4 = MagicMock()
node5 = MagicMock()
node6 = MagicMock()
node1.__node__ = 'node1'
node2.__node__ = 'node2'
node3.__node__ = 'node3'
node4.__node__ = 'node4'
node5.__node__ = 'node5'
node6.__node__ = 'node6'
child1.get_all_children.return_value = [node1, node2]
child2.get_all_children.return_value = [node3, node4]
child3.get_all_children.return_value = [node5, node6]
obj.children = [child1, child2, child3]
oid_list = ['oid0', 'oid1', 'oid3', 'oid4']
subgraph = MagicMock()
mock_subgraph.return_value = subgraph
assert 'SUCCESS' == handler.execute_obj_delete(obj, ROLE.ADMIN, oid_list)
mock_run.assert_called_once_with("MATCH (a:TObject)-[*0..]->(x:TObject) WHERE a.oid IN ['oid0', 'oid1', 'oid3', 'oid4'] DETACH DELETE x")
def test_execute_obj_delete_no_permission():
obj = MagicMock()
oid_list = []
with pytest.raises(BadRequest):
handler.execute_obj_delete(obj, ROLE.STANDARD, oid_list)
def test_serialize_objs():
obj1 = MagicMock(oid='oid1')
obj2 = MagicMock(oid='oid2')
obj1.serialize.return_value = 'obj1'
obj2.serialize.return_value = 'obj2'
objs = [obj1, obj2]
user = MagicMock()
assert {'oid1': 'obj1', 'oid2': 'obj2'} == handler.serialize_objs(user, objs, ROLE.ADMIN)
obj1.serialize.assert_called_once_with(user, ROLE.ADMIN)
obj2.serialize.assert_called_once_with(user, ROLE.ADMIN)
@patch('src.handler.get_graph_obj')
def test_get_obj_by_id_get_wrong_obj(mock_get_graph_obj):
user = MagicMock()
obj = MagicMock()
mock_get_graph_obj.return_value = obj
data = {
'_id': 'test_id'
}
with pytest.raises(BadRequest):
handler.get_obj_by_id(user, 'wrong_id', data)
@patch('src.utils.assert_standard')
@patch('src.handler.get_graph_obj')
def test_get_obj_by_id_platform(mock_get_graph_obj, mock_assert_standard):
user = MagicMock()
user.share.get.return_value = 5
obj = MagicMock()
mock_get_graph_obj.return_value = obj
data = {
'_id': 'platform'
}
assert obj is handler.get_obj_by_id(user, 'wrong_id', data)
assert data['role'] == 5
@patch('src.handler.get_graph_obj')
def test_get_obj_by_id_get_obj(mock_get_graph_obj):
user = MagicMock()
obj = MagicMock()
mock_get_graph_obj.return_value = obj
data = {
'_id': 'test_id'
}
assert obj == handler.get_obj_by_id(user, 'test_id', data)
mock_get_graph_obj.assert_called_once_with('test_id', TObject)
@patch('src.handler.get_graph_obj')
def test_get_obj_by_id_get_app(mock_get_graph_obj):
user = MagicMock()
obj = MagicMock()
mock_get_graph_obj.return_value = obj
data = {
'_id': 'test_id'
}
assert obj == handler.get_obj_by_id(user, 'root', data)
mock_get_graph_obj.assert_called_once_with('test_id', MiniApp)
@patch('src.handler.get_graph_obj')
def test_get_mini_apps(mock_get_graph_obj):
user = MagicMock()
app1 = MagicMock()
app2 = MagicMock()
app1.serialize.return_value = 'app1'
app2.serialize.return_value = 'app2'
user.apps = [app1, app2]
mock_get_graph_obj.return_value = user
assert handler.get_mini_apps('test_uid') == {
'mini_apps': ['app1', 'app2']
}
user.verify_key.assert_not_called()
mock_get_graph_obj.assert_called_once_with('test_uid', User)
@patch('src.handler.get_graph_obj')
def test_get_mini_app(mock_get_graph_obj):
user = MagicMock()
app = MagicMock()
app.serialize.return_value = 'mock_app'
mock_get_graph_obj.side_effect = [user, app]
assert handler.get_mini_app('test_uid', 'test_aid', 'test_platform_root_key') == {
'mini_app': 'mock_app'
}
assert mock_get_graph_obj.call_count == 2
user.verify_key.assert_called_once_with('test_platform_root_key')
@patch('src.handler.get_graph_obj')
def test_get_platform_root_key(mock_get_graph_obj):
user = MagicMock()
mock_get_graph_obj.return_value = user
user.generate_platform_root_key.return_value = 'platform_root_key'
assert handler.get_platform_root_key('test_uid') == {
'platform_root_key': 'platform_root_key'
}
mock_get_graph_obj.assert_called_once_with('test_uid', User)
def test_get_graph_obj_not_exist():
with pytest.raises(BadRequest):
handler.get_graph_obj('none existing aid', MiniApp)
def test_get_graph_obj_user_not_exist():
uid = str(uuid.uuid4())
u = handler.get_graph_obj(uid, User)
assert u.uid == uid
db.delete(u)
def test_get_graph_obj_exist():
app = MiniApp()
aid = str(uuid.uuid4())
app.aid = aid
db.push(app)
db.pull(app)
assert app == handler.get_graph_obj(aid, MiniApp)
db.delete(app)
@patch('src.handler.serialize_objs', return_value='serialize_results')
@patch('src.handler.handle_obj_params')
def test_handle_obj_get(mock_handle_obj_params, mock_serialize_objs):
parser = MagicMock()
user = MagicMock()
obj = MagicMock()
obj.children = ['test1', 'test2']
mock_handle_obj_params.return_value = {
'user': user,
'obj': obj,
'role': ROLE.ADMIN
}
assert {'result': 'serialize_results'} == handler.handle_obj_get('test_oid', parser)
mock_handle_obj_params.assert_called_once_with('test_oid', parser)
mock_serialize_objs.assert_called_once_with(user, obj.children, ROLE.ADMIN)
def test_decorator():
def dec(func):
def wrapper(a, b):
return func(a + b)
return wrapper
@dec
def main(foo):
return foo
assert 6 == main(5, 1)
def test_extra_params():
params = {
'user': 'u',
'app': 'a'
}
def func(user, **kwargs):
return user
assert 'u' == func(**params)
@patch('src.handler.get_obj_by_id')
@patch('src.handler.get_graph_obj')
def test_handle_obj_params(mock_get_graph_obj,
mock_get_obj_by_id):
user = MagicMock(spec=User)
data = {
'uid': 'test_uid',
'_id': 'test_oid',
'role': ROLE.OWNER,
'exp': 123456
}
obj = MagicMock()
mock_get_graph_obj.return_value = user
user.verify_key.return_value = data
mock_get_obj_by_id.return_value = obj
oid_list = ['oid1', 'oid2']
children = [
{
'labels': ['People', 'Worker'],
'properties': {
'name': 'Owen',
'age': '22'
}
}
]
parser = MagicMock()
parser.parse_args.return_value = {
'uid': 'test_uid',
'key': 'test_key',
'oid_list': oid_list,
'children': children
}
params = handler.handle_obj_params('test_oid', parser)
mock_get_graph_obj.assert_called_once_with('test_uid', User)
assert params == {
'user': user,
'obj': obj,
'role': ROLE.OWNER,
'oid_list': ['oid1', 'oid2'],
'children': children
}
@patch('src.db.push')
def test_execute_obj_patch_update(mock_push):
target_user = MagicMock()
target_user.share.get.return_value = 0
assert handler.execute_obj_patch(MagicMock(), 10, target_user, 5) == 'SUCCESS'
target_user.share.update.assert_called_once()
@patch('src.db.push')
def test_execute_obj_patch_remove(mock_push):
target_user = MagicMock()
target_user.share.get.return_value = 0
assert handler.execute_obj_patch(MagicMock(), 10, target_user, -1) == 'SUCCESS'
target_user.share.remove.assert_called_once()
@patch('src.db.push')
def test_execute_obj_patch_no_enough_permission(mock_push):
target_user = MagicMock()
target_user.share = MagicMock()
target_user.share.get.return_value = 5
with pytest.raises(BadRequest):
handler.execute_obj_patch(MagicMock(), 5, target_user, 0)
def test_handle_obj_patch_root():
with pytest.raises(BadRequest):
handler.handle_obj_patch('root', '')
@patch('src.handler.get_obj_by_id', return_value='obj')
@patch('src.handler.get_graph_obj')
@patch('src.handler.execute_obj_patch', return_value='hello')
def test_handle_obj_patch(mock_execute_obj_patch, mock_get_graph_obj, mock_get_obj_by_id):
user1 = MagicMock()
user1.verify_key.return_value = {
'role': 5
}
user2 = MagicMock()
mock_get_graph_obj.side_effect = [user1, user2]
arg_parser = MagicMock()
arg_parser.parse_args.return_value = {
'uid': 'myuid',
'key': 'mykey',
'target_uid': 'mytarget_uid',
'target_role': 0
}
assert handler.handle_obj_patch('oid', arg_parser) == {
'result': 'hello'
}
mock_execute_obj_patch.assert_called_once_with(
obj='obj', role=5, target_user=user2, target_role=0)
| 31.155093
| 141
| 0.666691
| 1,762
| 13,459
| 4.729285
| 0.093644
| 0.037442
| 0.051482
| 0.043202
| 0.602184
| 0.529941
| 0.478219
| 0.436217
| 0.388576
| 0.343094
| 0
| 0.017786
| 0.197934
| 13,459
| 431
| 142
| 31.227378
| 0.754145
| 0.000966
| 0
| 0.434667
| 0
| 0.002667
| 0.136641
| 0.04039
| 0
| 0
| 0
| 0
| 0.125333
| 1
| 0.085333
| false
| 0
| 0.021333
| 0.008
| 0.117333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f8d1a4f8b0cea46b5286a6c9367ca7d6ae25dc
| 579
|
py
|
Python
|
ersilia/utils/identifiers/long.py
|
ersilia-os/ersilia
|
eded117d6c7029ce4a497effdb514c21edfe3673
|
[
"MIT"
] | 32
|
2020-07-30T20:31:05.000Z
|
2022-03-31T17:27:14.000Z
|
ersilia/utils/identifiers/long.py
|
ersilia-os/ersilia
|
eded117d6c7029ce4a497effdb514c21edfe3673
|
[
"MIT"
] | 59
|
2022-03-21T10:00:04.000Z
|
2022-03-31T23:03:14.000Z
|
ersilia/utils/identifiers/long.py
|
ersilia-os/ersilia
|
eded117d6c7029ce4a497effdb514c21edfe3673
|
[
"MIT"
] | 44
|
2022-03-17T13:11:07.000Z
|
2022-03-31T19:44:16.000Z
|
try:
import uuid
except ModuleNotFoundError:
uuid = None
import random  # used by the PATTERN-based fallback in encode()
ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
PATTERN = [8, 4, 4, 4, 12]
SEP = "-"
class LongIdentifier(object):
def __init__(self):
super().__init__()
@staticmethod
def encode():
"""Get UUID code (long identifier)"""
if uuid is None:
alphabet = ALPHABET.lower()
s = []  # chunks of the identifier, one per PATTERN entry
for n in PATTERN:
s += ["".join([random.choice(alphabet) for _ in range(n)])]
return SEP.join(s)
else:
return str(uuid.uuid4())
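# Usage sketch (an illustration, not in the original file):
#   LongIdentifier.encode()  # -> a uuid4 string such as 'a1b2c3d4-...', or,
#   when uuid is unavailable, a random lowercase string in the same
#   8-4-4-4-12 PATTERN joined by SEP.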
| 23.16
| 75
| 0.56304
| 62
| 579
| 5.112903
| 0.693548
| 0.07571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0425
| 0.309154
| 579
| 24
| 76
| 24.125
| 0.75
| 0.053541
| 0
| 0
| 0
| 0
| 0.070111
| 0.066421
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29f9eab4a69842a784121a1073e07bcadc752ced
| 3,265
|
py
|
Python
|
Realsense2CV.py
|
felix2072/pytorch-CycleGAN-and-pix2pix
|
4980106ceab5e1eb7bb20c2b492d007b6310d9e1
|
[
"BSD-3-Clause"
] | null | null | null |
Realsense2CV.py
|
felix2072/pytorch-CycleGAN-and-pix2pix
|
4980106ceab5e1eb7bb20c2b492d007b6310d9e1
|
[
"BSD-3-Clause"
] | null | null | null |
Realsense2CV.py
|
felix2072/pytorch-CycleGAN-and-pix2pix
|
4980106ceab5e1eb7bb20c2b492d007b6310d9e1
|
[
"BSD-3-Clause"
] | null | null | null |
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
###############################################
## Open CV and Numpy integration ##
###############################################
import pyrealsense2 as rs
import numpy as np
import cv2
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
width = 640
height = 480
if device_product_line == 'L500':
config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
pipeline.start(config)
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
try:
while True:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
if not depth_frame or not color_frame:
continue
# Convert images to numpy arrays
object_color = np.zeros((height, width, 3), np.uint8)
depth_image = np.asanyarray(depth_frame.get_data())
color_image = np.asanyarray(color_frame.get_data())
# depth_image_rgb = cv2.merge((depth_image,depth_image,depth_image))
# Apply colormap on depth image (image must be converted to 8-bit per pixel first)
# depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
# depth_colormap_dim = depth_colormap.shape
color_colormap_dim = color_image.shape
depth_image = cv2.resize(depth_image, (width, height), interpolation=cv2.INTER_AREA)
edges = auto_canny(color_image)
#edges = cv2.bitwise_not(edges)
edges_rgb = object_color.shape
edges_rgb = cv2.merge((edges,edges,edges))
#blank_image[5:10 , 5:10] = (255, 0, 0) # [x.1,x.2 , y.1,y.2] (B, G, R)
object_color[0:height, 0:width] = (76, 76, 76)  # numpy indexing is [rows(height), cols(width)]
image = cv2.add(edges_rgb,object_color)
edges_rgb = cv2.bitwise_not(edges_rgb)
image = cv2.multiply(edges_rgb,image,scale = 0.003922)
image = image[0:256, 0:256]
# Show images
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', image)
cv2.waitKey(1)
finally:
# Stop streaming
pipeline.stop()
| 34.368421
| 109
| 0.644717
| 440
| 3,265
| 4.625
| 0.397727
| 0.044226
| 0.025061
| 0.029484
| 0.043243
| 0.030467
| 0
| 0
| 0
| 0
| 0
| 0.049921
| 0.226953
| 3,265
| 94
| 110
| 34.734043
| 0.756339
| 0.280551
| 0
| 0
| 0
| 0
| 0.020609
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.056604
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29fbb43e9c43f01cd5a84414b7fa4416473edd33
| 566
|
py
|
Python
|
main.py
|
Benrflanders/Genetic-Algorithm-Function-Solver
|
7234aed5478d0701f0f8ce342116ac154aa40ba1
|
[
"MIT"
] | null | null | null |
main.py
|
Benrflanders/Genetic-Algorithm-Function-Solver
|
7234aed5478d0701f0f8ce342116ac154aa40ba1
|
[
"MIT"
] | null | null | null |
main.py
|
Benrflanders/Genetic-Algorithm-Function-Solver
|
7234aed5478d0701f0f8ce342116ac154aa40ba1
|
[
"MIT"
] | null | null | null |
import genetic_algorithm
#where the population will be processed and the main loop is contained
#initialise population with random candidate solutions
print("Enter a function to be solved: \n")
fitness_function = [1780, 17, -2] #n = ax + by
#function: [n, a, b]
ga = genetic_algorithm.genetic_algorithm(fitness_function)
#evaluate each candidate
#repeat until (termination condition is satisfied) DO
#select parents;
#recombine pairs of parents
#mutate the resulting offspring
#evaluate new candidates
#select individuals for the next generation
#OD
#END
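# --- Illustrative sketch (not part of this repository) ---
# A self-contained toy GA for n = a*x + b*y that follows the loop outlined in
# the comments above; all names below are hypothetical and the real
# genetic_algorithm module may be organised differently.
import random

def solve_linear(n=1780, a=17, b=-2, pop_size=50, generations=500):
    # initialise population with random candidate solutions (x, y)
    population = [(random.randint(-200, 200), random.randint(-200, 200))
                  for _ in range(pop_size)]
    error = lambda c: abs(n - (a * c[0] + b * c[1]))  # 0 means solved
    for _ in range(generations):
        population.sort(key=error)            # evaluate each candidate
        if error(population[0]) == 0:         # termination condition satisfied
            break
        parents = population[:pop_size // 2]  # select parents
        offspring = []
        while len(offspring) < pop_size - len(parents):
            p1, p2 = random.sample(parents, 2)  # recombine pairs of parents
            child = (p1[0], p2[1])
            if random.random() < 0.3:           # mutate the resulting offspring
                child = (child[0] + random.randint(-5, 5),
                         child[1] + random.randint(-5, 5))
            offspring.append(child)
        population = parents + offspring      # select individuals for the next generation
    return population[0]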
| 21.769231
| 70
| 0.780919
| 79
| 566
| 5.531646
| 0.734177
| 0.10984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014553
| 0.150177
| 566
| 25
| 71
| 22.64
| 0.893971
| 0.65371
| 0
| 0
| 0
| 0
| 0.180328
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
29ffd5d34e2555908d5acb7cecdc5aad3a6e87bc
| 1,983
|
py
|
Python
|
src/predictionAlgorithms/machineLearning/training/convolutionalLstm.py
|
aivaras-ciurlionis/meteo
|
434759d16f7cca505d280475611d1fef5176827b
|
[
"MIT"
] | null | null | null |
src/predictionAlgorithms/machineLearning/training/convolutionalLstm.py
|
aivaras-ciurlionis/meteo
|
434759d16f7cca505d280475611d1fef5176827b
|
[
"MIT"
] | 6
|
2020-05-23T11:30:48.000Z
|
2022-03-11T23:45:06.000Z
|
src/predictionAlgorithms/machineLearning/training/convolutionalLstm.py
|
aivaras-ciurlionis/meteo
|
434759d16f7cca505d280475611d1fef5176827b
|
[
"MIT"
] | null | null | null |
import tensorflow
from PIL import Image
from keras.models import Sequential
from keras.layers import Conv2D, Conv2DTranspose, ConvLSTM2D
from keras.optimizers import SGD
import numpy as np
import os
from keras import backend as K
from src.predictionAlgorithms.machineLearning.algorithms.ConvLSTM import ConvLstm
from src.predictionAlgorithms.machineLearning.algorithms.ConvolutionalChannelsMovementAlgorithm import \
ConvolutionalChannelsMovementAlgorithm
from src.predictionAlgorithms.machineLearning.helpers.callbacks import Callbacks
from src.utilities.imageAnalysis.pixelsRainStrengthConverter import PixelsRainStrengthConverter
class ConvolutionalLstmTrain:
@staticmethod
def train(size, channels, validation_data, loader, val):
model = Sequential()
model.add(
ConvLSTM2D(
filters=1,
padding='same',
kernel_size=(6, 6),
activation='relu',
input_shape=(channels, 1, size, size),
data_format='channels_first',
return_sequences=False
)
)
model.add(
Conv2D(
filters=1,
kernel_size=(8, 8),
activation='relu',
padding='same',
data_format='channels_first'
)
)
model.compile(
optimizer=SGD(lr=0.01, decay=0.01/50),
loss='mse'
)
callback = Callbacks()
callback \
.set_algorithm(ConvLstm(model=model).with_size(size)) \
.set_validation_data(validation_data) \
.set_size(size) \
.set_validation_frequency(1) \
.set_base(6)
model.fit_generator(loader(), epochs=50, steps_per_epoch=20, shuffle=True, callbacks=[callback],
validation_data=val)
model.save('conv_lstm.h5')
# Hyperparameter shorthand: kernel 12x12 -> lr 0.01 -> epochs = 50; steps per epoch = 10
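# Illustrative note (an assumption, not in the original file): with
# data_format='channels_first' and input_shape=(channels, 1, size, size),
# the loader is expected to yield batches shaped (batch, channels, 1, size, size)
# (channels acting as the time axis) and targets shaped (batch, 1, size, size).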
| 34.189655
| 104
| 0.611699
| 192
| 1,983
| 6.197917
| 0.479167
| 0.030252
| 0.068067
| 0.105882
| 0.087395
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027536
| 0.304085
| 1,983
| 58
| 105
| 34.189655
| 0.834783
| 0.020171
| 0
| 0.156863
| 0
| 0
| 0.030381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.235294
| 0
| 0.27451
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b00cec2aa25b2e4c87f0a86c86662d5e0d2edb1
| 1,927
|
py
|
Python
|
batchtest.py
|
nachewigkeit/CropDefender
|
e78fc48f720367ca94033f6263eb1e4a9c6b7858
|
[
"MIT"
] | 2
|
2021-10-14T08:14:15.000Z
|
2021-12-01T05:57:49.000Z
|
batchtest.py
|
nachewigkeit/CropDefender
|
e78fc48f720367ca94033f6263eb1e4a9c6b7858
|
[
"MIT"
] | null | null | null |
batchtest.py
|
nachewigkeit/CropDefender
|
e78fc48f720367ca94033f6263eb1e4a9c6b7858
|
[
"MIT"
] | 1
|
2021-12-01T05:57:53.000Z
|
2021-12-01T05:57:53.000Z
|
import bchlib
from PIL import Image, ImageOps
import numpy as np
import glob
from tqdm import tqdm
import torch
import matplotlib.pyplot as plt
from model import StegaStampDecoder
BCH_POLYNOMIAL = 137
BCH_BITS = 5
def get_bits(secret="MITPBL"):
# Takes a string, returns its BCH-encoded bit string
bch = bchlib.BCH(BCH_POLYNOMIAL, BCH_BITS)
data = bytearray(secret + ' ' * (7 - len(secret)), 'utf-8')
ecc = bch.encode(data)
packet = data + ecc
packet_binary = ''.join(format(x, '08b') for x in packet)
return packet_binary
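# Illustrative note (an assumption based on the constants above): the 7 data
# bytes (56 bits) plus the 5-byte BCH ECC (40 bits) form the 96-bit packet
# that decode() later recovers via secret[:96].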
def get_model(model_path):
# Takes a path to model weights, returns the loaded model
decoder = torch.load(model_path).cuda()
return decoder
def decode(image, model):
# Takes the model and an image, returns the predicted bit string (the image must be normalized to the [0, 1] range!)
image = torch.from_numpy(image.transpose((2, 0, 1))).unsqueeze(0).cuda()
secret = model(image)
secret = np.array(secret[0].cpu())
secret = np.round(secret)
packet_binary = "".join([str(int(bit)) for bit in secret[:96]])
return packet_binary
def get_acc(true, pred):
# Takes the predicted and ground-truth bit strings, returns the accuracy
secret_size = len(true)
count = 0
for i in range(secret_size):
if true[i] == pred[i]:
count += 1
acc = count / secret_size
return acc
if __name__ == "__main__":
dirPath = r"E:/dataset/stegastamp_crop"
modelPath = r'saved_models/decoder.pth'
file_list = glob.glob(dirPath + '/*.png')
model = StegaStampDecoder().cuda()
model.load_state_dict(torch.load(modelPath))
model.eval()
bitstring = get_bits()
store = []
with torch.no_grad():
for file in tqdm(file_list):
image = Image.open(file).convert("RGB")
image = image.crop((50, 50, 350, 350))
image = np.array(ImageOps.fit(image, (400, 400)), dtype=np.float32)
image /= 255.
result = decode(image, model)
store.append(get_acc(bitstring, result))
plt.hist(store)
plt.show()
print(np.mean(store))
| 24.705128
| 79
| 0.63259
| 261
| 1,927
| 4.544061
| 0.448276
| 0.040472
| 0.026981
| 0.035413
| 0.040472
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028513
| 0.235599
| 1,927
| 77
| 80
| 25.025974
| 0.776646
| 0.043072
| 0
| 0.036364
| 0
| 0
| 0.044589
| 0.027189
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072727
| false
| 0
| 0.145455
| 0
| 0.290909
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b070ef3534dcec1b94204596a275dcc71c8d799
| 428
|
py
|
Python
|
examples/echobot.py
|
samedamci/telegrask
|
8cd0d7663e3a7386784396462f66c176bc6543c5
|
[
"0BSD"
] | 4
|
2021-08-19T19:17:17.000Z
|
2021-10-12T19:25:59.000Z
|
examples/echobot.py
|
samedamci/telegrask
|
8cd0d7663e3a7386784396462f66c176bc6543c5
|
[
"0BSD"
] | null | null | null |
examples/echobot.py
|
samedamci/telegrask
|
8cd0d7663e3a7386784396462f66c176bc6543c5
|
[
"0BSD"
] | 1
|
2021-08-31T10:49:34.000Z
|
2021-08-31T10:49:34.000Z
|
#!/usr/bin/python3
"""Simple bot to reply exactly the same what user sent to chat."""
# This program is dedicated to the public domain under the CC0 license.
from telegrask import Telegrask
bot = Telegrask("BOT_TOKEN")
@bot.command("echo", help="repeat user words", allow_without_prefix=True)
def echo(update, context):
update.message.reply_text(update.message.text)
if __name__ == "__main__":
bot.run(debug=True)
| 25.176471
| 73
| 0.740654
| 64
| 428
| 4.765625
| 0.71875
| 0.078689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00545
| 0.142523
| 428
| 16
| 74
| 26.75
| 0.825613
| 0.345794
| 0
| 0
| 0
| 0
| 0.139194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b07a5e3542e7f446d97c19101d6130c567a06f9
| 2,238
|
py
|
Python
|
lib/emailsmtp/models.py
|
hdknr/emailqueue
|
05e108562f4fb612440f769973b9a3d02c11afcd
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
lib/emailsmtp/models.py
|
hdknr/emailqueue
|
05e108562f4fb612440f769973b9a3d02c11afcd
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
lib/emailsmtp/models.py
|
hdknr/emailqueue
|
05e108562f4fb612440f769973b9a3d02c11afcd
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from emailqueue.models import BaseModel
class Domain(BaseModel):
'''Domain:
- used for :ref:`postfix.relay_domains`, :ref:`postfix.transport_maps`
'''
domain = models.CharField(
_('Domain'), max_length=50, unique=True, db_index=True,)
'''`where_field`, also `select_field` for relay_domains '''
transport = models.CharField(
_('Transport'), max_length=200)
'''`where_field` for transport_maps'''
alias_domain = models.ForeignKey(
'Domain', verbose_name=_('Alias Transport'),
related_name='alias_domain_set',
null=True, default=None, blank=True, on_delete=models.SET_NULL)
class Meta:
verbose_name = _('Domain')
verbose_name_plural = _('Domain')
def __unicode__(self):
return self.domain
def create_alias_domain(self, name):
domain, created = Domain.objects.get_or_create(
domain=name, transport='error',
alias_domain=self)
return domain
def add_alias_address(self, user, alias_user=None):
if not self.alias_domain:
return
src = '{0}@{1}'.format(user, self.domain)
dst = '{0}@{1}'.format(alias_user or user, self.alias_domain.domain)
alias = self.alias_set.filter(recipient=src).first()
if alias:
alias.forward = dst
alias.save()
else:
alias = self.alias_set.create(recipient=src, forward=dst)
return alias
class Alias(BaseModel):
'''Alias
- Used in :ref:`postfix.virtual_alias_maps`
'''
domain = models.ForeignKey(
Domain,
null=True, default=None, blank=True, on_delete=models.SET_NULL)
recipient = models.EmailField(
_('Recipient Address'), max_length=100, unique=True, db_index=True)
'''`where_field` for virtual_alias_maps '''
forward = models.EmailField(
_('Forward Address'), max_length=100)
'''`select_field` for virtual_alias_maps '''
class Meta:
verbose_name = _('Alias')
verbose_name_plural = _('Alias')
def __unicode__(self):
return u"{0}>{1}".format(self.recipient, self.forward)
| 29.84
| 76
| 0.636282
| 266
| 2,238
| 5.101504
| 0.300752
| 0.040531
| 0.017686
| 0.025055
| 0.149595
| 0.117907
| 0.117907
| 0.072218
| 0.072218
| 0.072218
| 0
| 0.009971
| 0.238159
| 2,238
| 74
| 77
| 30.243243
| 0.785924
| 0.058088
| 0
| 0.12766
| 0
| 0
| 0.069657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.06383
| 0.042553
| 0.468085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b0bae7ae91cfcfff2eabb361271fc8c258445e7
| 1,628
|
py
|
Python
|
venv/Lib/site-packages/traits/tests/test_constant.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | 1
|
2022-01-18T17:56:51.000Z
|
2022-01-18T17:56:51.000Z
|
venv/Lib/site-packages/traits/tests/test_constant.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/traits/tests/test_constant.py
|
richung99/digitizePlots
|
6b408c820660a415a289726e3223e8f558d3e18b
|
[
"MIT"
] | null | null | null |
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from traits.api import Constant, HasTraits, TraitError
class TestConstantTrait(unittest.TestCase):
def test_initial_value(self):
class TestClass(HasTraits):
c_atr = Constant(5)
self.assertEqual(TestClass().c_atr, 5)
def test_mutable_initial_value(self):
class TestClass(HasTraits):
c_atr_1 = Constant([1, 2, 3, 4, 5])
c_atr_2 = Constant({"a": 1, "b": 2})
obj = TestClass()
self.assertEqual(obj.c_atr_1, [1, 2, 3, 4, 5])
self.assertEqual(obj.c_atr_2, {"a": 1, "b": 2})
def test_assign_fails(self):
class TestClass(HasTraits):
c_atr = Constant(5)
with self.assertRaises(TraitError):
TestClass(c_atr=5)
with self.assertRaises(TraitError):
del TestClass().c_atr
def test_mutate_succeeds(self):
class TestClass(HasTraits):
c_atr_1 = Constant([1, 2, 3, 4, 5])
c_atr_2 = Constant({"a": 1, "b": 2})
obj = TestClass()
obj.c_atr_1.append(6)
obj.c_atr_2["c"] = 3
self.assertEqual(obj.c_atr_1, [1, 2, 3, 4, 5, 6])
self.assertEqual(obj.c_atr_2, {"a": 1, "b": 2, "c": 3})
| 29.6
| 71
| 0.625307
| 232
| 1,628
| 4.241379
| 0.362069
| 0.060976
| 0.042683
| 0.109756
| 0.429878
| 0.367886
| 0.367886
| 0.367886
| 0.262195
| 0.262195
| 0
| 0.044371
| 0.252457
| 1,628
| 54
| 72
| 30.148148
| 0.764174
| 0.238329
| 0
| 0.466667
| 0
| 0
| 0.00813
| 0
| 0
| 0
| 0
| 0
| 0.233333
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b0c74252519e1d0763eeba5100d8c404e0ec79d
| 5,072
|
py
|
Python
|
midap_simulator/packet_manager.py
|
cap-lab/MidapSim
|
4f92a9f9413c29d7e1f37e863cce90ebdde8b420
|
[
"MIT"
] | 2
|
2021-03-28T16:19:06.000Z
|
2022-02-26T08:58:33.000Z
|
midap_simulator/packet_manager.py
|
cap-lab/MidapSim
|
4f92a9f9413c29d7e1f37e863cce90ebdde8b420
|
[
"MIT"
] | null | null | null |
midap_simulator/packet_manager.py
|
cap-lab/MidapSim
|
4f92a9f9413c29d7e1f37e863cce90ebdde8b420
|
[
"MIT"
] | 1
|
2021-02-22T08:44:20.000Z
|
2021-02-22T08:44:20.000Z
|
import mmap
import numpy as np
from time import sleep
import os
class PacketManager(object):
buf_size = 0x1000
packet_size = 2072
#typedef struct _Packet{
# PacketType type;
# uint32_t size;
# uint64_t cycle;
# uint32_t address;
# uint8_t data[8];
# uint32_t flags;
#} Packet;
data_type = np.dtype([('type', 'u4'), ('size', 'u4'), ('cycle', 'u8'), ('address', 'u4'), ('data', 'f4', (512)), ('flags', 'u4')])
#typedef struct {
# volatile int start; /* index of oldest element */
# volatile int end; /* index at which to write new element */
# int capacity;
# int size;
# Packet elems[PKT_BUFFER_SIZE+1]; /* vector of elements */
#} PacketBuffer;
data_info_type = np.dtype([('start', 'u4'), ('end', 'u4'), ('capacity', 'u4'), ('size', 'u4')])
def __init__(self, path):
self._infoPath = path
self._lastCycle = 0
self._pType = self.enum('read', 'write', 'elapsed', 'terminated')
self._pFlag = self.enum('none', 'flush')
f = open(path, 'r')
name = f.readline()
ib_name = f.readline()
bi_name = f.readline()
f.close()
ibFile = open('/dev/shm' + ib_name.rstrip('\n'), 'r+')
self._sendBuffer = mmap.mmap(ibFile.fileno(), 0, mmap.PROT_READ | mmap.PROT_WRITE)
ibFile.close()
biFile = open('/dev/shm' + bi_name.rstrip('\n'), 'r+')
self._receiveBuffer = mmap.mmap(biFile.fileno(), 0, mmap.PROT_READ | mmap.PROT_WRITE)
biFile.close()
# Check if the connection is established.
self.writeRequest(0x0, 4, 0, 0)
def enum(self, *sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
def isEmpty(self, buffer):
start, end, _, _ = self.readBufInfo(buffer)
return start == end
def isFull(self, buffer):
start, end, _, _ = self.readBufInfo(buffer)
return (end + 1) % self.buf_size == start
def readBufInfo(self, buffer):
buffer.seek(0)
data_info = np.array(np.frombuffer(buffer.read(16), dtype=self.data_info_type), dtype=self.data_info_type)
return data_info['start'], data_info['end'], data_info['capacity'], data_info['size']
def readPacket(self):
buffer = self._receiveBuffer
while self.isEmpty(buffer) == True:
sleep(0.000000001)
start, end, capacity, size = self.readBufInfo(self._receiveBuffer)
buffer.seek(16 + int(start) * self.packet_size)
data = np.array(np.frombuffer(buffer.read(self.packet_size), dtype=self.data_type), dtype=self.data_type)
# Increase the read index (start)
start = (start + 1) % self.buf_size
buffer.seek(0)
buffer.write(start.tobytes())
return data
def writePacket(self, packet):
buffer = self._sendBuffer
while self.isFull(buffer) == True:
sleep(0.000000001)
start, end, capacity, size = self.readBufInfo(buffer)
data = np.array(packet, dtype=self.data_type)
buffer.seek(16 + int(end) * self.packet_size)
buffer.write(data.tobytes())
# Increase the write index (end)
end = (end + 1) % self.buf_size
buffer.seek(4)
buffer.write(end.tobytes())
buffer.flush()
def readRequest(self, addr, size, cycle, flush = False):
delta_cycle = 0
if cycle > self._lastCycle:
delta_cycle = cycle - self._lastCycle
#packet = np.array((self._pType.read, size * 4, delta_cycle, addr * 4, 0, 0), dtype=self.data_type)
packet = np.array((self._pType.read, size, cycle, addr * 4, 0, 0), dtype=self.data_type)
if flush == True:
packet['flags'] = self._pFlag.flush
self.writePacket(packet)
packet = self.readPacket()
data = packet['data']
data = np.resize(data, int(size))
self._lastCycle = cycle
return data, packet['cycle']
def writeRequest(self, addr, size, data, cycle):
delta_cycle = 0
if cycle > self._lastCycle:
delta_cycle = cycle - self._lastCycle
#packet = np.array((self._pType.write, size * 4, delta_cycle, addr * 4, np.resize(data, 512), 0), dtype=self.data_type)
packet = np.array((self._pType.write, size, cycle, addr * 4, np.resize(data, 512), 0), dtype=self.data_type)
self.writePacket(packet)
self._lastCycle = cycle
def elapsedRequest(self, cycle):
delta_cycle = 0
if cycle > self._lastCycle + 100:
delta_cycle = cycle - self._lastCycle
if delta_cycle > 0:
packet = np.array((self._pType.elapsed, 0, int(cycle), 0, 0, 0), dtype=self.data_type)
self.writePacket(packet)
self._lastCycle = cycle
def terminatedRequest(self):
packet = np.array((self._pType.terminated, 0, 0, 0, 0, 0), dtype=self.data_type)
self.writePacket(packet)
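# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical session; 'conn.info' stands for the info file written by the
# simulator side that owns the shared-memory buffers:
#
#   pm = PacketManager('conn.info')
#   pm.writeRequest(0x100, 4, np.arange(4, dtype=np.float32), cycle=10)
#   data, cycle = pm.readRequest(0x100, 4, cycle=20)
#   pm.terminatedRequest()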
| 33.813333
| 134
| 0.589708
| 634
| 5,072
| 4.580442
| 0.20347
| 0.034091
| 0.049242
| 0.052686
| 0.386364
| 0.32989
| 0.290634
| 0.278926
| 0.211777
| 0.198003
| 0
| 0.028841
| 0.268533
| 5,072
| 149
| 135
| 34.040268
| 0.753908
| 0.144322
| 0
| 0.225806
| 0
| 0
| 0.038666
| 0
| 0
| 0
| 0.002084
| 0
| 0
| 1
| 0.11828
| false
| 0
| 0.043011
| 0
| 0.27957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b0eec937bcf7b4132e9bab483c930a0a86d89bc
| 3,824
|
py
|
Python
|
amurlevel_model/model/train_test_split.py
|
RaevskyDN/aij2020-amur-noflood-public
|
d11349b1f8cc79c18bb078392731eac32b3c56ff
|
[
"Apache-2.0"
] | 7
|
2021-02-17T18:55:13.000Z
|
2021-07-30T13:56:19.000Z
|
amurlevel_model/model/train_test_split.py
|
RaevskyDN/aij2020-amur-noflood-public
|
d11349b1f8cc79c18bb078392731eac32b3c56ff
|
[
"Apache-2.0"
] | null | null | null |
amurlevel_model/model/train_test_split.py
|
RaevskyDN/aij2020-amur-noflood-public
|
d11349b1f8cc79c18bb078392731eac32b3c56ff
|
[
"Apache-2.0"
] | 1
|
2022-01-23T15:11:43.000Z
|
2022-01-23T15:11:43.000Z
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from datetime import date
from typing import Union,Tuple,Optional,List
from ..config_features import CATEGORICAL_FEATURES,NUMERICAL_FEATURES
from ..config import DAYS_FORECAST,ALL_STATIONS
from ..utils.normalizer import get_normalizer_stats
def train_test_split(amur_df: pd.DataFrame,
start_test_date: Union[date,str],
end_test_date: Union[date,str],
fname: Optional[str]=None,
numerical_features: Optional[List[str]]=None,
categorical_features: Optional[List[str]]=None) -> Tuple[np.array,np.array,np.array,np.array]:
'''
Split into train and test sets for model training.
The train set is traversed with a step of 1 day, the test set with a step of 10 days.
The resulting shape is [n, DAYS_FORECAST, n_features], where n is the sample size,
DAYS_FORECAST is the number of forecast days (10),
and n_features is the number of features
:param amur_df: pd.DataFrame
:param start_test_date: date,str - start of the test period
:param end_test_date: date,str - end of the test period
:param fname: str, path to a json file with mean/std statistics for each field
:param numerical_features: List[str] - list of numerical features
:param categorical_features: List[str] - list of categorical features
:return: tuple:
X_train - training samples
y_train - labels for the training samples
X_test - test samples
y_test - labels for the test samples
'''
if numerical_features is None:
numerical_features = NUMERICAL_FEATURES
if categorical_features is None:
categorical_features = CATEGORICAL_FEATURES
targets = ['sealevel_max_' + identifier for identifier in ALL_STATIONS]
train = amur_df[amur_df['date'] < start_test_date].copy()
test = amur_df[(amur_df['date'] >= start_test_date) &
(amur_df['date'] < end_test_date)].copy()
stats = get_normalizer_stats(fname)
for col in numerical_features:
_mean = stats[col]['mean']
_std = stats[col]['std']
train[col] = (train[col] - _mean) / _std
test[col] = (test[col] - _mean) / _std
train.sort_values('date', inplace=True)
train_x_array = []
train_y_array = []
step = 0
while True:
if step + DAYS_FORECAST + 1 >= len(train):
break
if train.iloc[step:step + DAYS_FORECAST][targets].count().min() < DAYS_FORECAST:
step += 1
continue
train_x_array.append(train.iloc[step:step + DAYS_FORECAST][numerical_features + categorical_features].values)
train_y_array.append(train.iloc[step:step + DAYS_FORECAST][targets].values)
step += 1
X_train = np.transpose(np.dstack(train_x_array), (2, 0, 1))
y_train = np.transpose(np.dstack(train_y_array), (2, 0, 1))
step = 0
test.sort_values('date', inplace=True)
test_x_array = []
test_y_array = []
while True:
if step >= len(test):
break
if test.iloc[step:step + DAYS_FORECAST][targets].count().min() < DAYS_FORECAST:
step += DAYS_FORECAST
continue
test_x_array.append(test.iloc[step:step + DAYS_FORECAST][numerical_features + categorical_features].values)
test_y_array.append(test.iloc[step:step + DAYS_FORECAST][targets].values)
if step + DAYS_FORECAST*2+1 >= len(test):
break
step += DAYS_FORECAST
X_test = np.transpose(np.dstack(test_x_array), (2, 0, 1))
y_test = np.transpose(np.dstack(test_y_array), (2, 0, 1))
return X_train, y_train, X_test, y_test
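# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical call; the surrounding pipeline supplies amur_df and the
# normalizer stats JSON:
#
#   X_train, y_train, X_test, y_test = train_test_split(
#       amur_df,
#       start_test_date='2019-05-01',
#       end_test_date='2019-10-01',
#       fname='normalizer_stats.json',
#   )
#   # X_* shape: [n, DAYS_FORECAST, n_features]; y_*: [n, DAYS_FORECAST, n_stations]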
| 41.565217
| 117
| 0.636245
| 494
| 3,824
| 4.702429
| 0.244939
| 0.077486
| 0.068876
| 0.041326
| 0.355144
| 0.260439
| 0.191563
| 0.176496
| 0.104176
| 0.104176
| 0
| 0.008872
| 0.263075
| 3,824
| 92
| 118
| 41.565217
| 0.815472
| 0.250523
| 0
| 0.216667
| 0
| 0
| 0.014332
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.116667
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b136b651e1325beb870ea9f5a79512ec242273e
| 80,229
|
py
|
Python
|
common/ui.py
|
Regnareb/StreamManager
|
8b95e785d41c78f03725077f5dce2a5c15e0354f
|
[
"MIT"
] | null | null | null |
common/ui.py
|
Regnareb/StreamManager
|
8b95e785d41c78f03725077f5dce2a5c15e0354f
|
[
"MIT"
] | null | null | null |
common/ui.py
|
Regnareb/StreamManager
|
8b95e785d41c78f03725077f5dce2a5c15e0354f
|
[
"MIT"
] | null | null | null |
import os
import sys
import copy
import ctypes
import socket
import logging
import threading
import functools
import webbrowser
logger = logging.getLogger(__name__)
import keyboard
from PySide2 import QtCore, QtWidgets, QtGui, QtWebEngineWidgets
# TODO
# Be able to import a text file in the description/title as variables (to have counters and currentsong for example)
# Add the %CATEGORY% variable and other monitored files to the context menu
# Allow adding a command bot with custom commands (!game !currentsong)
# Add About and Help menu entries
# Automatically switch scenes in OBS depending of the game played
# Add an XML/EDL file and add each marker created for import into premiere/resolve/FCP
# Change color tray icon to green if update channel with new process or red + toast message if error
# Add trayicons for dropped frames and stream/record states
# Do a notification if the user has not used a streaming process for X minutes if any service is online (to prevent streaming unnoticed)
# Make a customizable stream deck that automatically changes its keys depending on the program in use https://interactjs.io/
# Being able to put it in portrait without changing icons layout
# Add Multi Actions with pause timers
# Create an independant server that scan the foreground process and send it to the receiver, this way multi computer streaming is possible
# websocket plugin ( https://github.com/Elektordi/obs-websocket-py ) Show Scene selector, MIC and DEFAULT volume, RECORD and STREAMING status and STATS
import common.manager
import common.remote
import common.tools
import common.systray
class QLoggerHandler(common.tools.HtmlStreamHandler):
def __init__(self, signal):
super().__init__()
self.signal = signal
def emit(self, record):
message = self.format(record)
self.signal.emit(QtCore.SIGNAL("logMsg(QString)"), message)
class LogPanel(QtWidgets.QDockWidget):
changed_loglevel = QtCore.Signal(str)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setWindowTitle('Logs')
self.setObjectName('docklogs')
self.levels = ['Debug', 'Info', 'Warning', 'Error', 'Critical']
self.interface = {}
self.interface['main'] = QtWidgets.QWidget()
self.interface['layoutv'] = QtWidgets.QVBoxLayout()
self.interface['layouth'] = QtWidgets.QHBoxLayout()
self.interface['label'] = QtWidgets.QLabel('Logs Level:')
self.interface['levels'] = QtWidgets.QComboBox()
self.interface['levels'].insertItems(0, self.levels)
self.interface['levels'].currentIndexChanged.connect(self.changed_loglevel.emit)
self.interface['textedit'] = QtWidgets.QTextBrowser()
self.interface['textedit'].setOpenLinks(False)
self.interface['clear'] = QtWidgets.QPushButton('Clear')
self.interface['clear'].clicked.connect(self.interface['textedit'].clear)
self.interface['layouth'].addStretch()
self.interface['layouth'].addWidget(self.interface['label'])
self.interface['layouth'].addWidget(self.interface['levels'])
self.interface['layouth'].addStretch()
self.interface['layouth'].addWidget(self.interface['clear'])
self.interface['layoutv'].addLayout(self.interface['layouth'])
self.interface['layoutv'].addWidget(self.interface['textedit'])
self.interface['main'].setLayout(self.interface['layoutv'])
self.setWidget(self.interface['main'])
# Use old syntax signals as you can't have multiple inheritance with QObject
self.emitter = QtCore.QObject()
self.connect(self.emitter, QtCore.SIGNAL("logMsg(QString)"), self.interface['textedit'].append)
self.handler = QLoggerHandler(self.emitter)
formatter = logging.Formatter('<span title="line %(lineno)d">%(levelname)s %(name)s.%(funcName)s() - %(message)s</span>')
self.handler.setFormatter(formatter)
logging.getLogger().addHandler(self.handler)
class DialogAddProcess(QtWidgets.QDialog):
def __init__(self, database, parent=None):
super().__init__(parent)
self.completer = QtWidgets.QCompleter(list(database.keys()))
self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.linedit = QtWidgets.QLineEdit()
self.linedit.setMinimumWidth(200)
self.linedit.setCompleter(self.completer)
self.buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget(self.linedit)
self.layout.addWidget(self.buttons)
self.setLayout(self.layout)
self.setWindowTitle('Add Game')
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.cancel)
def cancel(self):
self.linedit.setText('')
self.close()
def closeEvent(self, event):
self.cancel()
super().closeEvent(event)
@common.tools.decorate_all_methods(common.tools.catch_exception(logger=logger))
class StreamManager_UI(common.systray.Window):
def __init__(self):
super().__init__()
self.setWindowTitle('Stream Manager')
self.setIcon(QtGui.QIcon('icon.png'))
self.load_stylesheet()
self.setCentralWidget(None)
self.log_panel = LogPanel()
self.log_panel.changed_loglevel.connect(self.set_loglevel)
self.manager = ManagerStreamThread()
self.manager.create_services()
self.manager.createdservices.connect(self.updated)
self.manager.validate.connect(self.update_invalidcategory)
self.manager.updated.connect(self.updated)
self.webremote = WebRemote(self.manager.config['base']['autostart'])
self.webremote.startedcheck.connect(self.start_check)
self.webremote.stoppedcheck.connect(self.stop_check)
self.webremote.start()
self.preferences = Preferences(self.manager, self)
self.preferences.updated.connect(self.preferences_updated)
self.preferences.finished.connect(self.set_shortcuts)
self.create_gamelayout()
self.create_statuslayout()
self.populate_appdata()
self.load_generalsettings()
self.create_menu()
self.setTabPosition(QtCore.Qt.AllDockWidgetAreas, QtWidgets.QTabWidget.North)
self.setDockOptions(QtWidgets.QMainWindow.AllowNestedDocks | QtWidgets.QMainWindow.AllowTabbedDocks)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.log_panel)
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.panel_status['dock'])
self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.gameslayout['dock'])
self.panel_status['dock'].raise_()
self.setAcceptDrops(True)
self.set_shortcuts(init=True)
self.read_qsettings()
if self.manager.config['base']['starttray']:
self.hide()
else:
self.show()
def set_dockable(self, state=None):
if state is None:
state = self.dockable.isChecked()
for i in [self.log_panel, self.gameslayout['dock'], self.panel_status['dock']]:
dummy = None if state else QtWidgets.QWidget()
i.setTitleBarWidget(dummy)
self.dockable.setChecked(state)
def read_qsettings(self):
self.settings = QtCore.QSettings('regnareb', 'Stream Manager')
if self.settings.value('initialised_once'):
self.restoreGeometry(self.settings.value('geometry'))
self.restoreState(self.settings.value('windowState'))
self.log_panel.interface['levels'].setCurrentIndex(self.log_panel.interface['levels'].findText(self.settings.value('logslevel')))
self.set_loglevel(self.settings.value('logslevel'))
logger.info('Loaded settings from last session.')
self.set_dockable(bool(self.settings.value('dockable')))
else:
self.first_launch()
def first_launch(self):
logger.info('First launch.')
self.set_loglevel('Warning')
self.tabifyDockWidget(self.panel_status['dock'], self.gameslayout['dock'])
self.tabifyDockWidget(self.gameslayout['dock'], self.log_panel)
self.log_panel.hide()
self.preferences.open()
self.preferences.tabs.setCurrentIndex(1)
self.preferences.tabs.tabBar().hide()
self.set_dockable(False)
self.settings.setValue('initialised_once', 1)
def closeEvent(self, event):
if self.trayIcon.isVisible():
if not self.settings.value('showed_quitmessage'):
QtWidgets.QMessageBox.information(self, "Minimise to System Tray", "The program will keep running in the system tray. To terminate the program, choose <b>Quit</b> in the context menu of the system tray icon.")
self.settings.setValue("showed_quitmessage", True)
self.panel_status['webpage'].load(QtCore.QUrl(""))
super().closeEvent(event)
else:
self.quit()
def restore(self):
if self.isHidden():
self.panel_status['webpage'].load(QtCore.QUrl("http://localhost:{}/".format(self.webremote.port)))
super().restore()
def quit(self):
self.manager.quit()
self.webremote.quit()
self.webremote.terminate()
self.settings.setValue("geometry", self.saveGeometry())
self.settings.setValue("windowState", self.saveState())
self.settings.setValue("dockable", self.dockable.isChecked() or '')
self.settings.setValue("logslevel", self.log_panel.interface['levels'].currentText())
if not self.manager.save_config():
msgBox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, "Can't Save Preferences", "Couldn't save the preferences. You can copy their content from \"Show Details\" to try to salvage them, or send them to the developer for debugging.")
msgBox.setDetailedText(str(self.manager.config))
msgBox.setStandardButtons(QtWidgets.QMessageBox.Close | QtWidgets.QMessageBox.Cancel)
msgBox.setDefaultButton(QtWidgets.QMessageBox.Close)
ret = msgBox.exec_()
if ret == QtWidgets.QMessageBox.Cancel:
return
super().quit()
def preferences_updated(self):
self.set_shortcuts()
self.manager.process = ''
def load_stylesheet(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data', 'theme', 'qtstylesheet.css')
with open(path) as f:
stylesheet = f.read()
self.setStyleSheet(stylesheet)
def dropEvent(self, event):
for url in event.mimeData().urls():
self.manager.load_credentials(url.toLocalFile())
def dragEnterEvent(self, event):
event.acceptProposedAction()
def start_check(self):
self.manager.start()
def stop_check(self):
self.manager.quit()
def updated(self, infos=None):
self.reload()
def reload(self):
self.panel_status['webpage'].reload()
def set_loglevel(self, level=''):
block_signals(self.log_panel.interface.values(), True)
if level not in self.log_panel.levels:
level = self.log_panel.interface['levels'].currentText()
self.manager.set_loglevel(level)
self.log_panel.interface['levels'].setCurrentIndex(self.log_panel.interface['levels'].findText(level))
block_signals(self.log_panel.interface.values(), False)
def mouseDoubleClickEvent(self, *args):
pos = self.pos()
geo = self.geometry()
if self.menuBar().isVisible():
self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
else:
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowStaysOnTopHint & ~QtCore.Qt.FramelessWindowHint)
self.show()
self.move(pos)
self.setGeometry(geo)
self.menuBar().setVisible(not self.menuBar().isVisible())
def create_menu(self):
def clipboard():
url = "http://localhost:{}/".format(self.webremote.port)
cb = QtWidgets.QApplication.clipboard()
cb.setText(url, mode=cb.Clipboard)
actionfile = self.menuBar().addMenu('File')
preferences = QtWidgets.QAction('&Preferences', self, triggered=self.preferences.open)
preferences.setMenuRole(QtWidgets.QAction.PreferencesRole)
actionfile.addAction(preferences)
actionfile.addAction(QtWidgets.QAction('&Copy Remote URL', self, triggered=clipboard))
actionfile.addSeparator()
actionfile.addAction(QtWidgets.QAction('&Import Preferences', self, triggered=self.import_settings))
actionfile.addAction(QtWidgets.QAction('&Export Preferences', self, triggered=self.export_settings))
actionfile.addAction(QtWidgets.QAction('&Import Game Database', self, triggered=self.import_database))
actionfile.addAction(QtWidgets.QAction('&Export Game Database', self, triggered=self.export_database))
actionfile.addSeparator()
actionfile.addAction(QtWidgets.QAction('&Quit', self, triggered=self.quit))
actionview = self.menuBar().addMenu('View')
self.dockable = QtWidgets.QAction('Dockable', self, triggered=self.set_dockable)
self.dockable.setCheckable(True)
actionview.addSeparator()
actionview.addAction(self.panel_status['dock'].toggleViewAction())
actionview.addAction(self.gameslayout['dock'].toggleViewAction())
actionview.addAction(self.log_panel.toggleViewAction())
actionview.addSeparator()
actionview.addAction(self.dockable)
actionhelp = self.menuBar().addMenu('Help')
actionhelp.addAction(QtWidgets.QAction('&Homepage', self, triggered=functools.partial(webbrowser.open, 'https://github.com/Regnareb/StreamManager')))
def create_gamelayout(self):
self.gameslayout = {}
self.gameslayout['llayout'] = QtWidgets.QVBoxLayout()
self.gameslayout['table'] = QtWidgets.QTableWidget()
self.gameslayout['table'].setObjectName('table_games')
self.gameslayout['table'].currentCellChanged.connect(self.load_appsettings)
self.gameslayout['table'].itemChanged.connect(self.rename_process)
self.gameslayout['table'].setEditTriggers(QtWidgets.QTableWidget.DoubleClicked)
self.gameslayout['table'].setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.gameslayout['table'].setColumnCount(1)
self.gameslayout['table'].setWordWrap(False)
self.gameslayout['table'].verticalHeader().setVisible(False)
self.gameslayout['table'].setMinimumWidth(200)
header = self.gameslayout['table'].horizontalHeader()
header.setMinimumHeight(40)
header.setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
header.sectionClicked.connect(self.load_generalsettings)
self.gameslayout['table'].setHorizontalHeaderLabels(['GENERAL'])
self.gameslayout['add_process'] = QtWidgets.QPushButton('+')
self.gameslayout['add_process'].setFixedSize(30, 27)
self.gameslayout['add_process'].clicked.connect(self.add_process)
self.gameslayout['remove_process'] = QtWidgets.QPushButton('-')
self.gameslayout['remove_process'].setFixedSize(30, 27)
self.gameslayout['remove_process'].clicked.connect(self.remove_process)
self.gameslayout['addremove_layout'] = QtWidgets.QHBoxLayout()
self.gameslayout['addremove_layout'].addWidget(self.gameslayout['add_process'])
self.gameslayout['addremove_layout'].addWidget(self.gameslayout['remove_process'])
self.gameslayout['addremove_layout'].addStretch()
self.gameslayout['llayout'].addWidget(self.gameslayout['table'])
self.gameslayout['llayout'].addLayout(self.gameslayout['addremove_layout'])
self.gameslayout['rlayout'] = QtWidgets.QFormLayout()
self.gameslayout['rlayout'].setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
self.gameslayout['stacked'] = QtWidgets.QStackedWidget()
self.gameslayout['stacked'].setSizePolicy(QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed))
self.gameslayout['stacked_processpath'] = LineEdit({True: QtWidgets.QApplication.style().standardIcon(QtWidgets.QStyle.SP_DirIcon)})
self.gameslayout['stacked_processpath'].changeButtonState(True)
self.gameslayout['stacked_processpath'].editingFinished.connect(self.save_appdata)
self.gameslayout['stacked_processpath'].buttonClicked.connect(self.get_processpath)
self.gameslayout['stacked_processpath'].setToolTip('Process Name/Path')
self.gameslayout['stacked_processlayout'] = QtWidgets.QFormLayout()
self.gameslayout['stacked_processlayout'].setRowWrapPolicy(QtWidgets.QFormLayout.WrapAllRows)
self.gameslayout['stacked_processlayout'].addRow('Executable name:', self.gameslayout['stacked_processpath'])
self.gameslayout['stacked_process'] = QtWidgets.QWidget()
self.gameslayout['stacked_processlayout'].setContentsMargins(0, 0, 0, 0)
self.gameslayout['stacked_process'].setLayout(self.gameslayout['stacked_processlayout'])
self.gameslayout['stacked_label'] = QtWidgets.QLabel()
self.gameslayout['stacked_label'].setText('Applied by default to any game that has no data of its own\nLocked settings are forced for all games, overriding per-game data')
self.gameslayout['stacked_label'].setAlignment(QtCore.Qt.AlignCenter)
self.gameslayout['stacked'].addWidget(self.gameslayout['stacked_process'])
self.gameslayout['stacked'].addWidget(self.gameslayout['stacked_label'])
self.gameslayout['rlayout'].addRow(self.gameslayout['stacked'])
self.gameslayout['stacked'].setCurrentWidget(self.gameslayout['stacked_label'])
elements = ['title', 'tags', 'command', 'description']
folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'theme', 'images'))
icons = {False: QtGui.QIcon(folder + "/unlock.png"), True: QtGui.QIcon(folder + "/lock.png")}
self.gameslayout['category_layout'] = QtWidgets.QHBoxLayout()
self.gameslayout['category_layout'].setSpacing(0)
self.gameslayout['category_conflicts'] = QtWidgets.QPushButton('...')
self.gameslayout['category_conflicts'].setStyleSheet('border: 1px solid rgba(0, 0, 0, 50); padding:4px')
self.gameslayout['category_conflicts'].setFixedWidth(self.gameslayout['category_conflicts'].sizeHint().height())
self.gameslayout['category_conflicts'].clicked.connect(self.show_assignations)
self.gameslayout['category'] = LineEdit(icons)
self.gameslayout['category'].setToolTip('Category')
self.gameslayout['category'].editingFinished.connect(functools.partial(self.save_appdata, validate=True))
self.completer = QtWidgets.QCompleter(list(self.manager.database.keys()))
self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self.gameslayout['category'].setCompleter(self.completer)
self.gameslayout['category_layout'].addWidget(self.gameslayout['category_conflicts'])
self.gameslayout['category_layout'].addWidget(self.gameslayout['category'])
self.gameslayout['rlayout'].addRow('Category:', self.gameslayout['category_layout'])
for key in elements:
self.gameslayout[key] = LineEdit(icons)
self.gameslayout[key].setMinimumHeight(30)
self.gameslayout[key].editingFinished.connect(self.save_appdata)
s = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
s.setRetainSizeWhenHidden(True)  # keep the row's space when the field is hidden
self.gameslayout[key].setSizePolicy(s)
self.gameslayout['rlayout'].addRow(key.title() + ':', self.gameslayout[key])
self.gameslayout[key].setToolTip(key.title())
self.gameslayout['rlayout'].labelForField(self.gameslayout['description']).setText('Game Description <span style="color:grey;">(!game)</span>:')
self.gameslayout['rlayout'].labelForField(self.gameslayout['command']).setText('Command to execute:')
self.gameslayout['container_llayout'] = QtWidgets.QWidget()
self.gameslayout['container_llayout'].setLayout(self.gameslayout['llayout'])
self.gameslayout['container_rlayout'] = QtWidgets.QWidget()
self.gameslayout['container_rlayout'].setLayout(self.gameslayout['rlayout'])
self.gameslayout['dock'] = QtWidgets.QDockWidget('Games')
self.gameslayout['dock'].setObjectName('dockgames')
self.gameslayout['dock_layout'] = QtWidgets.QHBoxLayout()
self.gameslayout['main'] = QtWidgets.QSplitter()
self.gameslayout['main'].addWidget(self.gameslayout['container_llayout'])
self.gameslayout['main'].addWidget(self.gameslayout['container_rlayout'])
self.gameslayout['main'].setStretchFactor(0, 0)
self.gameslayout['main'].setStretchFactor(1, 1)
self.gameslayout['main'].setCollapsible(0, 0)
self.gameslayout['main'].setCollapsible(1, 0)
self.gameslayout['dock'].setWidget(self.gameslayout['main'])
def create_filedialog(self, action='open'):
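"""Open a native file dialog and return the selected path, or '' if cancelled."""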
if action == 'open':
path, _filters = QtWidgets.QFileDialog.getOpenFileName()
elif action == 'save':
path, _filters = QtWidgets.QFileDialog.getSaveFileName()
return path
def get_processpath(self, *args):
path = self.create_filedialog()
if path:
self.gameslayout['stacked_processpath'].setText(path)
def add_process(self):
self.nodal = DialogAddProcess(self.manager.database)
self.nodal.exec_()
name = self.nodal.linedit.text()
if name:
row = self.create_gamerow(name)
index = self.gameslayout['table'].indexFromItem(row)
self.gameslayout['table'].setCurrentIndex(index)
if not self.rename_process():
self.gameslayout['table'].removeRow(index.row())
self.load_appsettings()
def rename_process(self, *args):
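"""Apply the edited process name; return True on success, None when the edit is reverted."""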
current = self.gameslayout['table'].currentItem()
new = current.text()
old = current._process
if not new:
current.setText(old)
return None
if self.manager.config['appdata'].get(new, ''):
msgBox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, "That Process Already Exists", 'The process "{}" already exists, are you sure you want to do that?\nIt will replace the old settings with the current ones.'.format(new))
msgBox.setStandardButtons(QtWidgets.QMessageBox.Ok | QtWidgets.QMessageBox.Cancel)
logger.warning('The same process is already registered: {}'.format(new))
ret = msgBox.exec_()
if ret == QtWidgets.QMessageBox.Ok:
# Delete the old data and replace with current
item = [i for i in self.gameslayout['table'].findItems(new, QtCore.Qt.MatchExactly) if i is not current][0]
index = self.gameslayout['table'].indexFromItem(item)
self.gameslayout['table'].removeRow(index.row())
currentindex = self.gameslayout['table'].indexFromItem(current)
self.gameslayout['table'].setCurrentIndex(currentindex)
else:
# Return to the previous name
current.setText(old)
return None
self.manager.rename_process(old, new)
current._process = new
self.gameslayout['table'].sortByColumn(0, QtCore.Qt.AscendingOrder)
return True
def remove_process(self):
current = self.gameslayout['table'].currentItem()
if current:
self.manager.remove_process(current.text())
self.gameslayout['table'].removeRow(self.gameslayout['table'].currentRow())
def import_settings(self):
path = self.create_filedialog(action='open')
if path:
self.manager.load_config(path, backup=False)
def export_settings(self):
path = self.create_filedialog(action='save')
if path:
self.manager.save_config(path)
def import_database(self):
path = self.create_filedialog(action='open')
if path:
self.manager.import_database(path)
def export_database(self):
path = self.create_filedialog(action='save')
if path:
self.manager.export_database(path)
def save_appdata(self, validate=False):
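"""Store the edited fields into the selected game's settings, or into the general settings when no row is selected."""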
current = self.gameslayout['table'].currentItem()
cat = self.gameslayout['category'].text()
title = self.gameslayout['title'].text()
description = self.gameslayout['description'].text()
tags = self.gameslayout['tags'].text().split(',')
command = self.gameslayout['command'].text()
tags = [i.strip() for i in tags if i]
data = {'category': cat, 'title': title, 'tags': tags, 'description': description, 'command': command}
if validate:
self.manager.config['assignations'] = self.manager.validate_assignations(self.manager.config['assignations'], cat)
if current and current.text():
self.manager.config['appdata'][current.text()].update(data)
self.manager.config['appdata'][current.text()]['path'][sys.platform] = self.gameslayout['stacked_processpath'].text()
self.update_gamerow(current)
elif not current:
for key in data.copy():
data['forced_' + key] = self.gameslayout[key].button.state
self.manager.config['base'].update(data)
self.manager.process = '' # Reset current process to be able to apply new settings
logger.debug(data)
def show_assignations(self):
category = self.gameslayout['category'].text()
self.preferences.open()
self.preferences.tabs.setCurrentIndex(2)
self.preferences.tabs.tabBar().hide()
if category:
index = self.preferences.tab_assignations.interface['processes'].findText(category)
self.preferences.tab_assignations.interface['processes'].setCurrentIndex(index)
def update_invalidcategory(self, category):
if self.manager.is_validcategories(category):
self.gameslayout['category_conflicts'].setStyleSheet('background: rgba(0, 0, 0, 15)')
elif category == self.gameslayout['category'].text():
self.gameslayout['category_conflicts'].setStyleSheet('background: rgba(255, 0, 0, 255)')
current = self.gameslayout['table'].currentItem()
if current:
self.update_gamerow(current)
def update_gamerow(self, row):
if row.text():
category = self.manager.config['appdata'].get(row.text(), {}).get('category', '')
self.gameslayout['table'].blockSignals(True)
if self.manager.is_validcategories(category):
row.setBackground(QtGui.QBrush())
else:
row.setBackground(QtGui.QColor(255, 0, 0))
self.gameslayout['table'].blockSignals(False)
def create_gamerow(self, process=''):
self.gameslayout['table'].blockSignals(True)
self.gameslayout['table'].itemChanged.disconnect(self.rename_process) # Qt bug workaround: blockSignals() does not block itemChanged, so disconnect it explicitly
row = QtWidgets.QTableWidgetItem()
row.setText(process)
row._process = process
self.update_gamerow(row)
rowcount = self.gameslayout['table'].rowCount()
self.gameslayout['table'].insertRow(rowcount)
self.gameslayout['table'].setItem(rowcount, 0, row)
self.gameslayout['table'].itemChanged.connect(self.rename_process)
self.gameslayout['table'].blockSignals(False)
return row
def populate_appdata(self):
for process in self.manager.config['appdata']:
self.create_gamerow(process)
self.gameslayout['table'].sortByColumn(0, QtCore.Qt.AscendingOrder)
def load_appsettings(self, *args):
block_signals(self.gameslayout.values(), True)
current = self.gameslayout['table'].currentItem()
if current:
process = current.text()
self.gameslayout['stacked'].setCurrentWidget(self.gameslayout['stacked_process'])
val = self.manager.config['appdata'].get(process, {})
finalvals = self.manager.get_informations(process)
self.gameslayout['stacked_processpath'].setText(val.get('path', {}).get(sys.platform, ''))
self.gameslayout['category'].setText(val.get('category'))
self.gameslayout['title'].setText(val.get('title'))
self.gameslayout['description'].setText(val.get('description'))
self.gameslayout['tags'].setText(', '.join(val.get('tags', [])))
self.gameslayout['command'].setText(val.get('command'))
self.gameslayout['title'].setPlaceholderText(finalvals.get('title'))
self.gameslayout['category'].setPlaceholderText(finalvals.get('category'))
self.gameslayout['tags'].setPlaceholderText(', '.join(finalvals.get('tags', [])))
self.gameslayout['description'].setPlaceholderText(finalvals.get('description'))
self.gameslayout['command'].setPlaceholderText(finalvals.get('command'))
self.gameslayout['title'].setButtonVisibility(False)
self.gameslayout['category'].setButtonVisibility(False)
self.gameslayout['command'].setButtonVisibility(False)
self.gameslayout['description'].setButtonVisibility(False)
self.gameslayout['tags'].setButtonVisibility(False)
self.gameslayout['remove_process'].setEnabled(True)
self.update_invalidcategory(val.get('category'))
block_signals(self.gameslayout.values(), False)
def load_generalsettings(self, *args):
block_signals(self.gameslayout.values(), True)
self.gameslayout['table'].clearSelection()
self.gameslayout['table'].setCurrentCell(-1, -1)
self.gameslayout['stacked'].setCurrentWidget(self.gameslayout['stacked_label'])
val = self.manager.config['base']
elements = ['category', 'title', 'tags', 'description', 'command']
for key in elements:
self.gameslayout[key].setPlaceholderText('')
self.gameslayout['category'].setText(val.get('category'))
self.gameslayout['title'].setText(val.get('title'))
self.gameslayout['description'].setText(val.get('description'))
self.gameslayout['tags'].setText(','.join(val.get('tags', [])))
self.gameslayout['command'].setText(val.get('command'))
self.gameslayout['title'].setButtonVisibility(True)
self.gameslayout['category'].setButtonVisibility(True)
self.gameslayout['command'].setButtonVisibility(True)
self.gameslayout['description'].setButtonVisibility(True)
self.gameslayout['tags'].setButtonVisibility(True)
self.gameslayout['title'].changeButtonState(val.get('forced_title', ''))
self.gameslayout['category'].changeButtonState(val.get('forced_category', ''))
self.gameslayout['command'].changeButtonState(val.get('forced_command', ''))
self.gameslayout['description'].changeButtonState(val.get('forced_description', ''))
self.gameslayout['tags'].changeButtonState(val.get('forced_tags', []))
self.gameslayout['remove_process'].setEnabled(False)
self.update_invalidcategory(val.get('category'))
block_signals(self.gameslayout.values(), False)
def set_shortcuts(self, init=False):
if init:
QtWidgets.QShortcut(QtGui.QKeySequence("F11"), self, self.mouseDoubleClickEvent)
QtWidgets.QShortcut(QtGui.QKeySequence("F5"), self, self.reload)
keyboard.add_hotkey(self.manager.config['shortcuts']['create_clip'], self.manager.create_clip)
keyboard.add_hotkey(self.manager.config['shortcuts']['create_marker'], self.manager.create_marker)
def create_statuslayout(self):
self.panel_status = {}
self.panel_status['dock'] = QtWidgets.QDockWidget('Status')
self.panel_status['dock'].setObjectName('dockstatus')
self.panel_status['webpage'] = QtWebEngineWidgets.QWebEngineView()
self.panel_status['webpage'].setAcceptDrops(False)
self.panel_status['webpage'].page().profile().clearHttpCache()
self.panel_status['webpage'].load(QtCore.QUrl("http://localhost:{}/".format(self.webremote.port)))
self.panel_status['dock'].setWidget(self.panel_status['webpage'])
def block_signals(iterable, block):
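"""Set blockSignals(block) on every object of the iterable."""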
for i in iterable:
i.blockSignals(block)
class Preferences(QtWidgets.QDialog):
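"""Preferences dialog grouping all the settings tabs; accepting commits every tab at once."""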
updated = QtCore.Signal()
finished = QtCore.Signal()
def __init__(self, manager, parent=None):
super().__init__(parent)
self.tabs = QtWidgets.QTabWidget()
self.tab_general = Preferences_General(manager)
self.tab_streams = Preferences_Streams(manager)
self.tab_assignations = Preferences_Assignations(manager)
self.tab_pauseprocesses = Preferences_Pauseprocesses(manager)
self.tab_pauseservices = Preferences_Pauseservices(manager)
self.tabs.addTab(self.tab_general, "General")
self.tabs.addTab(self.tab_streams, "Streams Services")
self.tabs.addTab(self.tab_assignations, "Game Assignations")
self.tabs.addTab(self.tab_pauseprocesses, "Pause Processes")
if sys.platform == 'win32':
self.tabs.addTab(self.tab_pauseservices, "Pause Windows Services")
self.buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.cancel)
self.mainLayout = QtWidgets.QVBoxLayout()
self.mainLayout.addWidget(self.tabs)
self.mainLayout.addWidget(self.buttons)
self.setLayout(self.mainLayout)
self.setWindowTitle('Preferences')
def reset(self):
self.tabs.tabBar().show()
self.tab_general.reset()
self.tab_streams.reset()
self.tab_pauseservices.reset()
self.tab_pauseprocesses.reset()
self.tab_assignations.reset()
def accept(self):
self.tab_general.accept()
self.tab_streams.accept()
self.tab_pauseservices.accept()
self.tab_pauseprocesses.accept()
self.tab_assignations.accept()
self.updated.emit()
super().accept()
def cancel(self):
self.finished.emit()
self.reject()
def closeEvent(self, event):
self.cancel()
super().closeEvent(event)
def open(self):
keyboard.unhook_all()
self.reset()
super().open()
class Preferences_General(QtWidgets.QWidget):
def __init__(self, manager, parent=None):
super().__init__(parent)
self.manager = manager
self.interface = {}
self.interface['layout'] = QtWidgets.QFormLayout()
self.interface['autostart'] = QtWidgets.QCheckBox()
self.interface['starttray'] = QtWidgets.QCheckBox()
self.interface['checktimer'] = QtWidgets.QSpinBox()
self.interface['reload'] = QtWidgets.QSpinBox()
self.interface['timeout'] = QtWidgets.QSpinBox()
self.interface['port'] = QtWidgets.QSpinBox()
self.interface['label_autostart'] = QtWidgets.QLabel('Automatically start the check')
self.interface['label_starttray'] = QtWidgets.QLabel('Automatically start minimised to the tray icon')
self.interface['label_checktimer'] = QtWidgets.QLabel('Check the foreground process every (x) seconds')
self.interface['label_reload'] = QtWidgets.QLabel('Reload the status webpage every (x) minutes')
self.interface['label_timeout'] = QtWidgets.QLabel('Number of seconds before the token creation timeouts')
self.interface['label_port'] = QtWidgets.QLabel('Port to use for the webremote (needs a restart)')
self.interface['checktimer'].setMinimum(1)
self.interface['reload'].setMinimum(5)
self.interface['timeout'].setMinimum(1)
self.interface['port'].setMinimum(1025)
self.interface['port'].setMaximum(65535)
self.interface['label_autostart'].setMinimumHeight(30)
self.interface['label_starttray'].setMinimumHeight(30)
self.interface['label_checktimer'].setMinimumHeight(30)
self.interface['label_reload'].setMinimumHeight(30)
self.interface['label_timeout'].setMinimumHeight(30)
self.interface['label_port'].setMinimumHeight(30)
self.interface['autostart'].setMinimumHeight(30)
self.interface['starttray'].setMinimumHeight(30)
self.interface['checktimer'].setMinimumHeight(30)
self.interface['reload'].setMinimumHeight(30)
self.interface['timeout'].setMinimumHeight(30)
self.interface['port'].setMinimumHeight(30)
self.interface['line'] = QtWidgets.QFrame()
self.interface['line'].setObjectName('stream_line')
self.interface['line'].setFrameShape(QtWidgets.QFrame.HLine)
self.interface['label_createclip'] = QtWidgets.QLabel('Create Clip')
self.interface['shortcut_createclip'] = KeySequenceRecorder('')
self.interface['label_createclip'].setMinimumHeight(30)
self.interface['shortcut_createclip'].setMinimumHeight(30)
self.interface['label_createmarker'] = QtWidgets.QLabel('Create Marker')
self.interface['shortcut_createmarker'] = KeySequenceRecorder('')
self.interface['label_createmarker'].setMinimumHeight(30)
self.interface['shortcut_createmarker'].setMinimumHeight(30)
self.interface['layout'].addRow(self.interface['label_autostart'], self.interface['autostart'])
self.interface['layout'].addRow(self.interface['label_starttray'], self.interface['starttray'])
self.interface['layout'].addRow(self.interface['label_checktimer'], self.interface['checktimer'])
self.interface['layout'].addRow(self.interface['label_reload'], self.interface['reload'])
self.interface['layout'].addRow(self.interface['label_timeout'], self.interface['timeout'])
self.interface['layout'].addRow(self.interface['label_port'], self.interface['port'])
self.interface['layout'].addRow(self.interface['line'])
self.interface['layout'].addRow(self.interface['label_createclip'], self.interface['shortcut_createclip'])
self.interface['layout'].addRow(self.interface['label_createmarker'], self.interface['shortcut_createmarker'])
self.setLayout(self.interface['layout'])
def accept(self):
self.manager.config['base']['checktimer'] = self.interface['checktimer'].text()
self.manager.config['base']['autostart'] = self.interface['autostart'].isChecked()
self.manager.config['base']['starttray'] = self.interface['starttray'].isChecked()
self.manager.config['base']['reload'] = self.interface['reload'].text()
self.manager.config['base']['timeout'] = self.interface['timeout'].text()
self.manager.config['base']['port'] = self.interface['port'].text()
self.manager.config['shortcuts']['create_clip'] = self.interface['shortcut_createclip'].text()
self.manager.config['shortcuts']['create_marker'] = self.interface['shortcut_createmarker'].text()
socket.setdefaulttimeout(int(self.manager.config['base']['timeout']))
def reset(self):
self.interface['checktimer'].setValue(int(self.manager.config['base']['checktimer']))
self.interface['autostart'].setChecked(self.manager.config['base']['autostart'])
self.interface['starttray'].setChecked(self.manager.config['base']['starttray'])
self.interface['reload'].setValue(int(self.manager.config['base']['reload']))
self.interface['timeout'].setValue(int(self.manager.config['base']['timeout']))
self.interface['port'].setValue(int(self.manager.config['base']['port']))
self.interface['shortcut_createclip'].setText(self.manager.config['shortcuts']['create_clip'])
self.interface['shortcut_createmarker'].setText(self.manager.config['shortcuts']['create_marker'])
class Preferences_Assignations(QtWidgets.QDialog):
def __init__(self, manager, parent=None):
super().__init__(parent)
self.manager = manager
self.interface = {}
self.interface['layout'] = QtWidgets.QVBoxLayout()
self.interface['label'] = QtWidgets.QLabel('Some stream services do not use the same name for the same activity. You can match the category for each service.\nFor example, YouTube only has "Gaming" and no specific games in its database.')
self.interface['label'].setAlignment(QtCore.Qt.AlignCenter)
self.interface['hlayout'] = QtWidgets.QHBoxLayout()
self.interface['processes'] = QtWidgets.QComboBox()
self.interface['validate'] = QtWidgets.QPushButton('Check All')
self.interface['processes'].setFixedHeight(27)
self.interface['validate'].setFixedHeight(27)
self.interface['validate'].clicked.connect(self.validate)
self.interface['table'] = QtWidgets.QTableWidget()
self.interface['table'].horizontalHeader().setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.interface['table'].verticalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Fixed)
self.interface['table'].horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.interface['table'].setWordWrap(True)
self.interface['hlayout'].addWidget(self.interface['processes'])
self.interface['hlayout'].addWidget(self.interface['validate'])
self.interface['layout'].addWidget(self.interface['label'])
self.interface['layout'].addLayout(self.interface['hlayout'])
self.interface['layout'].addWidget(self.interface['table'])
self.servicesorder = sorted(common.manager.SERVICES)
self.setLayout(self.interface['layout'])
self.set_layoutvertical()
def set_layoutvertical(self):
self.interface['processes'].show()
self.interface['processes'].currentIndexChanged.connect(self.populate)
self.interface['table'].insertColumn(0)
for service in self.servicesorder:
rowcount = self.interface['table'].rowCount()
self.interface['table'].insertRow(rowcount)
widget = QtWidgets.QLineEdit()
widget.editingFinished.connect(functools.partial(self.save_assignation, service))
widget.textEdited.connect(functools.partial(self.edited, widget, service))
self.interface['table'].setCellWidget(rowcount, 0, widget)
if not common.manager.SERVICES[service].Main.features['category']:
widget.setDisabled(True)
self.interface['line_' + service] = widget
self.interface['table'].setVerticalHeaderLabels(self.servicesorder)
self.interface['table'].horizontalHeader().setVisible(False)
def edited(self, widget, service, text):
# TODO: debounce with a QTimer so the service is not queried on every keystroke
service = self.manager.services.get(service)
if service:
autocompletion = service.query_category(text)
self.interface['completer'] = QtWidgets.QCompleter(list(autocompletion.keys()))
self.interface['completer'].setCompletionMode(QtWidgets.QCompleter.UnfilteredPopupCompletion)
self.interface['completer'].activated.connect(functools.partial(self.set_validautocomplete, service.name)) # If activated() then validated automatically
widget.setCompleter(self.interface['completer'])
def set_validautocomplete(self, service, text):
"""Force validation of the current category and service."""
current = self.interface['processes'].currentText()
self.temporary_settings.setdefault(current, {}).setdefault(service, {})
self.temporary_settings[current][service] = {'name': text, 'valid': True}
self.populate()
def validate(self, category=None):
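# Called with checked=False by the "Check All" button (validates every category),
# or with a truthy value from save_assignation (validates only the current one).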
if category:
category = self.interface['processes'].currentText()
self.temporary_settings = self.manager.validate_assignations(self.temporary_settings, category)
self.populate()
def populate(self):
block_signals(self.interface.values(), True)
current = self.interface['processes'].currentText()
for index, service in enumerate(self.servicesorder):
text = self.temporary_settings.get(current, {}).get(service, {}).get('name', '')
valid = self.temporary_settings.get(current, {}).get(service, {}).get('valid', None)
disabled = not common.manager.SERVICES[service].Main.features['category']
widget = self.interface['line_' + service]
widget.setText(text if not disabled else '')
if disabled:
widget.setStyleSheet('background-color:#efefef;border: transparent')
elif valid is None:
widget.setStyleSheet('background-color:#bbdefb;border: transparent')
elif not valid:
widget.setStyleSheet('background-color:#faa;border: transparent')
else:
widget.setStyleSheet('background-color:transparent')
block_signals(self.interface.values(), False)
def save_assignation(self, service):
category = self.interface['processes'].currentText()
widget = self.interface['line_' + service]
current = widget.text()
old = self.temporary_settings.get(category, {}).get(service, {}).get('name', '')
if category and current != old:
self.temporary_settings.setdefault(category, {}).setdefault(service, {})
self.temporary_settings[category][service] = {'name': current, 'valid': ''}
self.validate(category)
def accept(self):
assignations = self.manager.validate_assignations(self.temporary_settings)
self.manager.config['assignations'] = assignations
def reset(self):
block_signals(self.interface.values(), True)
self.temporary_settings = copy.deepcopy(self.manager.config['assignations'])
self.interface['processes'].clear()
categories = [i['category'] for i in self.manager.config['appdata'].values()]
self.interface['processes'].insertItems(0, sorted(categories))
self.populate()
block_signals(self.interface.values(), False)
class Preferences_Streams(QtWidgets.QWidget):
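"""Per-service settings tab: enable/disable, OAuth credentials and clip/marker delay."""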
def __init__(self, manager, parent=None):
# TODO: add a "get token" button
super().__init__(parent)
self.manager = manager
self.panel_services = {}
self.panel_services['container'] = QtWidgets.QGridLayout()
self.panel_services['llayout'] = QtWidgets.QVBoxLayout()
self.panel_services['list'] = QtWidgets.QTableWidget()
self.panel_services['list'].setObjectName('table_services')
self.panel_services['list'].setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.panel_services['list'].setColumnCount(1)
self.panel_services['list'].setWordWrap(False)
self.panel_services['list'].verticalHeader().setVisible(False)
self.panel_services['list'].verticalHeader().setDefaultSectionSize(40)
self.panel_services['list'].horizontalHeader().setVisible(False)
self.panel_services['list'].horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
self.panel_services['list'].currentCellChanged.connect(self.service_changed)
self.panel_services['list'].setFixedWidth(150)
self.panel_services['llayout'].addWidget(self.panel_services['list'])
self.panel_services['settings_formlayout'] = QtWidgets.QFormLayout()
self.panel_services['label_delay'] = QtWidgets.QLabel('Delay before Clip/Marker Creation')
self.panel_services['label_delay'].setToolTip('Useful when you stream with a delay timer, the clip will then be synced accordingly.')
self.panel_services['label_delay'].setMinimumHeight(30)
self.panel_services['line_delay'] = QtWidgets.QSpinBox()
self.panel_services['line_delay'].setToolTip('Useful when you stream with a delay timer, the clip will then be synced accordingly.')
self.panel_services['line_delay'].setMinimum(0)
self.panel_services['line_delay'].editingFinished.connect(functools.partial(self.save_servicedata, 'delay'))
self.panel_services['line_delay'].setMinimumHeight(30)
self.panel_services['settings_formlayout'].addRow(self.panel_services['label_delay'], self.panel_services['line_delay'])
self.elements = ['enabled', 'scope', 'redirect_uri', 'authorization_base_url', 'token_url', 'client_id', 'client_secret']
self.panel_services['advanced_settings_formlayout'] = QtWidgets.QFormLayout()
for elem in self.elements[1:]:
namelabel = 'label_' + elem
nameline = 'line_' + elem
self.panel_services[namelabel] = QtWidgets.QLabel(elem.replace('_', ' ').capitalize())
if elem in ['client_id', 'client_secret']:
self.panel_services[nameline] = LineditSpoiler()
self.panel_services[nameline].setProperty('mandatory', True)
else:
self.panel_services[nameline] = QtWidgets.QLineEdit()
self.panel_services[nameline].editingFinished.connect(functools.partial(self.save_servicedata, elem))
self.panel_services['advanced_settings_formlayout'].addRow(self.panel_services[namelabel], self.panel_services[nameline])
self.panel_services[namelabel].setObjectName(namelabel)
self.panel_services['label_client_id'].setTextFormat(QtCore.Qt.RichText)
self.panel_services['label_client_id'].setOpenExternalLinks(True)
self.panel_services['collapsible'] = CollapsibleBox("Advanced Settings")
self.panel_services['collapsible'].setContentLayout(self.panel_services['advanced_settings_formlayout'])
self.panel_services['collapsible_layout'] = QtWidgets.QVBoxLayout()
self.panel_services['collapsible_layout'].addWidget(self.panel_services['collapsible'])
self.panel_services['collapsible_layout'].addStretch()
self.panel_services['settings'] = QtWidgets.QVBoxLayout()
self.panel_services['settings'].addLayout(self.panel_services['settings_formlayout'])
self.panel_services['settings'].addLayout(self.panel_services['collapsible_layout'])
self.panel_services['label_enabled'] = QtWidgets.QLabel('Enabled')
self.panel_services['line_enabled'] = QtWidgets.QPushButton()
self.panel_services['line_enabled'].setCheckable(True)
self.panel_services['line_enabled'].setFixedWidth(71)
self.panel_services['line_enabled'].setObjectName('enable_service')
self.panel_services['line_enabled'].clicked.connect(functools.partial(self.save_servicedata, 'enabled'))
self.panel_services['label_enabled'].setMinimumHeight(30)
self.panel_services['line_enabled'].setMinimumHeight(30)
self.panel_services['reset_token'] = QtWidgets.QPushButton('Reset Auth')
self.panel_services['reset_token'].clicked.connect(self.reset_token)
self.panel_services['reset_token'].setMinimumHeight(30)
self.panel_services['hlayout'] = QtWidgets.QHBoxLayout()
self.panel_services['hlayout'].addWidget(self.panel_services['label_enabled'])
self.panel_services['hlayout'].addWidget(self.panel_services['line_enabled'])
self.panel_services['hlayout'].addStretch()
self.panel_services['hlayout'].addWidget(self.panel_services['reset_token'])
self.panel_services['line'] = QtWidgets.QFrame()
# self.panel_services['line'].setMinimumHeight(30)
self.panel_services['line'].setObjectName('stream_line')
self.panel_services['line'].setFrameShape(QtWidgets.QFrame.HLine)
self.panel_services['features_layout'] = QtWidgets.QVBoxLayout()
self.panel_services['features_layout'].setSpacing(0)
self.panel_services['label_features'] = QtWidgets.QLabel('Features')
self.panel_services['label_features'].setDisabled(True)
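# Build the feature labels from one service's feature dict; the set of
# feature keys is assumed to be identical across all services.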
features = list(common.manager.SERVICES['Facebook'].Main.features.keys())
for feat in features:
name = 'feature_' + feat
self.panel_services[name] = QtWidgets.QLabel(feat)
self.panel_services[name].setAlignment(QtCore.Qt.AlignCenter)
self.panel_services[name].setObjectName('features')
self.panel_services['features_layout'].addWidget(self.panel_services['feature_' + feat])
if feat == features[0]:
updateStyle(self.panel_services[name], 'firstv', True)
elif feat == features[-1]:
updateStyle(self.panel_services[name], 'lastv', True)
self.panel_services['container'].addLayout(self.panel_services['llayout'], 0, 0, -1, 1)
self.panel_services['container'].addLayout(self.panel_services['hlayout'], 0, 1, 1, -1)
self.panel_services['container'].addWidget(self.panel_services['line'], 1, 1, 1, -1)
self.panel_services['container'].addLayout(self.panel_services['features_layout'], 3, 1)
self.panel_services['container'].addLayout(self.panel_services['settings'], 3, 3, -1, 1)
self.panel_services['container'].setRowStretch(self.panel_services['container'].rowCount(), 1)
self.setLayout(self.panel_services['container'])
self.panel_services['list'].itemSelectionChanged.connect(self.service_changed)
def paintEvent(self, paintEvent):
item = self.panel_services['list'].currentItem()
service = item.text()
imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'theme', 'images', service + '.png'))
if os.path.isfile(imgpath):
pixmap = QtGui.QPixmap()
pixmap.load(imgpath)
widWidth = self.width()
widHeight = self.height()
pixmap = pixmap.scaled(10, widHeight, QtCore.Qt.KeepAspectRatioByExpanding)
paint = QtGui.QPainter(self)
paint.setOpacity(0.3)
paint.drawPixmap(widWidth-pixmap.width()*0.8, -pixmap.height()*0.2, pixmap)
def create_servicesrows(self):
self.panel_services['list'].blockSignals(True)
while self.panel_services['list'].rowCount():
self.panel_services['list'].removeRow(0)
for service in common.manager.SERVICES:
row = StreamTableWidgetItem(service)
rowcount = self.panel_services['list'].rowCount()
self.panel_services['list'].insertRow(rowcount)
self.panel_services['list'].setItem(rowcount, 0, row)
row.set_disabledrowstyle(self.temporary_settings[service].get('enabled', False))
self.panel_services['list'].setCurrentCell(rowcount, 0)
if self.temporary_settings[service].get('enabled', False):
self.service_changed()
if not self.check_service():
logger.error("The service {} is activated in the settings but it couldn't be created".format(service))
self.panel_services['list'].sortItems(QtCore.Qt.AscendingOrder)
self.panel_services['list'].blockSignals(False)
def service_changed(self):
block_signals(self.panel_services.values(), True)
item = self.panel_services['list'].currentItem()
service = item.text()
config = self.temporary_settings[service]
for elem in self.elements:
if elem == 'enabled':
val = config.get(elem, False)
self.panel_services['line_' + elem].setChecked(val)
item.set_disabledrowstyle(val)
else:
self.panel_services['line_' + elem].setText(str(config.get(elem, '')))
self.panel_services['label_client_id'].setText('Client id (<a href="{}">?</a>)'.format(common.manager.SERVICES[service].Main.devurl))
features = common.manager.SERVICES[service].Main.features
for feat, state in features.items():
updateStyle(self.panel_services['feature_' + feat], 'available', state)
if not features['clips']:
self.panel_services['label_delay'].hide()
self.panel_services['line_delay'].hide()
else:
self.panel_services['label_delay'].show()
self.panel_services['line_delay'].show()
self.panel_services['line_delay'].setValue(int(config.get('delay', 0)))
self.repaint()
block_signals(self.panel_services.values(), False)
def check_service(self):
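"""Try to (re)create the selected service when it is enabled; untick it and show a warning tooltip on failure."""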
item = self.panel_services['list'].currentItem()
service = item.text()
state = self.panel_services['line_enabled'].isChecked()
if state:
service = self.manager.create_service(service, self.temporary_settings[service], force=True)
if service:
self.temporary_settings[service.name] = service.config # Save access token
return True
if not service:
self.panel_services['line_enabled'].setChecked(False)
self.save_servicedata('enabled')
QtWidgets.QToolTip().showText(self.panel_services['line_enabled'].mapToGlobal(QtCore.QPoint(0, 20)), "<nobr>Couldn't create the service.</nobr><br><nobr>Check your <b style='color:red'>client id</b> and <b style='color:red'>client secret</b> below.</nobr> <br><br>The API quota for this service may have been reached; it may be unusable for some time.", msecDisplayTime=10000)
return False
def save_servicedata(self, element):
item = self.panel_services['list'].currentItem()
service = item.text()
if element == 'delay':
self.temporary_settings[service][element] = self.panel_services['line_delay'].text()
return
if element == 'enabled':
result = self.panel_services['line_enabled'].isChecked()
else:
result = self.panel_services['line_' + element].text()
if self.temporary_settings[service][element] != result:
self.temporary_settings[service][element] = result
if element != 'enabled':
self.reset_token()
self.check_service()
item.set_disabledrowstyle(self.temporary_settings[service]['enabled'])
def reset_token(self):
service = self.panel_services['list'].currentItem().text()
self.temporary_settings[service]['authorization'] = {}
self.check_service()
def accept(self):
for service in self.temporary_settings:
self.manager.config['streamservices'][service] = self.temporary_settings[service]
self.manager.services = {}
self.manager.create_services()
def reset(self):
self.temporary_settings = copy.deepcopy(self.manager.config['streamservices'])
self.create_servicesrows()
self.panel_services['list'].setCurrentCell(0, 0)
class StreamTableWidgetItem(QtWidgets.QTableWidgetItem):
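"""Table item showing a service icon and name, tinted according to its enabled state."""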
def __init__(self, service):
super().__init__()
self.service = service
imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'theme', 'images', self.service + '.png'))
self.setIcon(QtGui.QIcon(imgpath))
self.setText(self.service)
self.setFlags(self.flags() & ~QtCore.Qt.ItemIsEditable)
def set_disabledrowstyle(self, val):
if val:
color = QtGui.QColor.fromRgbF(0.282, 0.855, 0.255, 1)
self.setForeground(QtGui.QColor(0, 0, 0))
else:
color = QtGui.QColor.fromRgbF(1, 0, 0, 1)
self.setForeground(QtGui.QColor(150, 150, 150))
gradient = QtGui.QRadialGradient(130, 20, 5, 120, 20)
gradient.setColorAt(0, color)
gradient.setColorAt(0.8, color)
gradient.setColorAt(1, QtGui.QColor.fromRgbF(0, 0, 0, 0))
self.setBackground(QtGui.QBrush(gradient))
class Preferences_Pause(QtWidgets.QWidget):
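"""Base tab with two lists: entries moved to the right-hand list are paused while streaming."""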
def __init__(self, manager, name, parent=None):
super().__init__(parent)
self.manager = manager
self.config = self.manager.config['base'][name]
self.panel_pause = {}
self.panel_pause['container'] = QtWidgets.QGridLayout()
self.panel_pause['label'] = QtWidgets.QLabel('When you click "START", every entry in the right-hand list will be paused until "STOP" is pressed again.<br/>Useful for automatically pausing applications that use bandwidth or CPU.')
self.panel_pause['label'].setAlignment(QtCore.Qt.AlignCenter)
for elem in ['list', 'list_pause']:
self.panel_pause[elem] = QtWidgets.QTableWidget()
self.panel_pause[elem].setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.panel_pause[elem].setColumnCount(1)
self.panel_pause[elem].setWordWrap(False)
self.panel_pause[elem].verticalHeader().setVisible(False)
self.panel_pause[elem].horizontalHeader().setVisible(False)
self.panel_pause[elem].horizontalHeader().setSectionResizeMode(0, QtWidgets.QHeaderView.Stretch)
self.panel_pause['refresh'] = QtWidgets.QPushButton('🔃')
self.panel_pause['add'] = QtWidgets.QPushButton('→')
self.panel_pause['remove'] = QtWidgets.QPushButton('←')
self.panel_pause['refresh'].setFlat(True)
self.panel_pause['add'].setFlat(True)
self.panel_pause['remove'].setFlat(True)
self.panel_pause['refresh'].clicked.connect(self.populate_pauseprocess)
self.panel_pause['add'].clicked.connect(functools.partial(self.transfer_pauseprocess, 'add'))
self.panel_pause['remove'].clicked.connect(functools.partial(self.transfer_pauseprocess, 'remove'))
self.panel_pause['addremove_widget'] = QtWidgets.QWidget()
self.panel_pause['addremove_layout'] = QtWidgets.QVBoxLayout()
self.panel_pause['addremove_layout'].addWidget(self.panel_pause['refresh'])
self.panel_pause['addremove_layout'].addStretch()
self.panel_pause['addremove_layout'].addWidget(self.panel_pause['add'])
self.panel_pause['addremove_layout'].addWidget(self.panel_pause['remove'])
self.panel_pause['addremove_layout'].addStretch()
self.panel_pause['addremove_widget'].setLayout(self.panel_pause['addremove_layout'])
self.setLayout(self.panel_pause['container'])
self.panel_pause['container'].addWidget(self.panel_pause['label'], 0, 0, 1, -1)
self.panel_pause['container'].addWidget(self.panel_pause['list'], 1, 0, -1, 1)
self.panel_pause['container'].addWidget(self.panel_pause['addremove_widget'], 1, 1, -1, 1)
self.panel_pause['container'].addWidget(self.panel_pause['list_pause'], 1, 2, -1, 1)
def populate_pauseprocess(self):
while self.panel_pause['list'].rowCount():
self.panel_pause['list'].removeRow(0)
while self.panel_pause['list_pause'].rowCount():
self.panel_pause['list_pause'].removeRow(0)
self.currentprocesses = self.list_processes()
def insertrow(name, destination):
row = QtWidgets.QTableWidgetItem()
row.setText(name)
rowcount = destination.rowCount()
destination.insertRow(rowcount)
destination.setItem(rowcount, 0, row)
done = []
for service in self.currentprocesses.values():
if service['name'] in self.currentconfig:
insertrow(service['name'], self.panel_pause['list_pause'])
else:
insertrow(service['name'], self.panel_pause['list'])
done.append(service['name'])
for process in self.currentconfig:
if process not in done:
insertrow(process, self.panel_pause['list_pause'])
self.panel_pause['list'].sortByColumn(0, QtCore.Qt.AscendingOrder)
self.panel_pause['list_pause'].sortByColumn(0, QtCore.Qt.AscendingOrder)
def transfer_pauseprocess(self, operation):
if operation == 'add':
source = self.panel_pause['list']
destination = self.panel_pause['list_pause']
else:
source = self.panel_pause['list_pause']
destination = self.panel_pause['list']
item = source.currentItem()
if item:
item = item.text()
row = QtWidgets.QTableWidgetItem()
row.setText(item)
rowcount = destination.rowCount()
source.removeRow(source.currentRow())
destination.insertRow(rowcount)
destination.setItem(rowcount, 0, row)
self.panel_pause['list'].sortByColumn(0, QtCore.Qt.AscendingOrder)
self.panel_pause['list_pause'].sortByColumn(0, QtCore.Qt.AscendingOrder)
if operation == 'add':
self.currentconfig.append(item)
else:
self.currentconfig.remove(item)
def list_processes(self):
return {}
def accept(self):
rowdata = []
for row in range(self.panel_pause['list_pause'].rowCount()):
item = self.panel_pause['list_pause'].item(row, 0)
rowdata.append(item.text())
self.config.clear()
for i in rowdata:
self.config.append(i)
def reset(self):
self.currentconfig = self.config.copy()
self.populate_pauseprocess()
class Preferences_Pauseservices(Preferences_Pause):
def __init__(self, manager, parent=None):
super().__init__(manager, 'services', parent)
sizepolicy = self.panel_pause['refresh'].sizePolicy()
sizepolicy.setRetainSizeWhenHidden(True)
self.panel_pause['refresh'].setSizePolicy(sizepolicy)
self.panel_pause['refresh'].hide()
self.hasoverlay = False
if sys.platform == 'win32':
if not os.path.isfile('lib/pssuspend.exe'):
self.show_overlay()
admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if not admin:
self.panel_pause['label'].setText(self.panel_pause['label'].text() + '<br><b style="color:red">Requires Admin Rights!</b> Unless you have granted your account access to service management <a href="https://www.coretechnologies.com/products/ServiceSecurityEditor/">(?)</a>')
self.panel_pause['label'].setOpenExternalLinks(True)
def disable_all(self):
for i in self.panel_pause.values():
try:
i.setDisabled(True)
except AttributeError:
pass
def list_processes(self):
return common.tools.listservices()
def populate_pauseprocess(self):
super().populate_pauseprocess()
for service in self.currentprocesses.values():
try:
item = self.panel_pause['list'].findItems(service['name'], QtCore.Qt.MatchExactly)[0]
except IndexError:
item = self.panel_pause['list_pause'].findItems(service['name'], QtCore.Qt.MatchExactly)[0]
tooltip = '{} ({})\n\n{}'.format(service['display_name'], service['status'].upper(), service['description'].replace('. ', '.\n'))
item.setToolTip(tooltip.strip())
def resizeEvent(self, event):
if self.hasoverlay:
self.overlay.move(0, 0)
self.overlay.resize(self.width(), self.height())
def show_overlay(self):
self.overlay = OverlayWidget(text='This requires admin rights and the external tool pssuspend.exe from Microsoft. Due to licensing limitations it must be downloaded separately.\nEverything is automated and the file is only about 3 MB.\nDo you want to download it now?', buttontext='Download', parent=self)
self.overlay.move(0, 0)
self.overlay.resize(self.width(), self.height())
self.overlay.clicked.connect(self.download_pssuspend)
self.hasoverlay = True
self.overlay.show()
def download_pssuspend(self):
if common.tools.download_pssuspend('lib'):
self.close_overlay()
else:
self.overlay.label.setText(self.overlay.text + '\nThere was a problem while downloading the file.')
def close_overlay(self):
self.overlay.close()
self.hasoverlay = False
class OverlayWidget(QtWidgets.QWidget):
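"""Overlay covering its parent, with a centered message and a single action button."""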
clicked = QtCore.Signal()
def __init__(self, text, buttontext, parent=None):
super().__init__(parent)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.text = text
self.fillColor = QtWidgets.QWidget().palette().color(QtWidgets.QWidget().backgroundRole())
self.layout = QtWidgets.QVBoxLayout(self)
self.label = QtWidgets.QLabel(self)
self.label.setWordWrap(True)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setText(self.text)
self.button = QtWidgets.QPushButton(self)
self.button.setText(buttontext)
self.button.clicked.connect(self._clicked)
self.layout.addStretch()
self.layout.addWidget(self.label)
self.layout.addWidget(self.button)
self.layout.addStretch()
def paintEvent(self, event):
s = self.size()
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing, True)
qp.setBrush(self.fillColor)
qp.drawRect(0, 0, s.width(), s.height())
def _clicked(self):
self.clicked.emit()
class Preferences_Pauseprocesses(Preferences_Pause):
def __init__(self, manager, parent=None):
super().__init__(manager, 'processes', parent)
def list_processes(self):
return common.tools.listprocesses()
def populate_pauseprocess(self):
super().populate_pauseprocess()
for process in self.currentprocesses.values():
try:
name = process['name']
item = self.panel_pause['list'].findItems(name, QtCore.Qt.MatchExactly)[0]
except IndexError:
item = self.panel_pause['list_pause'].findItems(name, QtCore.Qt.MatchExactly)[0]
tooltip = '{0} ({1:.2f}% RAM)\n{2}'.format(name, process['memory_percent'], process['exe'])
item.setToolTip(tooltip.strip())
class WebRemote(common.remote.WebRemote, QtCore.QThread):
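"""Runs the web remote in its own Qt thread and relays check start/stop as Qt signals."""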
startedcheck = QtCore.Signal()
stoppedcheck = QtCore.Signal()
def __init__(self, autostart=True):
super().__init__()
self.running = autostart
def start_check(self):
self.startedcheck.emit()
def stop_check(self):
self.stoppedcheck.emit()
def run(self):
if self.running:
self.start_check()
self.server()
self.exec_()
class ManagerStreamThread(common.manager.ManageStream, QtCore.QThread):
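"""Runs the manager check loop in a Qt thread, pausing the configured processes and services while it runs."""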
validate = QtCore.Signal(str)
updated = QtCore.Signal(dict)
createdservices = QtCore.Signal()
def run(self):
with common.tools.pause_processes(self.config['base']['processes']):
with common.tools.pause_services(self.config['base']['services']):
self.create_services()
self.checktimer = QtCore.QTimer()
self.checktimer.timeout.connect(self.main)
self.checktimer.start(int(self.config['base']['checktimer']) * 1000)
self.exec_()
def main(self):
self.create_commandbots()
result = self.check_application()
if result:
self.updated.emit(result)
logger.info(result)
def create_services(self):
super().create_services()
self.createdservices.emit()
# @common.tools.threaded
def validate_assignations(self, config, category=None):
result = super().validate_assignations(config, category)
if category:
self.validate.emit(category)
return result
def load_credentials(self, path=''):
if not super().load_credentials(path):
QtWidgets.QMessageBox.warning(None, "Can't Load Credentials File", "The JSON file must be malformed. Check it with a text editor, or ask the person who sent it to you.", QtWidgets.QMessageBox.StandardButton.Ok)
def load_config(self, path='', backup=True):
if super().load_config(path, backup) is False:
msg = "The JSON file must be malformed. Check your file with a text editor or a JSON validator."
if backup:
msg += "\nThe preferences have been reset; the old preferences are still available at this path:\n{}".format(self.config_filepath + '_error')
msgBox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, "Can't Load Preference File", msg)
msgBox.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)
msgBox.exec_()
class StateButtons():
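"""Mixin adding a small button inside the host widget that cycles through the given icon states."""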
buttonClicked = QtCore.Signal(bool)
def __init__(self, icons, parent=None):
super().__init__(parent)
self.button = QtWidgets.QToolButton(self)
self.button.state = None
self.button.icons = icons
self.button.setStyleSheet('border: none; padding: 0px;')
self.button.setCursor(QtCore.Qt.PointingHandCursor)
self.button.clicked.connect(functools.partial(self.changeButtonState))
self.setButtonVisibility(True)
def setButtonVisibility(self, state):
frameWidth = self.style().pixelMetric(QtWidgets.QStyle.PM_DefaultFrameWidth)
buttonSize = self.button.sizeHint()
if state:
self.button.show()
self.setStyleSheet('padding-right: %dpx;' % (buttonSize.width() + frameWidth + 1))
self.setMinimumSize(max(self.minimumSizeHint().width(), buttonSize.width() + frameWidth*2 + 2),
max(self.minimumSizeHint().height(), buttonSize.height() + frameWidth*2 + 2))
else:
self.button.hide()
self.setStyleSheet('padding-right: 0px;')
def changeButtonState(self, state=None):
        if state is None:
try:
keys = list(self.button.icons.keys())
i = keys.index(self.button.state)
self.button.state = keys[i+1]
except (ValueError, IndexError):
self.button.state = keys[0]
else:
self.button.state = state
self.button.setIcon(self.button.icons[self.button.state])
self.buttonClicked.emit(self.button.state)
self.editingFinished.emit()
def resizeEvent(self, event):
buttonSize = self.button.sizeHint()
frameWidth = self.style().pixelMetric(QtWidgets.QStyle.PM_DefaultFrameWidth)
        self.button.move(self.rect().right() - frameWidth - buttonSize.width(),
                         (self.rect().bottom() - buttonSize.height() + 1) // 2)
super().resizeEvent(event)
class CollapsibleBox(QtWidgets.QWidget):
def __init__(self, title="", parent=None):
super().__init__(parent)
self.toggle_button = QtWidgets.QToolButton(text=title, checkable=True, checked=False)
self.toggle_button.setMinimumHeight(30)
self.toggle_button.setStyleSheet("QToolButton { border: none; }")
self.toggle_button.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toggle_button.setCheckable(True)
self.toggle_button.setArrowType(QtCore.Qt.RightArrow)
self.toggle_button.pressed.connect(self.on_pressed)
self.content_area = QtWidgets.QScrollArea(maximumHeight=0, minimumHeight=0)
# self.content_area.setFrameShape(QtWidgets.QFrame.NoFrame)
lay = QtWidgets.QVBoxLayout(self)
lay.setSpacing(0)
lay.setContentsMargins(0, 0, 0, 0)
lay.addWidget(self.toggle_button)
lay.addWidget(self.content_area)
self.toggle_animation = QtCore.QParallelAnimationGroup(self)
self.toggle_animation.addAnimation(QtCore.QPropertyAnimation(self, b"minimumHeight"))
self.toggle_animation.addAnimation(QtCore.QPropertyAnimation(self, b"maximumHeight"))
self.toggle_animation.addAnimation(QtCore.QPropertyAnimation(self.content_area, b"maximumHeight"))
@QtCore.Slot()
    def on_pressed(self):
        # 'pressed' fires before the check state flips, hence the inverted tests below
        checked = self.toggle_button.isChecked()
self.toggle_button.setArrowType(QtCore.Qt.DownArrow if not checked else QtCore.Qt.RightArrow)
self.toggle_animation.setDirection(QtCore.QAbstractAnimation.Backward if not checked else QtCore.QAbstractAnimation.Forward)
self.toggle_animation.start()
def setContentLayout(self, layout):
        old_layout = self.content_area.layout()
        if old_layout is not None:
            # Reparent the stale layout so Qt releases it; deleting the local
            # name alone would not detach it from the widget.
            QtWidgets.QWidget().setLayout(old_layout)
self.content_area.setLayout(layout)
collapsed_height = (self.sizeHint().height() - self.content_area.maximumHeight())
content_height = layout.sizeHint().height()
for i in range(self.toggle_animation.animationCount()):
animation = self.toggle_animation.animationAt(i)
animation.setDuration(0)
animation.setStartValue(collapsed_height + content_height)
animation.setEndValue(collapsed_height)
content_animation = self.toggle_animation.animationAt(self.toggle_animation.animationCount() - 1)
content_animation.setDuration(0)
content_animation.setStartValue(content_height)
content_animation.setEndValue(0)
self.toggle_animation.start()
class PlainTextEdit(StateButtons, QtWidgets.QPlainTextEdit):
editingFinished = QtCore.Signal()
def focusOutEvent(self, event):
super().focusOutEvent(event)
self.editingFinished.emit()
class LineEdit(StateButtons, QtWidgets.QLineEdit):
pass
class LineditSpoiler(QtWidgets.QLineEdit):
def __init__(self, blurAmount=10, parent=None):
super().__init__(parent=parent)
self.blurAmount = blurAmount
self.effect = QtWidgets.QGraphicsBlurEffect(self)
self.effect.setBlurRadius(blurAmount)
self.setGraphicsEffect(self.effect)
def enterEvent(self, event):
self.effect.setBlurRadius(0)
super().enterEvent(event)
def leaveEvent(self, event):
self.effect.setBlurRadius(self.blurAmount)
super().leaveEvent(event)
class KeySequenceRecorder(QtWidgets.QLineEdit):
def __init__(self, keySequence, parent=None):
super().__init__(parent)
self.setKeySequence(keySequence)
def setKeySequence(self, keySequence):
try:
self.keySequence = keySequence.toString(QtGui.QKeySequence.NativeText)
except AttributeError:
self.keySequence = keySequence
self.setText(self.keySequence)
def keyPressEvent(self, e):
if e.type() == QtCore.QEvent.KeyPress:
key = e.key()
if key == QtCore.Qt.Key_unknown:
logger.warning('Unknown key for shortcut')
return
if(key == QtCore.Qt.Key_Control or
key == QtCore.Qt.Key_Shift or
key == QtCore.Qt.Key_Alt or
key == QtCore.Qt.Key_Meta):
return
modifiers = e.modifiers()
if modifiers & QtCore.Qt.ShiftModifier:
key += QtCore.Qt.SHIFT
if modifiers & QtCore.Qt.ControlModifier:
key += QtCore.Qt.CTRL
if modifiers & QtCore.Qt.AltModifier:
key += QtCore.Qt.ALT
if modifiers & QtCore.Qt.MetaModifier:
key += QtCore.Qt.META
self.setKeySequence(QtGui.QKeySequence(key))
def updateStyle(obj, name, value):
    # Re-polish the widget after changing a dynamic property so stylesheet
    # selectors such as [name="value"] are re-evaluated.
    obj.setProperty(name, value)
    obj.setStyle(obj.style())
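# --- usage sketch (assumption: not part of the original source) --------------
# updateStyle() pairs with property-based stylesheet selectors; the "valid"
# property and the stylesheet below are hypothetical examples.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    app.setStyleSheet('QLineEdit[valid="false"] { border: 1px solid red; }')
    edit = QtWidgets.QLineEdit()
    updateStyle(edit, 'valid', False)  # re-polish so the red border is applied
    edit.show()
    sys.exit(app.exec_())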
| 50.874445
| 398
| 0.672114
| 8,503
| 80,229
| 6.232389
| 0.122192
| 0.037023
| 0.042344
| 0.011492
| 0.401857
| 0.271087
| 0.194192
| 0.1366
| 0.09569
| 0.075971
| 0
| 0.005079
| 0.19751
| 80,229
| 1,576
| 399
| 50.906726
| 0.81798
| 0.022785
| 0
| 0.192082
| 0
| 0.007331
| 0.132297
| 0.008256
| 0
| 0
| 0
| 0.000635
| 0
| 1
| 0.08871
| false
| 0.001466
| 0.014663
| 0.002199
| 0.13783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b1766db2c0ad0a27e5899ec9658c4cad1b1b54e
| 8,242
|
py
|
Python
|
map.py
|
BenoitCorsini/world-flights
|
5e5ce6575a912cb6a71bf1caf6ef7c2d388044ce
|
[
"MIT"
] | null | null | null |
map.py
|
BenoitCorsini/world-flights
|
5e5ce6575a912cb6a71bf1caf6ef7c2d388044ce
|
[
"MIT"
] | null | null | null |
map.py
|
BenoitCorsini/world-flights
|
5e5ce6575a912cb6a71bf1caf6ef7c2d388044ce
|
[
"MIT"
] | null | null | null |
import os
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Polygon, Rectangle
from config import PARAMS
class WorldMap(object):
def __init__(self, shapes=[], params=PARAMS):
'''
The 'WorldMap' class is useful in constructing a 3D figure of the world map
        and contains basic functions to normalize and project map coordinates.
The list 'shapes' is a list of list of coordinates, as represented below
#################################################################################
## ##
## shapes = [ ##
## points_1 = [(longitude_1, latitude_1), (longitude_2, latitude_2), ...], ##
## points_2 = [(longitude_1, latitude_1), (longitude_2, latitude_2), ...], ##
## ... ##
## ] ##
## ##
#################################################################################
'''
self.shapes = shapes
self.params = params
self.globe = None # a globe useful to clip the figures
@staticmethod
def normalize_angle(angle):
'''
Normalizes any angle to be in [-180,180).
'''
while angle >= 180:
angle -= 360
while angle < -180:
angle += 360
        assert -180 <= angle < 180 # checking that 'angle' is well-normalized
return angle
@staticmethod
def project(coord, angle=0, turn=0, flip=False, r=1, away=10):
'''
Projects the coordinates on the 3D map.
'turn' is useful for coordinates partly at the left/right end of the other side of the globe.
'away' is useful to avoid having non-desired lines on the map.
'''
x, y = coord
y = y*np.pi/180
x = x - angle + turn*360
unseen = False # if the coordinates are on the other side of the globe
pos_x = r*np.sin(x*np.pi/180)*np.cos(y)
pos_y = r*np.sin(y)
d = pos_x**2 + pos_y**2
        if (x > 90) and (d <= 1):
            pos_x = away*r*np.cos(y)
            pos_y *= away
            unseen = True
        elif (x < -90) and (d <= 1):
            pos_x = - away*r*np.cos(y)
            pos_y *= away
            unseen = True
if flip:
pos_x = - pos_x
return (pos_x, pos_y), unseen
def set_figure(self):
'''
Resets the figure.
'''
if hasattr(self, 'fig'):
plt.close('all')
# creating the general figure
self.fig, self.ax = plt.subplots(figsize=[self.params['figure']['size']]*2)
self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
self.ax.set_axis_off()
extra = 1 + self.params['figure']['extra_space']
self.ax.set_xlim(-extra, extra)
self.ax.set_ylim(-extra, extra)
if self.params['figure']['background'] is not None:
self.ax.add_patch(Rectangle(
xy=(-2*extra, -2*extra),
width=4*extra,
height=4*extra,
color=self.params['figure']['background'],
zorder=self.params['zorder']['background']
))
def plot_globe(self, angle=0):
'''
Plots the globe and its shade as viewed from 'angle'.
'''
angle = self.normalize_angle(angle)
self.globe = Circle(
xy=(0, 0),
radius=1,
color=self.params['globe']['water_colour'],
zorder=self.params['zorder']['water'],
lw=0,
)
self.ax.add_patch(self.globe)
for shape in self.shapes:
for turn in [-1, 0, 1]: # to cover for the boundary problems
points, unseen = zip(*[self.project(point, angle, turn) for point in shape])
if not all(unseen):
# the border of the land
self.ax.add_patch(Polygon(
xy=points,
color=self.params['globe']['border_colour'],
zorder=self.params['zorder']['land_border'],
lw=self.params['globe']['border'],
clip_path=self.globe,
joinstyle='round',
))
# the main land
self.ax.add_patch(Polygon(
xy=points,
color=self.params['globe']['land_colour'],
zorder=self.params['zorder']['land'],
lw=0,
clip_path=self.globe,
))
# plotting the shade
self.plot_shade(angle)
def plot_shade(self, angle=0):
'''
Plots the shaded version of the globe.
'''
angle = self.normalize_angle(angle + self.params['shade']['angle'])
# general transformation applied on the shade
transform = self.ax.transData.get_affine()
x_shift = transform.get_matrix()[0,2]
y_shift = transform.get_matrix()[1,2]
x_scale = transform.get_matrix()[0,0]
y_scale = transform.get_matrix()[1,1]
transform.set_matrix(np.diag(np.diag(transform.get_matrix()))) # only keep the diagonal
transform.scale(
self.params['shade']['ratio']*self.params['shade']['scale'],
self.params['shade']['scale']
)
transform.rotate_deg(self.params['shade']['rotation'])
transform.translate(
x_shift + x_scale*self.params['shade']['x_pos'],
y_shift - y_scale + y_scale*self.params['shade']['y_pos']
)
# plotting the shaded world sphere
self.ax.add_patch(Circle(
xy=(0, 0),
radius=1,
color=self.params['shade']['water_colour'],
zorder=self.params['zorder']['shade_water'],
alpha=self.params['shade']['alpha'],
transform=transform,
lw=0,
))
for shape in self.shapes:
for turn in [-1, 0, 1]: # to cover for the boundary problems
points, unseen = zip(*[self.project(point, angle, turn, flip=True, away=1) for point in shape])
if not all(unseen):
self.ax.add_patch(Polygon(
xy=points,
color=self.params['shade']['land_colour'],
zorder=self.params['zorder']['shade_land'],
alpha=self.params['shade']['alpha'],
transform=transform,
lw=0,
))
def savefig(self, name='map', folder='.', title=''):
'''
Saves the current state of the figure.
'''
assert hasattr(self, 'fig')
if not osp.exists(folder):
os.makedirs(folder)
# adds a title when available
if title:
bbox = {
'boxstyle' : 'round',
'edgecolor' : self.params['text']['colour'],
'facecolor' : self.params['text']['background'],
'linewidth' : self.params['text']['border'],
}
self.ax.text(
- 1 - self.params['figure']['extra_space'] + self.params['text']['x'],
- 1 - self.params['figure']['extra_space'] + self.params['text']['y'],
title,
fontsize=self.params['text']['fontsize'],
color=self.params['text']['colour'],
#fontweight='demibold',
bbox=bbox,
)
self.fig.savefig(osp.join(folder, name + '.png'), transparent=True)
def plot(self, name='map', folder='.', title='', angle=0):
'''
Plots the world globe.
'''
self.set_figure()
self.plot_globe(angle)
self.savefig(name, folder, title)
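# --- usage sketch (assumption: not part of the original file) ----------------
# A single toy triangle is enough to exercise the projection and the figure
# pipeline, assuming config.PARAMS provides every key referenced above.
if __name__ == '__main__':
    triangle = [(0.0, 0.0), (30.0, 10.0), (15.0, 25.0)]
    world = WorldMap(shapes=[triangle])
    world.plot(name='demo', folder='out', title='Toy map', angle=45)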
| 36.631111
| 111
| 0.473065
| 888
| 8,242
| 4.302928
| 0.228604
| 0.091599
| 0.043182
| 0.021984
| 0.326354
| 0.291808
| 0.217482
| 0.209369
| 0.1743
| 0.112013
| 0
| 0.019242
| 0.382067
| 8,242
| 225
| 112
| 36.631111
| 0.731003
| 0.219122
| 0
| 0.25
| 0
| 0
| 0.0815
| 0
| 0
| 0
| 0
| 0
| 0.013514
| 1
| 0.054054
| false
| 0
| 0.040541
| 0
| 0.114865
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b1897d255b0413c9d4325f9c12538b29485ce83
| 2,197
|
py
|
Python
|
chapter15/cache_aside/populate_db.py
|
JoeanAmiee/Mastering-Python-Design-Patterns-Second-Edition
|
89c55dcf5e1e0e730dde593b487050f360371932
|
[
"MIT"
] | 278
|
2018-08-16T12:59:24.000Z
|
2022-03-21T08:21:11.000Z
|
chapter15/cache_aside/populate_db.py
|
50611/Mastering-Python-Design-Patterns-Second-Edition
|
6efc4a935f15d2aa6c840131f72fb8c53a493a93
|
[
"MIT"
] | 4
|
2019-05-16T11:44:45.000Z
|
2022-02-04T07:24:47.000Z
|
chapter15/cache_aside/populate_db.py
|
50611/Mastering-Python-Design-Patterns-Second-Edition
|
6efc4a935f15d2aa6c840131f72fb8c53a493a93
|
[
"MIT"
] | 166
|
2018-08-13T21:47:16.000Z
|
2022-03-18T12:20:31.000Z
|
import sys
import sqlite3
import csv
from random import randint
from faker import Faker
fake = Faker()
def setup_db():
    db = None
    try:
        db = sqlite3.connect('data/quotes.sqlite3')
        # Get a cursor object
        cursor = db.cursor()
        cursor.execute('''
            CREATE TABLE quotes(id INTEGER PRIMARY KEY, text TEXT)
        ''')
        db.commit()
    except Exception as e:
        print(e)
    finally:
        if db is not None:
            db.close()
def add_quotes(quotes_list):
    quotes = []
    db = None
    try:
        db = sqlite3.connect('data/quotes.sqlite3')
        cursor = db.cursor()
for quote_text in quotes_list:
quote_id = randint(1, 100)
quote = (quote_id, quote_text)
try:
cursor.execute('''INSERT INTO quotes(id, text) VALUES(?, ?)''', quote)
quotes.append(quote)
except Exception as e:
print(f"Error with quote id {quote_id}: {e}")
db.commit()
except Exception as e:
print(e)
    finally:
        if db is not None:
            db.close()
return quotes
def main():
    args = sys.argv
    if len(args) < 2:
        print("usage: populate_db.py [init|update_db_and_cache|update_db_only]")
        sys.exit(1)
    if args[1] == 'init':
setup_db()
elif args[1] == 'update_db_and_cache':
quotes_list = [fake.sentence() for _ in range(1, 11)]
quotes = add_quotes(quotes_list)
print("New (fake) quotes added to the database:")
for q in quotes:
print(f"Added to DB: {q}")
# Populate the cache with this content
with open('data/quotes_cache.csv', "a", newline="") as csv_file:
writer = csv.DictWriter(csv_file,
fieldnames=['id', 'text'],
delimiter=";")
for q in quotes:
print(f"Adding '{q[1]}' to cache")
writer.writerow({'id': str(q[0]), 'text': q[1]})
elif args[1] == 'update_db_only':
quotes_list = [fake.sentence() for _ in range(1, 11)]
quotes = add_quotes(quotes_list)
print("New (fake) quotes added to the database ONLY:")
for q in quotes:
print(f"Added to DB: {q}")
if __name__ == "__main__":
main()
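# Usage sketch (assumption, matching the argument handling in main() above):
#   python populate_db.py init                 # create data/quotes.sqlite3
#   python populate_db.py update_db_and_cache  # add fake quotes to the DB and the CSV cache
#   python populate_db.py update_db_only       # add fake quotes to the DB, leaving the cache stale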
| 25.252874
| 86
| 0.522531
| 266
| 2,197
| 4.18797
| 0.319549
| 0.05386
| 0.045781
| 0.048474
| 0.437163
| 0.385996
| 0.369838
| 0.305206
| 0.305206
| 0.305206
| 0
| 0.014851
| 0.356395
| 2,197
| 86
| 87
| 25.546512
| 0.772984
| 0.025489
| 0
| 0.460317
| 0
| 0
| 0.192236
| 0.009822
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.079365
| 0
| 0.142857
| 0.126984
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b1a17df754eb85c581497a16047422be041c22f
| 523
|
py
|
Python
|
amulet/world_interface/chunk/interfaces/leveldb/leveldb_13/leveldb_13_interface.py
|
Podshot/Amulet-Core
|
678a722daa5e4487d193a7e947ccceacac325fd2
|
[
"MIT"
] | null | null | null |
amulet/world_interface/chunk/interfaces/leveldb/leveldb_13/leveldb_13_interface.py
|
Podshot/Amulet-Core
|
678a722daa5e4487d193a7e947ccceacac325fd2
|
[
"MIT"
] | null | null | null |
amulet/world_interface/chunk/interfaces/leveldb/leveldb_13/leveldb_13_interface.py
|
Podshot/Amulet-Core
|
678a722daa5e4487d193a7e947ccceacac325fd2
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from amulet.world_interface.chunk.interfaces.leveldb.leveldb_12.leveldb_12_interface import (
LevelDB12Interface,
)
class LevelDB13Interface(LevelDB12Interface):
def __init__(self):
LevelDB12Interface.__init__(self)
self.features["chunk_version"] = 13
@staticmethod
def is_valid(key):
if key[0] != "leveldb":
return False
if key[1] != 13:
return False
return True
INTERFACE_CLASS = LevelDB13Interface
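# Usage sketch (assumption: not part of the original file). The interface is
# selected by matching a (platform, chunk_version) key:
#   LevelDB13Interface().is_valid(("leveldb", 13))  # -> True
#   LevelDB13Interface().is_valid(("leveldb", 12))  # -> False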
| 21.791667
| 93
| 0.6826
| 54
| 523
| 6.259259
| 0.555556
| 0.053254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050251
| 0.239006
| 523
| 23
| 94
| 22.73913
| 0.798995
| 0
| 0
| 0.125
| 0
| 0
| 0.038241
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b1b507d5e7bd884f752c61b8ba7c52263c2268a
| 921
|
py
|
Python
|
oarepo_model_builder/builders/jsonschema_builder.py
|
mesemus/oarepo-model-builder
|
3dd9cc3db887c67f7b58281faae65c8162b0651e
|
[
"MIT"
] | null | null | null |
oarepo_model_builder/builders/jsonschema_builder.py
|
mesemus/oarepo-model-builder
|
3dd9cc3db887c67f7b58281faae65c8162b0651e
|
[
"MIT"
] | null | null | null |
oarepo_model_builder/builders/jsonschema_builder.py
|
mesemus/oarepo-model-builder
|
3dd9cc3db887c67f7b58281faae65c8162b0651e
|
[
"MIT"
] | null | null | null |
from oarepo_model_builder.builders.json import JSONBuilder
from oarepo_model_builder.output import JsonSchemaOutput
class JSONSchemaBuilder(JSONBuilder):
"""Handles building of jsonschema from a data model specification."""
def __init__(self):
super().__init__()
self.output = None
def pre(self, el, config, path, outputs):
if not path:
output = outputs['jsonschema'] = JsonSchemaOutput("TODO")
self.stack[0] = output.data
else:
path_skipped = path[-1].startswith('oarepo:')
if path_skipped:
self.push(self.IGNORED_SUBTREE, path)
elif isinstance(el, dict):
self.push({}, path)
elif isinstance(el, (list, tuple)):
self.push([], path)
else:
self.push(el, path)
def post(self, el, config, path, outputs):
self.pop()
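# A minimal traversal driver sketch (assumption: not part of the original
# file), showing the contract pre()/post() expect: pre() is called on the way
# down with the path of string keys accumulated so far, post() on the way up.
def walk(builder, el, config, path, outputs):
    builder.pre(el, config, path, outputs)
    if isinstance(el, dict):
        for key, value in el.items():
            walk(builder, value, config, path + [key], outputs)
    elif isinstance(el, (list, tuple)):
        for idx, value in enumerate(el):
            walk(builder, value, config, path + [str(idx)], outputs)
    builder.post(el, config, path, outputs)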
| 32.892857
| 73
| 0.587405
| 100
| 921
| 5.26
| 0.48
| 0.060837
| 0.057034
| 0.08365
| 0.087452
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003115
| 0.302932
| 921
| 27
| 74
| 34.111111
| 0.816199
| 0.068404
| 0
| 0.090909
| 0
| 0
| 0.024648
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b1e6350105907d7f3eb0e342a99233ff398a655
| 10,305
|
py
|
Python
|
benchmark/automated_agents_selenium/exatag_labels_agent.py
|
MedTAG/medtag-core
|
f2dae7b38230179d71babede7e4910631d91053f
|
[
"MIT"
] | 6
|
2021-12-20T12:15:17.000Z
|
2022-02-02T15:28:42.000Z
|
benchmark/automated_agents_selenium/exatag_labels_agent.py
|
MedTAG/medtag-core
|
f2dae7b38230179d71babede7e4910631d91053f
|
[
"MIT"
] | 1
|
2022-03-07T14:57:44.000Z
|
2022-03-11T18:11:55.000Z
|
benchmark/automated_agents_selenium/exatag_labels_agent.py
|
MedTAG/medtag-core
|
f2dae7b38230179d71babede7e4910631d91053f
|
[
"MIT"
] | 2
|
2021-05-29T09:44:38.000Z
|
2021-12-28T03:53:40.000Z
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import psycopg2
import time
import statistics
from selenium.webdriver.support.select import Select
import json
def wait_until_unchecked(driver, nums_3):
    inp = driver.find_elements_by_xpath('//input[@name="labels"]')
    count = 0
    for el in nums_3:
        if not inp[el].is_selected():
            count = count + 1
    if count == len(nums_3):
        return inp
    else:
        return False
def login(driver):
username = "selenium_test"
password = "selenium"
driver.get("http://examode.dei.unipd.it/exatag/")
driver.find_element_by_id("inputUsername").send_keys(username)
driver.find_element_by_id("inputPassword").send_keys(password)
driver.find_element_by_xpath('//button[text()="Log In"]').click()
try:
ele = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//button[text()="Start"]'))
)
all_spans = driver.find_elements_by_xpath("//div[@class='selection css-2b097c-container']")
for element in all_spans:
element.click()
if all_spans.index(element) == 0:
driver.find_element_by_xpath('//div[text()="English"]').click()
elif all_spans.index(element) == 1:
driver.find_element_by_xpath('//div[text()="Colon"]').click()
else:
driver.find_element_by_xpath('//div[text()="AOEC"]').click()
ele.click()
ele1 = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//button[text()="Labels"]'))
)
ele1.click()
except Exception as e:
print('ERROR')
print(e)
return False
else:
# print('ok')
return True
def exatag_lab_test(driver):
f = open('../datasets/labels/labels.json','r')
reports1 = json.load(f)
reports = []
for key in reports1.keys():
label = reports1[key]
reports.append(label)
try:
count = 0
nums = []
while count < 100:
labs = reports[count]
nums_1 = []
for cop in labs:
if cop == 'Cancer':
nums_1.append(0)
elif cop == 'Adenomatous polyp - high grade dysplasia':
nums_1.append(1)
elif cop == 'Adenomatous polyp - low grade dysplasia':
nums_1.append(2)
elif cop == 'Hyperplastic polyp':
nums_1.append(3)
elif cop == 'Non-informative':
nums_1.append(4)
nums.append(nums_1)
count = count+1
# print(str(count))
# print(str(labs))
# print('\n')
count = 0
testo = ''
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//div[@class="container_list"]'))
)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//div[@id="report_sel"]'))
)
inp = driver.find_elements_by_xpath('//input[@name="labels"]')
start = time.time()
click = 0
while count < 100:
# time.sleep(0.02)
# if count > 0:
# selected_option = select.first_selected_option
# if (selected_option.get_attribute('value') == str(count)):
time.sleep(0.02)
testo_rep = driver.find_element_by_xpath('//div[@id="report_sel"]')
if (testo != testo_rep.text):
testo = testo_rep.text
nums_3 = []
nums_2 = nums[count]
# if count>0:
# nums_3 = nums[count-1]
sel = False
                while not sel:
ss = 0
for el in range(len(inp)):
if inp[el].is_selected() == False:
ss = ss + 1
else:
break
if ss == len(inp):
sel = True
if sel:
for el in nums_2:
inp[el].click()
click = click+1
# time.sleep(0.02)
driver.find_element_by_xpath('//button[@id="but_sx"]').click()
click = click+1
time.sleep(0.2)
# time.sleep(0.02)
count = count + 1
end = time.time()
tot = end - start
print('tot: '+str(tot))
print('click: '+str(click))
for i in range(100):
driver.find_element_by_xpath('//button[@id="but_dx"]').click()
time.sleep(0.3)
WebDriverWait(driver, 10).until(
EC.element_to_be_clickable((By.XPATH, '//button[text()="Clear"]'))
).click()
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//button[text()="Yes"]'))
).click()
time.sleep(0.3)
return tot
except Exception as e:
print('ERROR')
print(e)
return False
# else:
# # print('ok')
# # driver.quit()
# cursor.execute('SELECT gt_json FROM ground_truth_log_file WHERE username = %s ORDER BY insertion_time ASC',
# ['selenium_test'])
# ans = cursor.fetchall()
# if len(ans) != len(reports):
# st = 'A groundtruth is missing'
# return st
# count = 0
# while count < 100:
# # report = json.dump(reports[count])
# labs_john = reports[count]['labels']
# nums = []
# json_el = ans[count][0]
#
#
# for cop in labs_john:
# nums.append(int(cop['seq_number']))
#
# labs_sel = json_el['labels']
# for cop in labs_sel:
# # print(cop['seq_number'])
# # print(nums)
# # print('\n')
# if cop['seq_number'] not in nums:
# stringa = str(count) + ' : ' + str(cop) + ' is missing.'
# return stringa
# # cursor.execute('SELECT gt_json FROM ground_truth_log_file WHERE username = %s ORDER BY insertion_time ASC',['selenium_test'])
# # ans = cursor.fetchall()
# # for el in ans:
# # json_el = el[0]
# # lab = json_el['labels']
# # for cop in lab:
# # print(cop['seq_number'])
# # print(nums)
# # print('\n')
# # if cop['seq_number'] not in nums:
# # stringa = str(count) + ' : ' + str(cop) + ' is missing.'
# # return stringa
# count = count+1
# return tot
# except (Exception, psycopg2.Error) as e:
# print(e)
#
#
# finally:
# # closing database connection.
# if (connection):
# cursor.close()
# connection.close()
if __name__ == '__main__':
exec_path = "" # INSERT HERE THE PATH TO THE DRIVER
driver = webdriver.Chrome(executable_path=exec_path)
data = []
timer = 0
try:
c = 0
log_in = login(driver)
if log_in:
while c < 40:
time.sleep(2)
print(str(c))
# connection = psycopg2.connect(dbname="groundtruthdb", user="ims", password="grace.period", host="localhost",
# port="5444")
#
# cursor = connection.cursor()
# cursor.execute('SELECT COUNT(*) FROM associate where username = %s;',['selenium_test'])
# ans = cursor.fetchone()[0]
# if(ans == 100):
# cursor.execute('DELETE FROM associate where username = %s;',['selenium_test'])
# connection.commit()
#
# cursor.execute('SELECT COUNT(*) FROM ground_truth_log_file where username = %s AND gt_type = %s;',['selenium_test','labels'])
# ans = cursor.fetchone()[0]
# if(ans == 100):
# cursor.execute('DELETE FROM ground_truth_log_file where username = %s and gt_type = %s;',['selenium_test','labels'])
# connection.commit()
if c > 0:
driver.refresh()
ele1 = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, '//button[text()="Labels"]'))
)
ele1.click()
timer_1 = exatag_lab_test(driver)
data.append(timer_1)
print(str(timer_1))
                if isinstance(timer_1, str):
break
else:
timer = timer + timer_1
c = c+1
except (Exception, psycopg2.Error) as e:
print(e)
finally:
# closing database connection.
# if (connection):
# cursor.close()
# connection.close()
print(timer)
std = statistics.stdev(data)
print(str(std))
| 35.78125
| 147
| 0.450849
| 1,004
| 10,305
| 4.473108
| 0.209163
| 0.026497
| 0.034068
| 0.038076
| 0.483411
| 0.433311
| 0.397016
| 0.348252
| 0.332665
| 0.313516
| 0
| 0.020876
| 0.432897
| 10,305
| 287
| 148
| 35.905923
| 0.747604
| 0.267055
| 0
| 0.294479
| 0
| 0
| 0.092093
| 0.05437
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018405
| false
| 0.01227
| 0.055215
| 0
| 0.110429
| 0.067485
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b1ec74bf2a93ae529c6f9b679c345029b8413cf
| 1,517
|
py
|
Python
|
randgenuntil.py
|
i-can-not-program/randgenuntil
|
cec853bc0c0a6589d60e1c6e3064e273e6278e0f
|
[
"Unlicense"
] | 1
|
2021-09-09T12:03:57.000Z
|
2021-09-09T12:03:57.000Z
|
randgenuntil.py
|
i-can-not-program/randgenuntil
|
cec853bc0c0a6589d60e1c6e3064e273e6278e0f
|
[
"Unlicense"
] | null | null | null |
randgenuntil.py
|
i-can-not-program/randgenuntil
|
cec853bc0c0a6589d60e1c6e3064e273e6278e0f
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import random
import argparse
import sys
def error(message):
print(message)
sys.exit(1)
parser = argparse.ArgumentParser()
parser.add_argument("number",
    help="Generate random numbers until one is equal to this.", type=int)
parser.add_argument("-s", "--start", type=int, default=0,
help="The range in which the random numbers are in starts with this number. (default 0)")
parser.add_argument("-e", "--end", type=int, default=32767,
help="The range in which the random numbers are in ends with this number. (default 32767)")
parser.add_argument("-c", "--count",
help="Counts the amount of tries it takes to get to the number.", action="store_true")
parser.add_argument("-n", "--newline",
help="Adds a newline between random numbers.", action="store_true")
args = parser.parse_args()
if args.start > args.end:
error("error: start is greater than end")
if args.number > args.end or args.number < args.start:
error("error: number is either greater than end or less than start")
end = "\n" if args.newline else "\r"
rand_num = ''
tries = 0
args.end += 1
while rand_num != args.number:
width = len(str(rand_num))
rand_num = random.randrange(args.start, args.end)
print("{rand_num: <{width}}".format(rand_num=rand_num, width=width), end=end)
tries += 1
if args.count:
print("{} tries to get to {}".format(tries, args.number))
elif end == "\r":
print()
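# Usage sketch (assumption, matching the argparse flags above):
#   python randgenuntil.py 42 -s 0 -e 100 -c -n
# draws random numbers in [0, 100] until 42 appears, printing one per line
# (-n) and finally reporting the number of tries (-c).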
| 35.27907
| 111
| 0.652604
| 225
| 1,517
| 4.333333
| 0.36
| 0.050256
| 0.087179
| 0.028718
| 0.082051
| 0.082051
| 0.082051
| 0.082051
| 0.082051
| 0.082051
| 0
| 0.014143
| 0.207647
| 1,517
| 42
| 112
| 36.119048
| 0.797005
| 0.013843
| 0
| 0
| 0
| 0
| 0.344043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.085714
| 0
| 0.114286
| 0.114286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b21181e3a74fe5a1a2c9a5d58470698abe2c63a
| 10,698
|
py
|
Python
|
python/cvi_toolkit/numpy_helper/tensor_compare.py
|
sophgo/tpu_compiler
|
6299ea0a3adae1e5c206bcb9bedf225d16e636db
|
[
"Apache-2.0"
] | 3
|
2022-03-14T11:47:20.000Z
|
2022-03-16T01:45:37.000Z
|
python/cvi_toolkit/numpy_helper/tensor_compare.py
|
sophgo/tpu_compiler
|
6299ea0a3adae1e5c206bcb9bedf225d16e636db
|
[
"Apache-2.0"
] | null | null | null |
python/cvi_toolkit/numpy_helper/tensor_compare.py
|
sophgo/tpu_compiler
|
6299ea0a3adae1e5c206bcb9bedf225d16e636db
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import numpy as np
import sys
import struct
# from math import fabs
from enum import IntEnum
from scipy import spatial
from math import *
from collections import OrderedDict
def second(elem):
return elem[1]
def get_topk(a, k):
k = min(a.size, k)
idx = np.argpartition(-a.ravel(), k - 1)[:k]
# return np.column_stack(np.unravel_index(idx, a.shape))
topk = list(zip(idx, np.take(a, idx)))
#return topk
topk.sort(key=second, reverse=True)
return topk
class TensorCompare():
NOT_MATCH = "NOT_MATCH"
EQUAL = "EQUAL"
NOT_EQUAL = "NOT_EQUAL"
CLOSE = "CLOSE"
SIMILAR = "SIMILAR"
    NOT_SIMILAR = "NOT_SIMILAR"
def __init__(self, close_order_tol=3,
cosine_similarity_tol = 0.99,
correlation_similarity_tol = 0.99,
euclidean_similarity_tol = 0.90,
signal_to_quantization_noise_tol = 50):
self.close_order_tol = close_order_tol
self.cosine_similarity_tol = cosine_similarity_tol
self.correlation_similarity_tol = correlation_similarity_tol
self.euclidean_similarity_tol = euclidean_similarity_tol
self.signal_to_quantization_noise_tol = signal_to_quantization_noise_tol
return
def square_rooted(self, x):
return sqrt(sum([a*a for a in x]))
def cosine_similarity(self, x, y):
numerator = sum(a*b for a,b in zip(x,y))
denominator = self.square_rooted(x)*self.square_rooted(y)
return round(numerator/float(denominator),3)
def euclidean_distance(self, x, y):
return sqrt(sum(pow(a-b,2) for a, b in zip(x, y)))
def sqnr_similarity(self, signal_raw, signal_dequant, remove_zero=True):
# SQNR is non-commutative
# Unlike other distance function
# Cannot change the order of signal_raw and signal_dequant
raw = signal_raw.flatten()
dequant = signal_dequant.flatten()
if remove_zero is True:
idx = raw != 0
raw = raw[idx]
dequant = dequant[idx]
noise = raw - dequant
avg_raw = np.sum(raw) / raw.size
avg_noise = np.sum(noise) / noise.size
raw_zero_mean = raw - avg_raw
noise_zero_mean = noise - avg_noise
var_raw_zero_mean = np.sum(np.square(raw_zero_mean))
var_noise_zero_mean = np.sum(np.square(noise_zero_mean))
if var_noise_zero_mean == 0 or var_raw_zero_mean == 0:
return float('inf')
sqnr = 10 * np.log10(var_raw_zero_mean / var_noise_zero_mean)
return sqnr
def all_diffs(self, d1, d2):
diffs = list()
d1f = d1.flatten()
d2f = d2.flatten()
if d1f.dtype == np.int8:
assert(d2f.dtype == np.int8)
for i in range(len(d1f)):
if (d1f[i] != d2f[i]):
diffs.append((i, d1f[i], d2f[i]))
else:
atol = 10**(-self.close_order_tol)
rtol = 10**(-self.close_order_tol)
for i in range(len(d1f)):
if fabs(d1f[i] - d2f[i]) > (atol + rtol * fabs(d2f[i])):
diffs.append((i, d1f[i], d2f[i]))
return diffs
def diff_details(self, d1, d2, verbose):
details = {}
if verbose > 1:
K = 10
tk1 = get_topk(d1, K)
tk2 = get_topk(d2, K)
details['top-k'] = (tk1, tk2)
if verbose > 2:
details['diffs'] = self.all_diffs(d1,d2)
if verbose > 3:
details['all'] = (d1, d2)
return details
def compare(self, d1, d2, verbose, int8_tensor_close=True):
similarities = {}
if d1.size != d2.size:
return (False, self.NOT_MATCH, similarities, None)
if np.array_equal(d1, d2):
return (True, self.EQUAL, similarities, None)
# int8 only check equal, not close
if d1.dtype == np.int8 and int8_tensor_close:
details = self.diff_details(d1, d2, verbose)
return (False, self.NOT_EQUAL, similarities, details)
# check allclose
for order in range((self.close_order_tol + 2), 1, -1):
if (np.allclose(d1, d2, rtol=1 * 10**(-order), atol=1e-8, equal_nan=True)):
break
if order >= self.close_order_tol:
similarities["close_order"] = order
return (True, self.CLOSE, similarities, None)
# check similarity
# cosine similarity
# cosine_similarity_my = self.cosine_similarity(d1.flatten(), d2.flatten())
cosine_similarity = 1 - spatial.distance.cosine(d1.flatten().astype(np.float32),
d2.flatten().astype(np.float32))
# correlation similarity
#1 - spatial.distance.correlation(d1.flatten(), d2.flatten())
correlation_similarity = cosine_similarity
# measure euclidean similarity
m = (d1+d2)/2
ed = self.euclidean_distance(d1.flatten(), d2.flatten())
sr = self.square_rooted(m.flatten())
euclidean_similarity = 1 - ed / sr
sqnr = self.sqnr_similarity(d1, d2)
similarities["cosine"] = cosine_similarity
similarities["correlation"] = correlation_similarity
similarities["euclid"] = euclidean_similarity
similarities["sqnr"] = sqnr
# check similarity
if (cosine_similarity > self.cosine_similarity_tol
and correlation_similarity > self.correlation_similarity_tol
and euclidean_similarity > self.euclidean_similarity_tol
and sqnr > self.signal_to_quantization_noise_tol):
return (True, self.SIMILAR, similarities, None)
else:
# Not similar
details = self.diff_details(d1, d2, verbose)
return (False, self.NOT_SIMILAR, similarities, details)
def int8_tensor_stats(self, d):
d_int8 = d.astype(np.int8)
pos = np.sum(d_int8 == 127)
neg = np.sum(d_int8 == -128)
zeros = np.sum(d_int8 == 0)
b_low = np.sum(np.abs(d_int8) <= 8) # 16, 32, 63
tol = d_int8.size
print(" pos(x=127) = {:.4f} [{}/{}]".format(pos / tol, pos, tol))
print(" neg(x=-128) = {:.4f} [{}/{}]".format(neg / tol, neg, tol))
print(" zeros(x=0) = {:.4f} [{}/{}]".format(zeros / tol, zeros, tol))
print(" low(abs(x)<8) = {:.4f} [{}/{}]".format(b_low / tol, b_low, tol))
def print_result(self, d1, name, result, verbose):
print("[{:<32}] {:>12} [{:>6}]".format(name, result[1],
"PASSED" if result[0] else "FAILED"))
if (verbose > 0):
print(" {} {} ".format(d1.shape, d1.dtype))
if (result[1] == self.CLOSE):
print(" close order = {}".format(result[2]["close_order"]))
if (result[1] == self.SIMILAR or result[1] == self.NOT_SIMILAR):
print(" cosine_similarity = {:.6f}".format(result[2]["cosine"]))
print(" correlation_similarity = {:.6f}".format(result[2]["correlation"]))
print(" euclidean_similarity = {:.6f}".format(result[2]["euclid"]))
print(" sqnr_similarity = {:.6f}".format(result[2]["sqnr"]))
if d1.dtype == np.int8:
self.int8_tensor_stats(d1)
details = result[-1]
if not details:
return
if (verbose > 1 and not result[0]):
print('top-k:')
print(' idx-t target idx-r ref')
tk1, tk2 = details['top-k']
for i in range(len(tk1)):
idx_t, target = tk1[i]
idx_r, ref = tk2[i]
print(" ", idx_t, target, idx_r, ref)
if (verbose > 2 and not result[0] and details['diffs'] is not None):
print("all-diffs:")
print(" idx target ref")
for i in details['diffs']:
print(" ", *i)
if (verbose > 3 and not result[0]):
print("all-elements:")
print(" idx target ref")
target, ref = details['all']
for index, val in np.ndenumerate(target):
print(" ", index, val, ref[index])
class TensorCompareStats():
def __init__(self):
self.passed = 0
self.failed = 0
self.results = OrderedDict()
self.count = {}
self.count[TensorCompare.NOT_MATCH] = 0
self.count[TensorCompare.EQUAL] = 0
self.count[TensorCompare.NOT_EQUAL] = 0
self.count[TensorCompare.CLOSE] = 0
self.count[TensorCompare.SIMILAR] = 0
self.count[TensorCompare.NOT_SIMILAR] = 0
self.min_cosine_similarity = 1.0
self.min_correlation_similarity = 1.0
self.min_euclidean_similarity = 1.0
self.min_sqnr = float('inf')
def update(self, name, result):
self.results[name] = result
if result[0]:
self.passed = self.passed + 1
assert (result[1] == TensorCompare.EQUAL
or result[1] == TensorCompare.CLOSE
or result[1] == TensorCompare.SIMILAR)
else:
self.failed = self.failed + 1
assert (result[1] == TensorCompare.NOT_EQUAL
or result[1] == TensorCompare.NOT_SIMILAR)
self.count[result[1]] = self.count[result[1]] + 1
# record min similarity
if result[1] == TensorCompare.SIMILAR or result[1] == TensorCompare.NOT_SIMILAR:
self.min_cosine_similarity = min(self.min_cosine_similarity, result[2]["cosine"])
self.min_correlation_similarity = min(self.min_correlation_similarity, result[2]["correlation"])
self.min_euclidean_similarity = min(self.min_euclidean_similarity, result[2]["euclid"])
self.min_sqnr = min(self.min_sqnr, result[2]["sqnr"])
def print_result(self):
print("%d compared"%(len(self.results)))
print("%d passed"%(self.passed))
print(" %d equal, %d close, %d similar"
%(self.count[TensorCompare.EQUAL],
self.count[TensorCompare.CLOSE],
self.count[TensorCompare.SIMILAR]))
print("%d failed"%(self.failed))
print(" %d not equal, %d not similar"
%(self.count[TensorCompare.NOT_EQUAL],
self.count[TensorCompare.NOT_SIMILAR]))
print("min_similiarity = ({}, {}, {}, {})".format(
self.min_cosine_similarity,
self.min_correlation_similarity,
self.min_euclidean_similarity,
self.min_sqnr))
def save_result(self, csv_file, operations, quant_types):
has_similarity = lambda x: (x == TensorCompare.SIMILAR
or x == TensorCompare.NOT_SIMILAR)
with open(csv_file, mode='w') as f:
f.write("name, op, quant, pass, sim_cos, sim_euc, sqnr\n")
for name, result in self.results.items():
op = operations.get(name, '-')
qtype = quant_types.get(name, '-')
is_equal = bool(result[1] == TensorCompare.EQUAL)
is_close = bool(result[1] == TensorCompare.CLOSE)
is_similar = bool(result[1] == TensorCompare.SIMILAR)
is_pass = bool(is_similar or is_close or is_equal)
cos = float(result[2]["cosine"]) if has_similarity(result[1]) else 1.0
euc = float(result[2]["euclid"]) if has_similarity(result[1]) else 1.0
sqnr = float(result[2]["sqnr"]) if has_similarity(result[1]) else float('-inf')
f.write("{}, {}, {}, {}, {}, {}, {}\n".format(
name, op, qtype, is_pass, cos, euc, sqnr))
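# --- usage sketch (assumption: not part of the original file) ----------------
# Compares a float32 tensor against a lightly perturbed copy and prints the
# verdict plus the similarity metrics computed above.
if __name__ == '__main__':
    tc = TensorCompare()
    ref = np.random.rand(4, 4).astype(np.float32)
    noisy = ref + np.random.normal(0.0, 1e-4, ref.shape).astype(np.float32)
    result = tc.compare(ref, noisy, verbose=0)
    tc.print_result(ref, 'demo_tensor', result, verbose=1)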
| 37.405594
| 102
| 0.623294
| 1,451
| 10,698
| 4.43694
| 0.146795
| 0.021746
| 0.037589
| 0.015843
| 0.17785
| 0.093507
| 0.068034
| 0.031998
| 0.023299
| 0.015843
| 0
| 0.027869
| 0.235278
| 10,698
| 285
| 103
| 37.536842
| 0.759076
| 0.051598
| 0
| 0.055319
| 0
| 0
| 0.084625
| 0.002172
| 0
| 0
| 0
| 0
| 0.012766
| 1
| 0.068085
| false
| 0.029787
| 0.029787
| 0.012766
| 0.204255
| 0.119149
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b239e38be546f3cf138ae920d55ac95f83dd8aa
| 4,508
|
py
|
Python
|
src/cleaners.py
|
RellikDog/dota2-win-predictor-v2
|
2751093d988830296ab0408a820e52bd7fd963b0
|
[
"MIT"
] | 1
|
2020-06-12T00:27:45.000Z
|
2020-06-12T00:27:45.000Z
|
src/cleaners.py
|
RellikDog/dota2-win-predictor-v2
|
2751093d988830296ab0408a820e52bd7fd963b0
|
[
"MIT"
] | null | null | null |
src/cleaners.py
|
RellikDog/dota2-win-predictor-v2
|
2751093d988830296ab0408a820e52bd7fd963b0
|
[
"MIT"
] | null | null | null |
from src.eda import make_counter
import pandas as pd
import numpy as np
from src.heroes import heroes, name_id, id_name
def id_list_from_history(data):
'''
    Takes raw data returned by api_calls.get_match_history() and returns a list of just the match IDs
Input:
data(list):
list of match objects
Output:
List of integers each representing a unique match id
'''
return [int(i['match_id']) for i in data]
def clean_match_details(match):
'''
Takes raw data from api_calls.get_match_details() and returns a dictionary with the pertinent details
Input:
match(dict):
Return of the api.steampowers api
Dict with one key-Val pair result is a dictionary with the match information
Output:
out(dict):
Dictionary of pertinent data:
radiant_win(bool): Team that won
match_date(timestamp): When the match was played
radiant_hero_ids(list of ints): List of hero Ids for the radiant team
dire_hero_ids(list of ints): List of hero Ids for the dire team
'''
data = match['result']
out = {}
out['_id'] = data['match_id']
out['radiant_win'] = int(data['radiant_win'])
out['match_date'] = data['start_time']
out['radiant_hero_ids'] = []
out['dire_hero_ids'] = []
for player in data['players']:
if player['player_slot'] < 128:
out['radiant_hero_ids'] += [player['hero_id']]
else:
out['dire_hero_ids'] += [player['hero_id']]
return out
def make_csv(counter, counter_data):
'''
    Takes in a premade counter built with make_counter from eda.py, plus the data used to make the counter, and produces a CSV.
Input:
counter(Counter):
Counter from all the DB data - used to generate unique columns
counter_data(mongo cursor list):
return of .find() on the raw collection
Output:
None: Creates a csv file in the same directory as run
'''
    #remove count column so keys include only hero ids
del counter['count']
uids = sorted(counter.keys())
uid_cols = []
    #add a column for each hero for each team
for i in uids:
uid_cols += [(str(i)+'R')]
uid_cols += [(str(i)+'D')]
#add the initial 3 columns and combine with hero id columns
columns = ['match_id', 'match_date', 'radiant_win']
columns += uid_cols
#create a template for each row
row_template = {col: 0 for col in columns}
rows_list = []
#for each match format a row and add to list
for match in counter_data:
temp_row = row_template.copy()
temp_row['match_id'] = match['_id']
temp_row['match_date'] = match['match_date']
temp_row['radiant_win'] = match['radiant_win']
for indx, hid in enumerate(match['radiant_hero_ids']):
temp_row[(str(hid)+'R')] = 1
temp_row[(str(match['dire_hero_ids'][indx])+'D')] = 1
rows_list += [temp_row]
#use rows to create dataframe and print to csv
df = pd.DataFrame(rows_list)
df.to_csv('test.csv')
def make_pred_row(df, rad, dire):
'''
    Makes a row for predictions to be made on
Input:
        df(dataframe):
            Read in from test.csv - used to generate columns
        rad(list):
            List of hero names received from the front end for the radiant team
        dire(list):
            List of hero names received from the front end for the dire team
Output:
pred_row(pandas dataframe):
            Converts hero names to IDs, then adds ones to the DF in the appropriate slot for their team
'''
    #drop unnecessary columns
drop_cols = ['Unnamed: 0', 'match_id', 'match_date', 'Unnamed: 1', 'radiant_win']
for i in drop_cols:
        try:
            df.pop(i)
        except KeyError:
            continue
#make blank row
pred_row = pd.DataFrame([np.zeros(len(df.columns))], columns=df.columns)
#fill in row
for indx, hero in enumerate(rad):
#get radiant hero id - insert to pred row with R
rhid = name_id(hero)
pred_row[str(rhid)+'R'] = 1.0
#get radiant hero id - insert to pred row with D
dhid = name_id(dire[indx])
pred_row[str(dhid)+'D'] = 1.0
return pred_row
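# --- usage sketch (assumption: not part of the original file) ----------------
# Assumes test.csv was produced by make_csv() above; the hero names are
# placeholders and must resolve through heroes.name_id().
if __name__ == '__main__':
    df = pd.read_csv('test.csv')
    rad = ['Axe', 'Lina', 'Pudge', 'Sniper', 'Lion']
    dire = ['Zeus', 'Juggernaut', 'Tiny', 'Mirana', 'Riki']
    row = make_pred_row(df, rad, dire)
    print(row.loc[0, row.loc[0] == 1.0])  # the ten populated team slots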
| 32.905109
| 119
| 0.590949
| 634
| 4,508
| 4.064669
| 0.283912
| 0.02988
| 0.021731
| 0.012418
| 0.103997
| 0.089251
| 0.089251
| 0.089251
| 0.089251
| 0.062088
| 0
| 0.004211
| 0.315217
| 4,508
| 137
| 120
| 32.905109
| 0.83058
| 0.471162
| 0
| 0
| 0
| 0
| 0.159848
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.196429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b2a94692c84f7c38268202ef3957322166618de
| 991
|
py
|
Python
|
tensorflow_v2/dragen1860/ch10/bn_main.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 13
|
2020-01-04T07:37:38.000Z
|
2021-08-31T05:19:58.000Z
|
tensorflow_v2/dragen1860/ch10/bn_main.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 3
|
2020-06-05T22:42:53.000Z
|
2020-08-24T07:18:54.000Z
|
tensorflow_v2/dragen1860/ch10/bn_main.py
|
gottaegbert/penter
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
[
"MIT"
] | 9
|
2020-10-19T04:53:06.000Z
|
2021-08-31T05:20:01.000Z
|
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers
# BatchNorm: normalizes the network activations
# 2 images with 4x4 size, 3 channels
# we explicitly enforce the mean and stddev to N(1, 0.5)
x = tf.random.normal([2, 4, 4, 3], mean=1.0, stddev=0.5)
net = layers.BatchNormalization(axis=-1, center=True, scale=True, trainable=True)
# layers.LayerNormalization
out = net(x)
print("forward in test mode:", net.variables)
out = net(x, training=True)
print("forward in train mode(1 step):", net.variables)
for i in range(100):
out = net(x, training=True)
print("forward in train mode(100 steps):", net.variables)
optimizer = optimizers.SGD(lr=1e-2)
for i in range(10):
with tf.GradientTape() as tape:
out = net(x, training=True)
loss = tf.reduce_mean(tf.pow(out, 2)) - 1
grads = tape.gradient(loss, net.trainable_variables)
optimizer.apply_gradients(zip(grads, net.trainable_variables))
print("backward(10 steps):", net.variables)
| 30.030303
| 81
| 0.71443
| 156
| 991
| 4.512821
| 0.461538
| 0.034091
| 0.039773
| 0.06392
| 0.146307
| 0.119318
| 0.119318
| 0.119318
| 0.119318
| 0.119318
| 0
| 0.036993
| 0.15439
| 991
| 32
| 82
| 30.96875
| 0.803103
| 0.136226
| 0
| 0.15
| 0
| 0
| 0.121034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b2b18a133cf43bd4b7425912dff7947dc039608
| 1,001
|
py
|
Python
|
pytocl/main.py
|
moltob/pytocl
|
905c09e649feca8feeaef6fdecd6767d82cdb28e
|
[
"MIT"
] | 12
|
2016-11-02T08:43:04.000Z
|
2020-05-17T11:23:32.000Z
|
pytocl/main.py
|
moltob/pytocl
|
905c09e649feca8feeaef6fdecd6767d82cdb28e
|
[
"MIT"
] | 1
|
2020-06-08T09:48:20.000Z
|
2020-06-08T09:48:20.000Z
|
pytocl/main.py
|
moltob/pytocl
|
905c09e649feca8feeaef6fdecd6767d82cdb28e
|
[
"MIT"
] | 3
|
2017-08-01T18:30:32.000Z
|
2018-08-04T13:10:15.000Z
|
"""Application entry point."""
import argparse
import logging
from pytocl.protocol import Client
def main():
"""Main entry point of application."""
parser = argparse.ArgumentParser(description='Client for TORCS racing car simulation with SCRC '
'network server.')
parser.add_argument('--hostname', help='Racing server host name.', default='localhost')
parser.add_argument('--port', help='Port to connect, 3001 - 3010 for clients 1 - 10.',
type=int, default=3001)
parser.add_argument('-v', help='Debug log level.', action='store_true')
args = parser.parse_args()
# switch log level:
if args.v:
level = logging.DEBUG
else:
level = logging.INFO
del args.v
logging.basicConfig(level=level, format="%(asctime)s %(levelname)7s %(name)s %(message)s")
# start client loop:
client = Client(**args.__dict__)
client.run()
if __name__ == '__main__':
main()
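# Usage sketch (assumption, matching the argparse flags above):
#   python main.py --hostname localhost --port 3001 -v
# connects client 1 to a local SCRC server with debug logging enabled.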
| 30.333333
| 100
| 0.622378
| 119
| 1,001
| 5.092437
| 0.571429
| 0.044554
| 0.084158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021248
| 0.247752
| 1,001
| 32
| 101
| 31.28125
| 0.783533
| 0.094905
| 0
| 0
| 0
| 0
| 0.272931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.142857
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4b2b25ace759328c89bdd5c3d6fc4d697b6531e4
| 2,381
|
py
|
Python
|
JSMultiline.py
|
axilleasiv/JSMultiline
|
59779d4b6c444461597b105e31aa0efb0e86805c
|
[
"MIT"
] | 6
|
2015-05-04T00:05:00.000Z
|
2016-12-09T14:40:47.000Z
|
JSMultiline.py
|
axilleasiv/JSMultiline
|
59779d4b6c444461597b105e31aa0efb0e86805c
|
[
"MIT"
] | 1
|
2018-06-25T17:13:37.000Z
|
2018-06-25T17:13:37.000Z
|
JSMultiline.py
|
axilleasiv/JSMultiline
|
59779d4b6c444461597b105e31aa0efb0e86805c
|
[
"MIT"
] | null | null | null |
import sublime
import sublime_plugin
import re
import os
rexLastTabs = re.compile(r'(\t+|\s+)$', re.MULTILINE)
rexEmptyLines = re.compile('^[ \t]*$\r?\n', re.MULTILINE)
rexCont = re.compile(r'[^\t\s].*[^\t\s]')
rexFormatted = re.compile(r"((?<=\s)'|(?<=\t)')|('*\s[\+|\\|])")
class RunMultilineAction(sublime_plugin.TextCommand):
def run(self, edit, action=None):
if not is_js_buffer(self.view):
            sublime.status_message('Multiline: Unsupported format.')
return False
for region in self.view.sel():
if region.empty():
continue
text = self.view.substr(region)
formatted = self.checkFormat(text)
if formatted:
replacement = formatted
else:
text = re.sub(r"'", '"', text)
replacement = self.format( rexEmptyLines.sub('', text), action )
self.view.replace(edit, region, replacement)
sublime.status_message('Multiline: Formatting is done.')
def checkFormat(self, text):
formatted = False
# only one line formatted
if text.find('\n') == -1 and (text.endswith("';") or text.endswith("\\")):
return text[1: len(text) -2]
if rexFormatted.search( text ):
formatted = rexFormatted.sub('', text)
            formatted = formatted[1:len(formatted) - 2]
return formatted
def format(self, text, action=None):
lines = text.split('\n')
        symbol = '+' if action == 'plus' else r'\\'
        quote = "'" if action == 'plus' else ""
for index in range(len(lines)):
lines[index] = rexLastTabs.sub('', lines[index])
if index == len(lines) - 1:
lines[index] = rexCont.sub( quote + rexCont.search( lines[index] ).group() + "';", lines[index])
elif index == 0 and action == 'slash':
lines[index] = rexCont.sub( "'" + rexCont.search( lines[index] ).group() + " " + symbol, lines[index])
else:
lines[index] = rexCont.sub( quote + rexCont.search( lines[index] ).group() + quote + " " + symbol, lines[index])
return '\n'.join(lines)
#https://github.com/jdc0589/JsFormat line 47
def is_js_buffer(view):
fName = view.file_name()
vSettings = view.settings()
syntaxPath = vSettings.get('syntax')
syntax = ""
ext = ""
    if fName is not None: # file exists, pull syntax type from extension
        ext = os.path.splitext(fName)[1][1:]
    if syntaxPath is not None:
        syntax = os.path.splitext(syntaxPath)[0].split('/')[-1].lower()
return ext in ['js', 'json'] or "javascript" in syntax or "json" in syntax
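# Formatting sketch (assumption: not part of the original file) of what the
# 'plus' action produces for a selected three-line fragment:
#
#   <div>              '<div>' +
#   hello        =>    'hello' +
#   </div>             '</div>';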
| 28.345238
| 116
| 0.639227
| 313
| 2,381
| 4.833866
| 0.319489
| 0.072703
| 0.019828
| 0.039656
| 0.104428
| 0.070059
| 0.070059
| 0.070059
| 0.070059
| 0.070059
| 0
| 0.008638
| 0.173457
| 2,381
| 84
| 117
| 28.345238
| 0.760163
| 0.046619
| 0
| 0.035088
| 0
| 0
| 0.086458
| 0.014998
| 0.017544
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.070175
| 0
| 0.245614
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|