| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
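The prefix / middle / suffix columns above have the shape of fill-in-the-middle (FIM) training samples. Below is a minimal sketch of how one row could be assembled into a single FIM training string; the sentinel strings and the tiny example row are illustrative assumptions, not values taken from this dataset.

```python
# Sketch: assemble one row (prefix, middle, suffix) into a PSM-ordered FIM string.
# The sentinel tokens are placeholders; real tokenizers define their own special tokens.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def to_fim_example(row):
    """row: a mapping with 'prefix', 'middle' and 'suffix' keys, as in the schema above."""
    return (FIM_PREFIX + row["prefix"]
            + FIM_SUFFIX + row["suffix"]
            + FIM_MIDDLE + row["middle"])

# Hypothetical usage with a hand-made row:
print(to_fim_example({"prefix": "def add(a, b):\n    ret",
                      "middle": "urn a",
                      "suffix": " + b\n"}))
```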
tableau/TabPy
|
tests/integration/test_custom_evaluate_timeout.py
|
Python
|
mit
| 1,223
| 0.000818
|
from . import integ_test_base
class TestCustomEvaluateTimeout(integ_test_base.IntegTestBase):
def _get_evaluate_timeout(self) -> str:
return "3"
def test_custom_evaluate_timeout_with_script(self):
# Uncomment the following line to preserve
# test case output and other files (config, state, ect.)
# in system temp folder.
self.set_delete_temp_folder(False)
payload = """
{
"data": { "_arg1": 1 },
"script":
"import time\\ntime.sleep(100)\\nreturn 1"
}
|
"""
headers = {
"Content-Type": "application/json",
"TabPy-Client": "Integration test for testing custom eva
|
luate timeouts "
"with scripts.",
}
conn = self._get_connection()
conn.request("POST", "/evaluate", payload, headers)
res = conn.getresponse()
actual_error_message = res.read().decode("utf-8")
self.assertEqual(408, res.status)
self.assertEqual(
'{"message": '
'"User defined script timed out. Timeout is set to 3.0 s.", '
'"info": {}}',
actual_error_message,
)
|
saikrishnarallabandi/clustergen_steroids
|
building_blocks/falcon_models.py
|
Python
|
apache-2.0
| 7,925
| 0.029653
|
import dynet_config
dynet_config.set_gpu()
import dynet as dy
import os
import pickle
import numpy as np
import sys
from sklearn import preprocessing
import logging
import argparse
debug = 0
class falcon_heavy(object):
def __init__(self, model, args):
self.pc = model.add_subcollection()
self.model = model
self.args = args
self.num_input = args.num_input
self.num_output = args.num_output
self.generic_layer_list = args.generic_layer_list
self.postspecificlayers = args.postspecificlayers
self.number_of_layers = len(self.generic_layer_list) + len(self.postspecificlayers) + 1
num_hidden_1 = self.generic_layer_list[0]
self.act_generic = args.act_generic
self.act_postspecific = args.act_postspecific
self.act_final = args.act_final
# Add first layer
if debug :
print "Adding input to the network ", num_hidden_1, self.num_input
self.W1 = self.pc.add_parameters((num_hidden_1, self.num_input))
self.b1 = self.pc.add_parameters((num_hidden_1))
# Add generic layers
self.weight_matrix_array = []
self.biases_array = []
self.weight_matrix_array.append(self.W1)
self.biases_array.append(self.b1)
for k in range(1, len(self.generic_layer_list)):
if debug:
print "At ", k , " adding generic weights ", self.generic_layer_list[k], self.generic_layer_list[k-1]
self.weight_matrix_array.append(self.model.add_parameters((self.generic_layer_list[k], self.generic_layer_list[k-1])))
self.biases_array.append(self.model.add_parameters((self.generic_layer_list[k])))
# Add specific layers
self.specific_weights_array = []
self.specific_biases_array = []
print "Adding specific layers "
for (i, layer) in enumerate(self.postspecificlayers):
if debug:
print "At ", i , " adding specific weights ", self.postspecificlayers[i], self.postspecificlayers[i-1]
self.specific_weights_array.append( self.model.add_parameters(( int(layer) , self.postspecificlayers[-1] )) )
self.specific_biases_array.append( self.model.add_parameters(( int(layer) )) )
# Residual
if debug:
print "Adding final layer ", self.num_output , int(layer)+self.num_input
self.W_final = self.model.add_parameters(( self.num_output , int(layer)+self.num_input ))
self.b_final = self.model.add_parameters((self.num_output))
# Spec
self.spec = (args)
def calculate_loss(self,input,output,tgtspk):
# Initial layer
weight_matrix_array = []
biases_array = []
acts = []
if debug:
print "The number of generic biases: ", len(self.biases_array)
print "The number of generic acts: ", len(self.act_generic)
# Generic layers
for (W,b,a) in zip(self.weight_matrix_array, self.biases_array, self.act_generic):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Specific layers
length = len(self.postspecificlayers)
start_index = (tgtspk -1)*length
idx = 0
if debug:
print "The number of specific biases: ", len(self.biases_array[start_index:start_index+length])
print "The number of specific acts: ", len(self.act_postspecific)
for (W,b,a) in zip(self.specific_weights_array[start_index:start_index+length], self.specific_biases_array[start_index:start_index+length], self.act_postspecific):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Final Layer
weight_matrix_array.append(dy.parameter(self.W_final))
biases_array.append(dy.parameter(self.b_final))
acts.append(self.act_final)
w = weight_matrix_array[0]
b = biases_array[0]
act = acts[0]
intermediate = act(dy.affine_transform([b, w, input]))
if debug:
print "Here are the dimensions of the biases: ", [len(k.value()) for k in biases_array]
print "Here are the acts: ", [k for k in acts]
print "Dimensions of the intermediate: "
print len(intermediate.value())
activations = [intermediate]
count = 1
for (W,b,g) in zip(weight_matrix_array[1:], biases_array[1:], acts[1:]):
if debug:
print "Adding to the layer number: ", count
print "Total layers: ", self.number_of_layers
if count == self.number_of_layers-1:
t = dy.concatenate([activations[-1],input])
pred = g(dy.affine_transform([b, W, t ]))
else:
pred = g(dy.affine_transform([b, W, activations[-1]]))
activations.append(pred)
count += 1
if debug:
print "Activation dimensions are : ", [len(k.value()) for k in activations]
print "Output dimensions are: ", len(output.value())
losses = output - pred
return dy.l2_norm(losses)
def predict(self,input, tgtspk):
# Initial layer
weight_matrix_array = []
biases_array = []
acts = []
if debug:
print "The number of generic biases: ", len(self.biases_array)
print "The number of generic acts: ", len(self.act_generic)
# Generic layers
for (W,b,a) in zip(self.weight_matrix_array, self.biases_array, self.act_generic):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Specific layers
length = len(self.postspecificlayers)
start_index = (tgtspk -1)*length
idx = 0
if debug:
print "The number of specific biases: ", len(self.biases_array[start_index:start_index+length])
print "The number of specific acts: ", len(self.act_postspecific)
for (W,b,a) in zip(self.specific_weights_array[start_index:start_index+length], self.specific_biases_array[start_index:start_index+length], self.act_postspecific):
weight_matrix_array.append(dy.parameter(W))
biases_array.append(dy.parameter(b))
acts.append(a)
# Final Layer
weight_matrix_array.append(dy.parameter(self.W_final))
biases_array.append(dy.parameter(self.b_final))
acts.append(self.act_final)
w = weight_matrix_array[0]
b = biases_array[0]
act = acts[0]
intermediate = act(dy.affine_transform([b, w, input]))
if debug:
print "Here are the dimensions of the biases: ", [len(k.value()) for k in biases_array]
print "Here are the acts: ", [k for k in acts]
print "Dimensions of the intermediate: "
print len(intermediate.value())
activations = [intermediate]
count = 1
for (W,b,g) in zip(weight_matrix_array[1:], biases_array[1:], acts[1:]):
if debug:
print "Adding to the layer number: ", count
print "Total layers: ", self.number_of_layers
if count == self.number_of_layers-1:
t = dy.concatenate([activations[-1],input])
pred = g(dy.affine_transform([b, W, t ]))
else:
pred = g(dy.affine_transform([b, W, activations[-1]]))
activations.append(pred)
count += 1
if debug:
print "Activation dimensions are : ", [len(k.value()) for k in activations]
print "Output dimensions are: ", len(output.value())
return activations[-1]
|
pashinin-com/pashinin.com
|
src/pashinin/admin.py
|
Python
|
gpl-3.0
| 1,211
| 0
|
from django.contrib import admin
from .models import Lesson, Course, CourseLead, QA
# from django.utils.translation import ugettext_lazy as _
from ordered_model.admin import OrderedModelAdmin
from core.models import User
# from adminfilters.models import Species, Breed
class UserAdminInline(admin.TabularInline):
model = User
@admin.register(Lesson)
class LessonAdmin(admin.ModelAdmin):
ordering = ['-start']
list_filter = ('student', )
list_display = ('start', 'student')
save_as = True
# raw_id_fields = ("student",)
# inlines = [UserAdminInline]
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'published', )
ordering = ['id']
@admin.register(CourseLead)
class CourseLeadAdmin(admin.ModelAdmin):
list_display = (
'name',
'contact',
'course',
'status',
'student',
)
list_filter = ('status', )
ordering = ['status']
@admin.register(QA)
class QAAdmin(OrderedModelAdmin):
list_display = (
'order',
'question',
'move_up_down_links',
)
# list_filter = ('status', )
list_display_links = ('question', )
ordering = ['order']
|
RadonX/iScript
|
leetcode_problems.py
|
Python
|
mit
| 4,146
| 0.002894
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import sys
import re
import os
import argparse
import requests
from lxml import html as lxml_html
try:
import html
except ImportError:
import HTMLParser
html = HTMLParser.HTMLParser()
try:
import cPickle as pk
except ImportError:
import pickle as pk
class LeetcodeProblems(object):
def get_problems_info(self):
leetcode_url = 'https://leetcode.com/problemset/algorithms'
res = requests.get(leetcode_url)
if not res.ok:
print('request error')
sys.exit()
cm = res.text
cmt = cm.split('tbody>')[-2]
indexs = re.findall(r'<td>(\d+)</td>', cmt)
problem_urls = ['https://leetcode.com' + url \
for url in re.findall(
r'<a href="(/problems/.+?)"', cmt)]
levels = re.findall(r"<td value='\d*'>(.+?)</td>", cmt)
tinfos = zip(indexs, levels, problem_urls)
assert (len(indexs) == len(problem_urls) == len(levels))
infos = []
for info in tinfos:
res = requests.get(info[-1])
if not res.ok:
print('request error')
|
sys.exit()
tree = lxml_html.fromstring(res.text)
title = tree.xpath('//meta[@property="og:title"]/@content')[0]
description = tree.xpath('//meta[@property="description"]/@content')
if not description:
|
description = tree.xpath('//meta[@property="og:description"]/@content')[0]
else:
description = description[0]
description = html.unescape(description.strip())
tags = tree.xpath('//div[@id="tags"]/following::a[@class="btn btn-xs btn-primary"]/text()')
infos.append(
{
'title': title,
'level': info[1],
'index': int(info[0]),
'description': description,
'tags': tags
}
)
with open('leecode_problems.pk', 'wb') as g:
pk.dump(infos, g)
return infos
def to_text(self, pm_infos):
if self.args.index:
key = 'index'
elif self.args.title:
key = 'title'
elif self.args.tag:
key = 'tags'
elif self.args.level:
key = 'level'
else:
key = 'index'
infos = sorted(pm_infos, key=lambda i: i[key])
text_template = '## {index} - {title}\n' \
'~{level}~ {tags}\n' \
'{description}\n' + '\n' * self.args.line
text = ''
for info in infos:
if self.args.rm_blank:
info['description'] = re.sub(r'[\n\r]+', r'\n', info['description'])
text += text_template.format(**info)
with open('leecode problems.txt', 'w') as g:
g.write(text)
def run(self):
if os.path.exists('leecode_problems.pk') and not self.args.redownload:
with open('leecode_problems.pk', 'rb') as f:
pm_infos = pk.load(f)
else:
pm_infos = self.get_problems_info()
print('find %s problems.' % len(pm_infos))
self.to_text(pm_infos)
def handle_args(argv):
p = argparse.ArgumentParser(description='extract all leecode problems to location')
p.add_argument('--index', action='store_true', help='sort by index')
p.add_argument('--level', action='store_true', help='sort by level')
p.add_argument('--tag', action='store_true', help='sort by tag')
p.add_argument('--title', action='store_true', help='sort by title')
p.add_argument('--rm_blank', action='store_true', help='remove blank')
p.add_argument('--line', action='store', type=int, default=10, help='blank of two problems')
p.add_argument('-r', '--redownload', action='store_true', help='redownload data')
args = p.parse_args(argv[1:])
return args
def main(argv):
args = handle_args(argv)
x = LeetcodeProblems()
x.args = args
x.run()
if __name__ == '__main__':
argv = sys.argv
main(argv)
|
whiplash01/pyCycle
|
src/pycycle/heat_exchanger.py
|
Python
|
apache-2.0
| 4,907
| 0.01773
|
"""
preHeatEx.py - (Run this before heatExchanger2.py)
Performs inital energy balance for a basic heat exchanger design
Originally built by Scott Jones in NPSS, ported and augmented by Jeff Chin
NTU (effectiveness) Method
Determine the heat transfer rate and outlet temperatures when the type and size of the heat exchanger is specified.
NTU Limitations
1) Effectiveness of the chosen heat exchanger must be known (empirical)
Compatible with OpenMDAO v0.8.1
"""
from math import log, pi, sqrt, e
from openmdao.main.api import Assembly, Component
from openmdao.lib.datatypes.api import Float, Bool
from openmdao.lib.drivers.api import BroydenSolver
from openmdao.main.api import convert_units as cu
from pycycle.flowstation import FlowStationVar, FlowStation
from pycycle.cycle_component import CycleComponent
class HeatExchanger(CycleComponent):
"""Calculates output temperatures for water and air, and heat transfer, for a given
water flow rate for a water-to-air heat exchanger"""
#inputs
W_cold = Float(.992, iotype="in", units = 'lbm/s', desc='Mass flow rate of cold fluid (water)')
Cp_cold = Float(0.9993, iotype="in", units = 'Btu/(lbm*R)', desc='Specific Heat of the cold fluid (water)')
T_cold_in = Float(518.58, iotype="in", units = 'R', desc='Temp of water into heat exchanger')
effectiveness = Float(.9765, iotype="in", desc='Heat Exchange Effectiveness')
MNexit_des = Float(.6, iotype="in", desc="mach number at the exit of heat exchanger")
dPqP = Float(.1, iotype="in", desc="pressure differential as a fraction of incomming pressure")
#State Vars
T_hot_out = Float(1400, iotype="in", units = 'R', desc='Temp of air out of the heat exchanger')
T_cold_out = Float(518, iotype="in", units = 'R', desc='Temp of water out of the heat exchanger')
Fl_I = FlowStationVar(iotype="in", desc="incoming air stream to heat exchanger", copy=None)
#outputs
Qreleased = Float(iotype="out", units = 'hp', desc='Energy Released')
Qabsorbed= Float(iotype="out", units = 'hp', desc='Energy Absorbed')
LMTD = Float(iotype="out", desc='Logarathmic Mean Temperature Difference')
Qmax= Float(iotype="out", units = 'hp', desc='Theoretical maximum possible heat transfer')
residual_qmax = Float(iotype="out", desc='Residual of max*effectiveness')
residual_e_balance = Float(iotype="out", desc='Residual of the energy balance')
Fl_O = FlowStationVar(iotype="out", desc="outgoing air stream from heat exchanger", copy=None)
def execute(self):
"""Calculate Various Paramters"""
Fl_I = self.Fl_I
Fl_O = self.Fl_O
T_cold_in = self.T_cold_in
T_cold_out = self.T_cold_out
T_hot_in = self.Fl_I.Tt
T_hot_out = self.T_hot_out
W_cold = self.W_cold
Wh = Fl_I.W
Cp_hot = Fl_I.Cp
|
Cp_cold = self.Cp_cold
W_coldCpMin = W_cold*Cp_cold;
if ( Wh*Cp_hot < W_cold*Cp_cold ):
W_coldCpMin = Wh*Cp_hot
self.Qmax = W_coldCpMin*(T_hot_in - T_cold_in)*1.4148532; #BTU/s to hp
self.Qreleased = Wh*Cp_hot*(T_hot_in - T_hot_out)*1.4148532;
self.Qabsorbed = W_cold*Cp_cold*(T_cold_out - T_cold_in)*1.4148532;
try:
self.LMTD = ((T_hot_out-T_hot_in)+(T_cold_out-T_cold_in))/log((T_hot_out-T_cold_in)/(T_hot_in-T_cold_out))
except ZeroDivisionError:
self.LMTD = 0
self.residual_qmax = self.Qreleased-self.effectiveness*self.Qmax
self.residual_e_balance = self.Qreleased-self.Qabsorbed
Fl_O.setTotalTP(T_hot_out, Fl_I.Pt*(1-self.dPqP))
Fl_O.W = Fl_I.W
if self.run_design:
Fl_O.Mach = self.MNexit_des
self._exit_area_des = Fl_O.area
else:
Fl_O.area = self._exit_area_des
if __name__ == "__main__":
from openmdao.main.api import set_as_top
class HeatBalance(Assembly):
def configure(self):
hx = self.add('hx', HeatExchanger())
driver = self.add('driver',BroydenSolver())
driver.add_parameter('hx.T_hot_out',low=0.,high=1000.)
driver.add_parameter('hx.T_cold_out',low=0.,high=1000.)
driver.add_constraint('hx.residual_qmax=0')
driver.add_constraint('hx.residual_e_balance=0')
#hx.Wh = 0.49
#hx.Cp_hot = 1.006
#hx.T_hot_in = 791
fs = FlowStation()
fs.setTotalTP(1423.8, 0.302712118187) #R, psi
fs.W = 1.0
hx.Fl_I = fs
hx.W_cold = .45
hx.T_hot_out = hx.Fl_I.Tt
hx.T_cold_out = hx.T_cold_in
driver.workflow.add(['hx'])
test = HeatBalance()
set_as_top(test)
test.hx.design = True
test.run()
print test.hx.W_cold, test.hx.T_hot_out, test.hx.Fl_I.Tt
|
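The module docstring above describes the NTU (effectiveness) method that `HeatExchanger.execute` implements on top of OpenMDAO. A standalone sketch of the same energy balance follows; the hot-side Cp of 0.24 Btu/(lbm*R) is an assumed typical value for air, and the 1.4148532 factor the class uses to convert Btu/s to hp is left out here.

```python
# Minimal NTU-effectiveness sketch (no OpenMDAO), mirroring HeatExchanger.execute above.
def ntu_heat_transfer(W_hot, Cp_hot, T_hot_in, W_cold, Cp_cold, T_cold_in, effectiveness):
    """Return (Q, T_hot_out, T_cold_out); Q in Btu/s, temperatures in degrees R."""
    C_hot = W_hot * Cp_hot
    C_cold = W_cold * Cp_cold
    C_min = min(C_hot, C_cold)                  # limiting heat-capacity rate
    Q_max = C_min * (T_hot_in - T_cold_in)      # theoretical maximum heat transfer
    Q = effectiveness * Q_max                   # actual transfer per the NTU method
    T_hot_out = T_hot_in - Q / C_hot            # hot-side energy balance
    T_cold_out = T_cold_in + Q / C_cold         # cold-side energy balance
    return Q, T_hot_out, T_cold_out

# Roughly the defaults above: 1 lbm/s of air (Cp assumed 0.24) at 1423.8 R against
# 0.992 lbm/s of water (Cp 0.9993) at 518.58 R, effectiveness 0.9765.
Q, Th_out, Tc_out = ntu_heat_transfer(1.0, 0.24, 1423.8, 0.992, 0.9993, 518.58, 0.9765)
```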
PyBossa/pybossa
|
pybossa/flickr_client.py
|
Python
|
agpl-3.0
| 2,464
| 0.000406
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""Flickr module for authentication."""
from flask_oauthlib.client import OAuth
import functools
import requests
class FlickrClient(object):
"""Class for Flickr integration."""
def __init__(self, api_key, logger=None):
self.api_key = api_key
self.logger = logger
def get_user_albums(self, session):
"""Get user albums from Flickr."""
if session.get('flickr_user') is not None:
url = 'https://api.flickr.com/services/rest/'
payload = {'method': 'flickr.photosets.getList',
'api_key': self.api_key,
'user_id': self._get_user_nsid(session),
'format': 'json',
'primary_photo_extras':'url_q',
'nojsoncallback': '1'}
res = requests.get(url, params=payload)
if res.status_code == 200 and res.json().get('stat') == 'ok':
albums = res.json()['photosets']['photoset']
return [self._extract_album_info(album) for album in albums]
if self.logger is not None:
msg = ("Bad response from Flickr:\nStatus: %s, Content: %s"
% (res.status_code, res.json()))
self.logger.error(msg)
return []
def _get_user_nsid(self, session):
"""Get session ID."""
return session.get('flickr_user').get('user_nsid')
def _extract_album_info(self, album):
"""Extract album information."""
info = {'title': album['title']['_content'],
'photos': album['photos'],
'id': album['id'],
'thumbnail_url': album['primary_photo_extras']['url_q']}
return info
|
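`get_user_albums` only needs a session-like mapping with a `flickr_user` entry; here is a small usage sketch for the class above. The API key and user NSID are placeholder values.

```python
# Hypothetical usage of FlickrClient above; key and NSID are placeholders.
client = FlickrClient(api_key="YOUR_FLICKR_API_KEY")
fake_session = {"flickr_user": {"user_nsid": "12345678@N00"}}
albums = client.get_user_albums(fake_session)  # [] on a bad response
for album in albums:
    print("%s -> %s" % (album["title"], album["thumbnail_url"]))
```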
saltstack/salt
|
tests/pytests/unit/beacons/test_bonjour_announce.py
|
Python
|
apache-2.0
| 801
| 0.003745
|
"""
tests.pytests.unit.beacons.test_bonjour_announce
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Bonjour announce beacon test cases
"""
import pytest
import salt.beacons.bonjour_announce as bonjour_announce
@pytest.fixture
def configure_loader_modules():
return {
bonjour_announce: {"last_state": {}, "last_state_extra": {"no_devices": False}}
}
def test_non_list_config():
config = {}
ret = bonjour_announce.validate(config)
assert ret == (False, "Configuration for bonjour_announce beacon must be a list.")
def test_empty_config():
config = [{}]
ret = bonjour_announce.validate(config)
assert ret == (
False,
"Configuration for bo
|
njour_announce beacon must contain servicetype, port and"
" txt items.",
)
|
ActiveState/code
|
recipes/Python/576816_Interval/recipe-576816.py
|
Python
|
mit
| 3,064
| 0.014034
|
class Interval(object):
"""
Represents an interval.
Defined as half-open interval [start,end), which includes the start position but not the end.
Start and end do not have to be numeric types.
"""
def __init__(self, start, end):
"Construct, start must be <= end."
if start > end:
raise ValueError('Start (%s) must not be greater than end (%s)' % (start, end))
self._start = start
self._end = end
start = property(fget=lambda self: self._start, doc="The interval's start")
end = property(fget=lambda self: self._end, doc="The interval's end")
def __str__(self):
"As string."
return '[%s,%s)' % (self.start, self.end)
def __repr__(self):
"String representation."
return '[%s,%s)' % (self.start, self.end)
def __cmp__(self, other):
"Compare."
if None == other:
return 1
start_cmp = cmp(self.start, other.start)
if 0 != start_cmp:
return start_cmp
else:
return cmp(self.end, other.end)
def __hash__(self):
"Hash."
return hash(self.start) ^ hash(self.end)
def intersection(self, other):
"Intersection. @return: An empty intersection if there is none."
if self > other:
other, self = self, other
if self.end <= other.start:
return Interval(self.start, self.start)
return Interval(other.start, self.end)
def hull(self, other):
"@return: Interval containing both self and other."
if self > other:
other, self = self, other
return Interval(self.start, other.end)
def overlap(self, other):
"@return: True iff self intersects other."
if self > other:
other, self = self, other
return self.end > other.start
def __contains__(self, item):
"@return: True iff item in self."
return self.start <= item and item < self.end
def zero_in(self):
"@return: True iff 0 in self."
return self.start <= 0 and 0 < self.end
def subset(self, other):
"@return: True iff self is subset of other."
return self.start >= other.start and self.end <= other.end
def proper_subset(self, other):
"@return: True iff self is proper subset of other."
return self.start > other.start and self.end < other.end
def empty(self):
"@return: True iff self is empty."
return self.start == self.end
def singleton(self):
"@return: True iff self.end - self.start == 1."
return self.end - self.start == 1
def separation(self, other):
"@return: The distance between self and other."
|
if self > other:
other, self = self, other
if self.end > other.start:
return 0
else:
return other.start - self.end
|
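The recipe relies on `cmp` and `__cmp__`, so it is Python 2 code; a short usage sketch under that assumption, exercising the half-open semantics described in the class docstring.

```python
# Python 2 usage sketch for the Interval recipe above.
a = Interval(0, 5)                    # [0,5)
b = Interval(3, 9)                    # [3,9)
print(a.overlap(b))                   # True
print(a.intersection(b))              # [3,5)
print(a.hull(b))                      # [0,9)
print(4 in a)                         # True
print(5 in a)                         # False -- the end point is excluded (half-open)
print(a.separation(Interval(7, 9)))   # 2
```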
DLR-SC/DataFinder
|
test/unittest/datafinder_test/gui/user/dialogs/datastore_dialog/__init__.py
|
Python
|
bsd-3-clause
| 1,798
| 0.017798
|
#
# $Filename$$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Provides tests for different data store dialog.
"""
__version__ = "$Revision-Id:$"
|
eagle-knights-ITAM/ssl-refbox-ros
|
scripts/refbox.py
|
Python
|
gpl-2.0
| 1,934
| 0.023785
|
"""
Publishes the Referee Box's messages as a ROS topic named "refbox" with type "referee"
"""
from referee_pb2 import SSL_Referee
import rospy
# Substitute "ekbots" here with your ROS package name
from ekbots.msg import referee, team_info
from socket import socket, inet_aton, IPPROTO_IP, IP_ADD_MEMBERSHIP
from socket import AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, INADDR_ANY
import struct
pub = rospy.Publisher('refbox', referee)
rospy.init_node('refbox')
r = rospy.Rate(10)
# Setup socket
MCAST_GRP = "224.5.23.1"
MCAST_PORT = 10003
BUFFER_SIZE = 1024
sock = socket(AF_INET, SOCK_DGRAM)
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
mreq = struct.pack('=4sl', inet_aton(MCAST_GRP), INADDR_ANY) # pack MCAST_GRP correctly
sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq) # Request MCAST_GRP
sock.bind((MCAST_GRP, MCAST_PORT)) # Bind to all interfaces
while not rospy.is_shutdown():
# Receive the protobuff from the network
data, addr = sock.recvfrom(BUFFER_SIZE) # NOTE: This call is blocking
proto = SSL_Referee()
proto.ParseFromString(data)
# Instance the ROS msg types to fill them out
yellow, blue, trama = team_info(), team_info(), referee()
# Translate the team info
for team, buf in ((yellow, proto.yellow), (blue, proto.blue)):
team.name = buf.name
team.score = buf.score
team.red_cards = buf.red_cards
team.yellow_card_times = buf.yellow_card_times
team.yellow_cards = buf.yellow_cards
team.timeouts = buf.timeouts
team.timeout_time = buf.timeout_time
team.goalie = buf.goalie
trama.yellow = yellow
trama.blue = blue
# Translate the rest
trama.packet_timestamp = proto.packet_timestamp
trama.stage = proto.stage
trama.stage_time_left = proto.stage_time_left
trama.command = proto.command
trama.command_counter = proto.command_counter
trama.command_timestamp = proto.command_timestamp
pub.publish(trama)
r.sleep()
|
tchellomello/home-assistant
|
homeassistant/components/ambient_station/__init__.py
|
Python
|
apache-2.0
| 20,729
| 0.001495
|
"""Support for Ambient Weather Station Service."""
import asyncio
import logging
from aioambient import Client
from aioambient.errors import WebsocketError
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASS_CONNECTIVITY
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
AREA_SQUARE_METERS,
ATTR_LOCATION,
ATTR_NAME,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_MILLION,
CONF_API_KEY,
DEGREE,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
POWER_WATT,
SPEED_MILES_PER_HOUR,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
from .const import (
ATTR_LAST_DATA,
ATTR_MONITORED_CONDITIONS,
CONF_APP_KEY,
DATA_CLIENT,
DOMAIN,
TYPE_BINARY_SENSOR,
TYPE_SENSOR,
)
_LOGGER = logging.getLogger(__name__)
DATA_CONFIG = "config"
DEFAULT_SOCKET_MIN_RETRY = 15
TYPE_24HOURRAININ = "24hourrainin"
TYPE_BAROMABSIN = "baromabsin"
TYPE_BAROMRELIN = "baromrelin"
TYPE_BATT1 = "batt1"
TYPE_BATT10 = "batt10"
TYPE_BATT2 = "batt2"
TYPE_BATT3 = "batt3"
TYPE_BATT4 = "batt4"
TYPE_BATT5 = "batt5"
TYPE_BATT6 = "batt6"
TYPE_BATT7 = "batt7"
TYPE_BATT8 = "batt8"
TYPE_BATT9 = "batt9"
TYPE_BATTOUT = "battout"
TYPE_CO2 = "co2"
TYPE_DAILYRAININ = "dailyrainin"
TYPE_DEWPOINT = "dewPoint"
TYPE_EVENTRAININ = "eventrain
|
in"
TYPE_FEELSLIKE = "feelsLike"
TYPE_HOURLYRAININ = "hourlyrainin"
TYPE_HUMIDITY = "humidity"
TYPE_HUMIDITY1 = "humidity1"
TYPE_HUMIDITY10 = "humidity10"
TYPE_HUMIDITY2 = "humidity2"
TYPE_HUMIDITY3 = "humidity3"
TYPE_HUMIDITY4 = "humidity4"
TYPE_HUMIDITY5 = "humidity5"
TYPE_HUMIDITY6 = "humidity6"
TYPE_HUMIDITY7 = "humidity7"
TYPE_HUMIDITY8 = "humidity8"
TYPE_HUMIDITY9 = "humidity9"
TYPE_HUMIDITYIN = "humidityin"
TYPE_LASTRAIN = "lastRain"
TYPE_MAXDAILYGUST = "maxdailygust"
TYPE_MONTHLYRAININ = "monthlyrainin"
TYPE_RELAY1 = "relay1"
TYPE_RELAY10 = "relay10"
TYPE_RELAY2 = "relay2"
TYPE_RELAY3 = "relay3"
TYPE_RELAY4 = "relay4"
TYPE_RELAY5 = "relay5"
TYPE_RELAY6 = "relay6"
TYPE_RELAY7 = "relay7"
TYPE_RELAY8 = "relay8"
TYPE_RELAY9 = "relay9"
TYPE_SOILHUM1 = "soilhum1"
TYPE_SOILHUM10 = "soilhum10"
TYPE_SOILHUM2 = "soilhum2"
TYPE_SOILHUM3 = "soilhum3"
TYPE_SOILHUM4 = "soilhum4"
TYPE_SOILHUM5 = "soilhum5"
TYPE_SOILHUM6 = "soilhum6"
TYPE_SOILHUM7 = "soilhum7"
TYPE_SOILHUM8 = "soilhum8"
TYPE_SOILHUM9 = "soilhum9"
TYPE_SOILTEMP1F = "soiltemp1f"
TYPE_SOILTEMP10F = "soiltemp10f"
TYPE_SOILTEMP2F = "soiltemp2f"
TYPE_SOILTEMP3F = "soiltemp3f"
TYPE_SOILTEMP4F = "soiltemp4f"
TYPE_SOILTEMP5F = "soiltemp5f"
TYPE_SOILTEMP6F = "soiltemp6f"
TYPE_SOILTEMP7F = "soiltemp7f"
TYPE_SOILTEMP8F = "soiltemp8f"
TYPE_SOILTEMP9F = "soiltemp9f"
TYPE_SOLARRADIATION = "solarradiation"
TYPE_SOLARRADIATION_LX = "solarradiation_lx"
TYPE_TEMP10F = "temp10f"
TYPE_TEMP1F = "temp1f"
TYPE_TEMP2F = "temp2f"
TYPE_TEMP3F = "temp3f"
TYPE_TEMP4F = "temp4f"
TYPE_TEMP5F = "temp5f"
TYPE_TEMP6F = "temp6f"
TYPE_TEMP7F = "temp7f"
TYPE_TEMP8F = "temp8f"
TYPE_TEMP9F = "temp9f"
TYPE_TEMPF = "tempf"
TYPE_TEMPINF = "tempinf"
TYPE_TOTALRAININ = "totalrainin"
TYPE_UV = "uv"
TYPE_PM25 = "pm25"
TYPE_PM25_24H = "pm25_24h"
TYPE_WEEKLYRAININ = "weeklyrainin"
TYPE_WINDDIR = "winddir"
TYPE_WINDDIR_AVG10M = "winddir_avg10m"
TYPE_WINDDIR_AVG2M = "winddir_avg2m"
TYPE_WINDGUSTDIR = "windgustdir"
TYPE_WINDGUSTMPH = "windgustmph"
TYPE_WINDSPDMPH_AVG10M = "windspdmph_avg10m"
TYPE_WINDSPDMPH_AVG2M = "windspdmph_avg2m"
TYPE_WINDSPEEDMPH = "windspeedmph"
TYPE_YEARLYRAININ = "yearlyrainin"
SENSOR_TYPES = {
TYPE_24HOURRAININ: ("24 Hr Rain", "in", TYPE_SENSOR, None),
TYPE_BAROMABSIN: ("Abs Pressure", "inHg", TYPE_SENSOR, "pressure"),
TYPE_BAROMRELIN: ("Rel Pressure", "inHg", TYPE_SENSOR, "pressure"),
TYPE_BATT10: ("Battery 10", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT1: ("Battery 1", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT2: ("Battery 2", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT3: ("Battery 3", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT4: ("Battery 4", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT5: ("Battery 5", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT6: ("Battery 6", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT7: ("Battery 7", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT8: ("Battery 8", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATT9: ("Battery 9", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_BATTOUT: ("Battery", None, TYPE_BINARY_SENSOR, "battery"),
TYPE_CO2: ("co2", CONCENTRATION_PARTS_PER_MILLION, TYPE_SENSOR, None),
TYPE_DAILYRAININ: ("Daily Rain", "in", TYPE_SENSOR, None),
TYPE_DEWPOINT: ("Dew Point", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_EVENTRAININ: ("Event Rain", "in", TYPE_SENSOR, None),
TYPE_FEELSLIKE: ("Feels Like", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_HOURLYRAININ: ("Hourly Rain Rate", "in/hr", TYPE_SENSOR, None),
TYPE_HUMIDITY10: ("Humidity 10", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY1: ("Humidity 1", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY2: ("Humidity 2", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY3: ("Humidity 3", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY4: ("Humidity 4", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY5: ("Humidity 5", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY6: ("Humidity 6", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY7: ("Humidity 7", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY8: ("Humidity 8", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY9: ("Humidity 9", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITY: ("Humidity", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_HUMIDITYIN: ("Humidity In", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_LASTRAIN: ("Last Rain", None, TYPE_SENSOR, "timestamp"),
TYPE_MAXDAILYGUST: ("Max Gust", SPEED_MILES_PER_HOUR, TYPE_SENSOR, None),
TYPE_MONTHLYRAININ: ("Monthly Rain", "in", TYPE_SENSOR, None),
TYPE_RELAY10: ("Relay 10", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY1: ("Relay 1", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY2: ("Relay 2", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY3: ("Relay 3", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY4: ("Relay 4", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY5: ("Relay 5", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY6: ("Relay 6", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY7: ("Relay 7", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY8: ("Relay 8", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_RELAY9: ("Relay 9", None, TYPE_BINARY_SENSOR, DEVICE_CLASS_CONNECTIVITY),
TYPE_SOILHUM10: ("Soil Humidity 10", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM1: ("Soil Humidity 1", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM2: ("Soil Humidity 2", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM3: ("Soil Humidity 3", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM4: ("Soil Humidity 4", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM5: ("Soil Humidity 5", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM6: ("Soil Humidity 6", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM7: ("Soil Humidity 7", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM8: ("Soil Humidity 8", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILHUM9: ("Soil Humidity 9", PERCENTAGE, TYPE_SENSOR, "humidity"),
TYPE_SOILTEMP10F: ("Soil Temp 10", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP1F: ("Soil Temp 1", TEMP_FAHRENHEIT, TYPE_SENSOR, "temperature"),
TYPE_SOILTEMP2F: ("Soil Temp 2", TEMP_FAHRENHEI
|
knadir/Flask-Images
|
flask_images/size.py
|
Python
|
bsd-3-clause
| 3,727
| 0.00322
|
from __future__ import division
from PIL import Image
from . import modes
from .transform import Transform
class ImageSize(object):
@property
def image(self):
if not self._image and self.path:
self._image = Image.open(self.path)
return self._image
def __init__(self, path=None, image=None, width=None, height=None,
enlarge=True, mode=None, transform=None, sharpen=None, _shortcut=False, **kw
):
# Inputs.
self.__dict__.update(kw)
self.path = path
self._image = image
self.req_width = width
self.req_height = height
self.enlarge = bool(enlarge)
self.mode = mode
self.transform = transform
self.sharpen = sharpen
self.image_width = self.image_height = None
# Results to be updated as appropriate.
self.needs_enlarge = None
self.width = width
self.height = height
self.op_width = None
self.op_height = None
if _shortcut and width and height and enlarge and mode in (modes.RESHAPE, modes.CROP, None):
return
# Source the original image dimensions.
if self.transform:
self.image_width, self.image_height = Transform(self.transform,
self.image.size if self.image else (width, height)
).size
else:
self.image_width, self.image_height = self.image.size
# Maintain aspect ratio and scale width.
if not self.height:
self.needs_enlarge = self.width > self.image_width
if not self.enlarge:
self.width = min(self.width, self.image_width)
self.height = self.image_height * self.width // self.image_width
return
# Maintain aspect ratio and scale height.
|
if not self.width:
self.needs_enlarge = self.height > self.image_height
if not self.enlarge:
self.height = min(self.height, self.image_height)
self.width = self.image_width * self.height // self.image_height
return
# Don't maintain aspect ratio; enlarging is sloppy here.
if self.mode in (modes.RESHAPE, None):
self.needs_enlarge = self.width > self.image_width or self.height > self.image_height
if not self.enlarge:
self.width = min(self.width, self.image_width)
self.height = min(self.height, self.image_height)
return
if self.mode not in (modes.FIT, modes.CROP, modes.PAD):
raise ValueError('unknown mode %r' % self.mode)
# This effectively gives us the dimensions of scaling to fit within or
# around the requested size. These are always scaled to fit.
fit, pre_crop = sorted([
(self.req_width, self.image_height * self.req_width // self.image_width),
(self.image_width * self.req_height // self.image_height, self.req_height)
])
self.op_width, self.op_height = fit if self.mode in (modes.FIT, modes.PAD) else pre_crop
self.needs_enlarge = self.op_width > self.image_width or self.op_height > self.image_height
if self.needs_enlarge and not self.enlarge:
self.op_width = min(self.op_width, self.image_width)
self.op_height = min(self.op_height, self.image_height)
if self.mode != modes.PAD:
self.width = min(self.width, self.image_width)
self.height = min(self.height, self.image_height)
return
if self.mode != modes.PAD:
self.width = min(self.op_width, self.width)
self.height = min(self.op_height, self.height)
|
nrempel/rucksack-api
|
app/web_components/models.py
|
Python
|
mit
| 1,563
| 0
|
# -*- coding: utf-8 -*-
from datetime import datetime
from app import db
from app.models import components_tags
from app.users.models import User
from app.tags.models import Tag
from app.util import unix_time
class WebComponent(db.Model):
__tablename__ = 'web_component'
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime)
name = db.Column(
db.String,
index=True,
unique=True)
description = db.Column(db.String)
owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
owner = db.relationship(
User, backref=db.backref('web_components', lazy='dynamic'))
repository_url = db.Column(db.String(256))
tags = db.relationship(
Tag,
secondary=components_tags,
backref=db.backref('web_components', lazy='dynamic'))
def __init__(
self,
name,
description,
owner,
repository_url):
self.created = datetime.now()
self.name = name
self.description = description
self.owner = owner
self.repository_url = repository_url
def __iter__(self):
return {
'id': self.id,
'created': unix_time(self.created),
'name': self.name,
'description': self.description,
'owner': dict(self.owner),
'repository_url': self.repository_url,
'tags': [dict(tag) for tag in self.tags]
}.iteritems()
def __repr__(self):
return '<WebComponent:%s>' % self.name
|
TeamEOS/external_chromium_org
|
tools/telemetry/telemetry/core/backends/chrome/inspector_timeline.py
|
Python
|
bsd-3-clause
| 3,318
| 0.005425
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import timeline_recorder
from telemetry.timeline import inspector_timeline_data
class TabBackendException(Exception):
"""An exception which indicates an error response from devtools inspector."""
pass
class InspectorTimeline(timeline_recorder.TimelineRecorder):
"""Implementation of dev tools timeline."""
class Recorder(object):
"""Utility class to Start and Stop recording timeline.
Example usage:
with inspector_timeline.InspectorTimeline.Recorder(tab):
# Something to run while the timeline is recording.
This is an alternative to directly calling the Start and Stop methods below.
"""
def __init__(self, tab):
self._tab = tab
def __enter__(self):
self._tab.StartTimelineRecording()
def __exit__(self, *args):
self._tab.StopTimelineRecording()
def __init__(self, inspector_backend):
super(InspectorTimeline, self).__init__()
self._inspector_backend = inspector_backend
self._is_recording = False
@property
def is_timeline_recording_running(self):
return self._is_recording
def Start(self):
"""Starts recording."""
assert not self._is_recording, 'Start should only be called once.'
self._is_recording = True
self._inspector_backend.RegisterDomain(
'Timeline', self._OnNotification, self._OnClose)
# The 'bufferEvents' parameter below means that events should not be sent
# individually as messages, but instead all at once when a Timeline.stop
# request is sent.
request = {
'method': 'Timeline.start',
'params': {'bufferEvents': True},
}
self._SendSyncRequest(request)
def Stop(self):
"""Stops recording and returns timeline event data."""
if not self._is_recording:
return None
request = {'method': 'Timeline.stop'}
result = self._SendSyncRequest(request)
self._inspector_backend.UnregisterDomain('Timeline')
self._is_recording = False
raw_events = result['events']
return inspector_timeline_data.InspectorTimelineData(raw_events)
def _SendSyncRequest(self, request, timeout=60):
"""Sends a devtools remote debugging protocol request.
The types of request that are valid is determined by protocol.json:
https://src.chromium.org/viewvc/blink/trunk/Source/devtools/protocol.json
Args:
request: Request dict, may contain the keys 'method' and 'params'.
timeout: Number of seconds to wait for a response.
Returns:
The result given in the response message.
Raises:
TabBackendException: The response indicates an error occurred.
"""
response = self._inspector_backend.SyncRequest(request, timeout)
if 'error' in response:
raise TabBackendException(response['error']['message'])
return response['result']
def _OnNotification(self, msg):
"""Handler called when a message is received."""
# Since 'Timeline.start' was invoked with the 'bufferEvents' parameter,
# there will be no timeline notifications while recording.
pass
def _OnClose(self):
"""Handler called when a domain is unregistered."""
pass
|
raisfathin/chipsec
|
source/tool/chipsec/hal/spi_uefi.py
|
Python
|
gpl-2.0
| 26,618
| 0.018859
|
#!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#[email protected]
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2012 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
SPI UEFI Region parsing
usage:
>>> parse_uefi_region_from_file( filename )
"""
__version__ = '1.0'
import os
import fnmatch
import struct
import sys
import time
import collections
import hashlib
import re
import random
#import phex
from chipsec.helper.oshelper import helper
from chipsec.logger import *
from chipsec.file import *
from chipsec.cfg.common import *
from chipsec.hal.uefi_common import *
from chipsec.hal.uefi_platform import *
from chipsec.hal.uefi import identify_EFI_NVRAM
CMD_UEFI_FILE_REMOVE = 0
CMD_UEFI_FILE_INSERT_BEFORE = 1
CMD_UEFI_FILE_INSERT_AFTER = 2
CMD_UEFI_FILE_REPLACE = 3
def decompress_section_data( _uefi, section_dir_path, sec_fs_name, compressed_data, compression_type, remove_files=False ):
compressed_name = os.path.join(section_dir_path, "%s.gz" % sec_fs_name)
uncompressed_name = os.path.join(section_dir_path, sec_fs_name)
write_file(compressed_name, compressed_data)
uncompressed_image = _uefi.decompress_EFI_binary( compressed_name, uncompressed_name, compression_type )
if remove_files:
try:
os.remove(compressed_name)
os.remove(uncompressed_name)
except: pass
return uncompressed_image
def compress_image( _uefi, image, compression_type ):
precomress_file = 'uefi_file.raw.comp'
compressed_file = 'uefi_file.raw.comp.gz'
write_file(precomress_file, image)
compressed_image = _uefi.compress_EFI_binary(precomress_file, compressed_file, compression_type)
write_file(compressed_file, compressed_image)
os.remove(precomress_file)
os.remove(compressed_file)
return compressed_image
def modify_uefi_region(data, command, guid, uefi_file = ''):
RgLengthChange = 0
FvOffset, FsGuid, FvLength, FvAttributes, FvHeaderLength, FvChecksum, ExtHeaderOffset, FvImage, CalcSum = NextFwVolume(data)
while FvOffset is not None:
FvLengthChange = 0
polarity = bit_set(FvAttributes, EFI_FVB2_ERASE_POLARITY)
if ((FsGuid == EFI_FIRMWARE_FILE_SYSTEM2_GUID) or (FsGuid == EFI_FIRMWARE_FILE_SYSTEM_GUID)):
cur_offset, next_offset, Name, Type, Attributes, State, Checksum, Size, FileImage, HeaderSize, UD, fCalcSum = NextFwFile(FvImage, FvLength, FvHeaderLength, polarity)
while next_offset is not None:
if (Name == guid):
uefi_file_size = (len(uefi_file) + 7) & 0xFFFFFFF8
CurFileOffset = FvOffset + cur_offset + FvLengthChange
NxtFileOffset = FvOffset + next_offset + FvLengthChange
if command == CMD_UEFI_FILE_REMOVE:
FvLengthChange -= (next_offset - cur_offset)
logger().log( "Removing UEFI file with GUID=%s at offset=%08X, size change: %d bytes" % (Name, CurFileOffset, FvLengthChange) )
data = data[:CurFileOffset] + data[NxtFileOffset:]
elif command == CMD_UEFI_FILE_INSERT_BEFORE:
FvLengthChange += uefi_file_size
logger().log( "Inserting UEFI file before file with GUID=%s at offset=%08X, size change: %d bytes" % (Name, CurFileOffset, FvLengthChange) )
data = data[:CurFileOffset] + uefi_file.ljust(uefi_file_size, '\xFF') + data[CurFileOffset:]
elif command == CMD_UEFI_FILE_INSERT_AFTER:
FvLengthChange += uefi_file_size
logger().log( "Inserting UEFI file after file with GUID=%s at offset=%08X, size change: %d bytes" % (Name, CurFileOffset, FvLengthChange) )
data = data[:NxtFileOffset] + uefi_file.ljust(uefi_file_size, '\xFF') + data[NxtFileOffset:]
elif command == CMD_UEFI_FILE_REPLACE:
FvLengthChange += uefi_file_size - (next_offset - cur_offset)
logger().log( "Replacing UEFI file with GUID=%s at offset=%08X, new size: %d, old size: %d, size change: %d bytes" % (Name, CurFileOffset, len(uefi_file), Size, FvLengthChange) )
data = data[:CurFileOffset] + uefi_file.ljust(uefi_file_size, '\xFF') + data[NxtFileOffset:]
else:
raise Exception('Invalid command')
if next_offset - cur_offset >= 24:
FvEndOffset = FvOffset + next_offset + FvLengthChange
cur_offset, next_offset, Name, Type, Attributes, State, Checksum, Size, FileImage, HeaderSize, UD, fCalcSum = NextFwFile(FvImage, FvLength, next_offset, polarity)
if FvLengthChange >= 0:
data = data[:FvEndOffset] + data[FvEndOffset + FvLengthChange:]
else:
data = data[:FvEndOffset] + (abs(FvLengthChange) * '\xFF') + data[FvEndOffset:]
FvLengthChange = 0
#if FvLengthChange != 0:
# logger().log( "Rebuilding Firmware Volume with GUID=%s at offset=%08X" % (FsGuid, FvOffset) )
# FvHeader = data[FvOffset: FvOffset + FvHeaderLength]
# FvHeader = FvHeader[:0x20] + struct.pack('<Q', FvLength) + FvHeader[0x28:]
# NewChecksum = FvChecksum16(FvHeader[:0x32] + '\x00\x00' + FvHeader[0x34:])
# FvHeader = FvHeader[:0x32] + struct.pack('<H', NewChecksum) + FvHeader[0x34:]
# data = data[:FvOffset] + FvHeader + data[FvOffset + FvHeaderLength:]
FvOffset, FsGuid, FvLength, FvAttributes, FvHeaderLength, FvChecksum, ExtHeaderOffset, FvImage, CalcSum = NextFwVolume(data, FvOffset + FvLength)
return data
DEF_INDENT = " "
class EFI_MODULE(object):
def __init__(self, Offset, Guid, HeaderSize, Attributes, Image):
self.Offset = Offset
self.Guid = Guid
self.HeaderSize = HeaderSize
self.Attributes = Attributes
self.Image = Image
self.clsname = "EFI module"
self.indent = ''
self.MD5 = ''
self.SHA1 = ''
self.SHA256 = ''
def __str__(self):
_ind = self.indent + DEF_INDENT
return "%sMD5 : %s\n%sSHA1 : %s\n%sSHA256: %s\n" % (_ind,self.MD5,_ind,self.SHA1,_ind,self.SHA256)
class EFI_FV(EFI_MODULE):
def __init__(self, Offset, Guid, Size, Attributes, HeaderSize, Checksum, ExtHeaderOffset, Image, CalcSum):
EFI_MODULE.__init__(self, Offset, Guid, HeaderSize, Attributes, Image)
self.clsname = "EFI firmware volume"
self.Size = Size
self.Checksum = Checksum
self.ExtHeaderOffset = ExtHeaderOffset
self.CalcSum = CalcSum
def __str__(self):
schecksum = ('%04Xh (%04Xh) *** checksum mismatch ***' % (self.Checksum,self.CalcSum)) if self.CalcSum != self.Checksum else ('%04Xh' % self.Checksum)
_s = "\n%s%s +%08Xh {%s}: Size %08Xh, Attr %08Xh, HdrSize %04Xh, ExtHdrOffset %08Xh, Chec
|
willu47/SALib
|
src/SALib/sample/latin.py
|
Python
|
mit
| 1,859
| 0
|
from __future__ import division
import numpy as np
from . import common_args
from ..util import scale_samples, read_param_file
def sample(problem, N, seed=None):
"""Generate model inputs using Latin hypercube sampling (LHS).
Returns a NumPy matrix containing the model inputs generated by Latin
hypercube sampling. The resulting matrix contains N rows and D columns,
where D is the number of parameters.
Parameters
----------
problem : dict
The problem definition
N : int
The number of samples to generate
"""
if seed:
np.random.seed(seed)
D = problem['num_vars']
result = np.zeros([N, D])
temp = np.zeros([N])
d = 1.0 / N
for i in range(D):
for j in range(N):
temp[j] = np.random.uniform(
low=j * d, high=(j + 1) * d, size=1)[0]
np.random.shuffle(temp)
for j in range(N):
result[j, i] = temp[j]
scale_samples(result, problem['bounds'])
return result
def cli_parse(parser):
"""Add method specific options t
|
o CLI parser.
Parameters
----------
parser : argparse object
Returns
----------
Updated argparse object
"""
parser.add_argument('-n', '--samples', type=int, required=True,
help='Number of Samples')
return parser
def cli_action(args):
"""Run sampling method
Parameters
----------
args : argparse namespace
"""
problem = read_param_file(args.paramfile)
param_values = sample(problem, args.samples, seed=args.seed)
np.savetxt(args.output, param_values, delimiter=args.delimiter,
fmt='%.' + str(args.precision) + 'e')
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
|
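A minimal usage sketch for `sample`, assuming the SALib package is installed. The problem dict carries the two fields the function actually reads (`num_vars`, `bounds`) plus `names`, which SALib's problem spec conventionally includes.

```python
# Sketch: Latin hypercube sampling via the sample() function above.
from SALib.sample import latin   # assumes SALib is installed

problem = {
    "num_vars": 3,
    "names": ["x1", "x2", "x3"],
    "bounds": [[0.0, 1.0], [-1.0, 1.0], [10.0, 20.0]],
}
X = latin.sample(problem, 100, seed=42)  # (100, 3) array, one LHS-stratified column per variable
```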
gpittarelli/reddit-twitch-bot
|
lib/twitchtv.py
|
Python
|
mit
| 1,061
| 0
|
#!/usr/bin/env python
'''
Define functions to query the twitch.tv streaming
websites.
More info on the Twitch.tv REST api here:
https://github.com/justintv/twitch-api
'''
import sys
import logging
import requests
'''
Twitch.tv API stream listing request. This API call takes a comma
separated list of channel names and returns an array of JSON objects,
one per channel that is currently streaming (so nothing is returned
for channels that were queried but aren't streaming)
'''
STREAM_URL = "https://api.twitch.tv/kraken/streams?channel=%s"
# Takes an array of channel names and returns the names from the array
# which are currently streaming
def fetch_streams(channel_names):
response = requests.get(STREAM_URL % (",".join(channel_names)))
try:
message = response.json()["streams"]
except ValueError:
# JSON Decode failed
sys.exit("Invalid message from twitch.tv: %s" % (response.text))
if not isinstance(message, list):
sys.exit("Unexpected JSON from twitch.tv: %s" % (message))
return message
|
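A small usage sketch for `fetch_streams`; the channel names are placeholders, and the Kraken v3 endpoint used above has since been retired, so treat this purely as an illustration of the call shape.

```python
# Hypothetical usage of fetch_streams() above; channel names are placeholders.
live = fetch_streams(["channel_one", "channel_two"])
print("%d of the queried channels are live" % len(live))
```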
ahmedhosnycs/linkedin-search
|
fab.py
|
Python
|
gpl-2.0
| 575
| 0.003478
|
from __future__ import with_statement
from fabric.contrib.console import confirm
from fabric.api import local
import fileinput
def server(port=""):
replace_for_local()
if port:
local("python manage.py runserver 0.0.0.0:" + port + " --settings=linkedin_search.local")
else:
local("python manage.py runserver 0.0.0.0:8888 --settings=linkedin_search.local")
def test():
local("python manage.py test --settings=linkedin_search.local")
def setting(setting=""):
local("python man
|
age.py " + setting + " --settings=linkedin_search.local")
|
AutorestCI/azure-sdk-for-python
|
azure-mgmt-web/azure/mgmt/web/models/backup_request.py
|
Python
|
mit
| 3,146
| 0.001589
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_only_resource import ProxyOnlyResource
class BackupRequest(ProxyOnlyResource):
"""Description of a backup which will be performed.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:ivar type: Resource type.
:vartype type: str
:param backup_request_name: Name of the backup.
:type backup_request_name: str
:param enabled: True if the backup schedule is enabled (must be included
in that case), false if the backup schedule should be disabled.
:type enabled: bool
:param storage_account_url: SAS URL to the container.
:type storage_account_url: str
:param backup_schedule: Schedule for the backup if it is executed
periodically.
:type backup_schedule: ~azure.mgmt.web.models.BackupSchedule
:param databases: Databases included in the backup.
:type databases: list[~azure.mgmt.web.models.DatabaseBackupSetting]
:param backup_request_type: Type of the backup. Possible values include:
'Default', 'Clone', 'Relocation', 'Snapshot'
:type backup_request_type: str or
~azure.mgmt.web.models.BackupRestoreOperationType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'backup_request_name': {'key': 'properties.name', 'type': 'str'},
'enabled': {'key': 'properties.enabled', 'type': 'bool'},
'storage_account_url': {'key': 'properties.storageAccountUrl', 'type': 'str'},
'backup_schedule': {'key': 'properties.backupSchedule', 'type': 'BackupSchedule'},
'databases': {'key': 'properties.databases', 'type': '[DatabaseBackupSetting]'},
'backup_request_type': {'key': 'properties.type', 'type': 'BackupRestoreOperationType'},
}
def __init__(self, kind=None, backup_request_name=None, enabled=None, storage_account_url=None, backup_schedule=None, databases=None, backup_request_type=None):
super(BackupRequest, self).__init__(kind=kind)
self.backup_request_name = backup_request_name
self.enabled = enabled
self.storage_account_url = storage_account_url
self.backup_schedule = backup_schedule
self.databases = databases
|
self.backup_request_type = backup_request_type
|
perimosocordiae/pyhrm
|
extract_images.py
|
Python
|
mit
| 2,304
| 0.009549
|
from __future__ import print_function
import numpy as np
import turtle
from argparse import ArgumentParser
from base64 import decodestring
from zlib import decompress
# Python 2/3 compat
try:
_input = raw_input
except NameError:
_input = input
'''TODO:
* add a matplotlib-based plotter
* add a path export function (for pasting back into HRM)
* path cleanup (length reduction)
* handwriting -> ascii conversion?
'''
def parse_images(filepath):
lines = open(filepath, 'rb')
while True:
# clever trick!
# when next() raises StopIteration, it stops this generator too
line = next(lines)
if not line.startswith(b'DEFINE '):
continue
_, kind, number = line.split()
kind = kind.decode('ascii')
number = int(number)
raw_data = b''
while not line.endswith(b';'):
line = next(lines).strip()
raw_data += line
# strip ; terminator
raw_data = raw_data[:-1]
# add base64 padding
if len(raw_data) % 4 != 0:
raw_data += b'=' * (2 - (len(raw_data) % 2))
# decode base64 -> decode zlib -> convert to byte array
data = np.fromstring(decompress(decodestring(raw_data)), dtype=np.uint8)
assert data.shape == (1028,)
path_len, = data[:4].view(np.uint32)
path = data[4:4+4*path_len].view(np.uint16).reshape((-1,2))
yield kind, number, path
def main():
ap = ArgumentParser()
ap.add_argument('--speed', type=int, default=10,
help='Number 1-10 for drawing speed, or 0 for no added delay')
ap.add_argument('program')
args = ap.parse_args()
for kind, number, path in parse_images(args.program):
title = '%s #%d, path length %d' % (kind, number, path.shape[0])
print(title)
if not path.size:
continue
pen_up = (path==0).all(axis=1)
# convert from path (0 to 65536) to turtle coords (0 to 655.36)
path = path / 100.
turtle.title(title)
turtle.speed(args.speed)
turtle.setworldcoordinates(0, 655.36, 655.36, 0)
turtle.pen(shown=False, pendown=False, pensize=10)
for i,pos in enumerate(path):
if pen_up[i]:
turtle.penup()
else:
turtle.setpos(pos)
turtle.pendown()
turtle.dot(size=10)
_input('Press enter to continue')
turtle.clear()
turtle.bye()
if __name__ == '__main__':
main()
|
carlos-ferras/Sequence-ToolKit
|
view/genrep/dialogs/ui_apply_this_to.py
|
Python
|
gpl-3.0
| 11,221
| 0.000891
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/krl1to5/Work/FULL/Sequence-ToolKit/2016/resources/ui/genrep/dialogs/apply_this_to.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_apply_to(object):
def setupUi(self, apply_to):
apply_to.setObjectName("apply_to")
apply_to.resize(558, 285)
self.verticalLayout = QtWidgets.QVBoxLayout(apply_to)
self.verticalLayout.setContentsMargins(-1, -1, -1, 0)
self.verticalLayout.setSpacing(15)
self.verticalLayout.setObjectName("verticalLayout")
self.form_area = QtWidgets.QFrame(apply_to)
self.form_area.setFrameShape(QtWidgets.QFrame.Box)
self.form_area.setFrameShadow(QtWidgets.QFrame.Raised)
self.form_area.setObjectName("form_area")
self.gridLayout = QtWidgets.QGridLayout(self.form_area)
self.gridLayout.setContentsMargins(8, 8, 8, 8)
self.gridLayout.setHorizontalSpacing(20)
self.gridLayout.setVerticalSpacing(12)
self.gridLayout.setObjectName("gridLayout")
self.condition_label = QtWidgets.QLabel(self.form_area)
self.condition_label.setObjectName("condition_label")
self.gridLayout.addWidget(self.condition_label, 0, 1, 1, 1)
self.condition_4 = QtWidgets.QComboBox(self.form_area)
self.condition_4.setEnabled(False)
self.condition_4.setMinimumSize(QtCore.QSize(160, 28))
self.condition_4.setObjectName("condition_4")
self.condition_4.addItem("")
self.condition_4.addItem("")
self.condition_4.addItem("")
self.condition_4.addItem("")
self.gridLayout.addWidget(self.condition_4, 4, 1, 1, 1)
self.condition_2 = QtWidgets.QComboBox(self.form_area)
self.condition_2.setEnabled(False)
self.condition_2.setMinimumSize(QtCore.QSize(160, 28))
self.condition_2.setObjectName("condition_2")
self.condition_2.addItem("")
self.condition_2.addItem("")
self.condition_2.addItem("")
self.condition_2.addItem("")
|
self.gridLayout.addWidget(self.condition_2, 2, 1, 1, 1)
self.criterion_2 = QtWidgets.QComboBox(self.form_area)
self.criterion_2.setEnabled(False)
self.criterion_2.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_2.setObjectName("criterion_2")
self.criterion_2.addItem("")
|
self.criterion_2.addItem("")
self.criterion_2.addItem("")
self.criterion_2.addItem("")
self.gridLayout.addWidget(self.criterion_2, 2, 0, 1, 1)
self.value_1 = QtWidgets.QLineEdit(self.form_area)
self.value_1.setEnabled(False)
self.value_1.setMinimumSize(QtCore.QSize(160, 28))
self.value_1.setObjectName("value_1")
self.gridLayout.addWidget(self.value_1, 1, 2, 1, 1)
self.criterion_1 = QtWidgets.QComboBox(self.form_area)
self.criterion_1.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_1.setObjectName("criterion_1")
self.criterion_1.addItem("")
self.criterion_1.addItem("")
self.criterion_1.addItem("")
self.criterion_1.addItem("")
self.gridLayout.addWidget(self.criterion_1, 1, 0, 1, 1)
self.value_2 = QtWidgets.QLineEdit(self.form_area)
self.value_2.setEnabled(False)
self.value_2.setMinimumSize(QtCore.QSize(160, 28))
self.value_2.setObjectName("value_2")
self.gridLayout.addWidget(self.value_2, 2, 2, 1, 1)
self.condition_3 = QtWidgets.QComboBox(self.form_area)
self.condition_3.setEnabled(False)
self.condition_3.setMinimumSize(QtCore.QSize(160, 28))
self.condition_3.setObjectName("condition_3")
self.condition_3.addItem("")
self.condition_3.addItem("")
self.condition_3.addItem("")
self.condition_3.addItem("")
self.gridLayout.addWidget(self.condition_3, 3, 1, 1, 1)
self.value_4 = QtWidgets.QLineEdit(self.form_area)
self.value_4.setEnabled(False)
self.value_4.setMinimumSize(QtCore.QSize(160, 28))
self.value_4.setObjectName("value_4")
self.gridLayout.addWidget(self.value_4, 4, 2, 1, 1)
self.criterion_4 = QtWidgets.QComboBox(self.form_area)
self.criterion_4.setEnabled(False)
self.criterion_4.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_4.setObjectName("criterion_4")
self.criterion_4.addItem("")
self.criterion_4.addItem("")
self.criterion_4.addItem("")
self.criterion_4.addItem("")
self.gridLayout.addWidget(self.criterion_4, 4, 0, 1, 1)
self.value_label = QtWidgets.QLabel(self.form_area)
self.value_label.setObjectName("value_label")
self.gridLayout.addWidget(self.value_label, 0, 2, 1, 1)
self.criterion_label = QtWidgets.QLabel(self.form_area)
self.criterion_label.setObjectName("criterion_label")
self.gridLayout.addWidget(self.criterion_label, 0, 0, 1, 1)
self.criterion_3 = QtWidgets.QComboBox(self.form_area)
self.criterion_3.setEnabled(False)
self.criterion_3.setMinimumSize(QtCore.QSize(160, 28))
self.criterion_3.setObjectName("criterion_3")
self.criterion_3.addItem("")
self.criterion_3.addItem("")
self.criterion_3.addItem("")
self.criterion_3.addItem("")
self.gridLayout.addWidget(self.criterion_3, 3, 0, 1, 1)
self.value_3 = QtWidgets.QLineEdit(self.form_area)
self.value_3.setEnabled(False)
self.value_3.setMinimumSize(QtCore.QSize(160, 28))
self.value_3.setObjectName("value_3")
self.gridLayout.addWidget(self.value_3, 3, 2, 1, 1)
self.condition_1 = QtWidgets.QComboBox(self.form_area)
self.condition_1.setEnabled(False)
self.condition_1.setMinimumSize(QtCore.QSize(160, 28))
self.condition_1.setObjectName("condition_1")
self.condition_1.addItem("")
self.condition_1.addItem("")
self.condition_1.addItem("")
self.condition_1.addItem("")
self.gridLayout.addWidget(self.condition_1, 1, 1, 1, 1)
self.verticalLayout.addWidget(self.form_area)
self.line = QtWidgets.QFrame(apply_to)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.buttons_area = QtWidgets.QHBoxLayout()
self.buttons_area.setSpacing(10)
self.buttons_area.setObjectName("buttons_area")
spacerItem = QtWidgets.QSpacerItem(0, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.buttons_area.addItem(spacerItem)
self.push_button_apply_to_all = QtWidgets.QPushButton(apply_to)
self.push_button_apply_to_all.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_apply_to_all.setObjectName("push_button_apply_to_all")
self.buttons_area.addWidget(self.push_button_apply_to_all)
self.push_button_accept = QtWidgets.QPushButton(apply_to)
self.push_button_accept.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_accept.setObjectName("push_button_accept")
self.buttons_area.addWidget(self.push_button_accept)
self.push_button_cancel = QtWidgets.QPushButton(apply_to)
self.push_button_cancel.setMinimumSize(QtCore.QSize(100, 32))
self.push_button_cancel.setObjectName("push_button_cancel")
self.buttons_area.addWidget(self.push_button_cancel)
self.verticalLayout.addLayout(self.buttons_area)
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.retranslateUi(apply_to)
QtCore.QMetaObject.connectSlotsByName(apply_to)
def retranslateUi(self, apply_to):
_translate = QtCore.QCoreApplication.translate
apply_to.setWindowTitle(_translate("apply_to", "Apply this to"))
self.condition_label.
|
nke001/attention-lvcsr
|
libs/Theano/theano/sandbox/gpuarray/opt_util.py
|
Python
|
mit
| 4,761
| 0
|
from functools import wraps
import numpy
from theano import scalar as scal, Constant
from theano.gof import local_optimizer
from theano.tensor import (DimShuffle, get_scalar_constant_value,
NotScalarConstantError)
from .basic_ops import GpuFromHost, HostFromGpu
from .elemwise import GpuDimShuffle, GpuElemwise
_one = scal.constant(numpy.asarray(1.0, dtype='float64'))
def grab_cpu_scalar(v, nd):
if v.owner is not None:
n = v.owner
if (isinstance(n.op, GpuDimShuffle) and
n.op.new_order == ('x',) * nd):
            return grab_cpu_scalar(n.inputs[0], nd=nd)
elif (isinstance(n.op, DimShuffle) and
n.op.new_order == ('x',) * nd):
            return grab_cpu_scalar(n.inputs[0], nd=nd)
elif isinstance(n.op, GpuFromHost):
return grab_cpu_scalar(n.inputs[0], nd=nd)
else:
return None
else:
if (isinstance(v, Constant) and
v.broadcastable == (True,) * nd):
return v.dimshuffle(())
def find_node(v, cls, ignore_clients=False):
    # This digs through possibly redundant transfers to find the node
# that has the op class specified. If ignore_clients is False (the
# default) it will only dig through nodes that have a single
# client.
if v.owner is not None and (ignore_clients or len(v.clients) == 1):
if isinstance(v.owner.op, cls):
return v.owner
elif (isinstance(v.owner.op, GpuFromHost) and
v.owner.inputs[0].owner is not None and
(ignore_clients or len(v.owner.inputs[0].clients) == 1) and
isinstance(v.owner.inputs[0].owner.op, HostFromGpu)):
return find_node(v.owner.inputs[0].owner.inputs[0], cls)
else:
return None
def is_equal(var, val):
# Returns True if var is always equal to val (python value), False
# otherwise (including if var is not constant)
try:
v = get_scalar_constant_value(var)
return v == val
except NotScalarConstantError:
return False
def alpha_merge(cls, alpha_in, beta_in, nd):
def wrapper(maker):
@local_optimizer([GpuElemwise])
@wraps(maker)
def opt(node):
if (isinstance(node.op, GpuElemwise) and
node.op.scalar_op == scal.mul and
node.nin == 2):
targ = find_node(node.inputs[0], cls)
if targ is None:
targ = find_node(node.inputs[1], cls)
lr = grab_
|
cpu_scalar(node.inputs[0], nd=nd)
else:
lr = grab_cpu_scalar(node.inputs[1], nd=nd)
if lr is None or targ
|
is None:
return None
inputs = list(targ.inputs)
try:
c = get_scalar_constant_value(lr)
if c == 0:
inputs[alpha_in] = lr
inputs[beta_in] = lr
elif c == 1:
inputs[alpha_in] = targ.inputs[alpha_in]
inputs[beta_in] = targ.inputs[beta_in]
else:
inputs[alpha_in] = lr * targ.inputs[alpha_in]
inputs[beta_in] = lr * targ.inputs[beta_in]
except NotScalarConstantError:
inputs[alpha_in] = lr * targ.inputs[alpha_in]
inputs[beta_in] = lr * targ.inputs[beta_in]
return maker(targ, *inputs)
return opt
return wrapper
def output_merge(cls, alpha_in, beta_in, out_in, nd):
def wrapper(maker):
@local_optimizer([GpuElemwise])
@wraps(maker)
def opt(node):
if (isinstance(node.op, GpuElemwise) and
node.op.scalar_op == scal.add and
node.nin == 2):
targ = find_node(node.inputs[0], cls)
W = node.inputs[1]
if targ is None:
targ = find_node(node.inputs[1], cls)
W = node.inputs[0]
if targ is None:
return None
if not is_equal(targ.inputs[beta_in], 0.0):
# other cases are too complex for now
return None
if W.broadcastable != targ.inputs[out_in].broadcastable:
# Would need to explicitly tile the output to fill
# the full shape here. Disable for now.
return None
inputs = list(targ.inputs)
inputs[out_in] = W
inputs[beta_in] = _one.clone()
return maker(targ, *inputs)
return opt
return wrapper
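# A dependency-free illustration (simplified stand-ins, no Theano) of the pattern
# alpha_merge/output_merge implement above: a decorator factory whose wrapper
# inspects a node and either delegates the rewrite to the decorated maker or
# returns None to leave the graph untouched.
from functools import wraps
def merge_factory(predicate):
    def wrapper(maker):
        @wraps(maker)
        def opt(node):
            if not predicate(node):
                return None           # optimization does not apply
            return maker(node)        # delegate the actual rewrite
        return opt
    return wrapper
@merge_factory(lambda node: node.get("op") == "mul")
def rewrite_mul(node):
    return {"op": "fused_mul", "inputs": node["inputs"]}
print(rewrite_mul({"op": "mul", "inputs": [1, 2]}))   # rewritten node
print(rewrite_mul({"op": "add", "inputs": [1, 2]}))   # None -> left as-is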
|
hassaanm/stock-trading
|
src/pybrain/datasets/dataset.py
|
Python
|
apache-2.0
| 13,198
| 0.001743
|
from __future__ import with_statement
__author__ = 'Thomas Rueckstiess, [email protected]'
import random
import pickle
from itertools import chain
from scipy import zeros, resize, ravel, asarray
import scipy
from pybrain.utilities import Serializable
class OutOfSyncError(Exception): pass
class VectorFormatError(Exception): pass
class NoLinkedFieldsError(Exception): pass
class DataSet(Serializable):
"""DataSet is a general base class for other data set classes
(e.g. SupervisedDataSet, SequentialDataSet, ...). It consists of several
fields. A field is a NumPy array with a label (a string) attached to it.
Fields can be linked together which means they must have the same length."""
def __init__(self):
self.data = {}
self.endmarker = {}
self.link = []
self.index = 0
# row vectors returned by getLinked can have different formats:
# '1d' example: array([1, 2, 3])
# '2d' example: array([[1, 2, 3]])
# 'list' example: [1, 2, 3]
self.vectorformat = 'none'
def __str__(self):
"""Return a string representation of a dataset."""
s = ""
for key in self.data:
s = s + key + ": dim" + str(self.data[key].shape) + "\n" + str(self.data[key][:self.endmarker[key]]) + "\n\n"
return s
def __getitem__(self, field):
"""Return the given field."""
return self.getField(field)
def __iter__(self):
self.reset()
while not self.endOfData():
yield self.getLinked()
def getVectorFormat(self):
"""Returns the current vector format."""
return self.__vectorformat
def setVectorFormat(self, vf):
"""Determine which format to use for returning vectors. Use the property vectorformat.
:key type: possible types are '1d', '2d', 'list'
'1d' - example: array([1,2,3])
'2d' - example: array([[1,2,3]])
'list' - example: [1,2,3]
'none' - no conversion
"""
switch = {
'1d': self._convertArray1d,
'2d': self._convertArray2d,
'list': self._convertList,
            'none': lambda x: x
}
try:
self._convert = switch[vf]
self.__vectorformat = vf
except KeyError:
raise VectorFormatError("vector format must be one of '1d', '2d', 'list'. given: %s" % vf)
vectorformat = property(getVectorFormat, setVectorFormat, None, "vectorformat can be '1d', '2d' or 'list'")
def _convertList(self, vector):
"""Converts the incoming vector to a python list."""
return ravel(vector).tolist()
def _convertArray1d(self, vector):
"""Converts the incoming vector to a 1d vector with shape (x,) where x
is the number of elements."""
return ravel(vector)
def _convertArray2d(self, vector, column=False):
"""Converts the incoming `vector` to a 2d vector with shape (1,x), or
(x,1) if `column` is set, where x is the number of elements."""
a = asarray(vector)
sh = a.shape
# also reshape scalar values to 2d-index
if len(sh) == 0:
sh = (1,)
if len(sh) == 1:
# use reshape to add extra dimension
if column:
return a.reshape((sh[0], 1))
else:
return a.reshape((1, sh[0]))
else:
# vector is not 1d, return a without change
return a
def addField(self, label, dim):
"""Add a field to the dataset.
A field consists of a string `label` and a numpy ndarray of dimension
`dim`."""
self.data[label] = zeros((0, dim), float)
self.endmarker[label] = 0
def setField(self, label, arr):
"""Set the given array `arr` as the new array of field `label`,"""
as_arr = asarray(arr)
self.data[label] = as_arr
self.endmarker[label] = as_arr.shape[0]
def linkFields(self, linklist):
"""Link the length of several fields given by the list of strings
`linklist`."""
length = self[linklist[0]].shape[0]
for l in linklist:
if self[l].shape[0] != length:
raise OutOfSyncError
self.link = linklist
def unlinkFields(self, unlinklist=None):
"""Remove fields from the link list or clears link given by the list of
string `linklist`.
This method has no effect if fields are not linked."""
link = self.link
if unlinklist is not None:
for l in unlinklist:
if l in self.link:
link.remove(l)
self.link = link
else:
self.link = []
def getDimension(self, label):
"""Return the dimension/number of columns for the field given by
`label`."""
try:
dim = self.data[label].shape[1]
except KeyError:
raise KeyError('dataset field %s not found.' % label)
return dim
def __len__(self):
"""Return the length of the linked data fields. If no linked fields exist,
return the length of the longest field."""
return self.getLength()
def getLength(self):
"""Return the length of the linked data fields. If no linked fields exist,
return the length of the longest field."""
if self.link == []:
try:
length = self.endmarker[max(self.endmarker)]
except ValueError:
return 0
return length
else:
# all linked fields have equal length. return the length of the first.
l = self.link[0]
return self.endmarker[l]
def _resize(self, label=None):
if label:
label = [label]
elif self.link:
label = self.link
else:
label = self.data
for l in label:
self.data[l] = self._resizeArray(self.data[l])
def _resizeArray(self, a):
"""Increase the buffer size. It should always be one longer than the
current sequence length and double on every growth step."""
shape = list(a.shape)
shape[0] = (shape[0] + 1) * 2
return resize(a, shape)
def _appendUnlinked(self, label, row):
"""Append `row` to the field array with the given `label`.
        Do not call this function from outside; use .append() instead.
Automatically casts vector to a 2d (or higher) shape."""
if self.data[label].shape[0] <= self.endmarker[label]:
self._resize(label)
self.data[label][self.endmarker[label], :] = row
self.endmarker[label] += 1
def append(self, label, row):
"""Append `row` to the array given by `label`.
If the field is linked with others, the function throws an
`OutOfSyncError` because all linked fields always have to have the same
length. If you want to add a row to all linked fields, use appendLink
instead."""
if label in self.link:
raise OutOfSyncError
self._appendUnlinked(label, row)
def ap
|
pendLinked(self, *args):
"""Add rows to all linked fields at once."""
assert len(args) == len(self.link)
for i, l in enumerate(self.link):
self._appendUnlinked(l, args[i])
def getLinked(self, index=None):
"""Access the dataset randomly or sequential.
|
If called with `index`, the appropriate line consisting of all linked
fields is returned and the internal marker is set to the next line.
Otherwise the marked line is returned and the marker is moved to the
next line."""
if self.link == []:
raise NoLinkedFieldsError('The dataset does not have any linked fields.')
if index == None:
# no index given, return the currently marked line and step marker one line forward
index = self.index
self.index += 1
else:
# return the indexed line and move marker to next line
self.index = in
|
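# Usage sketch for the pybrain DataSet class above (assumption: pybrain and
# scipy are installed and the module is importable from the path shown); it
# exercises only the methods that are fully defined in the listing.
from pybrain.datasets.dataset import DataSet
ds = DataSet()
ds.addField('input', 2)               # a 2-column field
ds.addField('target', 1)              # a 1-column field
ds.linkFields(['input', 'target'])    # linked fields must keep the same length
ds.appendLinked([0.0, 1.0], [1.0])    # one row appended to every linked field
ds.appendLinked([1.0, 0.0], [0.0])
print(len(ds))                        # 2 -> length of the linked fields
print(ds.getDimension('input'))       # 2 -> number of columns in 'input'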
shyba/cryptosync
|
cryptosync/resources/__init__.py
|
Python
|
agpl-3.0
| 300
| 0
|
from twisted.web.server import Site
from .root import RootResource
from .auth import Aut
|
hResource
def make_site(**kwargs
|
):
root_resource = RootResource()
auth_resource = AuthResource(kwargs['authenticator'])
root_resource.putChild('auth', auth_resource)
return Site(root_resource)
|
skearnes/pylearn2
|
pylearn2/datasets/svhn.py
|
Python
|
bsd-3-clause
| 18,086
| 0.007796
|
"""
.. todo::
WRITEME
"""
import os
import gc
import warnings
try:
import tables
except ImportError:
warnings.warn("Couldn't import tables, so far SVHN is "
"only supported with PyTables")
import numpy
from theano import config
from pylearn2.datasets import dense_design_matrix
from pylearn2.utils.serial import load
from pylearn2.utils.string_utils import preprocess
from pylearn2.utils.rng import make_np_rng
class SVHN(dense_design_matrix.DenseDesignMatrixPyTables):
"""
    For faster access there is a copy of the hdf5 file in PYLEARN2_DATA_PATH,
    but it is meant to be read-only. If you wish to modify the data, you
should pass a local copy to the path argument.
Parameters
----------
which_set : WRITEME
path : WRITEME
center : WRITEME
scale : WRITEME
start : WRITEME
stop : WRITEME
axes : WRITEME
preprocessor : WRITEME
"""
mapper = {'train': 0, 'test': 1, 'extra': 2, 'train_all': 3,
'splitted_train': 4, 'valid': 5}
data_path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
def __init__(self, which_set, path = None, center = False, scale = False,
start = None, stop = None, axes = ('b', 0, 1, 'c'),
preprocessor = None):
assert which_set in self.mapper.keys()
self.__dict__.update(locals())
del self.self
if path is None:
path = '${PYLEARN2_DATA_PATH}/SVHN/format2/'
mode = 'r'
else:
mode = 'r+'
warnings.warn("Because path is not same as PYLEARN2_DATA_
|
PATH "
"be aware that data might have been "
"modified or pre-processed
|
.")
if mode == 'r' and (scale or center or (start != None) or
(stop != None)):
raise ValueError("Only for speed there is a copy of hdf5 " +\
"file in PYLEARN2_DATA_PATH but it meant to be only " +\
"readable. If you wish to modify the data, you should " +\
"pass a local copy to the path argument.")
# load data
path = preprocess(path)
file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
if os.path.isfile(file_n):
make_new = False
else:
make_new = True
warnings.warn("Over riding existing file: {0}".format(file_n))
# if hdf5 file does not exist make them
if make_new:
self.filters = tables.Filters(complib='blosc', complevel=5)
self.make_data(which_set, path)
self.h5file = tables.openFile(file_n, mode = mode)
data = self.h5file.getNode('/', "Data")
if start != None or stop != None:
self.h5file, data = self.resize(self.h5file, start, stop)
# rescale or center if permitted
if center and scale:
data.X[:] -= 127.5
data.X[:] /= 127.5
elif center:
data.X[:] -= 127.5
elif scale:
data.X[:] /= 255.
view_converter = dense_design_matrix.DefaultViewConverter((32, 32, 3),
axes)
super(SVHN, self).__init__(X = data.X, y = data.y,
view_converter = view_converter)
if preprocessor:
if which_set in ['train', 'train_all', 'splitted_train']:
can_fit = True
preprocessor.apply(self, can_fit)
self.h5file.flush()
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return SVHN(which_set = 'test', path = self.path,
center = self.center, scale = self.scale,
start = self.start, stop = self.stop,
axes = self.axes, preprocessor = self.preprocessor)
def make_data(self, which_set, path, shuffle = True):
"""
.. todo::
WRITEME
"""
sizes = {'train': 73257, 'test': 26032, 'extra': 531131,
'train_all': 604388, 'valid': 6000, 'splitted_train' : 598388}
image_size = 32 * 32 * 3
h_file_n = "{0}_32x32.h5".format(os.path.join(path, "h5", which_set))
h5file, node = self.init_hdf5(h_file_n, ([sizes[which_set],
image_size], [sizes[which_set], 10]))
        # For consistency between experiments it is better to make a new random stream
rng = make_np_rng(None, 322, which_method="shuffle")
def design_matrix_view(data_x, data_y):
"""reshape data_x to deisng matrix view
and data_y to one_hot
"""
data_x = numpy.transpose(data_x, axes = [3, 2, 0, 1])
data_x = data_x.reshape((data_x.shape[0], 32 * 32 * 3))
# TODO assuming one_hot as default for now
one_hot = numpy.zeros((data_y.shape[0], 10), dtype = config.floatX)
for i in xrange(data_y.shape[0]):
one_hot[i, data_y[i] - 1] = 1.
return data_x, one_hot
def load_data(path):
"Loads data from mat files"
data = load(path)
data_x = numpy.cast[config.floatX](data['X'])
data_y = data['y']
del data
gc.collect()
return design_matrix_view(data_x, data_y)
def split_train_valid(path, num_valid_train = 400,
num_valid_extra = 200):
"""
Extract number of class balanced samples from train and extra
sets for validation, and regard the remaining as new train set.
Parameters
----------
num_valid_train : int, optional
Number of samples per class from train
num_valid_extra : int, optional
Number of samples per class from extra
"""
# load difficult train
data = load("{0}train_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_train])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = data['X'][:, :, :, train_index]
train_y = data['y'][train_index, :]
valid_x = data['X'][:, :, :, valid_index]
valid_y = data['y'][valid_index, :]
train_size = data['X'].shape[3]
assert train_x.shape[3] == train_size - num_valid_train * 10
assert train_y.shape[0] == train_size - num_valid_train * 10
assert valid_x.shape[3] == num_valid_train * 10
assert valid_y.shape[0] == num_valid_train * 10
del data
gc.collect()
# load extra train
data = load("{0}extra_32x32.mat".format(SVHN.data_path))
valid_index = []
for i in xrange(1, 11):
index = numpy.nonzero(data['y'] == i)[0]
index.flags.writeable = 1
rng.shuffle(index)
valid_index.append(index[:num_valid_extra])
valid_index = set(numpy.concatenate(valid_index))
train_index = set(numpy.arange(data['X'].shape[3])) - valid_index
valid_index = list(valid_index)
train_index = list(train_index)
train_x = numpy.concatenate((train_x,
data['X'][:, :, :, train_index]), axis = 3)
train_y = numpy.concatenate((train_y, data['y'][train_index, :]))
valid_x = numpy.concatenate((valid_x,
data['X'][:, :, :, valid_index]), axis = 3)
valid_y = numpy.concatenate((valid_y, data['y'][valid_index, :]))
extra_size = data['X'].shape[3]
sizes['valid'] = (num_valid_train +
|
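# Quick numeric check (sketch) of the center/scale branches in SVHN.__init__
# above: center & scale maps uint8 pixel values into [-1, 1], center alone into
# [-127.5, 127.5], and scale alone into [0, 1].
import numpy as np
x = np.array([0., 127.5, 255.])
print((x - 127.5) / 127.5)   # [-1.  0.  1.]
print(x - 127.5)             # [-127.5    0.   127.5]
print(x / 255.)              # [ 0.   0.5  1. ]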
peddie/conftron
|
settings.py
|
Python
|
gpl-2.0
| 8,250
| 0.00897
|
## This file is part of conftron.
##
## Copyright (C) 2011 Matt Peddie <[email protected]>
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.
import genconfig, baseio
from settings_templates import *
class LCMSettingField(baseio.TagInheritance):
required_tags = ['default', 'step', 'min', 'max']
def __init__(self, hsh, parent):
self.__dict__.update(hsh)
self._inherit(parent)
if self.has_key('absmax'):
self.min = -float(self.absmax)
self.max = float(self.absmax)
self.parent = parent
self.parentname = parent.name
self._musthave(parent, parse_settings_noval)
self.classname = parent.classname
parent.die += self._filter()
def field_setting(self):
return lcm_settings_field_template_mm % self
def _filter(self):
die = 0
die += self._are_defaults_sane()
return die
def _are_defaults_sane(self):
## Default values outside the range given by the bounds
## don't make sense either.
die = 0
if (float(self['min']) > float(self['default'])
or float(self['max']) < float(self['default'])):
print parse_settings_badval % {"sp":'default',
"f":self['name'],
"s":self.parent['name'],
|
"max":self['max'],
"min":self['min'],
"val":self['default']}
die += 1
if float(self['step']) > (float(self['max']) - float(self['min'])):
print parse_settings_badval % {"sp":'default',
"f":self['name'],
"s":self.parent['name'],
"max":self['max'],
"min":self['min'],
"val":self['step']}
die += 1
return die
class LCMSetting(baseio.CHeader, baseio.LCMFile, baseio.CCode, baseio.TagInheritance, baseio.IncludePasting):
def __init__(self, s, parent):
self.__dict__.update(s.attrib)
self.classname = parent.name
self._inherit(parent)
self.lcm_folder = genconfig.lcm_folder
self.die = 0
self.make_fields(s.getchildren())
self.field_settings = "\n".join([f.field_setting() for f in self.fields])
def make_fields(self, fields):
flattened = self.insert_includes(fields, ['member'])
self.check_includes(flattened, ['member'])
self.fields = [LCMSettingField(dict(f.attrib, **{'varname':self.varname}), self) for f in flattened]
def to_settings_file(self):
basename = "%(classname)s_%(type)s_%(varname)s" % self
filename = genconfig.settings_folder + "/" + basename
def sf(cf):
cf.write("#include <lcm/lcm.h>\n" % self)
cf.write("#include <math.h>\n" % self)
cf.write("#include <%(classname)s_settings.h>\n" % self)
if self.has_key('channel'):
cf.write(lcm_settings_init_custom_chan_template % self)
else:
cf.write(lcm_settings_init_template % self)
cf.write(lcm_settings_func_template % self)
self.to_h(filename, sf)
def to_settings_nop(self):
filename = genconfig.stubs_folder + "/%(classname)s_%(type)s_%(varname)s_setting_stub" % self
def stub_f(cf):
cf.write("#include <lcm_settings_auto.h>\n\n")
cf.write(lcm_settings_init_nop_template % self)
cf.write(lcm_settings_set_nop_template % self)
self.to_c_no_h(filename, stub_f)
def to_settings_prototype(self, cf):
cf.write(lcm_settings_prototype % self)
class Settings(baseio.CHeader,
baseio.LCMFile,
baseio.CCode,
baseio.TagInheritance,
baseio.Searchable,
baseio.IncludePasting):
def __init__(self, name, children, class_structs, path, filename):
self.name = name
self.path = path
self.file = filename
self.classname = name
self._filter_settings(children)
self.class_struct_includes = self._class_struct_includes(class_structs)
def merge(self, other):
for k, v in other.__dict__.iteritems():
if not k in genconfig.reserved_tag_names:
try:
# Is it a method?
getattr(getattr(self, k), "__call__")
except AttributeError:
# Nope.
self.__dict__[k] = other.__dict__[k]
self.settings.extend(other.settings)
return self
def search(self, searchname):
return self._search(self.settings, searchname)
def codegen(self):
self.init_calls = "\n".join([lcm_settings_init_call_template % s for s in self.settings])
self.null_calls = "\n".join([lcm_settings_init_null_template % s for s in self.settings])
self.to_settings_h()
self.settings_nops()
def init_call(self):
return " %(classname)s_settings_init(provider); \\\n" % self
def check_call(self):
return " %(classname)s_settings_check(); \\\n" % self
def _filter_settings(self, structs):
die = 0
flattened = self.insert_includes(structs, ['struct'])
self.check_includes(flattened, ['struct'])
outstructs = [LCMSetting(s, self) for s in flattened]
die = sum([s.die for s in outstructs])
if die:
print "Lots of settings errors detected; cannot continue code generation."
sys.exit(1)
self.settings = outstructs
def settings_functions(self):
for s in self.settings:
s.to_settings_file()
def settings_prototypes(self, cf):
cf.write("/* Prototypes for all the functions defined in settings/ folder */\n")
for s in self.settings:
cf.write(lcm_settings_prototype % s)
cf.write(lcm_settings_init_prototype % s)
def settings_nops(self):
for s in self.settings:
s.to_settings_nop()
def _class_struct_includes(self, structs):
out = []
formatstr = "#include \"%(lcm_folder)s/%(classname)s_%(type)s.h\""
if (structs):
out = [formatstr % s for s in structs]
else:
## Orphaned settings module; include only types we know
## about
out = [formatstr % s for s in self.settings]
return "\n".join(out)
def settings_includes(self, cf):
cf.write(self.class_struct_includes)
def to_settings_periodic(self):
pass
def to_settings_c(self):
pass
def to_settings_h(self):
self.settings_functions()
def settings_f(cf):
cf.write("#include \"%(classname)s_types.h\"\n\n" % self)
cf.write("#include \"%(classname)s_telemetry.h\"\n\n" % self)
cf.write("#ifdef __cplusplus\n")
cf.write("extern \"C\"{\n")
cf.write("#endif\n\n")
self.settings_prototypes(cf)
cf.write("\n#ifdef __cplusplus\n")
cf.write("}\n")
cf.write("#endif\n")
# Make initialization macro
cf.write(lcm_settings_init_class_template % self)
cf.write(lcm_check_call_temp
|
|
CodeCatz/litterbox
|
ajda/complicajda.py
|
Python
|
mit
| 5,477
| 0.023553
|
# Part 1: functions
#gender: female = 2, male = 0
def calculate_score_for_gender(gender):
if gender == "male":
return 0
else: return 2
#age: 0-100 if age < 10 --> 0, 11 < age < 20 --> 5, 21 < age < 35 --> 2, 36 < age < 50 --> 4, 50+ --> 1
def calculate_score_for_age(age):
if (age > 11 and age <= 20) or (age > 36 and age <= 50):
return 5
elif age > 20 and age <= 35:
return 2
elif age < 10:
return 0
else:
return 1
#status: 0 = single, 1 = relationship, 2 = in open relationship, 3 = it's complicated, 4 = I'm a pizza, 5 = depends who's asking
def calculate_score_for_status(status):
if status == "single":
return 0
elif status == "in a relationship":
return 1
elif status == "in an open relationship":
return 2
elif status == "it's complicated":
return 3
elif status == "I'm a pizza":
return 0
else:
return 5
# ignorance: 0 = Problem is my challenge, 1 = Who gives a fuck, 2 = I'm an angel
def calculate_score_for_ignorance(ignorance):
    if ignorance == "problem is my challenge":
        return 0
    elif ignorance == "who gives a fuck":
        return 2
    else:
        return 4
# money_have: -10000+ = 6, (-10000)-(-5000) = 5, -5000-0 = 4, 0-500 = 3, 500-3000 = 2, 3000-10000 = 1, 10000+ = 0
def calculate_score_for_money_have(money_have):
if money_have <= (-10000.0):
return 8.0
elif money_have > (-10000.0) and money_have <= (-5000.0):
return 5.0
elif money_have > (-5000.0) and money_have <= 0.0:
return 4.0
elif money_have > 0.0 and money_have <= 500.0:
return 3.0
elif money_have > 500.0 and money_have <= 3000.0:
return 2.0
else:
return 0.0
# ---WHY DOESN'T IT RECOGNIZE POSITIVE FLOATING NUMBERS, OR ANY NEGATIVE NUMBERS (INTEGER OR FLOATING), AS A NUMBER?
# -->YOU MUST WRAP RAW_INPUT IN FLOAT() IF THE VALUE ISN'T A WHOLE NUMBER, AND DROP .ISDIGIT, BECAUSE .ISDIGIT ONLY WORKS FOR WHOLE NUMBERS!
# money_want: 0 = 0, 0-1000 = 1, 1000-5000 = 3, 5000-10000 = 4, 10000+ = 5
def caluculate_score_for_money_want(money_want):
if money_want == 0:
return 0
elif money_want > 0.0 and money_want <= 1000.0:
return 1
elif money_want > 1000.0 and money_want <= 5000.0:
return 3
elif money_want > 5000.0 and money_want <= 10000.0:
return 4
else:
return 5
#real friends: 0 = 5, 1-3 = 1, 4-6 = 2, 7-9 = 3, 10+ = 4
def calculate_score_for_rl_friends(rl_friends):
if rl_friends == 0:
return 5
elif rl_friends >= 1 and rl_friends <= 3:
return 1
elif rl_friends >= 4 and rl_friends <= 6:
return 2
elif rl_friends >= 7 and rl_friends <= 9:
return 3
else:
return 4
#children: 0 = 1, 1-2 = 2, 3 = 3, 4 = 4, 5+ = 5
def calculate_score_for_children(children):
if children == 0:
return 1
    elif children == 1 or children == 2:
return 2
elif children == 3:
return 3
elif children == 4:
return 4
else:
return 5
# Part 2: sum of the function scores
def calculate_score(gender, age, status, ignorance, money_have, money_want, rl_friends, children):
result = calculate_score_for_gender(gender)
result += calculate_score_for_age(age)
result += calculate_score_for_status(status)
result += calculate_score_for_ignorance(ignorance)
result += calculate_score_for_money_have(money_have)
result += caluculate_score_for_money_want(money_want)
result += calculate_score_for_rl_friends(rl_friends)
result += calculate_score_for_children(children)
return result
# Part 3: ------------- output for the user
#gender
print "Are you male or female?"
gender = raw_input(">> ")
#note to self: "while" keeps checking in a loop, "if" checks only once
while (gender != "male") and (gender != "female"):
gender = raw_input("Check your gender again: ")
#age
print "How old are you?"
age = raw_input(">> ")
while not age.isdigit():
age = raw_input("Admit it, you're old. Now write your real age: ")
#status
print "What is your marital status?"
status = raw_input(">> ")
while (status != "single") and (status != "in a relationship") and (status != "in an open relationship") and (status != "it's complicated") and (status != "I'm a pizza"):
status = raw_input("Yeah, right... Think again: ")
#ignorance
print "How ignorant are you?"
ignorance = raw_input(">> ")
while (ignorance != "problem is my challenge") and (ignorance != "who gives a fuck") and (ignorance != "I'm an angel"):
ignorance = raw_input("You can't be that ignorant. Try again: ")
#money_have
print "How much money have you got?"
money_have = float
|
(raw_input(">> "))
while not money_have:
money_have = float(raw_input("We aren't tax collectors, so be honest: "))
# WRAP RAW_INPUT IN FLOAT() IF THE VALUE ISN'T A WHOLE NUMBER, AND DROP .ISDIGIT, BECAUSE .ISDIGIT ONLY WORKS FOR WHOLE NUMBERS!
#money_want
print "In addition to the mone
|
y you've got, how much money do you want to have?"
money_want = float(raw_input(">> "))
while money_want < 0: #----> so that the amount is a positive number!
money_want = float(raw_input("I didn't ask for apples and peaches. So, how much money do you want? "))
#rl_friends
print "How many real friends have you got?"
rl_friends = raw_input(">> ")
while not rl_friends.isdigit():
rl_friends = raw_input("Spock doesn't count. Think again - how many? ")
#children
print "How many children have you got?"
children = raw_input(">> ")
while not children.isdigit():
children = raw_input("No aliens, just humans, please: ")
# Part 4: total
print "On a scale from 0 to 40, your life complication is : ", calculate_score(gender, int(age), status, ignorance, money_have, money_want, rl_friends, children)
|
anselmobd/mb2e
|
mb2e.py
|
Python
|
mit
| 8,171
| 0.000122
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import os
import sys
import re
import gettext
from oxy.arg import parse as argparse
from oxy.verbose import VerboseOutput
class Mbox():
NONE = 0
READ = 1
HEADERCANDIDATE = 2
COPY = 3
END = 4
vOut = None
state = NONE
nLine = 0
header = []
msgId = ''
line = ''
mailDir = ''
mbox = None
eml = None
def __init__(self):
self.parseArgs()
self.vOut = VerboseOutput(self.args.verbosity)
self.vOut.prnt('->... __init__', 4)
self.openMbox()
self.extract()
def __del__(self):
if self.vOut is not None:
self.vOut.prnt('->__del__', 4)
if self.mbox is not None:
self.mbox.close()
def openMbox(self):
self.vOut.prnt('->openMbox', 4)
try:
self.mbox = open(self.args.mboxFile, 'r', encoding="latin-1")
except Exception as e:
self.vOut.prnt('Can not open mbox file to read "{}"'.format(
self.args.mboxFile), 0)
sys.exit(21)
self.vOut.prnt('mbox file = {}'.format(self.args.mboxFile), 1)
self.vOut.prnt('mbox file opened', 1)
self.mailDir = '{}.__mb2e__'.format(self.args.mboxFile)
self.vOut.prnt('mailDir = {}'.format(self.mailDir), 1)
self.setState(self.READ)
def initEml(self):
self.vOut.prnt('->initEml', 4)
if not self.eml:
if not os.path.isdir(self.mailDir):
os.mkdir(self.mailDir)
if self.msgId:
name = self.msgId
else:
name = 'line_{}'.format(self.nLine)
mailName = '{}.eml'.format(name)
transCharacters = {'/': '_pathbar_',
'$': '_dolar_',
|
'-': '_'}
mailFileName = "".join(transCharacters[c]
if c in transCharacters
else c
for c in mailName
).rstrip()
mailFileName = os.path.join(self.mailDir, mailFileName)
self.vOut.prnt('eml file = {}'.format(mailFileName), 2)
try:
self.eml = open(mailFileName, 'w')
except Exceptio
|
n as e:
self.vOut.prnt('Can not open mail file to write "{}"'.format(
mailFileName), 0)
def endEml(self):
self.vOut.prnt('->endEml', 4)
self.eml.close()
self.eml = None
def cleanLine(self):
return self.line.strip('\n')
def extract(self):
self.vOut.prnt('->extract', 4)
for self.line in self.mbox:
self.nLine += 1
if self.args.lineLimit > 0 and self.nLine > self.args.lineLimit:
self.setState(self.END)
break
line = self.cleanLine()
self.vOut.prnt('extract nLine = {}; line = "{}"{}'.format(
self.nLine, line[:30],
'...' if line[30:] else ''), 4)
self.processLine()
def headerLine(self):
line = self.cleanLine()
if self.args.cleanMozilla and (
re.search('^X-Mozilla-Status2?: .*$', line) or
re.search('^X-Mozilla-Keys: .*$', line)):
return
self.header.append(self.line)
def processLine(self):
def isIniHeader():
line = self.cleanLine()
result = bool(
re.search('^From $', line)
or re.search('^From - ... ... .. ..:..:.. ....$', line)
)
self.vOut.prnt('isIniHeader line = "{}" = {}'.format(
line[:20], result), 3)
return result
def isInsideHeader():
line = self.cleanLine()
result = bool(
re.search('^[^ ]+: .*$', line)
or re.search('^\s+[^ ].*$', line)
)
self.vOut.prnt('isInsideHeader line = "{}" = {}'.format(
line[:20], result), 3)
return result
def ifGetMessageId():
line = self.cleanLine()
self.vOut.prnt('ifGetMessageId', 3)
reMsgId = re.search('^Message-I[dD]: <(.*)>', line)
if reMsgId is not None:
self.msgId = reMsgId.group(1)
self.vOut.prnt(
'ifGetMessageId line = "{}"; self.msgId = "{}"'
.format(line[:20], self.msgId), 3)
def isEndHeader():
line = self.cleanLine()
result = bool(re.search('^ *$', line))
self.vOut.prnt('isEndHeader line = "{}" = {}'.format(
line[:20], result), 3)
return result
self.vOut.prnt('->processLine', 4)
if self.state in (self.READ, self.COPY):
self.vOut.prnt('processLine state == READ or COPY', 4)
if isIniHeader():
self.vOut.prnt('processLine isIniHeader', 4)
self.setState(self.HEADERCANDIDATE)
# self.headerLine()
elif self.state == self.HEADERCANDIDATE:
self.vOut.prnt('processLine state == HEADERCANDIDATE', 4)
if isInsideHeader():
self.vOut.prnt('processLine isInsideHeader', 4)
ifGetMessageId()
self.headerLine()
else:
self.vOut.prnt('processLine not isInsideHeader', 4)
if isEndHeader() and len(self.header) > 1:
self.vOut.prnt('processLine isEndHeader and has header', 4)
self.setState(self.COPY)
else:
self.vOut.prnt(
'processLine not isEndHeader or hasn''t header', 4)
self.setState(self.READ)
if self.state == self.COPY:
self.vOut.prnt('processLine state == COPY', 4)
self.eml.write(self.line)
def setState(self, state):
if self.state == state:
return
self.state = state
self.vOut.prnt('>setState = {}'.format(self.state), 3)
if self.state == self.READ:
self.vOut.prnt('setState = READ', 4)
self.header = []
if self.state == self.HEADERCANDIDATE:
self.vOut.prnt('setState = HEADERCANDIDATE', 4)
self.msgId = None
if self.state in (self.COPY, self.END):
self.vOut.prnt('setState = COPY or END', 4)
if self.eml is not None:
self.vOut.prnt('setState - andEml', 4)
self.endEml()
self.vOut.prnt('self.eml = {}'.format(self.eml), 4)
if self.state == self.COPY:
self.vOut.prnt('setState = COPY', 4)
self.vOut.prnt('setState - initEml', 4)
self.initEml()
self.vOut.prnt('setState - for self.header', 4)
for headerLine in self.header:
self.eml.write(headerLine)
self.vOut.prnt('setState - empty self.header', 4)
self.header = []
def parseArgs(self):
parser = argparse.ArgumentParser(
description=_('Extract EML files from MBox to subdirectory\n'
'version 0.1.6 2017-06-28'),
epilog="(c) Anselmo Blanco Dominguez (Tussor & Oxigenai)",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"mboxFile",
help='name of the MBox file')
parser.add_argument(
"-c", "--cleanMozilla",
action="store_true",
help='clean Mozilla tags in EML')
parser.add_argument(
"-l", "--lineLimit",
type=int,
default=0,
help='number of lines of mboxFile to be processed (if > 0)')
parser.add_argument(
"-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
self.args = parser.parse_args()
if __name__ == '__main__':
mb2eGT = gettext.translation('mb2e', 'po', fallback=True)
mb2eGT.install()
mbox = Mbox()
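# Small self-contained check (sketch) of the header-detection regexes used in
# Mbox.processLine above; the sample strings are illustrative only.
import re
print(bool(re.search('^From - ... ... .. ..:..:.. ....$', 'From - Mon Jan 02 10:11:12 2017')))  # True
print(bool(re.search('^[^ ]+: .*$', 'Subject: hello')))  # True  -> header line
print(bool(re.search('^ *$', '')))                       # True  -> end of header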
|
SGover/monopoly
|
test1.py
|
Python
|
unlicense
| 84
| 0
|
fr
|
om gui import playerDialog
n
|
ame = "haha"
name = playerDialog().show()
print(name)
|
couchbase/couchbase-python-client
|
acouchbase/tests/cases/transcoder_t.py
|
Python
|
apache-2.0
| 31,512
| 0.001111
|
import json
import platform
from datetime import timedelta
from unittest import SkipTest
from nose.tools import nottest
from functools import wraps
from acouchbase.cluster import (Cluster, get_event_loop,
close_event_loop)
from couchbase_tests.async_base import AsyncioTestCase
from couchbase.exceptions import DocumentNotFoundException, ValueFormatException, DocumentLockedException
from couchbase.transcoder import (JSONTranscoder, RawJSONTranscoder,
RawStringTranscoder, RawBinaryTranscoder, LegacyTranscoder)
from couchbase.collection import (GetOptions, UpsertOptions, InsertOptions, ReplaceOptions,
GetAndTouchOptions, GetAndLockOptions, GetAnyReplicaOptions, GetAllReplicasOptions)
@nottest
def async_test(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
return self.loop.run_until_complete(func(self, *args, **kwargs))
return wrapper
class AcouchbaseDefaultTranscoderTestSuite(object):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"
|
a": "b"}}
KEY = "imakey"
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
@async_test
async def test_default_tc_json_upsert(self):
await self.collection.upsert(self.KEY, self.CONTENT)
resp = awa
|
it self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_default_tc_json_insert(self):
await self.collection.insert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_default_tc_json_replace(self):
await self.collection.upsert(self.KEY, self.CONTENT)
new_content = self.CONTENT
new_content["some"] = "new content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(new_content, result)
@async_test
async def test_default_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_default_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_default_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_default_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_default_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_default_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, content)
@async_test
async def test_default_tc_binary_replace(self):
content = "Lets to a str first"
await self.collection.upsert(self.KEY, content)
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, new_content)
class AcouchbaseDefaultTranscoderTests(
AsyncioTestCase, AcouchbaseDefaultTranscoderTestSuite):
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseDefaultTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster)
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseDefaultTranscoderTests, cls).tearDownClass()
close_event_loop()
def setUp(self):
super(AcouchbaseDefaultTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
class AcouchbaseDefaultJsonTranscoderTests(AsyncioTestCase, AcouchbaseDefaultTranscoderTestSuite):
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseDefaultJsonTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=JSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseDefaultJsonTranscoderTests, cls).tearDownClass()
def setUp(self):
super(AcouchbaseDefaultJsonTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
class AcouchbaseRawJsonTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawJsonTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawJSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawJsonTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawJsonTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_json_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_json_replace(self):
await self.collection.upsert(self.KEY, "some string content")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result.decode("utf-8"))
@a
|
baspijhor/paparazzi
|
sw/ground_segment/python/dashboard/radiowatchframe.py
|
Python
|
gpl-2.0
| 2,290
| 0.00131
|
import wx
import sys
import os
import time
import threading
import math
import pynotify
import pygame.mixer
sys.path.append(os.getenv("PAPARAZZI_HOME") + "/sw/ext/pprzlink/lib/v1.0/python")
from pprzlink.ivy import IvyMessagesInterface
WIDTH = 150
HEIGHT = 40
UPDATE_INTERVAL = 250
class RadioWa
|
tchFrame(wx.Frame):
def message_recv(self, ac_id, msg):
if msg.name == "ROTORCRAFT_STATUS"
|
:
self.rc_status = int(msg['rc_status'])
if self.rc_status != 0 and not self.alertChannel.get_busy():
self.warn_timer = wx.CallLater(5, self.rclink_alert)
# else:
# self.notification.close()
def gui_update(self):
self.rc_statusText.SetLabel(["OK", "LOST", "REALLY LOST"][self.rc_status])
self.update_timer.Restart(UPDATE_INTERVAL)
def rclink_alert(self):
self.alertChannel.queue(self.alertSound)
self.notification.show()
time.sleep(5)
def setFont(self, control):
font = control.GetFont()
size = font.GetPointSize()
font.SetPointSize(size * 1.4)
control.SetFont(font)
def __init__(self):
wx.Frame.__init__(self, id=-1, parent=None, name=u'RCWatchFrame',
size=wx.Size(WIDTH, HEIGHT), title=u'RC Status')
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.rc_statusText = wx.StaticText(self, -1, "UNKWN")
pygame.mixer.init()
self.alertSound = pygame.mixer.Sound("crossing.wav")
self.alertChannel = pygame.mixer.Channel(False)
self.setFont(self.rc_statusText)
self.notification = pynotify.Notification("RC Link Warning!",
"RC Link status not OK!",
"dialog-warning")
self.rc_status = -1
pynotify.init("RC Status")
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.rc_statusText, 1, wx.EXPAND)
self.SetSizer(sizer)
sizer.Layout()
self.interface = IvyMessagesInterface("radiowatchframe")
self.interface.subscribe(self.message_recv)
self.update_timer = wx.CallLater(UPDATE_INTERVAL, self.gui_update)
def OnClose(self, event):
self.interface.shutdown()
self.Destroy()
|
envhyf/wrftools
|
wrftools/__init__.py
|
Python
|
gpl-3.0
| 148
| 0.033784
|
#import wrftools
#from exceptions import ConfigError, Domain
|
Error, ConversionError
#import tools
#import io
#__all__ = ['wrftools', 't
|
ools', 'io']
|
pythongssapi/python-gssapi
|
gssapi/_utils.py
|
Python
|
isc
| 5,004
| 0
|
import sys
import types
import typing as t
import decorator as deco
from gssapi.raw.misc import GSSError
if t.TYPE_CHECKING:
from gssapi.sec_contexts import SecurityContext
def import_gssapi_extension(
name: str,
) -> t.Optional[types.ModuleType]:
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
    the method returns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
def inquire_property(
name: str,
doc: t.Optional[str] = None
) -> property:
"""Creates a property based on an inquire result
This method creates a property that calls the
    :python:`_inquire` method, and returns the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self: "SecurityContext") -> t.Any:
if not self._started:
msg = (f"Cannot read {name} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding() -> str:
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(
enc: str,
) -> None:
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(
d: t.Dict[t.Union[bytes, str], t.Union[bytes, str]],
) -> t.Dict[bytes, bytes]:
"""Encodes any relevant strings in a dict"""
def enc(x: t.Union[bytes, str]) -> bytes:
if isinstance(x, str):
return x.encode(_ENCODING)
else:
return x
return {enc(k): enc(v) for k, v in d.items()}
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(
func: t.Callable,
self: "SecurityContext",
*args: t.Any,
**kwargs: t.Any,
) -> t.Optional[bytes]:
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
    The exception can be later retrieved through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
defer_step_errors = getattr(self, '__DEFER_STEP_ERRORS__', False)
if e.token is not None and defer_step_errors:
self._last_err = e
# skip the "return func" line above in the traceback
tb = e.__traceback__.tb_next # type: ignore[union-attr]
self._last_err.__traceback__ = tb
return e.token
else:
raise
@deco.decorator
def check_last_err(
func: t.Callable,
self: "SecurityContext",
*args: t.Any,
**kwargs: t.Any,
) -> t.Any:
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self,
|
*args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
    Additionally, it enables `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(
cls,
name: str,
parents: t.Tuple[t.Type],
attrs: t.Dict[str, t.Any],
) -> "CheckLastError":
attrs['__DEFER_STEP_ERRORS__'] = Tr
|
ue
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
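# A dependency-free illustration (simplified stand-ins, no GSSAPI) of the
# defer-then-raise behaviour that catch_and_return_token and check_last_err
# implement above: step() stashes the error and still hands back a token, and
# the next guarded call re-raises it.
class FakeContext:
    __DEFER_STEP_ERRORS__ = True
    def __init__(self):
        self._last_err = None
    def step(self):
        try:
            raise RuntimeError("handshake failed")
        except RuntimeError as e:
            if self.__DEFER_STEP_ERRORS__:
                self._last_err = e
                return b"partial-token"   # caller still receives the token
            raise
    def wrap(self, data):
        if self._last_err is not None:    # mirrors check_last_err
            err, self._last_err = self._last_err, None
            raise err
        return data
ctx = FakeContext()
print(ctx.step())        # b'partial-token'
try:
    ctx.wrap(b"hi")      # the deferred error surfaces here
except RuntimeError as e:
    print("deferred:", e)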
|
prologic/mio
|
mio/types/string.py
|
Python
|
mit
| 3,962
| 0.000252
|
from mio import runtime
from mio.utils import method
from mio.object import Object
from mio.lexer import encoding
from mio.core.message import Message
from mio.errors import AttributeError
class String(Object):
def __init__(self, value=u""):
super(String, self).__init__(value=value)
self.create_methods()
try:
self.parent = runtime.find("String")
except AttributeError:
self.parent = runtime.find("Object")
def __iter__(self):
for c in self.value:
yield self.clone(c)
def __add__(self, other):
return self.value + other
def __mul__(self, other):
return self.value * other
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __repr__(self):
return "u\"{0:s}\"".format(self.value)
def __str__(self):
return self.value.encode(encoding)
def __unicode__(self):
return self.value
@method()
def init(self, receiver, context, m, value=None):
receiver.value = value or u""
return receiver
# Special Metho
|
ds
@method("__getitem__")
def getItem(self, receiver, context, m, i):
i = int(i.eval(context))
return receiver.value[i]
@method("__len__")
def getLen(self, receiver, context, m):
return runtime.find("Number").clone(len(receiver.value))
# General Operations
@method("+")
def add(self, receiver, context, m, other):
return self.clone(receiver + str(other.eval(context)))
@method("*")
def mul(self, receiver, context,
|
m, other):
return self.clone(receiver * int(other.eval(context)))
@method()
def find(self, receiver, context, m, sub, start=None, end=None):
sub = str(sub.eval(context))
start = int(start.eval(context)) if start is not None else None
end = int(end.eval(context)) if end is not None else None
return runtime.find("Number").clone(receiver.value.find(sub, start, end))
@method()
def format(self, receiver, context, m, *args):
args = [str(arg.eval(context)) for arg in args]
return receiver.clone(receiver.value.format(*args))
@method()
def split(self, receiver, context, m, sep=None, maxsplit=-1):
sep = runtime.state.frommio(
sep.eval(context)) if sep is not None else sep
maxsplit = int(maxsplit.eval(context)) if maxsplit != -1 else maxsplit
xs = [runtime.types("String").clone(s)
for s in receiver.value.split(sep, maxsplit)]
return runtime.types("List").clone(xs)
@method()
def strip(self, receiver, context, m, chars=None):
chars = runtime.state.frommio(
chars.eval(context)) if chars is not None else chars
if chars is None:
value = receiver.value.strip()
else:
value = receiver.value.strip(chars)
return receiver.clone(value)
@method()
def join(self, receiver, context, m, *args):
if len(args) == 1 and isinstance(args[0], Message):
args = args[0].eval(context)
else:
args = [arg.eval(context) if isinstance(
arg, Message) else arg for arg in args]
return receiver.clone(receiver.value.join(map(str, args)))
@method()
def lower(self, receiver, context, m):
return self.clone(receiver.value.lower())
@method()
def upper(self, receiver, context, m):
return self.clone(receiver.value.upper())
@method()
def startswith(self, receiver, context, m, prefix, start=None, end=None):
prefix = str(prefix.eval(context))
start = int(start.eval(context)) if start is not None else None
end = int(end.eval(context)) if end is not None else None
truth = receiver.value.startswith(prefix, start, end)
return runtime.find("True") if truth else runtime.find("False")
|
stephenl6705/fluentPy
|
registration_param.py
|
Python
|
mit
| 451
| 0.017738
|
registry = set()
def register(active=True):
def decorate(func):
print('running register(active=%s)->decorate(%s)' % (active, func))
if active:
registry.add(func)
else:
registry.discard(func)
return func
return decorate
@r
|
egister(active=False)
def f1()
|
:
print('running f1()')
@register()
def f2():
print('running f2()')
def f3():
print('running f3()')
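
# Illustrative usage, not in the original example: by the time this runs, the
# two register(...) decorations above have already executed, so only f2 is in
# `registry` (f1 was registered with active=False, f3 was never decorated).
if __name__ == "__main__":
    print(registry)  # expected: {<function f2 ...>}
    f1()             # still callable; decoration never replaced the function
    f3()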
|
tuxite/ovh-dynhost-updater
|
updater.py
|
Python
|
apache-2.0
| 4,677
| 0.002138
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""OVH DynHost IP Updater.
Updates the DynHost Record IP of the server at least every 15 minutes.
Uses the OVH API.
Requires:
* ovh - https://github.com/ovh/python-ovh
* ipgetter - https://github.com/phoemur/ipgetter
"""
import re
import time
import os.path
import ConfigParser
import logging
import ovh
import ipgetter
# Creation of the logger
logger = logging.getLogger('OVH DynHost Updater')
logger.setLevel(logging.INFO)
# create console handler and set level to info
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to
|
ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# The paths in the OVH API (api.ovh.com)
UPDATE_PATH = "/domain/zone/{zonename}/dynHost/record/{id}
|
"
REFRESH_PATH = "/domain/zone/{zonename}/refresh"
# The file where the IP will be stored
# As the script doesn't run continuously, we need to retrieve the IP somewhere...
IP_FILE = "stored_ip.txt"
# The period between two forced updates of the IP on the OVH server.
# If you launch the script every minute, this reduces the number of calls to the
# OVH server.
MIN_UPDATE_TIME = 15 # In minutes [1-59]
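# Example cron entry for the once-a-minute launch assumed above (the install
# path is a hypothetical placeholder):
#   * * * * *  cd /opt/ovh-dynhost-updater && ./updater.py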
# Regex for checking IP strings
check_re = re.compile(r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$')
def get_conf():
"""Get the configuration from the file `subdomain.conf`.
Mandatory sections/values:
- zone/name
- subdomain/id
- subdomain/name
"""
config = ConfigParser.SafeConfigParser()
config.read('subdomain.conf')
try:
zonename = config.get('zone', 'name')
dynhost_id = config.get('subdomain', 'id')
subdomain = config.get('subdomain', 'name')
except ConfigParser.Error, error:
logger.error("Configuration File Error: %s", error)
return None, None
path = {
'update': UPDATE_PATH.format(zonename=zonename, id=dynhost_id),
'refresh': REFRESH_PATH.format(zonename=zonename)
}
return path, subdomain
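# Example `subdomain.conf` matching the sections read above (all values are
# hypothetical placeholders, not a real zone):
#
#   [zone]
#   name = example.org
#
#   [subdomain]
#   id = 1234567
#   name = dynhost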
def get_stored_ip():
    """Return the IP stored in `IP_FILE`, False if it is not a valid IP, or None if the file is missing."""
try:
with open(IP_FILE, "r") as fd:
ip = fd.read()
fd.close()
result = check_re.match(ip)
if result:
return result.group(0)
# No match. Not blocking.
logger.warning("Bad stored IP. No regex match.")
return False
except IOError:
# No file found.
logger.warning("No such file: %s", IP_FILE)
return None
def store_ip(ip):
"""Write the IP into the file `IP_FILE`."""
try:
with open(IP_FILE, 'w') as fd:
fd.write(ip)
fd.close()
return True
except IOError:
# Not possible to write a file.
logger.error("Impossible to write %s", os.path.abspath(IP_FILE))
return False
def get_dynhost_ip():
"""Get the DynHost IP record from OVH server using the API."""
    path, _ = get_conf()
    if not path:
        return False
    client = ovh.Client()
    dynhost_current = client.get(path['update'])
if 'ip' in dynhost_current:
return dynhost_current['ip']
else:
logger.warning("No IP returned by OVH...")
return False
def set_dynhost_ip(ip):
"""Set the IP using the OVH API."""
# Get the conf
path, subdomain = get_conf()
if not path or not subdomain:
logger.error("No path or subdomain!")
return False
params = {"ip": ip, "subDomain": subdomain}
client = ovh.Client()
try:
client.put(path['update'], **params)
client.post(path['refresh'])
except ovh.exceptions.NotGrantedCall, error:
logger.error("OVH Not Granted Call: %s", error)
return False
return True
def compare():
"""Compare the current IP and the stored IP.
Update the DynHost IP if different.
"""
stored_ip = get_stored_ip()
logger.info("Stored IP: %s", stored_ip)
current_ip = ipgetter.myip()
logger.info("Current IP: %s", current_ip)
# Check if there is no difference between stored IP and current IP
if not stored_ip or (stored_ip != current_ip):
logger.info("DynHost IP updated! [New IP]")
dynhost_ip = set_dynhost_ip(current_ip)
if dynhost_ip:
store_ip(current_ip)
else:
# This will force update next call
store_ip('Error')
# Set each 15 minutes the Dynhost IP
if (time.gmtime().tm_min % MIN_UPDATE_TIME) == 0:
logger.info("DynHost IP updated! [15 min]")
set_dynhost_ip(current_ip)
if __name__ == "__main__":
compare()
|
uppsaladatavetare/foobar-api
|
src/wallet/migrations/0010_auto_20170218_2322.py
|
Python
|
mit
| 640
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-18 23:22
from __future__ import unicode_literals
from django.db import migrations
import enumfields.fields
import wallet.enums
import enum
class TrxType(enum.Enum):
FINALIZED = 0
|
PENDING = 1
CANCELLATION = 2
class Migration(migrations.Migration):
dependencies = [
('wallet', '000
|
9_remove_wallettransaction_trx_status'),
]
operations = [
migrations.AlterField(
model_name='wallettransaction',
name='trx_type',
field=enumfields.fields.EnumIntegerField(default=0, enum=TrxType),
),
]
|
yl565/statsmodels
|
statsmodels/tsa/statespace/tests/test_models.py
|
Python
|
bsd-3-clause
| 6,374
| 0.000157
|
"""
Tests for miscellaneous models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import mlemodel
from statsmodels import datasets
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from .results import results_sarimax
current_path = os.path.dirname(os.path.abspath(__file__))
class Intercepts(mlemodel.MLEModel):
"""
Test class for observation and state intercepts (which usually don't
get tested in other models).
"""
def __init__(self, endog, **kwargs):
k_states = 3
k_posdef = 3
super(Intercepts, self).__init__(
endog, k_states=k_states, k_posdef=k_posdef, **kwargs)
self['design'] = np.eye(3)
self['obs_cov'] = np.eye(3)
self['transition'] = np.eye(3)
self['selection'] = np.eye(3)
self['state_cov'] = np.eye(3)
self.initialize_approximate_diffuse()
@property
def param_names(self):
return ['d.1', 'd.2', 'd.3', 'c.1', 'c.2', 'c.3']
@property
def start_params(self):
return np.arange(6)
def update(self, params, **kwargs):
params = super(Intercepts, self).update(params, **kwargs)
self['obs_intercept'] = params[:3]
self['state_intercept'] = params[3:]
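
# Illustrative only, not part of the test suite: running the toy model above on
# some data. `endog` is assumed to be any (nobs x 3) array-like; the six
# parameters fill the observation and state intercepts.
#
#   mod = Intercepts(endog)
#   res = mod.smooth(np.arange(6) + 1., return_ssm=True)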
class TestIntercepts(object):
@classmethod
def setup_class(cls, which='mixed', **kwargs):
# Results
path = current_path + os.sep + 'results/results_intercepts_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = dta[['realgdp', 'realcons', 'realinv']].copy()
obs = obs / obs.std()
if which == 'all':
obs.ix[:50, :] = np.nan
obs.ix[119:130, :] = np.nan
elif which == 'partial':
obs.ix[0:50, 0] = np.nan
obs.ix[119:130, 0] = np.nan
elif which == 'mixed':
obs.ix[0:50, 0] = np.nan
obs.ix[19:70, 1] = np.nan
obs.ix[39:90, 2] = np.nan
obs.ix[119:130, 0] = np.nan
obs.ix[119:130, 2] = np.nan
mod = Intercepts(obs, **kwargs)
cls.params = np.arange(6) + 1
cls.model = mod
cls.results = mod.smooth(cls.params, return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0, i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:, :, i]))
cls.results.det_predicted_state_cov[0, i] = np.linalg.det(
cls.results.predicted_state_cov[:, :, i+1])
cls.results.det_smoothed_state_cov[0, i] = np.linalg.det(
cls.results.smoothed_state_cov[:, :, i])
cls.results.det_smoothed_state_disturbance_cov[0, i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:, :, i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), -7924.03893566)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']]
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
self.results.det_scaled_smoothed_estimator_cov.T,
self.desired[['detN']]
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T,
self.desired[['m1', 'm2', 'm3']]
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T,
self.desired[['v1', 'v2', 'v3']]
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal(),
self.desired[['F1', 'F2', 'F3']]
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:, 1:].T,
self.desired[['a1', 'a2', 'a3']]
)
|
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.
|
T,
self.desired[['detP']]
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']]
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']]
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1', 'muhat2', 'muhat3']]
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1', 'etahat2', 'etahat3']]
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']]
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1', 'epshat2', 'epshat3']], atol=1e-9
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1', 'Veps2', 'Veps3']]
)
|
GiorgioAresu/backuppc-pi-display
|
settings.py
|
Python
|
mit
| 1,278
| 0.000782
|
BACKUPPC_DIR = "/usr/share/backuppc"
TARGET_HOST = "192.168.1.65"
BACKUPPC_USER_UID = 110
BACKUPPC_USER_GID = 116
DEBUG = False
TRANSLATIONS = {
'Status_idle': 'inattivo',
'Status_backup_starting': 'avvio backup',
'Status_backup_in_progress': 'backup in esecuzione',
'Status_restore_starting': 'avvio ripristino',
'Status_restore_in_progress': 'restore in esecuzione',
'Status_link_pending': 'collegamenti pendenti',
'Status_link_running': 'collegamenti in esecuzione',
'Reason_backup_done': 'backup eseguito',
'Reason_restore_done': 'restore eseguito',
'Reason_archive_done': 'archivio eseguito',
'Reason_
|
nothing_to_do': 'nulla da fare',
'Reason_backup_failed': 'backup fallito',
'Reason_restore_failed': 'restore fallito',
'Reason_archive_failed': 'archivio fallito',
'Reason_no_ping': 'no ping',
'Reason_backup_canceled_by_user': 'backup annullato dall\'utente',
'Reason_restore_canceled_by_user': 'ripristino annullato dall\'utente',
'Reason_archive_canceled_by_user
|
': 'archivio annullato dall\'utente',
'Disabled_OnlyManualBackups': 'auto disabilitato',
'Disabled_AllBackupsDisabled': 'disabilitato',
'full': 'completo',
'incr': 'incrementale',
'backupType_partial': 'parziale',
}
|
pombredanne/lizard-progress
|
hdsr_controle/realtech_hdsr/data_loader.py
|
Python
|
gpl-3.0
| 11,413
| 0.013669
|
'''
Created on Aug 1, 2012
@author: ouayed
'''
import logging,os
import hdsr_controle.realtech_hdsr.models as model
from django.contrib.gis.utils import LayerMapping,LayerMapError
from django.db import transaction,IntegrityError
from django.utils.datetime_safe import datetime
from hdsr_controle.realtech_hdsr import export
from metfileparser import metfileparser
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
HYDROVAKKEN_TAG = "Hydrovakken_"
PROFIELEN_TAG = "DWP_"
METFILE_TAG = ".met"
SHAPEFILE_TAG =".shp"
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(name)-12s %(message)s',
datefmt='%m-%d %H:%M',
filename= os.path.join( ROOT_PATH ,'log.txt'),
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')
console.setFormatter(formatter)
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
def save(obj):
try:
obj.save()
except IntegrityError:
transaction.rollback()
else:
transaction.commit()
class projectData:
def __init__(self,projectnaam,gebruiker=None,gebruikFoldersIndeling=True,datafolder=DATA_PATH):
self.name = projectnaam
self.profielenShapes =[]
self.hydrovakkenShapes = []
self.metBestanden = []
self.klant_id=0
self.datafolder=datafolder
if gebruikFoldersIndeling:
self.setDataFoldersIndeling(gebruiker)
def setDataFoldersIndeling(self,gebruiker):
try:
self.project,created = model.HdsrGebruikersProjecten.objects.get_or_create(gebruiker = gebruiker,project = os.path.basename(self.name))
if not created:
                raise Exception( "Kan het project " + self.name + " niet aanmaken")
for root, _ , filenames in os.walk(os.path.join(self.datafolder, self.name)):
for filename in filenames:
if filename.endswith(SHAPEFILE_TAG):
if filename.startswith(PROFIELEN_TAG):
self.profielenShapes.append(os.path.join(root, filename))
if filename.startswith(HYDROVAKKEN_TAG):
self.hydrovakkenShapes.append(os.path.join(root, filename))
if filename.endswith(METFILE_TAG):
self.metBestanden.append(os.path.join(root, filename))
except Exception,e:
self.load_log = logging.getLogger("projectData")
self.load_log.exception(e)
raise
class gebruikerData:
def __init__(self,gebruikernaam,gebruikFoldersIndeling=True,datafolder=DATA_PATH):
self.name = gebruikernaam
self.projecten=[]
self.datafolder=datafolder
if gebruikFoldersIndeling:
self.setDataFoldersIndeling()
def setDataFoldersIndeling(self):
try:
self.gebruiker,created = model.HdsrGebruikers.objects.get_or_create (gebruiker_ref = self.name)
if not created:
raise Exception("Kan de aannemer " + self.name + " niet aanmaken!")
for l in os.listdir(os.path.join(self.datafolder,self.name)):
if os.path.isdir(os.path.join(self.datafolder,os.path.join(self.name,l))):
self.projecten.append(projectData(gebruiker=self.gebruiker,projectnaam=os.path.join(self.name,l)))
except Exception,e:
self.load_log = logging.getLogger('gebruikerData')
self.load_log.exception("laden data voor aannemer " + self.name)
raise e
def loadGebruikersData(datafolder):
load_log = logging.getLogger('loadGebruikersData')
load_log.info("datapath: " + datafolder)
data =[]
try:
for f in os.listdir(datafolder):
if os.path.isdir(os.path.join(datafolder,f)):
g = gebruikerData(gebruikernaam=f)
data.append(g)
except Exception,e:
raise (e)
return data
def saveShapeFile(model,data,mapping,verbose,project,beginTime):
load_log = logging.getLogger('saveShapeFile')
try:
lm = LayerMapping(model, data, mapping,transform=False, encoding='iso-8859-1')
lm.save(strict=True, verbose=verbose)
model.objects.filter(datum_verw__gte = beginTime,project = None).update(project=project.project)
except LayerMapError,e:
load_log.error("Kolommen komen niet overeen met de shapebestand: " + os.path.basename(data) )
raise e
except Exception,e:
load_log.info("mappen datamodel met de shapebestand: "+ data)
load_log.exception(e)
raise e
def loadshapefiles(verbose,gebruikersdata):
load_log = logging.getLogger('loadshapefiles')
for gebruiker in gebruikersdata:
load_log.info("laden shape bestanden voor gebruiker: " + gebruiker.name)
for project in gebruiker.projecten:
load_log.info("laden shape bestanden voor project: " + project.name)
beginTime = datetime.now()
for shapefile in project.hydrovakkenShapes:
saveShapeFile(model.hdsrHydrovakken, shapefi
|
le, model.realtech_hdsr_Hydrovakken_mapping, verbose, project, beginTime)
for shapefile in project.profielenShapes:
saveShapeFile(model.HdsrDWPProfielen, shapefile, model.realtech_hdsr_DWPProfielen_mapping, verbose, project, beginTime)
def exportHydrovakken(gebruikersdata):
for gebruiker in gebruikersdata:
for project in gebruiker.projecten
|
:
for shapefile in project.hydrovakkenShapes:
export.ShpResponder(queryset=model.hdsrHydrovakken.objects.filter(project=project.project), file_name= shapefile,geo_field=None, proj_transform=None)
def loadmetfiles(gebruikersdata):
for gebruiker in gebruikersdata:
for project in gebruiker.projecten:
model.hdsrHydrovakken.objects.filter(project=project.project).update(slib_vb_cl=0,slib_od_cl=0)
for metfile in project.metBestanden:
metfileparser.parsMetfile(metfile,project.project)
def controleren(hydrovakkenshapefile,dwpshapefile,metfile,projectnaam="dummyProject",aannemer="dummyAannemer",verwijderOudeData=True):
"""
    Input:
    hydrovakkenshapefile = hydrovakken shapefile, e.g. ./Hydrovakken_TestProject.shp
    dwpshapefile = DWP profiles shapefile, e.g. ./DWP_TestProject.shp
    metfile = metfile, e.g. ./Metfile_TestProject.met
    projectnaam = name of the project
    aannemer = name of the contractor
    verwijderOudeData: used to empty the hdsr control tables.
    The following tables are emptied by it:
-model.HdsrMeetpunten
-model.HdsrProfielen
-model.hdsrHydrovakken
-model.HdsrDWPProfielen
-model.HdsrGebruikersProjecten
-model.HdsrGebruikers
"""
load_log = logging.getLogger('controleren')
dataOntbreekt=""
if not os.path.exists(hydrovakkenshapefile):
dataOntbreekt = 'Hydrovakken shape %s bestaat niet!\n' % hydrovakkenshapefile
elif not os.path.exists(dwpshapefile):
dataOntbreekt = dataOntbreekt + 'DWP profielen shape %s bestaat niet!\n' % dwpshapefile
elif not os.path.exists(metfile):
dataOntbreekt = dataOntbreekt + 'Metfile %s bestaat niet!\n' % metfile
if dataOntbreekt != "":
load_log.exception(dataOntbreekt)
return
try:
truncateTables(verwijderOudeData)
data =[]
gebruiker,created = model.HdsrGebruikers.objects.get_or_create (gebruiker_ref = aannemer)
if not created:
raise Exception( "Kan de aannemer " + aannemer + " niet aanmaken")
project,created = model.HdsrGebr
|
PaddlePaddle/Paddle
|
python/paddle/fluid/tests/book/test_recommender_system.py
|
Python
|
apache-2.0
| 12,089
| 0.000662
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import math
import sys
import os
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.layers as layers
import paddle.fluid.nets as nets
from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGDOptimizer
paddle.enable_static()
IS_SPARSE = True
USE_GPU = False
BATCH_SIZE = 256
def get_usr_combined_features():
# FIXME(dzh) : old API integer_value(10) may has range check.
    # currently we don't have a user-configured check.
USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
uid = layers.data(name='user_id', shape=[1], dtype='int64')
usr_emb = layers.embedding(
input=uid,
dtype='float32',
size=[USR_DICT_SIZE, 32],
param_attr='user_table',
is_sparse=IS_SPARSE)
usr_fc = layers.fc(input=usr_emb, size=32)
USR_GENDER_DICT_SIZE = 2
usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
usr_gender_emb = layers.embedding(
input=usr_gender_id,
size=[USR_GENDER_DICT_SIZE, 16],
param_attr='gender_table',
is_sparse=IS_SPARSE)
usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
usr_age_emb = layers.embedding(
input=usr_age_id,
size=[USR_AGE_DICT_SIZE, 16],
is_sparse=IS_SPARSE,
param_attr='age_table')
usr_age_fc = layers.fc(input=usr_age_emb, size=16)
USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
usr_job_emb = layers.embedding(
input=usr_job_id,
size=[USR_JOB_DICT_SIZE, 16],
param_attr='job_table',
is_sparse=IS_SPARSE)
usr_job_fc = layers.fc(input=usr_job_emb, size=16)
concat_embed = layers.concat(
input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return usr_combined_features
def get_mov_combined_features():
MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
mov_emb = layers.embedding(
input=mov_id,
dtype='float32',
size=[MOV_DICT_SIZE, 32],
param_attr='movie_table',
is_sparse=IS_SPARSE)
mov_fc = layers.fc(input=mov_emb, size=32)
CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
category_id = layers.data(
name='category_id', shape=[1], dtype='int64', lod_level=1)
mov_categories_emb = layers.embedding(
input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_categories_hidden = layers.sequence_pool(
input=mov_categories_emb, pool_type="sum")
MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
mov_title_id = layers.data(
name='movie_title', shape=[1], dtype='int64', lod_level=1)
mov_title_emb = layers.embedding(
input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
mov_title_conv = nets.sequence_conv_pool(
input=mov_title_emb,
num_filters=32,
filter_size=3,
act="tanh",
pool_type="sum")
concat_embed = layers.concat(
input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
# FIXME(dzh) : need tanh operator
mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
return mov_combined_features
def model():
usr_combined_features = get_usr_combined_features()
mov_combined_features = get_mov_combined_features()
# need cos sim
inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
scale_infer = layers.scale(x=inference, scale=5.0)
label = layers.data(name='score', shape=[1], dtype='float32')
square_cost = layers.square_error_cost(input=scale_infer, label=label)
avg_cost = layers.mean(square_cost)
return scale_infer, avg_cost
def train(use_cuda, save_dirname, is_local=True):
scale_infer, avg_cost = model()
# test program
test_program = fluid.default_main_program().clone(for_test=True)
sgd_optimizer = SGDOptimizer(learning_rate=0.2)
sgd_optimizer.minimize(avg_cost)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = Executor(place)
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.movielens.train(), buf_size=8192),
batch_size=BATCH_SIZE)
test_reader = paddle.batch(
paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
feed_order = [
'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
'movie_title', 'score'
]
def train_loop(main_program):
exe.run(framework.default_startup_program())
feed_list = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list, place)
PASS_NUM = 100
for pass_id in range(PASS_NUM):
for batch_id, data in enumerate(train_reader()):
# train a mini-batch
outs = exe.run(program=main_program,
feed=feeder.feed(data),
fetch_list=[avg_cost])
out = np.array(outs[0])
if (batch_id + 1) % 10 == 0:
avg_cost_set = []
for test_data in test_reader():
avg_cost_np = exe.run(program=test_program,
feed=feeder.feed(test_data),
fetch_list=[avg_cost])
avg_cost_set.append(avg_cost_n
|
p[0])
break # test only 1 segment for speeding up CI
# get test avg_cost
test_avg_cost = np.array(avg_cost_set).mean()
if test_avg_cost < 6.0:
# if avg_cost less than 6.0, we think our code is good.
if save_dirname is not None:
fluid.io.save_inference_model(save_
|
dirname, [
"user_id", "gender_id", "age_id", "job_id",
"movie_id", "category_id", "movie_title"
], [scale_infer], exe)
return
if math.isnan(float(out[0])):
sys.exit("got NaN loss, training failed.")
if is_local:
train_loop(fluid.default_main_program())
else:
port = os.getenv("PADDLE_PSERVER_PORT", "6174")
pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip...
eplist = []
for ip in pserver_ips.split(","):
eplist.append(':'.join([ip, port]))
pserver_endpoints = ",".join(eplist) # ip:port,ip:port...
trainers = int(os.getenv("PADDLE_TRAINERS"))
current_endpoint = os.getenv("POD_IP") + ":" + port
trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER")
t = fluid.DistributeTranspiler()
t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
if training_role == "PSERVER":
pserver_prog = t.get_pser
|
dask/distributed
|
distributed/tests/test_asyncprocess.py
|
Python
|
bsd-3-clause
| 11,851
| 0.000506
|
import asyncio
import gc
import os
import signal
import sys
import threading
import weakref
from datetime import timedelta
from time import sleep
import psutil
import pytest
from tornado import gen
from tornado.locks import Event
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.process import AsyncProcess
from distributed.utils import mp_context
from distributed.utils_test import gen_test, nodebug, pristine_loop
def feed(in_q, out_q):
obj = in_q.get(timeout=5)
out_q.put(obj)
def exit(q):
sys.exit(q.get())
def exit_now(rc=0):
sys.exit(rc)
def exit_with_signal(signum):
signal.signal(signal.SIGINT, signal.SIG_DFL)
while True:
os.kill(os.getpid(), signum)
sleep(0.01)
def wait():
while True:
sleep(0.01)
def threads_info(q):
q.put(len(threading.enumerate()))
q.put(threading.current_thread().name)
@nodebug
@gen_test()
async def test_simple():
to_child = mp_context.Queue()
from_child = mp_context.Queue()
proc = AsyncProcess(target=feed, args=(to_child, from_child))
assert not proc.is_alive()
assert proc.pid is None
assert proc.exitcode is None
assert not proc.daemon
proc.daemon = True
assert proc.daemon
wr1 = weakref.ref(proc)
wr2 = weakref.ref(proc._process)
# join() before start()
with pytest.raises(AssertionError):
await proc.join()
await proc.start()
assert proc.is_alive()
assert proc.pid is not None
assert proc.exitcode is None
t1 = time()
await proc.join(timeout=0.02)
dt = time() - t1
assert 0.2 >= dt >= 0.01
assert proc.is_alive()
assert proc.pid is not None
assert proc.exitcode is None
# setting daemon attribute after start()
with pytest.raises(AssertionError):
proc.daemon = False
to_child.put(5)
assert from_child.get() == 5
# child should be stopping now
t1 = time()
await proc.join(timeout=30)
dt = time() - t1
assert dt <= 1.0
assert not proc.is_alive()
assert proc.pid is not None
assert proc.exitcode == 0
# join() again
t1 = time()
await proc.join()
dt = time() - t1
assert dt <= 0.6
del proc
gc.collect()
start = time()
while wr1() is not None and time() < start + 1:
# Perhaps the GIL switched before _watch_process() exit,
# help it a little
sleep(0.001)
gc.collect()
if wr1() is not None:
# Help diagnosing
from types import FrameType
p = wr1()
if p is not None:
rc = sys.getrefcount(p)
refs = gc.get_referrers(p)
del p
print("refs to proc:", rc, refs)
frames = [r for r in refs if isinstance(r, FrameType)]
for i, f in enumerate(frames):
print(
"frames #%d:" % i,
f.f_code.co_name,
f.f_code.co_filename,
sorted(f.f_locals),
)
pytest.fail("AsyncProcess should have been destroyed")
t1 = time()
while wr2() is not None:
await asyncio.sleep(0.01)
gc.collect()
dt = time() - t1
assert dt < 2.0
@gen_test()
async def test_exitcode():
q = mp_context.Queue()
proc = AsyncProcess(target=exit, kwargs={"q": q})
proc.daemon = True
assert not proc.is_alive()
assert proc.exitcode is None
await proc.start()
assert proc.is_alive()
assert proc.exitcode is None
q.put(5)
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.e
|
xitcode == 5
@pytest.mark.skipif(WINDOWS, reason="POSIX only")
@gen_test()
async def test_signal():
proc = AsyncProcess(target=exit_with_signal, args=(signal.SIGINT,))
proc.daemon = True
assert not proc.is_alive()
assert proc.exitcode is None
await proc.start()
await proc.join(timeout=30)
a
|
ssert not proc.is_alive()
# Can be 255 with forkserver, see https://bugs.python.org/issue30589
assert proc.exitcode in (-signal.SIGINT, 255)
proc = AsyncProcess(target=wait)
await proc.start()
os.kill(proc.pid, signal.SIGTERM)
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
async def test_terminate():
proc = AsyncProcess(target=wait)
proc.daemon = True
await proc.start()
await proc.terminate()
await proc.join(timeout=30)
assert not proc.is_alive()
assert proc.exitcode in (-signal.SIGTERM, 255)
@gen_test()
async def test_close():
proc = AsyncProcess(target=exit_now)
proc.close()
with pytest.raises(ValueError):
await proc.start()
proc = AsyncProcess(target=exit_now)
await proc.start()
proc.close()
with pytest.raises(ValueError):
await proc.terminate()
proc = AsyncProcess(target=exit_now)
await proc.start()
await proc.join()
proc.close()
with pytest.raises(ValueError):
await proc.join()
proc.close()
@gen_test()
async def test_exit_callback():
to_child = mp_context.Queue()
from_child = mp_context.Queue()
evt = Event()
# FIXME: this breaks if changed to async def...
@gen.coroutine
def on_stop(_proc):
assert _proc is proc
yield gen.moment
evt.set()
# Normal process exit
proc = AsyncProcess(target=feed, args=(to_child, from_child))
evt.clear()
proc.set_exit_callback(on_stop)
proc.daemon = True
await proc.start()
await asyncio.sleep(0.05)
assert proc.is_alive()
assert not evt.is_set()
to_child.put(None)
await evt.wait(timedelta(seconds=5))
assert evt.is_set()
assert not proc.is_alive()
# Process terminated
proc = AsyncProcess(target=wait)
evt.clear()
proc.set_exit_callback(on_stop)
proc.daemon = True
await proc.start()
await asyncio.sleep(0.05)
assert proc.is_alive()
assert not evt.is_set()
await proc.terminate()
await evt.wait(timedelta(seconds=5))
assert evt.is_set()
@gen_test()
async def test_child_main_thread():
"""
The main thread in the child should be called "MainThread".
"""
q = mp_context.Queue()
proc = AsyncProcess(target=threads_info, args=(q,))
await proc.start()
await proc.join()
n_threads = q.get()
main_name = q.get()
assert n_threads <= 3
assert main_name == "MainThread"
q.close()
q._reader.close()
q._writer.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_test()
async def test_num_fds():
# Warm up
proc = AsyncProcess(target=exit_now)
proc.daemon = True
await proc.start()
await proc.join()
p = psutil.Process()
before = p.num_fds()
proc = AsyncProcess(target=exit_now)
proc.daemon = True
await proc.start()
await proc.join()
assert not proc.is_alive()
assert proc.exitcode == 0
while p.num_fds() > before:
await asyncio.sleep(0.01)
@gen_test()
async def test_terminate_after_stop():
proc = AsyncProcess(target=sleep, args=(0,))
await proc.start()
await asyncio.sleep(0.1)
await proc.terminate()
def _worker_process(worker_ready, child_pipe):
# child_pipe is the write-side of the children_alive pipe held by the
# test process. When this _worker_process exits, this file descriptor should
# have no references remaining anywhere and be closed by the kernel. The
# test will therefore be able to tell that this process has exited by
# reading children_alive.
# Signal to parent process that this process has started and made it this
# far. This should cause the parent to exit rapidly after this statement.
worker_ready.set()
# The parent exiting should cause this process to os._exit from a monitor
# thread. This sleep should never return.
shorter_timeout = 2.5 # timeout shorter than that in the spawning test.
sleep(shorter_timeout)
# Unreachable if functioning correctly.
child_pipe.send("child should have exited by now")
def _parent_proces
|
isabellewei/deephealth
|
data/network.py
|
Python
|
mit
| 3,768
| 0.004777
|
'''
A Multilayer Perceptron implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
#Load Medchart data.
filename_queue = tf.train.string_input_producer(["parsed.csv"])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
record_defaults = [[-1]] * 50
columns = tf.decode_csv(value, record_defaults=record_defaults)
#targets 7 8
col_7 = columns[7]
col_8 = columns[8]
del columns[7]
del columns[7]
features = tf.stack(columns)
with tf.Session() as sess:
# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(1200):
# Retrieve a single instance:
    example, label_7, label_8 = sess.run([features, col_7, col_8])
coord.request_stop()
coord.join(threads)
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Create model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b
|
1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
|
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(csv_size/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print("Optimization Finished!")
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
|
michaelconnor00/gbdxtools
|
tests/unit/test_idaho.py
|
Python
|
mit
| 2,604
| 0.003072
|
'''
Authors: Donnie Marino, Kostas Stamatiou
Contact: [email protected]
Unit tests for the gbdxtools.Idaho class
'''
from gbdxtools import Interface
from gbdxtools.idaho import Idaho
from auth_mock import get_mock_gbdx_session
import vcr
from os.path import join, isfile, dirname, realpath
import tempfile
import unittest
# How to use the mock_gbdx_session and vcr to create unit tests:
# 1. Add a new test that is dependent upon actually hitting GBDX APIs.
# 2. Decorate the test with @vcr appropriately
# 3. Replace "dummytoken" with a real gbdx token
# 4. Run the tests (existing test shouldn't be affected by use of a real token). This will record a "cassette".
# 5. Replace the real gbdx token with "dummytoken" again
# 6. Edit the cassette to remove any possibly sensitive information (s3 creds for example)
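# A hedged sketch of step 2 for a hypothetical new test (the cassette path and
# method name below are made up, not part of this suite):
#
#   @vcr.use_cassette('tests/unit/cassettes/test_idaho_new_case.yaml',
#                     filter_headers=['authorization'])
#   def test_idaho_new_case(self):
#       i = Idaho(self.gbdx)
#       ...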
class IdahoTest(unittest.TestCase):
_temp_path = None
@classmethod
def setUpClass(cls):
        mock_gbdx_session = get_mock_gbdx_session(token='dummytoken')
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
cls._temp_path = tempfile.mkdtemp()
print("Created: {}".format(cls._temp_path))
def test_init(self):
c = Idaho(self.gbdx)
self.assertTrue(isinstance(c, Idaho))
@vcr.use_cassette('tests/unit/cassettes/test_idaho_get_images_by_catid_and_aoi.yaml', filter_headers=['authorization'])
def test_idaho_get_images_by_catid_and_aoi(self):
i = Idaho(self.gbdx)
catid = '10400100203F1300'
aoi_wkt = "POLYGON ((-105.0207996368408345 39.7338828628182839, -105.0207996368408345 39.7365972921260067, -105.0158751010894775 39.7365972921260067, -105.0158751010894775 39.7338828628182839, -105.0207996368408345 39.7338828628
|
182839))"
results = i.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=aoi_wkt)
assert len(results['results']) == 2
@vcr.use_cassette('tests/unit/cassettes/test_idaho_get_images_by_catid.yaml', filter_headers=['authorization'])
def test_idaho_get_images_by_catid(self):
i = Idaho(self.gb
|
dx)
catid = '10400100203F1300'
results = i.get_images_by_catid(catid=catid)
assert len(results['results']) == 12
@vcr.use_cassette('tests/unit/cassettes/test_idaho_describe_images.yaml', filter_headers=['authorization'])
def test_idaho_describe_images(self):
i = Idaho(self.gbdx)
catid = '10400100203F1300'
description = i.describe_images(i.get_images_by_catid(catid=catid))
assert description['10400100203F1300']['parts'][1]['PAN']['id'] =='b1f6448b-aecd-4d9b-99ec-9cad8d079043'
|
dochang/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 20,345
| 0.01332
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
def shell_expand(path):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
value = mk_boolean(value)
if value:
if integer:
value = int(value)
elif floating:
value = float(value)
elif islist:
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif isnone:
if value == "None":
value = None
elif ispath:
value = shell_expand(value)
elif isinstance(value, string_types):
value = unquote(value)
return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
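# Illustrative lookup order for the search implemented above (file names are
# examples only):
#   ANSIBLE_CONFIG=/tmp/my.cfg ansible-playbook play.yml   # explicit path wins
#   otherwise ./ansible.cfg, then ~/.ansible.cfg, then /etc/ansible/ansible.cfg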
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY',
|
None, ispath=True)
DEFAULT_ROLES_PATH = ge
|
t_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS,
|
sirk390/coinpy
|
coinpy-lib/src/coinpy/lib/transactions/tx_checks.py
|
Python
|
lgpl-3.0
| 2,364
| 0.004653
|
from
|
coinpy.lib.serialization.structures.s11n_tx import TxSerializer
from coinpy.model.constants.bitcoin import MAX_BLOCK_SIZE, is_money_range
from coinpy.lib.serialization.scripts.serialize import ScriptSerializer
class TxVerifier():
def __init__(self, runmode):
self.runmode = runmode
self.tx_serializer = TxSerializer()
self.script_serializer = ScriptSerializer()
"""
basic_check: run tests that don't require any
|
context.
"""
def basic_checks(self, tx):
self.check_size_limit(tx)
self.check_vin_empty(tx)
self.check_vout_empty(tx)
self.check_money_range(tx)
self.check_dupplicate_inputs(tx)
self.check_coinbase_script_size(tx)
self.check_null_inputs(tx)
def check_size_limit(self, tx):
if not tx.rawdata:
tx.rawdata = self.tx_serializer.serialize(tx)
if len(tx.rawdata) > MAX_BLOCK_SIZE:
raise Exception("Transaction too large : %d bytes" % (len(tx.rawdata)))
def check_vin_empty(self, tx):
if (not tx.in_list):
raise Exception("vin empty" )
def check_vout_empty(self, tx):
if (not tx.out_list):
raise Exception("vout empty" )
def check_money_range(self, tx):
for txout in tx.out_list:
if not is_money_range(txout.value):
raise Exception("txout not in money range")
if not is_money_range(sum(txout.value for txout in tx.out_list)):
raise Exception("txout total not in money range")
def check_dupplicate_inputs(self, tx):
inputs = set()
for txin in tx.in_list:
if txin.previous_output in inputs:
raise Exception("dupplicate txin")
inputs.add(txin.previous_output)
def check_coinbase_script_size(self, tx):
if tx.iscoinbase():
bin_script = self.script_serializer.serialize(tx.in_list[0].script)
if (len(bin_script) < 2 or len(bin_script) > 100):
raise Exception("incorrect coinbase script size : %d" % (len(bin_script)))
def check_null_inputs(self, tx):
if not tx.iscoinbase():
for txin in tx.in_list:
if (txin.previous_output.is_null()):
raise Exception("null prevout")
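
# Illustrative usage, not part of the original module; `runmode` and `tx` are
# assumed to come from the surrounding coinpy node code:
#
#   verifier = TxVerifier(runmode)
#   verifier.basic_checks(tx)   # raises Exception on the first failed check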
|
zloidemon/aiohttp_jrpc
|
aiohttp_jrpc/__init__.py
|
Python
|
bsd-2-clause
| 6,064
| 0
|
""" Simple JSON-RPC 2.0 protocol for aiohttp"""
from .exc import (ParseError, InvalidRequest, InvalidParams,
InternalError, InvalidResponse)
from .errors import JError, JResponse
from validictory import validate, ValidationError, SchemaError
from functools import wraps
from uuid import uuid4
from aiohttp import ClientSession
import asyncio
import json
import traceback
__version__ = '0.1.0'
REQ_JSONRPC20 = {
"type": "object",
"properties": {
"jsonrpc": {"pattern": r"2\.0"},
"method": {"type": "string"},
"params": {"type": "any"},
"id": {"type": "any"},
},
}
RSP_JSONRPC20 = {
"type": "object",
"properties": {
"jsonrpc": {"pattern": r"2\.0"},
"result": {"type": "any"},
"id": {"type": "any"},
},
}
ERR_JSONRPC20 = {
"type": "object",
"properties": {
"jsonrpc": {"pattern": r"2\.0"},
"error": {
"type": "object",
"properties": {
"code": {"type": "number"},
"message": {"type": "string"},
}
},
"id": {"type": "any"},
},
}
async def jrpc_errorhandler_middleware(app, handler):
async def middleware(request):
try:
return (await handler(request))
except Exception:
traceback.print_exc()
return JError().internal()
return middleware
async def decode(request):
""" Get/decode/validate json from request """
try:
data = await request.json()
except Exception as err:
raise ParseError(err)
try:
validate(data, REQ_JSONRPC20)
except ValidationError as err:
raise InvalidRequest(err)
except SchemaError as err:
raise InternalError(err)
except Exception as err:
raise InternalError(err)
return data
class Service(object):
""" Service class """
def __new__(cls, ctx):
""" Return on call class """
return cls.__run(cls, ctx)
def valid(schema=None):
""" Validation data by specific validictory configuration """
def dec(fun):
@wraps(fun)
def d_func(self, ctx, data, *a, **kw):
try:
validate(data['params'], schema)
except ValidationError as err:
raise InvalidParams(err)
except SchemaError as err:
raise InternalError(err)
return fun(self, ctx, data['params'], *a, **kw)
return d_func
return dec
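
    # Hedged example, not in the original source: a handler validated with
    # `valid`, using a made-up schema. The wrapped method receives the already
    # validated ``data['params']`` as its third argument.
    #
    #   @valid({"type": "object", "properties": {"name": {"type": "string"}}})
    #   def hello(self, ctx, params):
    #       return {"hello": params["name"]}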
async def __run(self, ctx):
""" Run service """
try:
data = await decode(ctx)
except ParseError:
return JError().parse()
except InvalidRequest:
return JError().request()
except InternalError:
return JError().internal()
try:
i_app = getattr(self, data['method'])
i_app = asyncio.coroutine(i_app)
except Exception:
return JError(data).method()
try:
resp = await i_app(self, ctx, data)
except InvalidParams:
return JError(data).params()
except InternalError:
return JError(data).internal()
return JResponse(jsonrpc={
"id": data['id'], "result": resp
})
class Response(object):
__slots__ = ['id', 'error', 'result']
def __init__(self, id, result=None, error=None, **kw):
self.id = id
self.result = result
self.error = error
def __repr__(self):
        return "Response(id={rid}, result={res}, error={err})".format(
rid=self.id, res=self.result, err=self.error)
class Client(object):
def __init__(self, url, dumper=None, loop=None):
self.url = url
self.dumper = dumper
if not loop:
loop = asyncio.get_event_loop()
if not self.dumper:
self.dumper = json.dumps
self.client = ClientSession(
loop=loop,
headers={'content-type': 'application/json'})
def __del__(self):
self.client.close()
def __encode(self, method, params=None, id=None):
try:
data = self.dumper({
"jsonrpc": "2.0",
"id": id,
"method": method,
"params": params
})
except Exception as e:
raise Exception("Can not encode: {}".format(e))
return data
async def call(self, method, params=None, id=None, schem=None):
if not id:
id = uuid4().hex
try:
resp = await self.client.post(
self.url, data=self.__encode(method, params, id))
except Exception as err:
raise Exception(err)
if 200 != resp.status:
            raise InvalidResponse(
                "Error, server returned: {status}".format(status=resp.status))
try:
data = await resp.json()
except Exception as err:
raise InvalidResponse(err)
try:
validate(data, ERR_JSONRPC20)
return Response(**data)
|
except ValidationError:
            # The payload did not match ERR_JSONRPC20, so fall through and
            # validate it as a regular JSON-RPC response below.
pass
except Exception as err:
raise InvalidResponse(err)
try:
validate(data, RSP_JSONRPC20)
|
if id != data['id']:
                raise InvalidResponse(
                    "Response id {local} not equal {remote}".format(
local=id, remote=data['id']))
except Exception as err:
raise InvalidResponse(err)
if schem:
try:
validate(data['result'], schem)
except ValidationError as err:
raise InvalidResponse(err)
except Exception as err:
raise InternalError(err)
return Response(**data)
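
# Illustrative client usage, not part of this module; the URL and method name
# are hypothetical and the call must be awaited inside a running event loop:
#
#   client = Client('http://127.0.0.1:8080/api')
#   # response = await client.call('hello', {'name': 'world'})
#   # print(response.result)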
|
wizgrav/protobot
|
server.py
|
Python
|
bsd-3-clause
| 861
| 0.020906
|
import SocketServer
class ProtoHandler(SocketServer.BaseRequestHandler):
def handle(self):
msg = self.request.recv(1024)
a = msg.split(" ",2)
if len(a) >1 and a[0] == "GET":
a = a[1].split("/")
a =[i for i in a if i != '']
if len(a) == 0:
self.request.sendall(self.server.ret)
else:
self.server.data=a
print a
class ProtoServer(SocketServer.TCPServer):
def __init__(self,hostport,default):
self.a
|
llow_reuse_address = True
SocketServer.TCPServer.__init__(self,hostport, ProtoHandler)
with open (default, "r") as myfile:
self.ret=myfile.read()
if __name__ == "__main__":
s = ProtoServer(("192.168.
|
1.253", 6661),"index.html")
s.serve_forever()
|
manassolanki/frappe
|
frappe/database.py
|
Python
|
mit
| 30,790
| 0.02897
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# Database Module
# --------------------
from __future__ import unicode_literals
import warnings
import datetime
import frappe
import frappe.defaults
import frappe.async
import re
import frappe.model.meta
from frappe.utils import now, get_datetime, cstr, cast_fieldtype
from frappe import _
from frappe.model.utils.link_count import flush_local_link_count
from frappe.model.utils import STANDARD_FIELD_CONVERSION_MAP
from frappe.utils.background_jobs import execute_job, get_queue
from frappe import as_unicode
import six
# imports - compatibility imports
from six import (
integer_types,
string_types,
binary_type,
text_type,
iteritems
)
# imports - third-party imports
from markdown2 import UnicodeWithAttrs
from pymysql.times import TimeDelta
from pymysql.constants import ER, FIELD_TYPE
from pymysql.converters import conversions
import pymysql
# Helpers
def _cast_result(doctype, result):
batch = [ ]
try:
for field, value in result:
df = frappe.get_meta(doctype).get_field(field)
if df:
value = cast_fieldtype(df.fieldtype, value)
batch.append(tuple([field, value]))
except frappe.exceptions.DoesNotExistError:
return result
return tuple(batch)
class Database:
|
"""
	Open a database connection with the given parameters. If use_default is True, use the
	login details from `conf.py`. This is called by the request handler and is accessible using
	the `db` global variable. The `sql` method is also global to run queries
"""
def __init__(self, host=None, user=None, password=None, ac_name=None, use_defau
|
lt = 0, local_infile = 0):
self.host = host or frappe.conf.db_host or 'localhost'
self.user = user or frappe.conf.db_name
self._conn = None
if ac_name:
self.user = self.get_db_login(ac_name) or frappe.conf.db_name
if use_default:
self.user = frappe.conf.db_name
self.transaction_writes = 0
self.auto_commit_on_many_writes = 0
self.password = password or frappe.conf.db_password
self.value_cache = {}
# this param is to load CSV's with LOCAL keyword.
# it can be set in site_config as > bench set-config local_infile 1
# once the local-infile is set on MySql Server, the client needs to connect with this option
# Connections without this option leads to: 'The used command is not allowed with this MariaDB version' error
self.local_infile = local_infile or frappe.conf.local_infile
def get_db_login(self, ac_name):
return ac_name
def connect(self):
"""Connects to a database as set in `site_config.json`."""
warnings.filterwarnings('ignore', category=pymysql.Warning)
usessl = 0
if frappe.conf.db_ssl_ca and frappe.conf.db_ssl_cert and frappe.conf.db_ssl_key:
usessl = 1
self.ssl = {
'ca':frappe.conf.db_ssl_ca,
'cert':frappe.conf.db_ssl_cert,
'key':frappe.conf.db_ssl_key
}
conversions.update({
FIELD_TYPE.NEWDECIMAL: float,
FIELD_TYPE.DATETIME: get_datetime,
UnicodeWithAttrs: conversions[text_type]
})
if six.PY2:
conversions.update({
TimeDelta: conversions[binary_type]
})
if usessl:
self._conn = pymysql.connect(self.host, self.user or '', self.password or '',
charset='utf8mb4', use_unicode = True, ssl=self.ssl, conv = conversions, local_infile = self.local_infile)
else:
self._conn = pymysql.connect(self.host, self.user or '', self.password or '',
charset='utf8mb4', use_unicode = True, conv = conversions, local_infile = self.local_infile)
# MYSQL_OPTION_MULTI_STATEMENTS_OFF = 1
# # self._conn.set_server_option(MYSQL_OPTION_MULTI_STATEMENTS_OFF)
self._cursor = self._conn.cursor()
if self.user != 'root':
self.use(self.user)
frappe.local.rollback_observers = []
def use(self, db_name):
"""`USE` db_name."""
self._conn.select_db(db_name)
self.cur_db_name = db_name
def validate_query(self, q):
"""Throw exception for dangerous queries: `ALTER`, `DROP`, `TRUNCATE` if not `Administrator`."""
cmd = q.strip().lower().split()[0]
if cmd in ['alter', 'drop', 'truncate'] and frappe.session.user != 'Administrator':
frappe.throw(_("Not permitted"), frappe.PermissionError)
def sql(self, query, values=(), as_dict = 0, as_list = 0, formatted = 0,
debug=0, ignore_ddl=0, as_utf8=0, auto_commit=0, update=None):
"""Execute a SQL query and fetch all rows.
:param query: SQL query.
:param values: List / dict of values to be escaped and substituted in the query.
:param as_dict: Return as a dictionary.
:param as_list: Always return as a list.
:param formatted: Format values like date etc.
:param debug: Print query and `EXPLAIN` in debug log.
:param ignore_ddl: Catch exception if table, column missing.
:param as_utf8: Encode values as UTF 8.
:param auto_commit: Commit after executing the query.
:param update: Update this dict to all rows (if returned `as_dict`).
Examples:
# return customer names as dicts
frappe.db.sql("select name from tabCustomer", as_dict=True)
# return names beginning with a
frappe.db.sql("select name from tabCustomer where name like %s", "a%")
# values as dict
frappe.db.sql("select name from tabCustomer where name like %(name)s and owner=%(owner)s",
{"name": "a%", "owner":"[email protected]"})
"""
if not self._conn:
self.connect()
# in transaction validations
self.check_transaction_status(query)
# autocommit
if auto_commit: self.commit()
# execute
try:
if values!=():
if isinstance(values, dict):
values = dict(values)
# MySQL-python==1.2.5 hack!
if not isinstance(values, (dict, tuple, list)):
values = (values,)
if debug:
try:
self.explain_query(query, values)
frappe.errprint(query % values)
except TypeError:
frappe.errprint([query, values])
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log("with values:")
frappe.log(values)
frappe.log(">>>>")
self._cursor.execute(query, values)
else:
if debug:
self.explain_query(query)
frappe.errprint(query)
if (frappe.conf.get("logging") or False)==2:
frappe.log("<<<< query")
frappe.log(query)
frappe.log(">>>>")
self._cursor.execute(query)
except Exception as e:
if ignore_ddl and e.args[0] in (ER.BAD_FIELD_ERROR, ER.NO_SUCH_TABLE,
ER.CANT_DROP_FIELD_OR_KEY):
pass
# NOTE: causes deadlock
# elif e.args[0]==2006:
# # mysql has gone away
# self.connect()
# return self.sql(query=query, values=values,
# as_dict=as_dict, as_list=as_list, formatted=formatted,
# debug=debug, ignore_ddl=ignore_ddl, as_utf8=as_utf8,
# auto_commit=auto_commit, update=update)
else:
raise
if auto_commit: self.commit()
# scrub output if required
if as_dict:
ret = self.fetch_as_dict(formatted, as_utf8)
if update:
for r in ret:
r.update(update)
return ret
elif as_list:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
elif as_utf8:
return self.convert_to_lists(self._cursor.fetchall(), formatted, as_utf8)
else:
return self._cursor.fetchall()
def explain_query(self, query, values=None):
"""Print `EXPLAIN` in error log."""
try:
frappe.errprint("--- query explain ---")
if values is None:
self._cursor.execute("explain " + query)
else:
self._cursor.execute("explain " + query, values)
import json
frappe.errprint(json.dumps(self.fetch_as_dict(), indent=1))
frappe.errprint("--- query explain end ---")
except:
frappe.errprint("error in query explain")
def sql_list(self, query, values=(), debug=False):
"""Return data as list of single elements (first column).
Example:
# doctypes = ["DocType", "DocField", "User", ...]
doctypes = frappe.db.sql_list("select name from DocType")
"""
return [r[0] for r in self.sql(query, values, debug=debug)]
def sql_ddl(self, query, values=(), debug=False):
"""Commit and execute a query. DDL (Data Definition Language) queries that alter schema
autocommit in MariaDB."""
self.commit()
self.sql(query, debug=debug)
def check_tra
|
wojtask/CormenPy
|
test/queue_util.py
|
Python
|
gpl-3.0
| 279
| 0.003584
|
def get_stack_elements(stack):
return stack[1:stack.top].elements
def get_queue
|
_elements(queue):
if queue.head <= queue.tail:
retu
|
rn queue[queue.head:queue.tail - 1].elements
return queue[queue.head:queue.length].elements + queue[1:queue.tail - 1].elements
|
marios-zindilis/musicbrainz-django-models
|
musicbrainz_django_models/models/editor_subscribe_label_deleted.py
|
Python
|
gpl-2.0
| 1,251
| 0.000799
|
"""
.. module:: editor_subscribe_label_deleted
The **Editor Subscribe Label Deleted** Model.
PostgreSQL Definition
---------------------
The :code:`editor_subscribe_label_deleted` table is defined in the MusicBrainz Server as:
.. code-block:: sql
CREATE TABLE editor_subscribe_label_deleted
(
editor INTEGER NOT NULL, -- PK, references editor.id
gid UUID NOT NULL, -- PK, references deleted_entity.gid
deleted_by INTEGER NOT NULL -- references edit.id
);
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class editor_subscribe_label_deleted(models.Model):
"""
Not all parameters are listed here, only those that present some interest
in their Django implementation.
:param editor: references :class:`.editor`
:param gid: references :class:`.deleted_entity`
:param deleted_by: references
|
:class:`.edit`
"""
editor = models.OneToOneField('editor', primary_key=True)
gid = models.OneToOneField('deleted_entity')
deleted_by = models.ForeignKey('edit')
def __str__(self):
return 'Editor Subscribe Label Deleted'
class Meta:
db_table
|
= 'editor_subscribe_label_deleted'
|
programmdesign/blitzdb
|
setup.py
|
Python
|
mit
| 3,033
| 0.008243
|
from distutils.core import setup
from setuptools import find_packages
setup(name='blitzdb',
version='0.2.12',
author='Andreas Dewes - 7scientists',
author_email='[email protected]',
license='MIT',
entry_points={
},
url='https://github.com/adewes/blitzdb',
packages=find_packages(),
zip_safe=False,
description='A document-oriented database written purely in Python.',
long_description="""Blitz is a document-oriented database toolkit for Python that is backend-agnostic.
It comes with a flat-file database for JSON documents and provides MongoDB-like querying capabilities.
Key Features
============
* Document-based, object-oriented interface.
* Powerful and rich querying language.
* Deep document indexes on arbitrary fields.
* Compressed storage of documents.
* Support for multiple backends (e.g. file-based storage, MongoDB).
* Support for database transactions (currently only for the file-based backend).
Documentation
=============
An extensive documentation, including tutorials and installation instructions is available on `ReadTheDocs <http://blitz-db.readthedocs.org/>`_.
Source Code
===========
The source code is available on `GitHub <https://github.com/adewes/blitzdb>`_.
Issue Tracker
==============
If you should encounter any problems when using BlitzDB, please feel free to `submit an issue <https://github.com/adewes/blitzdb/issues>`_ on Github.
Changelog
=========
* 0.2.12: Added support for proper attribute iteration to `Document`.
* 0.2.11: Allow setting the `collection` parameter through a `Document.Meta` attribute.
* 0.2.10: Bugfix-Release: Fix Python 3 compatibility issue.
* 0.2.9: Bug
|
fix-Release: Fix serialization problem with file backend.
* 0.2.8: Added `get`, `has_key` and `clear` methods to `Document` class
* 0.2.7: Fixed problem with __unicode__ function in Python 3.
* 0.2.6: Bugfix-Relea
|
se: Fixed an issue with the $exists operator for the file backend.
* 0.2.5: Bugfix-Release
* 0.2.4: Added support for projections and update operations to the MongoDB backend.
* 0.2.3: Bugfix-Release: Fixed bug in transaction data caching in MongoDB backend.
* 0.2.2: Fix for slice operators in MongoDB backend.
* 0.2.1: Better tests.
* 0.2.0: Support for including additional information in DB references. Support for accessing document attributes as dictionary items.
Added $regex parameter that allows to use regular expressions in queries.
* 0.1.5: MongoDB backend now supports database transactions. Database operations are now read-isolated by default, i.e.
uncommitted operations will not affect database queries before they are committed.
* 0.1.4: Improved indexing of objects for the file backend, added support for automatic serialization/deserialization
of object attributes when adding keys to or querying an index.
* 0.1.3: Sorting of query sets is now supported (still experimental)
* 0.1.2: Small bugfixes, BlitzDB version number now contained in DB config dict
* 0.1.1: BlitzDB is now Python3 compatible (thanks to David Koblas)
"""
)
|
JazzeYoung/VeryDeepAutoEncoder
|
pylearn2/pylearn2/expr/information_theory.py
|
Python
|
bsd-3-clause
| 1,193
| 0
|
"""
.. todo::
WRITEME
"""
import theano.tensor as T
from
|
theano.gof.op import get_debug_values
from theano.gof.op import debug_assert
import numpy as np
from theano.tensor.xlogx import xl
|
ogx
from pylearn2.utils import contains_nan, isfinite
def entropy_binary_vector(P):
"""
.. todo::
WRITEME properly
If P[i,j] represents the probability of some binary random variable X[i,j]
being 1, then rval[i] gives the entropy of the random vector X[i,:]
"""
for Pv in get_debug_values(P):
assert Pv.min() >= 0.0
assert Pv.max() <= 1.0
oneMinusP = 1. - P
PlogP = xlogx(P)
omPlogOmP = xlogx(oneMinusP)
term1 = - T.sum(PlogP, axis=1)
assert len(term1.type.broadcastable) == 1
term2 = - T.sum(omPlogOmP, axis=1)
assert len(term2.type.broadcastable) == 1
rval = term1 + term2
debug_vals = get_debug_values(PlogP, omPlogOmP, term1, term2, rval)
for plp, olo, t1, t2, rv in debug_vals:
debug_assert(isfinite(plp))
debug_assert(isfinite(olo))
debug_assert(not contains_nan(t1))
debug_assert(not contains_nan(t2))
debug_assert(not contains_nan(rv))
return rval
|
scm-spain/slippin-jimmy
|
src/slippinj/databases/drivers/oracle.py
|
Python
|
apache-2.0
| 7,437
| 0.004303
|
import re
import unicodedata
from injector import inject, AssistedBuilder
import cx_Oracle as pyoracle
class Oracle(object):
"""Wrapper to connect to Oracle Servers and get all the metastore information"""
@inject(oracle=AssistedBuilder(callable=pyoracle.connect), logger='logger')
def __init__(self, oracle, logger, db_host=None, db_user='root', db_name=None, db_schema=None, db_pwd=None, db_port=None):
super(Oracle, self).__init__()
self.__db_name = db_name
self.__db_user = db_user
self.__db_schema = db_schema
self.__db_dsn = pyoracle.makedsn(host=db_host, port=int(db_port) if None != db_port else 1521, service_name=db_name)
self.__conn = oracle.build(user=db_user, password=db_pwd, dsn=self.__db_dsn)
if self.__db_schema is not None:
cursor = self.__conn.cursor()
cursor.execute("ALTER SESSION SET CURRENT_SCHEMA = {schema}".format(schema=self.__db_schema))
self.__db_connection_string = 'jdbc:oracle:thin:@//' + db_host + ((':' + db_port) if db_port else '') + (('/' + db_name) if db_name else '')
self.__illegal_characters = re.compile(r'[\000-\010]|[\013-\014]|[\016-\037]|[\xa1]|[\xc1]|[\xc9]|[\xcd]|[\xd1]|[\xbf]|[\xda]|[\xdc]|[\xe1]|[\xf1]|[\xfa]|[\xf3]')
self.__logger = logger
def __makedict(self,cursor):
"""
        Build a row factory that converts each cx_Oracle result row into a dictionary keyed by column name
"""
cols = [d[0] for d in cursor.description]
def createrow(*args):
return dict(zip(cols, args))
return createrow
def __join_tables_list(self, tables):
return ','.join('\'%s\'' % table for table in tables)
def __get_table_list(self, table_list_query=False):
self.__logger.debug('Getting table list')
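        # Restrict the listing to the configured schema when one is given; otherwise
        # exclude Oracle-internal owners (SYS%, APEX%, XDB).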
query_with_d
|
b_schema = "= '{schema}'".format(schema=self.__db_schema)
query = "SELECT DISTINCT
|
table_name " \
"FROM all_tables WHERE OWNER " \
"{owner} {table_list_query}".format(owner=query_with_db_schema if self.__db_schema else "NOT LIKE '%SYS%' AND OWNER NOT LIKE 'APEX%'AND OWNER NOT LIKE 'XDB'" ,table_list_query=' AND ' + table_list_query if table_list_query else '')
cursor = self.__conn.cursor()
cursor.execute(query)
cursor.rowfactory = self.__makedict(cursor)
tablelist = map(lambda x: x['TABLE_NAME'], cursor.fetchall())
self.__logger.debug('Found {count} tables'.format(count=cursor.rowcount))
return tablelist
def __get_columns_for_tables(self, tables):
self.__logger.debug('Getting columns information')
query_with_owner = "AND owner = '{schema}'".format(schema=self.__db_schema)
info_query = "SELECT table_name, column_name, data_type, data_length, nullable, data_default, data_scale " \
"FROM ALL_TAB_COLUMNS " \
"WHERE table_name IN ({tables}) " \
"{owner}" \
"ORDER BY COLUMN_ID".format(tables=self.__join_tables_list(tables), owner=query_with_owner if self.__db_schema else '')
cursor = self.__conn.cursor()
cursor.execute(info_query)
cursor.rowfactory = self.__makedict(cursor)
tables_information = {}
for row in cursor.fetchall():
self.__logger.debug('Columns found for table {table}'.format(table=row['TABLE_NAME']))
if not row['TABLE_NAME'] in tables_information:
tables_information[row['TABLE_NAME']] = {'columns': []}
tables_information[row['TABLE_NAME']]['columns'].append({
'column_name': row['COLUMN_NAME'],
'data_type': row['DATA_TYPE'].lower(),
'character_maximum_length': row['DATA_LENGTH'],
'is_nullable': row['NULLABLE'],
'column_default': row['DATA_DEFAULT'],
})
return tables_information
def __get_count_for_tables(self, tables):
tables_information = {}
cursor = self.__conn.cursor()
for table in tables:
try:
self.__logger.debug('Getting count for table {table}'.format(table=table))
info_query = 'SELECT COUNT(*) FROM {table}'.format(table=table)
cursor.execute(info_query)
tables_information[table] = {'count': cursor.fetchone()[0]}
except:
                self.__logger.debug('The count query for table {table} has failed'.format(table=table))
pass
return tables_information
def __get_top_for_tables(self, tables, top=30):
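        # Collect sample rows per table (ROWNUM < top), normalising text to ISO-8859-1
        # and replacing illegal/control characters with '?'.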
tables_information = {}
cursor = self.__conn.cursor()
for table in tables:
tables_information[table] = {'rows': []}
if top > 0:
try:
self.__logger.debug('Getting {top} rows for table {table}'.format(top=top, table=table))
query = 'SELECT * FROM {table} WHERE ROWNUM < {top}'.format(top=top, table=table)
cursor.execute(query)
for row in cursor.fetchall():
table_row = []
for column in row:
try:
if type(column) is unicode:
column = unicodedata.normalize('NFKD', column).encode('iso-8859-1', 'replace')
else:
column = str(column).decode('utf8', 'replace').encode('iso-8859-1', 'replace')
if self.__illegal_characters.search(column):
column = re.sub(self.__illegal_characters, '?', column)
if column == 'None':
column = 'NULL'
except:
column = 'Parse_error'
table_row.append(column)
tables_information[table]['rows'].append(table_row)
except pyoracle.ProgrammingError:
tables_information[table]['rows'].append(
'Error getting table data {error}'.format(error=pyoracle.ProgrammingError.message))
return tables_information
def get_all_tables_info(self, table_list, table_list_query, top_max):
"""
Return all the tables information reading from the Information Schema database
:param table_list: string
:param table_list_query: string
:param top_max: integer
:return: dict
"""
if table_list:
tables = map(lambda x: unicode(x), table_list.split(','))
else:
tables = self.__get_table_list(table_list_query)
tables_counts = self.__get_count_for_tables(tables)
tables_columns = self.__get_columns_for_tables(tables)
tables_top = self.__get_top_for_tables(tables, top_max)
tables_info = {'tables': {}}
for table in tables_counts:
tables_info['tables'][table] = {}
tables_info['tables'][table].update(tables_columns[table])
tables_info['tables'][table].update(tables_counts[table])
tables_info['tables'][table].update(tables_top[table])
tables_info['db_connection_string'] = self.__db_connection_string
return tables_info
|
dickloraine/EmbedComicMetadata
|
comicbookinfo.py
|
Python
|
gpl-3.0
| 4,500
| 0.001778
|
"""
A python class to encapsulate the ComicBookInfo data
"""
"""
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from datetime import datetime
from calibre.utils.localization import calibre_langcode_to_name, canonicalize_lang, lang_as_iso639_1
from calibre_plugins.EmbedComicMetadata.genericmetadata import GenericMetadata
import sys
if sys.version_info[0] > 2:
unicode = str
class ComicBookInfo:
def metadataFromString(self, string):
cbi_container = json.loads(unicode(string, 'utf-8'))
metadata = GenericMetadata()
cbi = cbi_container['ComicBookInfo/1.0']
# helper func
# If item is not in CBI, return None
def xlate(cbi_entry):
if cbi_entry in cbi:
return cbi[cbi_entry]
else:
return None
metadata.series = xlate('series')
metadata.title = xlate('title')
metadata.issue = xlate('issue')
metadata.publisher = xlate('publisher')
metadata.month = xlate('publicationMonth')
metadata.year = xlate('publicationYear')
metadata.issueCount = xlate('numberOfIssues')
metadata.comments = xlate('comments')
metadata.credits = xlate('credits')
metadata.genre = xlate('genre')
metadata.volume = xlate('volume')
metadata.volumeCount = xlate('numberOfVolumes')
metadata.language = xlate('language')
metadata.country = xlate('country')
metadata.criticalRating = xlate('rating')
metadata.tags = xlate('tags')
# make sure credits and tags are at least empty lists and not None
if metadata.credits is None:
metadata.credits = []
if metadata.tags is None:
metadata.tags = []
# need to massage the language string to be ISO
# modified to use a calibre function
if metadata.language is not None:
metadata.language = lang_as_iso639_1(metadata.language)
metadata.isEmpty = False
return metadata
def stringFromMetadata(self, metadata):
cbi_container = self.createJSONDictionary(metadata)
return json.dumps(cbi_container)
# verify that the string actually contains CBI data in JSON format
def validateString(self, string):
try:
cbi
|
_container = json.loads(string)
except:
return False
return ('ComicBookInfo/1.0' in cbi_container)
def createJSONDictionary(self, metadata):
# Create the dictionary that we will convert to JSON text
cbi = dict()
cbi_container = {'appID': 'ComicTagger/',
'lastModif
|
ied': str(datetime.now()),
'ComicBookInfo/1.0': cbi}
        # helper func: copy a metadata value into the CBI dict only when it is not None
def assign(cbi_entry, md_entry):
if md_entry is not None:
cbi[cbi_entry] = md_entry
        # helper func: best-effort conversion to int; returns None if the value cannot be parsed
def toInt(s):
i = None
if type(s) in [str, unicode, int]:
try:
i = int(s)
except ValueError:
pass
return i
assign('series', metadata.series)
assign('title', metadata.title)
assign('issue', metadata.issue)
assign('publisher', metadata.publisher)
assign('publicationMonth', toInt(metadata.month))
assign('publicationYear', toInt(metadata.year))
assign('numberOfIssues', toInt(metadata.issueCount))
assign('comments', metadata.comments)
assign('genre', metadata.genre)
assign('volume', toInt(metadata.volume))
assign('numberOfVolumes', toInt(metadata.volumeCount))
assign('language', calibre_langcode_to_name(canonicalize_lang(metadata.language)))
assign('country', metadata.country)
assign('rating', metadata.criticalRating)
assign('credits', metadata.credits)
assign('tags', metadata.tags)
return cbi_container
|
illfelder/libcloud
|
libcloud/compute/drivers/ec2.py
|
Python
|
apache-2.0
| 256,785
| 0.000008
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2, Eucalyptus, Nimbus and Outscale drivers.
"""
import re
import sys
import base64
import copy
import warnings
import time
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b, basestring, ensure_string
from libcloud.utils.xml import fixxpath, findtext, findattr, findall
from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint
from libcloud.utils.publickey import get_pubkey_comment
from libcloud.utils.iso8601 import parse_date
from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
from libcloud.common.aws import DEFAULT_SIGNATURE_VERSION
from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
LibcloudError)
from libcloud.compute.providers import Provider
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot
from libcloud.compute.base import KeyPair
from libcloud.compute.types import NodeState, KeyPairDoesNotExistError, \
StorageVolumeState, VolumeSnapshotState
__all__ = [
'API_VERSION',
'NAMESPACE',
'INSTANCE_TYPES',
'OUTSCALE_INSTANCE_TYPES',
'OUTSCALE_SAS_REGION_DETAILS',
'OUTSCALE_INC_REGION_DETAILS',
'DEFAULT_EUCA_API_VERSION',
'EUCA_NAMESPACE',
'EC2NodeDriver',
'BaseEC2NodeDriver',
'NimbusNodeDriver',
'EucNodeDriver',
'OutscaleSASNodeDriver',
'OutscaleINCNodeDriver',
'EC2NodeLocation',
'EC2ReservedNode',
'EC2SecurityGroup',
'EC2ImportSnapshotTask',
'EC2PlacementGroup',
'EC2Network',
'EC2NetworkSubnet',
'EC2NetworkInterface',
'EC2RouteTable',
'EC2Route',
'EC2SubnetAssociation',
'ExEC2AvailabilityZone',
'IdempotentParamError'
]
API_VERSION = '2016-11-15'
NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)
# Eucalyptus Constants
DEFAULT_EUCA_API_VERSION = '3.3.0'
EUCA_NAMESPACE = 'http://msgs.eucalyptus.com/%s' % (DEFAULT_EUCA_API_VERSION)
# Outscale Constants
DEFAULT_OUTSCALE_API_VERSION = '2016-04-01'
OUTSCALE_NAMESPACE = 'http://api.outscale.com/wsdl/fcuext/2014-04-15/'
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
and <http://aws.amazon.com/ec2/previous-generation/>
ram = [MiB], disk = [GB]
"""
def GiB(value):
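    # Convert GiB to MiB; the 'ram' values below are expressed in MiB (see note above).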
return int(value * 1024)
INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': GiB(0.613),
'disk': 15, # GB
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': GiB(1.7),
'disk': 160, # GB
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Medium Instance',
'ram': GiB(3.75),
'disk': 410, # GB
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': GiB(7.5),
'disk': 2 * 420, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': GiB(15),
'disk': 4 * 420, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': GiB(1.7),
'disk': 350, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': GiB(7),
'disk': 4 * 420, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': GiB(17.1),
'disk': 420, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': GiB(34.2),
'disk': 850, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': GiB(68.4),
'disk': 2 * 840, # GB
'bandwidth': None,
'extra': {
'cpu': 8
|
}
},
'm3.medium': {
'id': 'm3.medium',
'name': 'Medium Instance',
|
'ram': GiB(3.75),
'disk': 4, # GB
'bandwidth': None,
'extra': {
'cpu': 1
}
},
'm3.large': {
'id': 'm3.large',
'name': 'Large Instance',
'ram': GiB(7.5),
'disk': 32, # GB
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'Extra Large Instance',
'ram': GiB(15),
'disk': 2 * 40, # GB
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'Double Extra Large Instance',
'ram': GiB(30),
'disk': 2 * 80, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm4.large': {
'id': 'm4.large',
'name': 'Large Instance',
'ram': GiB(8),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 2
}
},
'm4.xlarge': {
'id': 'm4.xlarge',
'name': 'Extra Large Instance',
'ram': GiB(16),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 4
}
},
'm4.2xlarge': {
'id': 'm4.2xlarge',
'name': 'Double Extra Large Instance',
'ram': GiB(32),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'm4.4xlarge': {
'id': 'm4.4xlarge',
'name': 'Quadruple Extra Large Instance',
'ram': GiB(64),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'm4.10xlarge': {
'id': 'm4.10xlarge',
'name': '10 Extra Large Instance',
'ram': GiB(160),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 40
}
},
'm4.16xlarge': {
'id': 'm4.16xlarge',
'name': '16 Extra Large Instance',
'ram': GiB(256),
'disk': 0, # EBS only
'bandwidth': None,
'extra': {
'cpu': 64
}
},
'cg1.4xlarge': {
'id': 'cg1.4xlarge',
'name': 'Cluster GPU Quadruple Extra Large Instance',
'ram': GiB(22.5),
'disk': 2 * 840, # GB
'bandwidth': None,
'extra': {
'cpu': 16
}
},
'g2.2xlarge': {
'id': 'g2.2xlarge',
'name': 'Cluster GPU G2 Double Extra Large Instance',
'ram': GiB(15),
'disk': 60, # GB
'bandwidth': None,
'extra': {
'cpu': 8
}
},
'g2.8xlarge
|
dudanogueira/microerp
|
microerp/comercial/migrations/0062_dadovariavel_tipo.py
|
Python
|
lgpl-3.0
| 549
| 0.001821
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-09 22:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations
|
.Migration):
dependencies = [
('comercial', '0061_auto_20160206_2052'),
]
operations = [
migrations.Ad
|
dField(
model_name='dadovariavel',
name='tipo',
field=models.CharField(blank=True, choices=[(b'texto', b'Texto'), (b'inteiro', b'Inteiro'), (b'decimal', b'Decimal')], max_length=100),
),
]
|
pombredanne/drf-toolbox
|
tests/test_serializers.py
|
Python
|
bsd-3-clause
| 23,089
| 0.00078
|
from __future__ import absolute_import, unicode_literals
from collections import namedtuple
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models.fields import FieldDoesNotExist
from django.test.client import RequestFactory
from drf_toolbox.compat import django_pgfields_installed, models
from drf_toolbox.serializers import (fields, BaseModelSerializer,
ModelSerializer, RelatedField)
from drf_toolbox.serializers.fields import api
from drf_toolbox import viewsets
from rest_framework import serializers
from rest_framework.relations import HyperlinkedIdentityField
from tests import models as test_models, serializers as test_serializers
from tests.compat import mock
import unittest
import six
import uuid
NO_DJANGOPG = 'django-pgfields is not installed.'
class SerializerSuite(unittest.TestCase):
"""Suite of test cases around custom serializers, ensuring that
they provide expected output.
"""
def test_api_endpoints_field_autocreated(self):
"""Establish that the `api_endpoints` key is auto-created on
a serializer that doesn't explicitly define the field.
"""
# Create a bogus viewset class, so the serializer can be
# given context that is aware of it.
class ViewSet(viewsets.ModelViewSet):
model = test_models.NormalModel
serializer_class = test_serializers.NormalSerializer
# Create the serializer
s = test_serializers.NormalSerializer()
s.context = {
'request': RequestFactory().get('/foo/bar/'),
'view': ViewSet(),
}
# Ensure that the expected api.APIEndpointsField is present.
df = s.get_default_fields()
self.assertIn('api_endpoints', df)
self.assertIsInstance(df['api_endpoints'], api.APIEndpointsField)
def test_api_endpoints_field_default_serializer(self):
"""Establish that the the `api_endpoints` key is created for a
default serializer.
"""
# Create a bogus viewset class, so the serializer can be
# given context that is aware of it.
class ViewSet(viewsets.ModelViewSet):
model = test_models.NormalModel
# Create the serializer.
s = ViewSet().get_serializer_class()()
s.context = {
'request': RequestFactory().get('/foo/bar/'),
'view': ViewSet(),
}
# Ensure that the expected api.APIEndpointField is present.
df = s.get_default_fields()
self.assertIn('api_endpoints', df)
self.assertIsInstance(df['api_endpoints'], api.APIEndpointsField)
def test_api_endpoint_field_default_serializer(self):
"""Establish that the the `api_endpoint` key is created in a case
where we cannot match to the viewset, and we're still using a
specific serializer.
"""
# Create a bogus viewset class, so the serializer can be
# given context that is aware of it.
class Viewset(viewsets.ModelViewSet):
model = test_models.NormalModel
# Create the serializer.
s = test_serializers.NormalSerializer()
s.context = {
'request': RequestFactory().get('/foo/bar/'),
'view': Viewset(),
}
# Ensure that the expected api.APIEndpointField is present.
df = s.get_default_fields()
self.assertIn('api_endpoint', df)
self.assertIsInstance(df['api_endpoint'], api.APIEndpointField)
def test_api_endpoint_key_existing(self):
"""Test that if a set of fields is provided with an `api_endpoints`
field, that we don't barrel over it.
"""
# Ensure I get what I expect from `get_default_fields`.
s = test_serializers.ExplicitAPIEndpointsSerializer()
fields = s.get_default_fields()
self.assertEqual(len(fields), 3)
self.assertIsInstance(fields['api_endpoints'],
serializers.IntegerField)
def test_api_endpoints_autocovert_plural_to_singular(self):
"""Establish that explicitly specifying `api_endpoint` or
`api_endpoints` will graciously switch between them when necessary.
"""
# Create a serializer to use for this test.
class Serializer(test_serializers.NormalSerializer):
class Meta:
model = test_serializers.NormalSerializer.Meta.model
fields = ('id', 'api_endpoints')
# Establish that a serializer instance with no context will
# have an api_endpoint field.
s = Serializer()
self.assertIn('api_endpoint', s.opts.fields)
self.assertNotIn('api_endpoints', s.opts.fields)
def test_api_endpoints_autocovert_singular_to_plural(self):
"""Establish that explicitly specifying `api_endpoint` or
`api_endpoints` will graciously switch between them when necessary.
"""
# Create a serializer to use for this test.
class Serializer(test_serializers.NormalSerializer):
class Meta:
model = test_serializers.NormalSerializer.Meta.model
fields = ('id', 'api_endpoint')
# Establish that a serializer instance with no context will
# have an api_endpoint field.
with mock.patch.object(ModelSerializer, '_viewset_uses_me') as vum:
vum.return_value = True
s = Serializer(context={'view': object(),})
self.assertIn(
|
'api_endpoints', s.opts.fields)
self.assertNotIn('api_endpoint', s.opts.fields)
def test_direct_relationship(self):
"""Test that a direct relationship retrieval works
as expected.
"""
# Get the related field from a direct relationship.
s = test_serializers.ChildSerializer()
rel_field = s.get_relate
|
d_field(
model_field=test_models.ChildModel._meta.\
get_field_by_name('normal')[0],
related_model=test_models.NormalModel,
to_many=False,
)
self.assertIsInstance(rel_field, RelatedField)
# Verify the label.
self.assertEqual(
rel_field.label_from_instance(test_models.NormalModel()),
'NormalModel object',
)
# Verify the value.
self.assertFalse(rel_field.prepare_value(test_models.NormalModel()))
def test_direct_relationship_with_explicit_fields(self):
"""Test that a direct relationship retreival works as expected,
and that our explicit field list chains down to the related field.
"""
# Create our serializer.
s = test_serializers.ChildSerializerII()
rel_field = s.get_related_field(
model_field=test_models.ChildModel._meta.\
get_field_by_name('normal')[0],
related_model=test_models.NormalModel,
to_many=False,
)
self.assertIsInstance(rel_field, RelatedField)
rel_field.context = {'request': RequestFactory().get('/foo/bar/')}
# Get the serializer class.
s = rel_field._get_serializer(test_models.NormalModel(bacon=42))
self.assertEqual([i for i in s.get_fields().keys()], ['id', 'bacon'])
def test_reverse_relationship(self):
"""Test that a reverse relationship retrieval works as
expected.
"""
# Instantiate my normal serializer and run a reverse
# relationship against the fake child model.
s = test_serializers.NormalSerializer()
rel_field = s.get_related_field(None, test_models.ChildModel, False)
self.assertIsInstance(rel_field, RelatedField)
def test_related_field_with_no_pk(self):
"""Test that a related field receiving a model object
with no primary key returns None.
"""
rel_field = RelatedField(())
answer = rel_field.to_native(test_models.ChildModel())
self.assertEqual(answer, None)
def test_related_field_with_pk(self):
"""Test that a related field receiving a model object
with a primary key returns None.
"""
#
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractChuunihimeWordpressCom.py
|
Python
|
bsd-3-clause
| 560
| 0.033929
|
def extractChuunihimeWordpressCom(item):
'''
Parser for 'chuunihime.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPos
|
tfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
|
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
joeydong/endless-lake-player
|
player.py
|
Python
|
mit
| 4,891
| 0.002045
|
import functools
import itertools
import json
import multiprocessing
import os
import shutil
import sys
import time
import cv2
import numpy
import utility.config
import utility.cv
import utility.geometry
import utility.gui
import utility.image
import utility.log
# Explicitly disable OpenCL. Querying for OpenCL support breaks when multiprocessing.
cv2.ocl.setUseOpenCL(False)
# Create multiprocessing pool. Uses `multiprocessing.cpu_count()` processes by default.
pool = multiprocessing.Pool()
# Load all templates
template_refs = utility.cv.load_template_refs()
template_game_over = utility.cv.load_template_game_over()
# Setup empty trace directory
trace_directory = "trace"
if os.path.exists(trace_directory):
shutil.rmtree(trace_directory)
os.mkdir(trace_directory)
# Wait for game to start
while True:
screenshot = utility.image.downscale(utility.image.screenshot())
if utility.cv.match_template(screenshot, template_game_over)["score"] < 0.5:
# Game over screen cleared
utility.log.separator()
break
utility.log.info("Waiting for game to start...")
time.sleep(1)
# Begin player run loop
while True:
start = time.time()
# Grab screenshot
screenshot_original = utility.image.screenshot()
screenshot = utility.image.downscale(screenshot_original)
utility.log.performance("screenshot", start)
# Calculate character and jump matches
#
# See http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
matches = []
map_fn = functools.partial(utility.cv.multi_match_template, screenshot)
map_args = template_refs
map_results = pool.map_async(map_fn, map_args).get(1)
utility.log.performance("multi_match_template", start)
for (idx, match_template_multiple_results) in enumerate(map_results):
for result in match_template_multiple_results:
# Adjust vertical center for character type towards bottom
if result["type"] == "character":
result["center"] = {
"x": result["center"]["x"],
"y": result["y1"] + ((result["y2"] - result["y1"]) * utility.config.character_vertical_center)
}
# Filter any conflicts from existing matches
conflicting_matches = []
def keep(match):
if match["type"] != result["type"]:
# Not conflicting by type
return True
if match["type"] == "jump" and match["action"] != result["action"]:
# Not conflicting by jump action
return True
if not utility.geometry.rects_overlap(match, result):
# Not conflicting by overlap
return True
                # Conflicts with result; remember it so the best-scoring match survives
                conflicting_matches.append(match)
                return False
matches = [m for m in matches if keep(m)]
# Determine best match to keep
best_match = result
for match in conflicting_matches:
if match["score"] > best_match["score"]:
# Conflicting match has higher score
best_match = match
continue
# Save best match
matches.append(best_match)
utility.log.performance("matches", start)
# Determine action
possible_actions = utility.geometry.calculate_actions(matches)
utility.log.performance("calculate_actions", start)
for action in possible_actions:
if action["action"] == "double" and action["distance"] <= utility.config.double_jump_action_distance:
# Double jump
|
utility.log.info("double click")
utility.gui.mouse_double_click()
break
elif action["action"] == "single" and action["distance"] <= utility.config.single_jump_action_distance:
# Single jump
utility.log.info("single click")
utility.gui.mouse_click()
break
else:
# Try next action
continue
utility.log.performance("execute action", start)
# Highlight resul
|
ts
composite_image = utility.image.highlight_regions(screenshot, matches)
utility.log.performance("highlight_regions", start)
# Present composite image
# utility.image.show(composite_image)
# utility.log.performance("show", start)
# Log trace
utility.log.trace(trace_directory, screenshot_original, composite_image, matches, possible_actions)
utility.log.performance("trace", start)
# Match game over
game_over = (len(matches) == 0 and utility.cv.match_template(screenshot, template_game_over)["score"] > 0.5)
# Log total
utility.log.performance("total", start)
utility.log.separator()
# Check exit condition
if game_over:
# Game ended
break
|
ep1cman/workload-automation
|
wlauto/instrumentation/energy_model/__init__.py
|
Python
|
apache-2.0
| 42,085
| 0.00354
|
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=attribute-defined-outside-init,access-member-before-definition,redefined-outer-name
from __future__ import division
import os
import math
import time
from tempfile import mktemp
from base64 import b64encode
from collections import Counter, namedtuple
try:
import jinja2
import pandas as pd
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
import numpy as np
low_filter = np.vectorize(lambda x: x > 0 and x or 0) # pylint: disable=no-member
import_error = None
except ImportError as e:
import_error = e
jinja2 = None
pd = None
plt = None
np = None
low_filter = None
from wlauto import Instrument, Parameter, File
from wlauto.exceptions import ConfigError, InstrumentError, DeviceError
from wlauto.instrumentation import instrument_is_installed
from wlauto.utils.types import caseless_string, list_or_caseless_string, list_of_ints
from wlauto.utils.misc import list_to_mask
FREQ_TABLE_FILE = 'frequency_power_perf_data.csv'
CPUS_TABLE_FILE = 'projected_cap_power.csv'
MEASURED_CPUS_TABLE_FILE = 'measured_cap_power.csv'
IDLE_TABLE_FILE = 'idle_power_perf_data.csv'
REPORT_TEMPLATE_FILE = 'report.template'
EM_TEMPLATE_FILE = 'em.template'
IdlePowerState = namedtuple('IdlePowerState', ['power'])
CapPowerState = namedtuple('CapPowerState', ['cap', 'power'])
class EnergyModel(object):
def __init__(self):
self.big_cluster_idle_states = []
self.little_cluster_idle_states = []
self.big_cluster_cap_states = []
self.little_cluster_cap_states = []
self.big_core_idle_states = []
self.little_core_idle_states = []
self.big_core_cap_states = []
self.little_core_cap_states = []
def add_cap_entry(self, cluster, perf, clust_pow, core_pow):
if cluster == 'big':
self.big_cluster_cap_states.append(CapPowerState(perf, clust_pow))
self.big_core_cap_states.append(CapPowerState(perf, core_pow))
elif cluster == 'little':
self.little_cluster_cap_states.append(CapPowerState(perf, clust_pow))
self.little_core_cap_states.append(CapPowerState(perf, core_pow))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
def add_cluster_idle(self, cluster, values):
for value in values:
if cluster == 'big':
self.big_cluster_idle_states.append(IdlePowerState(value))
elif cluster == 'little':
self.little_cluster_idle_states.append(IdlePowerState(value))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
def add_core_idle(self, cluster, values):
for value in values:
if cluster == 'big':
self.big_core_idle_states.append(IdlePowerState(value))
elif cluster == 'little':
self.little_core_idle_states.append(IdlePowerState(value))
else:
raise ValueError('Unexpected cluster: {}'.format(cluster))
class PowerPerformanceAnalysis(object):
def __init__(self, data):
self.summary = {}
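        # Compare the clusters at the highest frequency they both support, using
        # single-CPU (cpus == 1) measurements for the performance and power ratios.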
big_freqs = data[data.cluster == 'big'].frequency.unique()
little_freqs = data[data.cluster == 'little'].frequency.unique()
self.summary['frequency'] = max(set(big_freqs).intersection(set(little_freqs)))
big_sc = data[(data.cluster == 'big') &
(data.frequency == self.summary['frequency']) &
(data.cpus == 1)]
little_sc = data[(data.cluster == 'little') &
(data.frequency == self.summary['frequency']) &
(data.cpus == 1)]
self.summary['performance_ratio'] = big_sc.performance.item() / little_sc.performance.item()
self.summary['power_ratio'] = big_sc.power.item() / little_sc.power.item()
self.summary['max_performance'] = data[data.cpus == 1].performance.max()
self.summary['max_power'] = data[data.cpus == 1].power.max()
def build_energy_model(freq_power_table, cpus_power, idle_power, first_cluster_idle_state):
# pylint: disable=too-many-locals
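    # Capacity entries are normalised single-core performance values scaled to 0..1024,
    # with the measured power split into a cluster component and a per-core component.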
em = EnergyModel()
idle_power_sc = idle_power[idle_power.cpus == 1]
perf_data = get_normalized_single_core_data(freq_power_table)
for cluster in ['little', 'big']:
cluster_cpus_power = cpus_power[cluster].dropna()
cluster_power = cluster_cpus_power['cluster'].apply(int)
core_power = (cluster_cpus_power['1'] - cluster_power).apply(int)
performance = (perf_data[perf_data.cluster == cluster].performance_norm * 1024 / 100).apply(int)
for perf, clust_pow, core_pow in zip(performance, cluster_power, core_power):
em.add_cap_entry(cluster, perf, clust_pow, core_pow)
all_idle_power = idle_power_sc[idle_power_sc.cluster == cluster].power.values
# CORE idle states
# We want the delta of each state w.r.t. the power
# consumption of the shallowest one at this level (core_ref)
idle_core_power = low_filter(all_idle_power[:first_cluster_idle_state] -
|
all_idle_power[first_cluster_idle_state - 1])
# CLUSTER idle states
# We want the absolute value of each idle state
idle_cluster_power = low_filter(all_idle_power[first_cluster_idle_state - 1:])
em.add_cluster_idle(cluster, idle_cluster_power)
em.add_core_idle(cluster, idle_core_power)
return em
def generate_em_c_file(em, bi
|
g_core, little_core, em_template_file, outfile):
with open(em_template_file) as fh:
em_template = jinja2.Template(fh.read())
em_text = em_template.render(
big_core=big_core,
little_core=little_core,
em=em,
)
with open(outfile, 'w') as wfh:
wfh.write(em_text)
return em_text
def generate_report(freq_power_table, measured_cpus_table, cpus_table, idle_power_table, # pylint: disable=unused-argument
report_template_file, device_name, em_text, outfile):
# pylint: disable=too-many-locals
cap_power_analysis = PowerPerformanceAnalysis(freq_power_table)
single_core_norm = get_normalized_single_core_data(freq_power_table)
cap_power_plot = get_cap_power_plot(single_core_norm)
idle_power_plot = get_idle_power_plot(idle_power_table)
fig, axes = plt.subplots(1, 2)
fig.set_size_inches(16, 8)
for i, cluster in enumerate(reversed(cpus_table.columns.levels[0])):
projected = cpus_table[cluster].dropna(subset=['1'])
plot_cpus_table(projected, axes[i], cluster)
cpus_plot_data = get_figure_data(fig)
with open(report_template_file) as fh:
report_template = jinja2.Template(fh.read())
html = report_template.render(
device_name=device_name,
freq_power_table=freq_power_table.set_index(['cluster', 'cpus', 'frequency']).to_html(),
cap_power_analysis=cap_power_analysis,
cap_power_plot=get_figure_data(cap_power_plot),
idle_power_table=idle_power_table.set_index(['cluster', 'cpus', 'state']).to_html(),
idle_power_plot=get_figure_data(idle_power_plot),
cpus_table=cpus_table.to_html(),
cpus_plot=cpus_plot_data,
em_text=em_text,
)
with open(outfile, 'w') as wfh:
wfh.write(html)
return html
def wa_result_to_power_perf_table(df, performance_metric, index):
table = df.pivot_table(index=index + ['iteration'],
columns='metric', values='value').reset_index()
result_mean = table.groupby
|
cgwalters/imagefactory
|
imagefactory_plugins/Rackspace/__init__.py
|
Python
|
apache-2.0
| 667
| 0
|
# encoding:
|
utf-8
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Un
|
less required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Rackspace import Rackspace as delegate_class
|
cederstrom/natet-sos-generator
|
generator/integration/dryg.py
|
Python
|
mit
| 327
| 0.003058
|
import requests
class DrygDAO:
def __init__(self):
pass
def get_days_for_year(self, year):
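        # Query api.dryg.net for the year's calendar and keep only the dates that are
        # not work-free days ("arbetsfri dag" == "Nej"), i.e. the workdays.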
response = requests.get("http://api.dryg.net/dagar/v2.1/%s" % year)
data = response.json()
workdays = [x["datum"
|
] for x in data["dagar"] i
|
f x["arbetsfri dag"] == "Nej"]
return workdays
|
ethankennerly/hotel-vs-gozilla
|
user/h4.news.py
|
Python
|
mit
| 126
| 0.007937
|
{
|
'level_mc': {'_txt': {'text': '6'},
'currentLabel': 'up',
'progress_mc': {'currentLabel': '_0'}
|
}}
|
jpedroan/megua
|
megua/csection.py
|
Python
|
gpl-3.0
| 10,442
| 0.009289
|
# coding=utf8
r"""
csection.py -- Create a tree of contents organized by sections; inside each
section the exercises are listed by their unique_name.
AUTHOR:
- Pedro Cruz (2012-01): initial version
- Pedro Cruz (2016-03): improvement for smc
An exercise can contain in its %summary tag line a description of sections
in the form::
    %summary section descriptive text; subsection descriptive text; etc
The class transforms the contents of a MegUA database into a tree of sections, with exercises as leaves.
Then, this tree can be flushed out to some file or output system.
STRUCTURE SAMPLE::
contents -> { 'Section1': Section('Section1',0), 'Section2': Section('Section2',0) }
For each Section object see below in this file.
A brief description is:
* a SectionClassifier is the "book": a dictionary whose keys are the chapter names and whose values are Section objects.
* a Section object is defined by
* a name (the key of the SectionClassifiers appears again in sec_name)
* level (0 if it is top level sections: chapters, and so on)
    * a list of exercises belonging to the section and
* a dictionary of subsections (again Section objects)
* Section = (sec_name, level, [list of exercises names], dict( subsections ) )
EXAMPLES:
Test with:
::
sage -t csection.py
Create or edit a database:
::
sage: from megua.megbook import MegBook
sage: meg = MegBook(r'_input/csection.sqlite')
Save a new or changed exercise
::
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Trigonometric
....:
....: Here, is a summary.
....:
....: %Problem Some Name
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pimtrig_001(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pimtrig_001
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Trigonometric
....:
....: Here, is a summary.
....:
....: %Problem Some Name2
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C
|
$, for $C in \mathbb{R}$.
....:
....: class E28E28_pimtrig_002(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pimtrig_002
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answe
|
r instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary Primitives; Imediate primitives; Polynomial
....:
....: Here, is a summary.
....:
....: %Problem Some Problem 1
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pdirect_001(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
-------------------------------
Instance of: E28E28_pdirect_001
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
sage: txt=r'''
....: %Summary
....:
....: Here, is a summary.
....:
....: %Problem
....: What is the primitive of $a x + b@()$ ?
....:
....: %Answer
....: The answer is $prim+C$, for $C in \mathbb{R}$.
....:
....: class E28E28_pdirect_003(ExerciseBase):
....: pass
....: '''
sage: meg.save(txt)
Each exercise can belong to a section/subsection/subsubsection.
Write sections using ';' in the '%summary' line. For ex., '%summary Section; Subsection; Subsubsection'.
<BLANKLINE>
Each problem can have a suggestive name.
Write in the '%problem' line a name, for ex., '%problem The Fish Problem'.
<BLANKLINE>
Check exercise E28E28_pdirect_003 for the above warnings.
-------------------------------
Instance of: E28E28_pdirect_003
-------------------------------
==> Summary:
Here, is a summary.
==> Problem instance
What is the primitive of $a x + b$ ?
==> Answer instance
The answer is $prim+C$, for $C in \mathbb{R}$.
Travel down the tree sections:
::
sage: s = SectionClassifier(meg.megbook_store)
sage: s.textprint()
Primitives
Imediate primitives
Polynomial
> E28E28_pdirect_001
Trigonometric
> E28E28_pimtrig_001
> E28E28_pimtrig_002
E28E28_pdirect
> E28E28_pdirect_003
Testing a recursive iterator:
::
sage: meg = MegBook("_input/paula.sqlite")
sage: s = SectionClassifier(meg.megbook_store)
sage: for section in s.section_iterator():
....: print section
"""
#*****************************************************************************
# Copyright (C) 2011,2016 Pedro Cruz <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
#PYHTON modules
import collections
#MEGUA modules
from megua.localstore import ExIter
class SectionClassifier:
"""
"""
def __init__(self,megbook_store,max_level=4,debug=False,exerset=None):
#save megstore reference
self.megbook_store = megbook_store
self.max_level = max_level
#Exercise set or none for all
self.exercise_set = exerset
#dictionary of sections
self.contents = dict()
self.classify()
def classify(self):
"""
Classify by sections.
"""
for row in ExIter(self.megbook_store):
if self.exercise_set and not row['unique_name'] in self.exercise_set:
continue
#get a list in form ["section", "subsection", "subsubsection", ...]
sec_list = str_to_list(row['sections_text'])
if sec_list == [] or sec_list == [u'']:
sec_list = [ first_part(row['unique_name']) ]
            #sec_list contains at least one element.
if not sec_list[0] in self.contents:
self.contents[sec_list[0]] = Section(sec_list[0])
#sec_list contains less than `max_level` levels
subsec_list = sec_list[1:self.max_level]
self.contents[sec_list[0]].add(row['unique_name'],subsec_list)
def textprint(self):
"""
Textual print of all the contents.
"""
for c in self.contents:
self.contents[c].textprint()
def section_iterator(self):
r"""
OUTPUT:
- an iterator yielding (secname, sorted exercises)
"""
# A stack-based alternative to the traverse_tree method above.
od_top = collections.OrderedDict(sorted(self.contents.items()))
stack = []
for secname,section in od_top.iteritems():
stack.append(section)
while stack:
section_top = stack.pop(0) #remove left element
yield section_top
od_sub = collec
|
opencord/xos
|
xos/core/migrations/0012_backupoperation_decl_uuid.py
|
Python
|
apache-2.0
| 1,124
| 0.00089
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-10 23:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
|
('core', '0011_auto_20190430_1254'),
]
operations = [
migrations.AddField(
model_name='backupoperation_decl',
name='uuid',
            field=models.CharField(blank=True, help_text=b'unique identifier of this request', ma
|
x_length=80, null=True),
),
]
|
klim-iv/phantomjs-qt5
|
src/webkit/Tools/efl/common.py
|
Python
|
bsd-3-clause
| 1,100
| 0.000909
|
#!/usr/bin/env python
# Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the Lice
|
nse, or (at your option) any later version.
#
# This libr
|
ary is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
script_dir = None
def script_path(*args):
global script_dir
if not script_dir:
script_dir = os.path.join(os.path.dirname(__file__), '..', 'Scripts')
return os.path.join(*(script_dir,) + args)
def top_level_path(*args):
return os.path.join(*((script_path('..', '..'),) + args))
|
alexgarciac/scrapi
|
tasks.py
|
Python
|
apache-2.0
| 6,604
| 0.002574
|
import base64
import logging
import platform
from datetime import date, timedelta
from invoke import run, task
from elasticsearch import helpers
from dateutil.parser import parse
from six.moves.urllib import parse as urllib_parse
import scrapi.harvesters # noqa
from scrapi import linter
from scrapi import registry
from scrapi import settings
from scrapi.processing.elasticsearch import es
logger = logging.getLogger()
@task
def reindex(src, dest):
helpers.reindex(es, src, dest)
es.indices.delete(src)
@task
def alias(alias, index):
es.indices.delete_alias(index=alias, name='_all', ignore=404)
es.indices.put_alias(alias, index)
@task
def migrate(migration, sources=None, kwargs_string=None, dry=True, async=False, group_size=1000):
''' Task to run a migration.
:param migration: The migration function to run. This is passed in
as a string then interpreted as a function by the invoke task.
:type migration: str
:param kwargs_string: parsed into an optional set of keyword
arguments, so that the invoke migrate task can accept a variable
number of arguments for each migration.
The kwargs_string should be in the following format:
'key:value, key2:value2'
    ...with the keys and values separated by colons, and each kwarg separated
by commas.
:type kwarg_string: str
    An example of usage renaming mit to mit2 as a real run would be:
inv migrate rename -s mit -k 'target:mit2' --no-dry
An example of calling renormalize on two sources as an async dry run:
inv migrate renormalize -s 'mit,asu' -a
'''
kwargs_string = kwargs_string or ':'
sources = sources or ''
from scrapi import migrations
from scrapi.tasks import migrate
kwargs = {}
for key, val in map(lambda x: x.split(':'), kwargs_string.split(',')):
key, val = key.strip(), val.strip()
if key not in kwargs.keys():
kwargs[key] = val
elif isinstance(kwargs[key], list):
kwargs[key].append(val)
else:
kwargs[key] = [kwargs[key], val]
kwargs['dry'] = dry
kwargs['async'] = async
kwargs['group_size'] = group_size
kwargs['sources'] = map(lambda x: x.strip(), sources.split(','))
if kwargs['sources'] == ['']:
kwargs.pop('sources')
migrate_func = migrations.__dict__[migration]
migrate(migrate_func, **kwargs)
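# Illustrative sketch of how a kwargs_string in the format described above is
# split into keyword arguments; the keys and values here are hypothetical.
#
#   >>> kwargs_string = 'target:mit2, dry:true'
#   >>> parsed = {}
#   >>> for key, val in map(lambda x: x.split(':'), kwargs_string.split(',')):
#   ...     parsed[key.strip()] = val.strip()
#   >>> parsed['target']
#   'mit2'
#   >>> parsed['dry']
#   'true'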
@task
def migrate_to_source_partition(dry=True, async=False):
from scrapi.tasks import migrate_to_source_partition
migrate_to_source_partition(dry=dry, async=async)
@task
def reset_search():
run("curl -XPOST 'http://localhost:9200/_shutdown'")
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch restart")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
@task
def elasticsearch():
'''Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
'''
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch restart")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print(
"Your system is not recognized, you will have to start elasticsearch manually")
@task
def test(cov=True, doctests=True, verbose=False, debug=False, pdb=False):
"""
Runs all tests in the 'tests/' directory
"""
cmd = 'py.test scrapi tests'
if doctests:
cmd += ' --doctest-modules'
if verbose:
cmd += ' -v'
if debug:
cmd += ' -s'
if cov:
cmd += ' --cov-report term-missing --cov-config .coveragerc --cov scrapi'
if pdb:
cmd += ' --pdb'
run(cmd, pty=True)
@task
def requirements():
run('pip install -r requirements.txt')
@task
def beat():
from scrapi.tasks import app
app.conf['CELERYBEAT_SCHEDULE'] = registry.beat_schedule
app.Beat().run()
@task
def worker(loglevel='INFO', hostname='%h'):
from scrapi.tasks import app
command = ['worker']
if loglevel:
command.extend(['--loglevel', loglevel])
if hostname:
command.extend(['--hostname', hostname])
app.worker_main(command)
@task
def harvester(harvester_name, async=False, start=None, end=None):
settings.CELERY_ALWAYS_EAGER = not async
from scrapi.tasks import run_harvester
if not registry.get(harvester_name):
raise ValueError('No such harvesters {}'.format(harvester_name))
end = parse(end).date() if end else date.today()
    start = parse(start).date() if start else end - timedelta(settings.DAYS_BACK)
run_harvester.delay(harvester_name, start_date=start, end_date=end)
@task
def harvesters(async=False, start=None, end=None):
settings.CELERY_ALWAYS_EAGER = not async
from scrapi.tasks import run_harvester
start = parse(start).date() if start else date.today() - timedelta(settings.DAYS_BACK)
end = parse(end).date() if end else date.today()
exceptions = []
for harvester_name in registry.keys():
try:
run_harvester.delay(harvester_name, start_date=start, end_date=end)
except Exception as e:
logger.exception(e)
exceptions.append(e)
logger.info("\n\nNumber of exceptions: {}".format(len(exceptions)))
for exception in exceptions:
        logger.exception(exception)
@task
def lint_all():
for name in registry.keys():
lint(name)
@task
def lint(name):
harvester = registry[name]
try:
linter.lint(harvester.harvest, harvester.normalize)
except Exception as e:
        print('Harvester {} raised the following exception'.format(harvester.short_name))
print(e)
@task
def provider_map(delete=False):
from scrapi.processing.elasticsearch import es
if delete:
es.indices.delete(index='share_providers', ignore=[404])
for harvester_name, harvester in registry.items():
with open("img/favicons/{}_favicon.ico".format(harvester.short_name), "rb") as f:
favicon = urllib_parse.quote(base64.encodestring(f.read()))
es.index(
'share_providers',
harvester.short_name,
body={
'favicon': 'data:image/png;base64,' + favicon,
'short_name': harvester.short_name,
'long_name': harvester.long_name,
'url': harvester.url
},
id=harvester.short_name,
refresh=True
)
print(es.count('share_providers', body={'query': {'match_all': {}}})['count'])
|
audiohacked/pyBusPirate
|
tests/test_buspirate_onewire.py
|
Python
|
gpl-2.0
| 3,002
| 0.000333
|
# Created by Sean Nelson on 2018-08-19.
# Copyright 2018 Sean Nelson <[email protected]>
#
# This file is part of pyBusPirate.
#
# pyBusPirate is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# pyBusPirate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBusPirate. If not, see <https://www.gnu.org/licenses/>.
"""
Unit Tests for BusPirate OneWire class
"""
import unittest
from unittest import mock
from buspirate import onewire
# pylint: disable=C0111,E1101
class BusPirateOneWireTest(unittest.TestCase):
@mock.patch('serial.Serial', autospec=True)
def setUp(self, mock_serial): # pylint: disable=W0613,W0221
self.bus_pirate = onewire.OneWire("/dev/ttyUSB0")
def tearDown(self):
pass
def test_exit(self):
self.bus_pirate.serial.read.return_value = "BBIO1"
self.assertEqual(self.bus_pirate.exit, True)
        self.bus_pirate.serial.write.assert_called_with(0x00)
def test_mode(self):
self.bus_pirate.serial.read.return_value = "1W01"
self.assertEqual(self.bus_pirate.mode, "1W01")
self.bus_pirate.serial.write.assert_called_with(0x01)
def test_enter(self):
self.bus_pirate.serial.read.return_value = "1W01"
self.assertEqual(self.bus_pirate.enter, True)
self.bus_pirate.serial.write.assert_called_with(0x04)
def test_read_byte(self) -> bytes:
self.bus_pirate.serial.read.side_effect = [0x01, 0xFF]
self.assertEqual(self.bus_pirate.read_byte(), True)
self.bus_pirate.serial.write.assert_called_with(0x04)
def test_rom_search(self):
self.bus_pirate.serial.read.return_value = 0x01
self.assertEqual(self.bus_pirate.rom_search, True)
self.bus_pirate.serial.write.assert_called_with(0x08)
def test_alarm_search(self):
self.bus_pirate.serial.read.return_value = 0x01
self.assertEqual(self.bus_pirate.alarm_search, True)
self.bus_pirate.serial.write.assert_called_with(0x09)
def test_1wire_bulk_write(self):
read_data = [0x00 for idx in range(1, 17)]
write_data = [idx for idx in range(1, 17)]
self.bus_pirate.serial.read.side_effect = [0x01, read_data]
result = self.bus_pirate.bulk_write(16, write_data)
self.assertEqual(result, read_data)
self.bus_pirate.serial.write.assert_any_call(0x1F)
self.bus_pirate.serial.write.assert_any_call(write_data)
def test_pullup_voltage_select(self):
with self.assertRaises(NotImplementedError):
self.bus_pirate.pullup_voltage_select()
|
amitu/gitology
|
src/gitology/d/templatetags/clevercsstag.py
|
Python
|
bsd-3-clause
| 480
| 0.008333
|
from django import template
import clevercss
register = template.Library()
@register.tag(name="clevercss")
def do_clevercss(parser, token):
nodelist = parser.parse(('endclevercss',))
parser.delete_first_token()
return CleverCSSNode(nodelist)
class CleverCSSNode(template.Node):
def __init__(self, nodelist):
self.nodelist = nodelist
def render(self, context):
        output = self.nodelist.render(context)
return clevercss.convert(output)
|
lesavoie/nagiosservice
|
controlserver/servicelevelinterface/serializers.py
|
Python
|
gpl-2.0
| 816
| 0.015931
|
from django.contrib.auth.models import User
from rest_framework import serializers
from servicelevelinterface.models import Monitor, Contact, Command
class MonitorSerializer(serializers.ModelSerializer):
    owner = serializers.CharField(source='owner.username', read_only=True)
class Meta:
model = Monitor
class ContactSerializer(serializers.ModelSerializer):
owner = serializers.CharField(source='owner.username', read_only=True)
class Meta:
model = Contact
class CommandSerializer(serializers.ModelSerializer):
class Meta:
model = Command
# Serializer used just when creating users. It only provides a subset of the
# fields.
class CreateUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'password', 'email')
|
none-da/zeshare
|
debug_toolbar/panels/logger.py
|
Python
|
bsd-3-clause
| 2,377
| 0.002945
|
import datetime
import logging
try:
import threading
except ImportError:
threading = None
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
class ThreadTrackingHandler(logging.Handler):
def __init__(self):
if threading is None:
raise NotImplementedError("threading module is not available, \
the logging panel cannot be used without it")
logging.Handler.__init__(self)
self.records = {} # a dictionary that maps threads to log records
def emit(self, record):
self.get_records().append(record)
def get_records(self, thread=None):
"""
        Returns a list of records for the provided thread, or if none is provided,
returns a list for the current thread.
"""
if thread is None:
thread = threading.currentThread()
if thread not in self.records:
self.records[thread] = []
return self.records[thread]
def clear_records(self, thread=None):
if thread is None:
thread = threading.currentThread()
if thread in self.records:
del self.records[thread]
handler = ThreadTrackingHandler()
logging.root.setLevel(logging.NOTSET)
logging.root.addHandler(handler)
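# Note: the root logger level is lowered to NOTSET so records from every logger
# in the process reach ThreadTrackingHandler, which then groups them per thread.
# For example (hypothetical logger name), a call such as
#   logging.getLogger('myapp.views').warning('slow query')
# would be collected for the thread handling the current request and shown in
# the panel for that request.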
class LoggingPanel(DebugPanel):
name = 'Logging'
has_content = True
def process_request(self, request):
handler.clear_records()
def get_and_delete(self):
records = handler.get_records()
        handler.clear_records()
return records
def nav_title(self):
return _("Logging")
def nav_subtitle(self):
return "%s message%s" % (len(handler.get_records()), (len(handler.get_records()) == 1) and '' or 's')
def title(self):
return 'Log Messages'
def url(self):
return ''
def content(self):
records = []
for record in self.get_and_delete():
records.append({
'message': record.getMessage(),
'time': datetime.datetime.fromtimestamp(record.created),
'level': record.levelname,
'file': record.pathname,
'line': record.lineno,
})
return render_to_string('debug_toolbar/panels/logger.html', {'records': records})
|
informatics-isi-edu/webauthn
|
webauthn2/scripts/globus_oauth_client.py
|
Python
|
apache-2.0
| 1,011
| 0.004946
|
import globus_sdk
CLIENT_ID = 'f7cfb4d6-8f20-4983-a9c0-be3f0e2681fd'
client = globus_sdk.NativeAppAuthClient(CLIENT_ID)
#client.oauth2_start_flow(requested_scopes="https://auth.globus.org/scopes/0fb084ec-401d-41f4-990e-e236f325010a/deriva_all")
client.oauth2_start_flow(requested_scopes="https://auth.globus.org/scopes/nih-commons.derivacloud.org/deriva_all")
authorize_url = client.oauth2_get_authorize_url(additional_params={"access_type" : "offline"})
print('Please go to this URL and login: {0}'.format(authorize_url))
# this is to work on Python2 and Python3 -- you can just use raw_input() or
# input() for your specific version
get_input = getattr(__builtins__, 'raw_input', input)
auth_code = get_input(
'Please enter the code you get after login here: ').strip()
token_response = client.oauth2_exchange_code_for_tokens(auth_code)
print(str(token_response))
nih_commons_data = token_response.by_resource_server['nih_commons']
DERIVA_TOKEN = nih_commons_data['access_token']
print(DERIVA_TOKEN)
|
dominjune/LeetCode
|
074 Search a 2D Matrix.py
|
Python
|
mit
| 1,679
| 0.011316
|
"""
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
For example,
Consider the following matrix:
[
[1, 3, 5, 7],
[10, 11, 16, 20],
[23, 30, 34, 50]
]
Given target = 3, return true.
"""
__author__ = 'Danyang'
class Solution:
def searchMatrix(self, matrix, target):
"""
binary search. Two exactly the same binary search algorithm
:param matrix: a list of lists of integers
:param target: an integer
:return: a boolean
"""
if not matrix:
return False
m = len(matrix)
n = len(matrix[0])
# binary search
start = 0
end = m # [0, m)
while start<end:
mid = (start+end)/2
if matrix[mid][0]==target:
return True
if target<matrix[mid][0]:
end = mid
elif target>matrix[mid][0]:
start = mid+1
lst = matrix[end] if matrix[end][0]<=target else matrix[start] # positioning !
# binary search
start = 0
end = n # [0, n)
while start<end:
mid = (start+end)/2
if lst[mid]==target:
return True
if target<lst[mid]:
end = mid
elif target>lst[mid]:
start = mid+1
return False
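    # Note: the two successive binary searches above run in O(log m) + O(log n)
    # time. A hypothetical alternative is a single binary search over the
    # flattened m*n matrix, e.g.
    #   lo, hi = 0, m*n - 1
    #   candidate value at index mid is matrix[mid // n][mid % n]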
if __name__=="__main__":
assert Solution().searchMatrix([[1], [3]], 3)==True
|
openstax/openstax-cms
|
openstax/api.py
|
Python
|
agpl-3.0
| 2,453
| 0.003675
|
from django.core.exceptions import MultipleObjectsReturned
from django.shortcuts import redirect
from django.urls import reverse, path
from wagtail.api.v2.router import WagtailAPIRouter
from wagtail.api.v2.views import PagesAPIViewSet, BaseAPIViewSet
from wagtail.images.api.v2.views import ImagesAPIViewSet
from wagtail.documents.api.v2.views import DocumentsAPIViewSet
class OpenstaxPagesAPIEndpoint(PagesAPIViewSet):
"""
OpenStax custom Pages API endpoint that allows finding pages and books by pk or slug
"""
def detail_view(self, request, pk=None, slug=None):
param = pk
if slug is not None:
self.lookup_field = 'slug'
param = slug
try:
return super().detail_view(request, param)
except MultipleObjectsReturned:
# Redirect to the listing view, filtered by the relevant slug
# The router is registered with the `wagtailapi` namespace,
# `pages` is our endpoint namespace and `listing` is the listing view url name.
return redirect(
reverse('wagtailapi:pages:listing') + f'?{self.lookup_field}={param}'
)
@classmethod
def get_urlpatterns(cls):
"""
This returns a list of URL patterns for the endpoint
"""
return [
path('', cls.as_view({'get': 'listing_view'}), name='listing'),
path('<int:pk>/', cls.as_view({'get': 'detail_view'}), name='detail'),
path('<slug:slug>/', cls.as_view({'get': 'detail_view'}), name='detail'),
path('find/', cls.as_view({'get': 'find_view'}), name='find'),
]
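# Illustrative requests for the patterns above (the API prefix, pk and slug are
# hypothetical); both resolve to detail_view:
#   GET /api/v2/pages/42/                -> looked up by pk
#   GET /api/v2/pages/college-physics/   -> lookup_field switched to 'slug'
# If several pages share a slug, the request is redirected to the listing view,
# e.g. /api/v2/pages/?slug=college-physics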
class OpenStaxImagesAPIViewSet(ImagesAPIViewSet):
meta_fields = BaseAPIViewSet.meta_fields + ['tags', 'download_url', 'height', 'width']
    nested_default_fields = BaseAPIViewSet.nested_default_fields + ['title', 'download_url', 'height', 'width']
# Create the router. “wagtailapi” is the URL namespace
api_router = WagtailAPIRouter('wagtailapi')
# Add the three endpoints using the "register_endpoint" method.
# The first parameter is the name of the endpoint (eg. pages, images). This
# is used in the URL of the endpoint
# The second parameter is the endpoint class that handles the requests
api_router.register_endpoint('pages', OpenstaxPagesAPIEndpoint)
api_router.register_endpoint('images', OpenStaxImagesAPIViewSet)
api_router.register_endpoint('documents', DocumentsAPIViewSet)
|
OCA/connector-telephony
|
sms_no_automatic_delete/__manifest__.py
|
Python
|
agpl-3.0
| 543
| 0
|
# Copyright 2021 Akretion (http://www.akretion.com).
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "No automatic deletion of SMS",
"summary": "Avoid automatic delete of sended sms",
"author": "Akretion,Odoo Community Association (OCA)",
"website": "https://github.com/OCA/connector-telephony",
"license": "AGPL-3",
"category": "",
"version": "14.0.1.1.0",
"depends": ["sms"],
"data": [
"data/ir_cr
|
on_data.xml",
|
],
"application": False,
"installable": True,
}
|
appleseedhq/cortex
|
test/IECore/ops/presetParsing/presetParsing-1.py
|
Python
|
bsd-3-clause
| 3,526
| 0.047646
|
##########################################################################
#
# Copyright (c) 2007-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import imath
import IECore
class presetParsing( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"An Op to test the parsing of parameter presets.",
IECore.IntParameter(
name = "result",
description = "d",
defaultValue = 2,
)
)
self.parameters().addParameters(
[
IECore.V3fParameter(
name = "h",
description = "a v3f",
defaultValue = IECore.V3fData(),
presets = (
( "x", imath.V3f( 1, 0, 0 ) ),
( "y", imath.V3f( 0, 1, 0 ) ),
( "z", imath.V3f( 0, 0, 1 ) )
)
),
IECore.V2dParameter(
name = "i",
description = "a v2d",
            defaultValue = IECore.V2dData( imath.V2d( 0 ) ),
),
IECore.CompoundParameter(
name = "compound",
description = "a compound parameter",
members = [
IECore.V3dParameter(
name = "j",
description = "a v3d",
defaultValue = IECore.V3dData(),
presets = (
( "one", imath.V3d( 1 ) ),
( "two", imath.V3d( 2 ) )
)
),
IECore.M44fParameter(
name = "k",
description = "an m44f",
                    defaultValue = IECore.M44fData(),
presets = (
( "one", imath.M44f( 1 ) ),
( "two", imath.M44f( 2 ) )
)
),
]
)
]
)
def doOperation( self, operands ) :
assert operands["h"] == IECore.V3fData( imath.V3f( 1, 0, 0 ) )
assert operands["i"] == IECore.V2dData( imath.V2d( 0 ) )
compoundPreset = IECore.CompoundObject()
compoundPreset["j"] = IECore.V3dData( imath.V3d( 1 ) )
compoundPreset["k"] = IECore.M44fData( imath.M44f( 1 ) )
assert operands["compound"] == compoundPreset
return IECore.IntData( 1 )
IECore.registerRunTimeTyped( presetParsing )
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/chronos/detector/anomaly/test_ae_detector.py
|
Python
|
apache-2.0
| 2,541
| 0
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.chronos.detector.anomaly.ae_detector import AEDetector
class TestAEDetector(ZooTestCase):
def setup_method(self, method):
pass
def teardown_method(self, method):
pass
def create_data(self):
cycles = 10
time = np.arange(0, cycles * np.pi, 0.01)
data = np.sin(time)
data[600:800] = 10
return data
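    # Note: create_data builds a clean sine wave and overwrites samples 600..800
    # with a constant plateau of 10 -- the anomalous region the detector is
    # expected to flag in the tests below.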
def test_ae_fit_score_rolled_keras(self):
y = self.create_data()
ad = AEDetector(roll_len=314)
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_ae_fit_score_rolled_pytorch(self):
y = self.create_data()
ad = AEDetector(roll_len=314, backend="torch")
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
        assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_ae_fit_score_unrolled(self):
y = self.create_data()
ad = AEDetector(roll_len=0)
ad.fit(y)
anomaly_scores = ad.score()
assert len(anomaly_scores) == len(y)
anomaly_indexes = ad.anomaly_indexes()
assert len(anomaly_indexes) == int(ad.ratio * len(y))
def test_corner_cases(self):
y = self.create_data()
ad = AEDetector(roll_len=314, backend="dummy")
with pytest.raises(ValueError):
ad.fit(y)
ad = AEDetector(roll_len=314)
with pytest.raises(RuntimeError):
ad.score()
y = np.array([1])
with pytest.raises(ValueError):
ad.fit(y)
y = self.create_data()
y = y.reshape(2, -1)
with pytest.raises(ValueError):
ad.fit(y)
|
vidartf/hyperspy
|
hyperspy/io_plugins/__init__.py
|
Python
|
gpl-3.0
| 2,279
| 0.000439
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import logging
from hyperspy.io_plugins import (msa, digital_micrograph, fei, mrc, ripple,
tiff, semper_unf, blockfile, dens, emd,
protochips)
io_plugins = [msa, digital_micrograph, fei, mrc, ripple, tiff, semper_unf,
blockfile, dens, emd, protochips]
_logger = logging.getLogger(__name__)
try:
from hyperspy.io_plugins import netcdf
io_plugins.append(netcdf)
except ImportError:
pass
# NetCDF is obsolete and is only provided for users who have
# old EELSLab files. Therefore, we silently ignore it if missing.
try:
    from hyperspy.io_plugins import hdf5
io_plugins.append(hdf5)
from hyperspy.io_plugins import emd
io_plugins.append(emd)
except ImportError:
_logger.warning('The HDF5 IO features are not available. '
                    'It is highly recommended to install h5py')
try:
from hyperspy.io_plugins import image
io_plugins.append(image)
except ImportError:
_logger.info('The Signal2D (PIL) IO features are not available')
try:
from hyperspy.io_plugins import bcf
io_plugins.append(bcf)
except ImportError:
    _logger.warning('The Bruker composite file reader cannot be loaded '
                    'due to the lxml library missing. Please install lxml '
                    'and its python bindings to enable the bcf loader.')
default_write_ext = set()
for plugin in io_plugins:
if plugin.writes:
default_write_ext.add(
plugin.file_extensions[plugin.default_extension])
|
aweisberg/cassandra-dtest
|
thrift_bindings/thrift010/Cassandra.py
|
Python
|
apache-2.0
| 403,615
| 0.002118
|
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(object):
def login(self, auth_request):
"""
Parameters:
- auth_request
"""
pass
def set_keyspace(self, keyspace):
"""
Parameters:
- keyspace
"""
pass
def get(self, key, column_path, consistency_level):
"""
Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
the only method that can throw an exception under non-failure conditions.)
Parameters:
- key
- column_path
- consistency_level
"""
pass
def get_slice(self, key, column_parent, predicate, consistency_level):
"""
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
pass
def get_count(self, key, column_parent, predicate, consistency_level):
"""
returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
<code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
Parameters:
- key
- column_parent
- predicate
- consistency_level
"""
pass
def multiget_slice(self, keys, column_parent, predicate, consistency_level):
"""
Performs a get_slice for column_parent and predicate for the given keys in parallel.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
pass
def multiget_count(self, keys, column_parent, predicate, consistency_level):
"""
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
Parameters:
- keys
- column_parent
- predicate
- consistency_level
"""
pass
def get_range_slices(self, column_parent, predicate, range, consistency_level):
"""
returns a subset of columns for a contiguous range of keys.
Parameters:
- column_parent
- predicate
- range
- consistency_level
"""
pass
def get_paged_slice(self, column_family, range, start_column, consistency_level):
"""
returns a range of columns, wrapping to the next rows if necessary to collect max_results.
Parameters:
- column_family
- range
- start_column
- consistency_level
"""
pass
def get_indexed_slices(self, column_parent, index_clause, column_predicate, consistency_level):
"""
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
@deprecated use get_range_slices instead with range.row_filter specified
Parameters:
- column_parent
- index_clause
- column_predicate
- consistency_level
"""
pass
def insert(self, key, column_parent, column, consistency_level):
"""
Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
pass
def add(self, key, column_parent, column, consistency_level):
"""
Increment or decrement a counter.
Parameters:
- key
- column_parent
- column
- consistency_level
"""
pass
def cas(self, key, column_family, expected, updates, serial_consistency_level, commit_consistency_level):
"""
Atomic compare and set.
        If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
Otherwise, success will be false and current_values will contain the current values for the columns in
expected (that, by definition of compare-and-set, will differ from the values in expected).
        A cas operation takes 2 consistency levels. The first one, serial_consistency_level, simply indicates the
level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
        is a more traditional consistency level (the same CLs as for traditional writes are accepted) that impacts
the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is
guaranteed that a followup QUORUM read will see the cas write (if that one was successful obviously). If
commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
the write.
Parameters:
- key
- column_family
- expected
- updates
- serial_consistency_level
- commit_consistency_level
"""
pass
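    # Hypothetical usage sketch against a concrete client implementing this
    # Iface; names and values are illustrative only:
    #   result = client.cas(key, 'users',
    #                       expected=[old_column], updates=[new_column],
    #                       serial_consistency_level=ConsistencyLevel.SERIAL,
    #                       commit_consistency_level=ConsistencyLevel.QUORUM)
    #   # result.success is True on a successful compare-and-set; otherwise
    #   # result.current_values holds the columns that differed.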
def remove(self, key, column_path, timestamp, consistency_level):
"""
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
Parameters:
- key
- column_path
- timestamp
- consistency_level
"""
pass
def remove_counter(self, key, path, consistency_level):
"""
Remove a counter at the specified location.
Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
until the delete has reached all the nodes and all of them have been fully compacted.
Parameters:
- key
- path
- consistency_level
"""
pass
def batch_mutate(self, mutation_map, consistency_level):
"""
Mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
*
Parameters:
- mutation_map
- consistency_level
"""
pass
def atomic_batch_mutate(self, mutation_map, consistency_level):
"""
Atomically mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
*
Parameters:
- mutation_map
- consistency_level
"""
pass
def truncate(self, cfname):
"""
        Truncate will mark an entire column family as deleted.
        From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
        Internally, however, disk space will not be immediately released, as with all deletes in cassandra, this one
        only marks the data as deleted.
        The operation succeeds only if all hosts in the cluster are available and will throw an UnavailableException if
some hosts are down.
Parameters:
- cfname
"""
|
mattduan/proof
|
adapter/NoneAdapter.py
|
Python
|
bsd-3-clause
| 653
| 0.009188
|
"""
This DatabaseHandler is used when you do not have a database installed.
"""
import proof.ProofConstants as ProofConstants
import proof.adapter.Adapter as Adapter
class NoneAdapter(Adapter.Adapter):
def __init__(self):
pass
def getResourceType(self):
return ProofConstants.NONE
def getConnection(self):
return None
def toUpperCase(self, s):
return s
    def ignoreCase(self, s):
return self.toUpperCase(s)
def getIDMethodSQL(self, obj):
return None
    def lockTable(self, con, table):
pass
def unlockTable(self, con, table):
pass
|
ModestoCabrera/is210-week-12-synthesizing
|
task_02.py
|
Python
|
mpl-2.0
| 616
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains Custom
|
Exception Class"""
class CustomError(Exception):
"""
Attributes:
None
"""
def __init__(self, message, cause):
"""Custom Error that stores error reason.
Args:
cause (str): Reason for error.
message (str): User input.
Returns:
None
Examples:
>>> myerr = CustomError('Whoah!', cause='Messed up!')
>>> print myerr.cause
Messed up!
""
|
"
self.cause = cause
self.message = message
Exception.__init__(self)
|
aronsky/home-assistant
|
homeassistant/components/pioneer/__init__.py
|
Python
|
apache-2.0
| 29
| 0
|
"""Th
|
e pioneer component."""
| |
demorest/mark5access
|
python/examples/m5subband.py
|
Python
|
gpl-3.0
| 8,051
| 0.033288
|
#!/usr/bin/python
"""
m5subband.py ver. 1.1 Jan Wagner 20150603
Extracts a narrow subband via filtering raw VLBI data.
Reads formats supported by the mark5access library.
Usage : m5subband.py <infile> <dataformat> <outfile>
<if_nr> <factor> <Ldft>
<start_bin> <stop_binN> [<offset>]
<dataformat> should be of the form: <FORMAT>-<Mbps>-<nchan>-<nbit>, e.g.:
VLBA1_2-256-8-2
MKIV1_4-128-2-1
Mark5B-512-16-2
VDIF_1000-64-1-2 (here 1000 is payload size in bytes)
<outfile> output file for 32-bit float subband data (VDIF format)
<if_nr> the IF i.e. baseband channel to be filtered (1...nchan)
<factor> overlap-add factor during filtering (typ. 4)
<Ldft> length of DFT
<start_bin> take output starting from bin (0...Ldft-2)
<stop_bin> take output ending with bin (start_bin...Ldft-1)
note that for real-valued VLBI data 0..Ldft/2 contains
the spectrum and Ldft/2+1...Ldft-1 its mirror image
<offset> is the byte offset into the file
"""
import ctypes, numpy, re, struct, sys
import mark5access as m5lib
from datetime import datetime
from scipy import stats
refMJD_Mark5B = 57000 # reference MJD for Mark5B input data
def usage():
print __doc__
def m5subband(fn, fmt, fout, if_nr, factor, Ldft, start_bin, stop_bin, offset):
"""Extracts narrow-band signal out from file"""
# Derived settings
nin = Ldft
nout = stop_bin - start_bin + 1
#Lout = next_pow2(2*(nout-nout%2)) # time-domain output data will be somewhat oversampled
Lout = next_even(2*(nout-nout%2)) # time-domain output data will be closer to critically sampled
iter = 0
# Open file
try:
m5file = m5lib.new_mark5_stream_file(fn, ctypes.c_longlong(offset))
m5fmt = m5lib.new_mark5_format_generic_from_string(fmt)
ms = m5lib.new_mark5_stream_absorb(m5file, m5fmt)
dms = ms.contents
m5lib.mark5_stream_fix_mjd(ms, refMJD_Mark5B)
(mjd,sec,ns) = m5lib.helpers.get_sample_time(ms)
except:
        print ('Error: problem opening or decoding %s\n' % (fn))
return 1
# Safety checks
if (if_nr<0) or (if_nr>=dms.nchan) or (factor<0) or (factor>32) or (Ldft<2) or (start_bin>stop_bin) or (stop_bin>=Ldft):
print ('Error: invalid command line arguments')
return 1
if (Ldft % factor)>0:
print ('Error: length of DFT (Ldft=%u) must be divisible by overlap-add factor (factor=%u)' % (Ldft,factor))
return 1
if (Lout % factor)>0:
        print ('Error: length derived for output IDFT (Lout=%u) is not divisible by the overlap-add factor (factor=%u)' % (Lout,factor))
return 1
# Get storage for raw sample data from m5lib.mark5_stream_decode()
pdata = m5lib.helpers.make_decoder_array(ms, nin, dtype=ctypes.c_float)
if_data = ctypes.cast(pdata[if_nr], ctypes.POINTER(ctypes.c_float*nin))
# Numpy 2D arrays for processed data
fp = 'float32'
cp = 'complex64' # complex64 is 2 x float32
flt_in = numpy.zeros(shape=(factor,nin), dtype=fp)
flt_out = numpy.zeros(shape=(factor,Lout), dtype=cp)
iconcat = numpy.array([0.0 for x in range(2*nin)], dtype=fp)
oconcat = numpy.array([0.0+0.0j for x in range(2*Lout)], dtype=cp)
# Coefficient for coherent phase connection between overlapped input segments
r = float(start_bin)/float(factor)
rfrac = r - numpy.floor(r)
rot_f0 = numpy.exp(2j*numpy.pi*rfrac)
if (abs(numpy.imag(rot_f0)) < 1e-5):
# set near-zero values to zero
rot_f0 = numpy.real(rot_f0) + 0.0j
rot_f = rot_f0**0.0
# Window functions for DFT and IDFT
win_in = numpy.cos((numpy.pi/nin)*(numpy.linspace(0,nin-1,nin) - 0.5*(nin-1)))
win_in = numpy.resize(win_in.astype(fp), new_shape=(factor,nin))
win_out = numpy.cos((numpy.pi/Lout)*(numpy.linspace(0,Lout-1,Lout) - 0.5*(Lout-1)))
win_out = numpy.resize(win_out.astype(fp), new_shape=(factor,Lout))
# Prepare VDIF output file with reduced data rate and same starting timestamp
bwout = float(dms.samprate)*(nout/float(nin))
fsout = 2*bwout
outMbps = fsout*1e-6 * 32 # 32 for real-valued data, 64 for complex data
vdiffmt = 'VDIF_8192-%u-1-32' % (outMbps)
if not(int(outMbps) == outMbps):
print ('*** Warning: output rate is non-integer (%e Ms/s)! ***' % (outMbps))
(vdifref,vdifsec) = m5lib.helpers.get_VDIF_time_from_MJD(mjd,sec+1e-9*ns)
vdif = m5lib.writers.VDIFEncapsulator()
vdif.open(fout, format=vdiffmt, complex=False, station='SB')
vdif.set_time(vdifref,vdifsec, framenr=0)
vdiffmt = vdif.get_format()
# Report
bw = float(dms.samprate)*0.5
print ('Input file : start MJD %u/%.6f sec' % (mjd,sec+ns*1e-9))
print ('Bandwidth : %u kHz in, %.2f kHz out, bandwidth reduction of ~%.2f:1' % (1e-3*bw, nout*1e-3*bw/nin, float(nin)/nout))
print ('Input side : %u-point DFT with %u bins (%u...%u) extracted' % (nin,nout,start_bin,stop_bin))
print ('Output side : %u-point IDFT with %u-point zero padding' % (Lout,Lout-nout))
print ('Overlap : %u samples on input, %u samples on output' % (nin-nin/factor,Lout-Lout/factor))
print ('Phasors : %s^t : %s ...' % (str(rot_f0), str([rot_f0**t for t in range(factor+2)])))
print ('Output file : rate %.3f Mbps, %u fps, format %s'
% (outMbps,vdif.get_fps(),vdif.get_format()) )
# Do filtering
print ('Filtering...')
while True:
# Get next full slice of data
rc = m5lib.mark5_stream_decode(ms, nin, pdata)
if (rc < 0):
print ('\n<EOF> status=%d' % (rc))
return 0
in_new = numpy.frombuffer(if_data.contents, dtype='float32')
# Debug: replace data with noise + tone
if False:
t = iter*nin + numpy.array(range(nin))
f = (start_bin + numpy.floor(nout/2.0)) / float(nin)
in_new = numpy.random.standard_normal(size=in_new.size) + 10*numpy.sin(2*numpy.pi * f*t)
in_new = in_new.astype('float32')
# Feed the window-overlap-DFT processing input stage
iconcat = numpy.concatenate([iconcat[0:nin],in_new]) # [old,new]
for ii in range(factor):
iconcat = numpy.roll(iconcat, -nin/factor)
flt_in[ii] = iconcat[0:nin]
# Window and do 1D DFT of 2D array
flt_in = numpy.multiply(flt_in,win_in)
F = numpy.fft.fft(flt_in)
# Copy the desired bins and fix DC/Nyquist bins
for ii in range(factor):
flt_out[ii][0:nout] = F[ii][start_bin:(start_bin+nout)]
flt_out[ii][0] = 0.0 # numpy.real(flt_out[ii][0])
flt_out[ii][nout-1] = 0.0 # numpy.real(flt_out[ii][nout-1])
# Do inverse 1D DFT and window the result
F = numpy.fft.ifft(flt_out)
F = numpy.multiply(F,win_out)
# Reconstruct time domain signal by shifting and stacking overlapped segments coherently
for ii in range(factor):
oconcat[Lout:] = oconcat[Lout:] + F[ii]*rot_f
rot_f = rot_f * rot_f0
oconcat = numpy.roll(oconcat, -Lout/factor)
# note: numpy has a circular shift (numpy.roll), but no "shift array left/right" function,
# so we need to zero out the undesired values shifted back in by the circular shift:
oconcat[(-Lout/factor):] = 0
# Output real part of complex time domain data
        # (If suppression of the upper Nyquist zone is desired, should write out both real & imag)
vdif.write(numpy.real(oconcat[0:Lout]).view('float32').tostring())
# Reporting
if (iter % 100)==0:
(mjd,sec,ns) = m5lib.helpers.get_sample_time(ms)
T_abs = sec + 1e-9*ns
T_count = 1e-9*dms.framens * dms.nvalidatepass
print ('Iter %7d : %u/%f : %u : %f sec\r' % (iter, mjd,T_abs, dms.nvalidatepass, T_count)),
iter = iter + 1
vdif.close()
return 0
def next_pow2(n):
"""Returns the power-of-2 closest to and larger than or equal to n"""
return int(2.0**numpy.ceil(numpy.log(n)/numpy.log(2)))
def next_even(n):
"""Returns the even number closest to and larger than or equal to n"""
return int(n + n%2)
def main(argv=sys.argv):
if len(argv) not in [9,10]:
usage()
sys.exit(1)
offset = 0
if len(argv) == 10:
offset = int(argv[9])
if_nr = int(argv[4])-1
factor = int(argv[5])
Ldft = int(argv[6])
start_bin = int(argv[7])
stop_bin = int(argv[8])
rc = m5subband(argv[1],argv[2],argv[3], if_nr, factor,Ldft,start_bin,stop_bin, offset)
return rc
if __name__ == "__main__":
sys.exit(main())
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/bidding_seasonality_adjustment_service/transports/grpc.py
|
Python
|
apache-2.0
| 12,389
| 0.001291
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v9.resources.types import (
bidding_seasonality_adjustment,
)
from google.ads.googleads.v9.services.types import (
bidding_seasonality_adjustment_service,
)
from .base import (
BiddingSeasonalityAdjustmentServiceTransport,
DEFAULT_CLIENT_INFO,
)
class BiddingSeasonalityAdjustmentServiceGrpcTransport(
BiddingSeasonalityAdjustmentServiceTransport
):
"""gRPC backend transport for BiddingSeasonalityAdjustmentService.
Service to manage bidding seasonality adjustments.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = google.auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
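    # Rough construction sketch, assuming Application Default Credentials are
    # available; in practice the service client normally builds this transport
    # itself, so the call below is illustrative only:
    #   transport = BiddingSeasonalityAdjustmentServiceGrpcTransport(
    #       host="googleads.googleapis.com",
    #       credentials=None,  # resolved via google.auth.default()
    #   )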
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            address (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the c
|
Zashas/segway
|
structs.py
|
Python
|
gpl-3.0
| 5,933
| 0.008933
|
#coding: utf-8
from scapy.all import *
class WILDCARD:
""" Used to indicate that some fields in a scapy packet should be ignored when comparing """
pass
class NO_PKT:
""" Indicate that a sent packet should have no reply """
pass
def pkt_match(expected, actual):
""" Check if all fields described in packet `expected` match the fields of pkt `actual`' """
if expected == NO_PKT and actual == NO_PKT:
return True
elif expected == NO_PKT or actual == NO_PKT:
return False
if expected.oif != WILDCARD and expected.oif != actual.oif:
# This can't be added to `fields` because it's not a proper scapy field
return False
fields = {
IPv6: ('src', 'dst'),
IPv6ExtHdrSegmentRouting: ('addresses', 'lastentry', 'segleft', 'tag',
'unused1', 'protected', 'oam', 'alert', 'hmac', 'unused2'), # Flags
IPv6ExtHdrSegmentRoutingTLVHMAC : ('hmac', 'keyid'),
IPv6ExtHdrSegmentRoutingTLVIngressNode : ('ingress_node',),
IPv6ExtHdrSegmentRoutingTLVEgressNode : ('egress_node',),
IPv6ExtHdrSegmentRoutingTLVOpaque : ('container',),
IPv6ExtHdrSegmentRoutingTLVPadding : ('len',),
IPv6ExtHdrSegmentRoutingTLVNSHCarrier : ('nsh_object',),
IPv6ExtHdrSegmentRoutingTLV : ('type', 'value'),
TCP: ('sport', 'dport'),
UDP: ('sport', 'dport'),
Raw: ('load',)
}
layer = 0
while 1:
sub_expected, sub_actual = expected.getlayer(layer), actual.getlayer(layer)
if sub_expected.__class__ != sub_actual.__class__:
return False
if sub_actual == None: # Compared all layers
return True
if sub_actual.__class__ not in fields: # Unknown layer ..
return False
for field in fields[sub_expected.__class__]:
# Don't care if field not set in expected packet
if getattr(sub_expected, field) != WILDCARD and \
getattr(sub_expected, field) != getattr(sub_actual, field):
return False
layer += 1
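# Intended usage sketch for pkt_match, with WILDCARD marking fields to ignore;
# the addresses and ports below are hypothetical:
#   expected = IPv6(src=WILDCARD, dst="fc00::2") / UDP(sport=WILDCARD, dport=53)
#   expected.oif = WILDCARD
#   pkt_match(expected, received)  # True when dst and dport match, rest ignored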
def pkt_str(pkt):
if pkt == NO_PKT:
return "none"
_ = lambda x: x if x != WILDCARD else "*"
def srh_str(srh):
from collections import OrderedDict
segs = list(srh.addresses)
if srh.segleft and srh.segleft < len(segs):
segs[srh.segleft] = "+"+segs[srh.segleft]
options = OrderedDict((('sl',srh.segleft), ('le',srh.lastentry)))
if srh.tag:
options['tag'] = srh.tag
flags = ""
        fl_mapping = {'oam':'O', 'hmac':'H', 'alert':'A','protected':'P'} # TODO: order according to the draft
for key,val in fl_mapping.items():
if getattr(srh,key) == 1:
flags += val
if flags != "":
options['fl'] = flags
tlvs = []
for tlv in srh.tlv_objects:
if isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVHMAC):
tlvs.append('{{HMAC: {}, {}}}'.format(tlv.hmac.encode('hex'), tlv.keyid))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVPadding):
tlvs.append('{{Pad: {}}}'.format(tlv.len))
            elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVIngressNode):
tlvs.append('{{Ingr: {}}}'.format(tlv.ingress_node))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVEgressNode):
tlvs.append('{{Egr: {}}}'.format(tlv.egress_node))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVOpaque):
tlvs.append('{{Opaq: {}}}'.format(tlv.container.encode('hex')))
elif isinstance(tlv,IPv6ExtHdrSegmentRoutingTLVNSHCarrier):
tlvs.append('{{NSH: {}}}'.format(tlv.nsh_object.encode('hex')))
else:
tlvs.append('{{Type:{} Value:{}}}'.format(tlv.type, tlv.value.encode('hex')))
return "[{}] <{}>{}".format(",".join(segs), ",".join(map(lambda key: "{} {}".format(key, options[key]),options)), "" if not tlvs else " "+" ".join(tlvs))
def ip_str(ip):
return "{} -> {}".format(_(ip.src), _(ip.dst))
def udp_str(udp):
if udp.sport or udp.dport:
return "UDP({},{})".format(_(udp.sport), _(udp.dport))
return "UDP"
def tcp_str(tcp):
if tcp.sport or tcp.dport:
return "TCP({},{})".format(_(tcp.sport), _(tcp.dport))
return "TCP"
def payload_str(raw):
if raw.load == WILDCARD:
return "*"
return '"{}"'.format(raw.load)
fcts = {
IPv6: ip_str,
IPv6ExtHdrSegmentRouting: srh_str,
UDP: udp_str,
TCP: tcp_str,
Raw: payload_str
}
i = 0
protos = []
while 1:
layer = pkt.getlayer(i)
if layer == None:
break
elif isinstance(layer, IPv6ExtHdrSegmentRoutingTLV):
pass
elif layer.__class__ in fcts:
protos.append(fcts[layer.__class__](layer))
else:
protos.append(layer.name)
i += 1
iface = ""
if pkt.oif and pkt.oif != "dum0" and pkt.oif != WILDCARD:
iface = "({}) ".format(pkt.oif)
return iface+" / ".join(protos)
class Event:
type = None
cmd = None #only used if CMD
pkt = None # only used if PKT
answer = None
expected_answer = None
oif = None # only used if OIF
PKT = 1
CMD = 2
OIF = 3
def __unicode__(self):
return self.__str__()
def __str__(self):
if self.type == Event.PKT:
s = "> {}".format(self.pkt)
if self.expected_answer:
s += "\n< {}".format(self.expected_answer)
return s
elif self.type == Event.CMD:
return "`"+self.cmd+"`"
elif self.type == Event.OIF:
return "if add {}".format(self.oif)
else:
return "Unknown event"
def __repr__(self):
return self.__str__()
|
Gustry/inasafe
|
safe/metadata/property/boolean_property.py
|
Python
|
gpl-3.0
| 1,696
| 0
|
# -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '08/12/15'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import json
from types import NoneType
from safe.common.exceptions import MetadataCastError
from safe.metadata.property import BaseProperty
class BooleanProperty(BaseProperty):
"""A property that accepts boolean."""
    # if you edit this you need to adapt xml_value and is_valid accordingly
_allowed_python_types = [bool, NoneType]
def __init__(self, name, value, xml_path):
super(BooleanProperty, self).__init__(
            name, value, xml_path, self._allowed_python_types)
@classmethod
def is_valid(cls, value):
return True
def cast_from_str(self, value):
try:
            return bool(int(value))
except ValueError as e:
raise MetadataCastError(e)
@property
def xml_value(self):
if self.python_type is bool:
return str(int(self.value))
elif self.python_type is NoneType:
return ''
else:
raise RuntimeError('self._allowed_python_types and self.xml_value'
'are out of sync. This should never happen')
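    # Round trip implied by cast_from_str/xml_value: cast_from_str('1') -> True,
    # cast_from_str('0') -> False, a non-integer string raises MetadataCastError,
    # and a None value serialises back to the empty string.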
|
guschmue/tensorflow
|
tensorflow/contrib/framework/python/ops/variables.py
|
Python
|
apache-2.0
| 29,690
| 0.005322
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
from tensorflow.python.util.deprecation import deprecated
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'global_variable',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
@deprecated(None, "Please switch to tf.train.assert_global_step")
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
@deprecated(None, "Please switch to tf.train.get_global_step")
def get_global_step(graph=None):
return training_util.get_global_step(graph)
@deprecated(None, "Please switch to tf.train.create_global_step")
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing,
use default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
@deprecated(None, "Please switch to tf.train.get_or_create_global_step")
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
def global_variable(initial_value,
validate_shape=True,
name=None,
use_resource=None):
"""Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
validate_shape=validate_shape,
use_resource=use_resource,
name=name)
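# A hedged illustration (variable names assumed) of the only difference
# between the two helpers above: the graph collection the new variable joins.
#
#   examples_seen = local_variable(0, name='examples_seen')   # LOCAL_VARIABLES
#   is_chief = global_variable(False, name='is_chief')        # GLOBAL_VARIABLES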
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None, use_resource=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
If None it would default to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully de
|
pipcat/kobo
|
translation-tools/qm2ts.py
|
Python
|
gpl-3.0
| 3,181
| 0.033008
|
#!/usr/bin/python2
# -- coding: utf-8 --
# Converts a .qm file to a .ts file.
# More info: http://www.mobileread.com/forums/showthread.php?t=261771
# By pipcat & surquizu. Thanks to: tshering, axaRu, davidfor, mobileread.com
import codecs, cgi
def clean_text(txt, is_utf) :
if is_utf == False:
txt = txt.decode('utf-16be').encode('utf-8', 'ignore')
txt = txt.rstrip() #bypass errors on trans_ca
else:
txt = txt.replace('\x20\xB7', '\x20\xC2\xB7') #bypass errors on trans_ca
txt = txt.replace('\x54\xFC', '\x54\xC3\xBC') #bypass errors on trans_ca
txt = txt.replace('\x6B\xE7', '\x6B\xC3\xA7') #bypass errors on trans_ca
txt = cgi.escape(txt)
return txt
def qm2ts(filename) :
with open(filename, 'rb') as fh:
data = fh.read()
pos = 0
found = 0
last_t3 = ''
ts_filename = filename+'.ts'
f = open(ts_filename, 'w')
f.write(codecs.BOM_UTF8)
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<!DOCTYPE TS>\n')
f.write('<TS version="2.1" language="es">\n') #use a language code with singular/plural if needed (Ex: es)
while pos < len(data) :
        if data[pos:pos+3] == '\x03\x00\x00':
l1 = (ord(data[pos+3]) * 256) + ord(data[pos+4])
t1 = data[pos+5:pos+5+l1]
t1b = ''
t1c = ''
            if data[pos+5+l1:pos+5+l1+3] == '\x03\x00\x00': #optional, when exists singular/plural
l1b = (ord(data[pos+5+l1+3]) * 256) + ord(data[pos+5+l1+4])
t1b = data[pos+5+l1+5:pos+5+l1+5+l1b]
pos = pos+l1b+5
if data[pos+5+l1:pos+5+l1+3] == '\x03\x00\x00': #optional, when exists singular/undecal/plural
l1c = (ord(data[pos+5+l1+3]) * 256) + ord(data[pos+5+l1+4])
t1c = data[pos+5+l1+5:pos+5+l1+5+l1c]
pos = pos+l1c+5
if data[pos+5+l1:pos+5+l1+8] == '\x08\x00\x00\x00\x00\x06\x00\x00':
pos = pos+5+l1+8
l2 = (ord(data[pos]) * 256) + ord(data[pos+1])
t2 = data[pos+2:pos+2+l2]
if data[pos+2+l2:pos+2+l2+3] == '\x07\x00\x00':
pos = pos+2+l2+3
l3 = (ord(data[pos]) * 256) + ord(data[pos+1])
t3 = data[pos+2:pos+2+l3]
found += 1
# save xml
if last_t3 != t3:
if last_t3 != '':
f.write('</context>\n')
f.write('<context>\n')
f.write('\t<name>'+t3+'</name>\n')
last_t3 = t3
f.write('\t<message>\n') if t1b == '' else f.write('\t<message numerus="yes">\n')
f.write('\t\t<source>'+clean_text(t2, True)+'</source>\n')
if t1b == '':
f.write('\t\t<translation>'+clean_text(t1, False)+'</translation>\n')
else:
f.write('\t\t<translation>\n')
f.write('\t\t\t<numerusform>'+clean_text(t1, False)+'</numerusform>\n')
f.write('\t\t\t<numerusform>'+clean_text(t1b, False)+'</numerusform>\n')
if t1c != '':
f.write('\t\t\t<numerusform>'+clean_text(t1c, False)+'</numerusform>\n')
f.write('\t\t</translation>\n')
f.write('\t</message>\n')
pos += 1
if pos >= len(data):
break
if last_t3 != '':
f.write('</context>\n')
f.write('</TS>\n')
f.close()
print 'File saved: '+ts_filename+' with '+str(found)+' strings.'
# MAIN
#qm2ts('nickel-3.17.3-8-es.qm')
#qm2ts('nickel-3.19.5761-5-es.qm')
#qm2ts('3.17.3_trans_ca.qm')
#qm2ts('3.19.5761_trans_ca.qm')
qm2ts('nickel-5-es.qm')
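# A hedged alternative entry point (not in the original script): take the .qm
# filename from the command line instead of editing the hard-coded call above.
#
#   import sys
#   if len(sys.argv) > 1:
#       qm2ts(sys.argv[1])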
|
SirEdvin/Pandas-Pipe
|
pandaspipe/pipeline.py
|
Python
|
apache-2.0
| 10,315
| 0.002714
|
# -*- coding:utf-8 -*-
import abc
import sys
import inspect
import types
import itertools
import networkx as nx
from pandaspipe.util import patch_list, isSubset
from pandaspipe.base import PipelineEntity
import logging
_log = logging.getLogger(__name__)
_log.addHandler(logging.StreamHandler(stream=sys.stdout))
class Pipeline:
def __init__(self, name='Undefined Pipeline', env=None):
"""(Pipeline, str) -> NoneType
Creating the contents of the Pipeline Object
"""
if env is None:
env = {}
self._entities = []
self.name = name
self.env = env
self.graph = None
def process(self, channels=('root',), ignore_outlet_node=False, output_channels=()):
"""(Pipeline, pandas.DataFrame, str) -> type(df_map)
*Description*
:param ignore_outlet_node:
"""
start_nodes = [self._get_start_node(channel) for channel in channels]
active_dfs = {}
active_nodes = []
acomplete_nodes = self.graph.nodes()
complete_nodes = []
active_nodes.extend(start_nodes)
while len(active_nodes) > 0:
next_nodes = []
processed = False
for active_node in active_nodes:
pred_nodes = self.graph.pred.get(active_node).keys()
depencencies = active_node.external_dependencies
if (len(pred_nodes) == 0 or isSubset(complete_nodes, pred_nodes)) and isSubset(active_dfs.keys(), depencencies):
_log.info('Call entity %s' % active_node)
processed = True
# Process
parameters = [active_dfs[channel] for channel in active_node.input_channels]
if active_node.type in ('node', 'bignode'):
external_dependencies = {}
if active_node.external_dependencies:
for external_dependency in active_node.external_dependencies:
external_dependencies[external_dependency] = active_dfs[external_dependency]
self.env['ext_dep'] = external_dependencies
result = active_node(*parameters)
active_nodes.remove(active_node)
complete_nodes.append(active_node)
acomplete_nodes.remove(active_node)
# Update active dataframes
if len(active_node.output_channels) == 1:
active_dfs[active_node.output_channels[0]] = result
elif len(active_node.output_channels) > 1:
active_dfs.update(result)
# Add next nodes
for node in self.graph.succ.get(active_node).keys():
if node not in active_nodes and node not in next_nodes:
next_nodes.append(node)
if not processed:
_log.error('Infinite cycle detected!')
return None
active_nodes.extend(next_nodes)
# Clear useless dfs
# Check if required by next node
for channel in active_dfs.keys():
if channel not in output_channels and len(
[active_node for active_node in active_nodes if channel in active_node.input_channels]) == 0:
# Check if required by external dependencies
required = reduce(lambda x, y: x or y, [channel in node.external_dependencies for node in acomplete_nodes], False)
if not required:
active_dfs.pop(channel)
if len(active_dfs.keys()) == 1:
return active_dfs.values()[0]
return active_dfs
def append(self, cls, channel=None, output_channel=None, construct_arguments=()):
"""(Pipeline, classobj, str, str) -> NoneType
*Description*
:param construct_arguments:
:param cls:
:param channel:
:param output_channel:
"""
self(channel, output_channel, construct_arguments=construct_arguments)(cls)
def build_process_graph(self):
builder = GraphBuilder(self._entities)
return builder.build()
def _check_graph(self):
if self.graph is None:
self.graph = self.build_process_graph()
def _get_start_node(self, channel):
self._check_graph()
nodes = filter(lambda x: channel in x.output_channels and x.type == 'source', self.graph.nodes())
if len(nodes) > 0:
return nodes[0]
raise Exception('You can\'t use channel without source node')
def _process_entity(self, cls, channel, outchannel, construct_arguments, priority):
"""(Pipeline, type(cls), type(channel), type(outchannel),
type(entity_map)) -> type(cls)
*Description*
"""
obj = cls(*construct_arguments)
obj.env = self.env
if priority:
obj.priority = priority
obj.register(self)
self._entities.append(obj)
if channel is None and len(obj.input_channels) == 0 and len(obj.output_channels) == 0:
channel = 'root'
if channel:
if outchannel is None:
outchannel = channel
if obj.type == 'node':
obj.input_channels = channel[:1] if isinstance(channel, list) else [channel]
obj.output_channels = outchannel[:1] if isinstance(outchannel, list) else [outchannel]
elif obj.type == 'bignode':
patch_list(obj.input_channels, channel)
            patch_list(obj.output_channels, outchannel)
elif obj.type == 'source':
obj.input_channels = []
patch_list(obj.output_channels, outchannel)
elif obj.type == 'outlet':
patch_list(obj.input_channels, channel)
obj.output_channels = []
else:
raise Exception('Well, you use bad type for entity ....')
return cls
def __call__(self, channel=None, outchannel=None, construct_arguments=(), priority=None):
"""(Pipeline, str, str) ->
type(process_function)
*Description*
"""
def process_function(cls):
"""(type(cls)) ->
type(self._process_entity(cls, channel, outchannel, self._filters))
*Description*
:param cls:
"""
cls_mro = inspect.getmro(cls)
if PipelineEntity in cls_mro:
self._process_entity(cls, channel, outchannel, construct_arguments, priority)
return cls
if inspect.isclass(channel) or isinstance(channel, abc.ABCMeta):
cls = channel
channel = None
return process_function(cls)
return process_function
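# A hedged usage sketch (the entity classes are assumed, not part of this
# module): __call__ above lets a Pipeline instance act as a class decorator
# that wires a PipelineEntity subclass onto a channel; once a 'source' entity
# feeds the channel, process() walks the resulting graph.
#
#   pipe = Pipeline(name='demo')
#
#   @pipe('root')
#   class LoadFrame(PipelineEntity):       # entity with type 'source'
#       ...
#
#   @pipe('root')
#   class CleanColumns(PipelineEntity):    # entity with type 'node'
#       ...
#
#   result = pipe.process(channels=('root',))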
class GraphBuilder:
def __init__(self, entities):
self.entities = entities
self.channel_io_nodes = {}
self.graph = nx.DiGraph()
pass
def build(self):
self.graph.add_nodes_from(self.entities)
self._build_inchannel_connections()
self._build_multichannel_connections()
self._validate_external_dependencies()
return self.graph
def _build_inchannel_connections(self):
all_channels = set(
itertools.chain(*map(lambda x: set(itertools.chain(x.input_channels, x.output_channels)), self.entities)))
for channel in all_channels:
# Process simple nodes
channel_nodes = filter(lambda x: x.type == 'node'
and channel in x.input_channels and channel in x.output_channels,
self.entities)
channel_nodes.sort(key=lambda x: (x.priority, x.__class__.__name__))
self.channel_io_nodes[channel] = {}
if len(channel_nodes) > 0:
self.channel_io_nodes[channel]['input'] = channel_nodes[0]
self.channel_io_nodes[channel]['output'] = channel_nodes[-1]
# noinspection PyCompatibility
for i in xrange(0
|
zlsun/ProjectEuler
|
091.py
|
Python
|
mit
| 484
| 0.008421
|
#-*- encoding: utf-8 -*-
"""
Right triangles with integer coordinates
The points P (x1, y1) and Q (x2, y2) are plotted at integer co-ordinates and are joined to the origin, O(0,0), to form ΔOPQ.
There are exactly fourteen triangles containing a right angle that can be formed when each co-ordinate lies between 0 and 2 inclusive; that is, 0 ≤ x1, y1, x2, y2 ≤ 2.
Given that 0 ≤ x1, y1, x2, y2 ≤ 50, how many right triangles can be formed?
"""
from utils import *
#
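# The original solution body is cut off above; what follows is a hedged
# brute-force sketch (not the author's code) that counts right triangles by
# checking Pythagoras on the squared side lengths.
def count_right_triangles(limit=50):
    points = [(x, y) for x in range(limit + 1) for y in range(limit + 1)
              if (x, y) != (0, 0)]
    count = 0
    for i, p in enumerate(points):
        for q in points[i + 1:]:
            op = p[0] ** 2 + p[1] ** 2                     # |OP|^2
            oq = q[0] ** 2 + q[1] ** 2                     # |OQ|^2
            pq = (p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2   # |PQ|^2
            a, b, c = sorted((op, oq, pq))
            # A right angle at any vertex means the two shorter squared sides
            # sum to the longest; degenerate collinear triples never do.
            if a + b == c:
                count += 1
    return count
# Sanity check from the statement above: count_right_triangles(2) == 14.
# print(count_right_triangles(50))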
|
d-plaindoux/fluent-rest
|
tests/verb_test.py
|
Python
|
lgpl-2.1
| 1,943
| 0
|
# Copyright (C)2016 D. Plaindoux.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option) any
# later version.
import unittest
from fluent_rest.spec.rest import *
from fluent_rest.exceptions import OverloadedVerbException
class TestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
    def test_should_have_GET(self):
        @GET
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'GET'))
def test_should_have_PUT(self):
@PUT
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'PUT'))
def test_should_have_POST(self):
@POST
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'POST'))
def test_should_have_DELETE(self):
@DELETE
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'DELETE'))
def test_should_have_a_Verb(self):
@Verb(u'UPLOAD')
def test():
pass
self.assertTrue(specification(test).hasGivenVerb(u'UPLOAD'))
def test_should_not_have_GET_and_PUT(self):
try:
@GET
@PUT
def test_function_to_be_rejected():
pass
self.fail('Cannot have more than one verb')
except OverloadedVerbException, _:
pass
def test_should_have_GET_in_class(self):
@GET
class Test:
def __init__(self):
pass
self.assertTrue(specification(Test).hasGivenVerb(u'GET'))
def suite():
aSuite = unittest.TestSuite()
aSuite.addTest(unittest.makeSuite(TestCase))
return aSuite
if __name__ == '__main__':
unittest.main()
|
skoolkid/skoolkit
|
skoolkit/skoolctl.py
|
Python
|
gpl-3.0
| 29,525
| 0.001897
|
# Copyright 2010-2021 Richard Dymond ([email protected])
#
# This file is part of SkoolKit.
#
# SkoolKit is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# SkoolKit is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# SkoolKit. If not, see <http://www.gnu.org/licenses/>.
import re
from skoolkit import SkoolParsingError, write_line, get_int_param, get_address_format, open_file
from skoolkit.components import get_assembler, get_component, get_operand_evaluator
from skoolkit.skoolparser import (Comment, parse_entry_header, parse_instruction,
parse_address_comments, join_comments, read_skool, DIRECTIVES)
from skoolkit.textutils import partition_unquoted
ASM_DIRECTIVES = 'a'
BLOCKS = 'b'
BLOCK_TITLES = 't'
BLOCK_DESC = 'd'
REGISTERS = 'r'
BLOCK_COMMENTS = 'm'
SUBBLOCKS = 's'
COMMENTS = 'c'
NON_ENTRY_BLOCKS = 'n'
# ASM directives
AD_START = 'start'
AD_ORG = 'org'
AD_IGNOREUA = 'ignoreua'
AD_LABEL = 'label'
AD_REFS = 'refs'
# An entry ASM directive is one that should be placed before the entry title
# when it is associated with the first instruction in the entry
RE_ENTRY_ASM_DIRECTIVE = re.compile("assemble=|def[bsw]=|end$|equ=|expand=|if\(|org$|org=|remote=|replace=|set-[-a-z]+=|start$|writer=")
# Comment types to which the @ignoreua directive may be applied
TITLE = 't'
DESCRIPTION = 'd'
REGISTERS = 'r'
MID_BLOCK = 'm'
INSTRUCTION = 'i'
END = 'e'
FORMAT_NO_BASE = {
'b': 'b{}',
'c': 'c{}',
'd': '{}',
'h': '{}',
'm': 'm{}'
}
FORMAT_PRESERVE_BASE = {
'b': 'b{}',
'c': 'c{}',
'd': 'd{}',
'h': 'h{}',
'm': 'm{}'
}
class ControlDirectiveComposer:
"""Initialise the control directive composer.
:param preserve_base: Whether to preserve the base of decimal and
hexadecimal values with explicit 'd' and 'h' base
indicators.
"""
# Component API
def __init__(self, preserve_base):
self.preserve_base = preserve_base
self.op_evaluator = get_operand_evaluator()
# Component API
def compose(self, operation):
"""Compute the type, length and sublengths of a DEFB/DEFM/DEFS/DEFW
statement, or the operand bases of a regular instruction.
:param operation: The operation (e.g. 'LD A,0' or 'DEFB 0').
:return: A 3-element tuple, ``(ctl, length, sublengths)``, where:
* ``ctl`` is 'B' (DEFB), 'C' (regular instruction), 'S' (DEFS),
'T' (DEFM) or 'W' (DEFW)
* ``length`` is the number of bytes in the DEFB/DEFM/DEFS/DEFW
statement, or the operand base indicator for a regular
instruction (e.g. 'b' for 'LD A,%00000001')
* ``sublengths`` is a colon-separated sequence of sublengths (e.g.
'1:c1' for 'DEFB 0,"a"'), or `None` for a regular instruction
"""
op = operation.upper()
if op.startswith(('DEFB', 'DEFM', 'DEFS', 'DEFW')):
ctl = op[3].replace('M', 'T')
length, sublengths = self._get_length(ctl, operation)
else:
ctl = 'C'
length, sublengths = self._get_operand_bases(operation), None
return (ctl, length, sublengths)
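    # A hedged illustration (not part of SkoolKit) of the return values the
    # docstring above describes, assuming the default operand evaluator:
    #
    #   composer = ControlDirectiveComposer(preserve_base=False)
    #   composer.compose('DEFB 0,"a"')      # -> ('B', 2, '1:c1')
    #   composer.compose('LD A,%00000001')  # -> ('C', 'b', None)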
def _parse_string(self, item):
try:
return self.op_evaluator.eval_string(item)
except ValueError:
if item.startswith('"') and not item.endswith('"'):
try:
return [self.op_evaluator.eval_int(item)]
except ValueError:
return
def _get_operand_bases(self, operation):
elements = operation.split(None, 1)
if len(elements) > 1:
elements[1:] = [e.strip() for e in self.op_evaluator.split_operands(elements[1])]
if not elements:
return ''
if self.preserve_base:
base_fmt = {'b': 'b', 'c': 'c', 'd': 'd', 'h': 'h', 'm': 'm'}
else:
base_fmt = {'b': 'b', 'c': 'c', 'd': 'n', 'h': 'n', 'm': 'm'}
if elements[0].upper() in ('BIT', 'RES', 'SET'):
operands = elements[2:]
else:
operands = elements[1:]
bases = ''
for operand in operands:
if operand.upper().startswith(('(IX+', '(IX-', '(IY+', '(IY-')):
num = operand[4:]
elif operand.startswith('('):
num = operand[1:]
else:
num = operand
if num.startswith(('"', '%', '$', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9')):
bases += base_fmt[_get_base(num)]
if bases in ('n', 'nn'):
return ''
return bases
def _get_length(self, ctl, operation):
if ctl == 'B':
return self._get_defb_defm_length(operation, FORMAT_NO_BASE, 'c{}')
if ctl == 'T':
byte_fmt = {'b': 'b{}', 'd': 'n{}', 'h': 'n{}', 'm': 'm{}'}
return self._get_defb_defm_length(operation, byte_fmt, '{}')
if ctl == 'S':
return self._get_defs_length(operation)
return self._get_defw_length(operation)
def _get_defb_defm_length(self, operation, byte_fmt, text_fmt):
        items = self.op_evaluator.split_operands(operation[5:])
if self.preserve_base:
byte_fmt = FORMAT_PRESERVE_BASE
full_length = 0
lengths = []
length = 0
        prev_base = None
for item in items + ['""']:
c_data = self._parse_string(item)
if c_data is not None:
if length:
lengths.append(byte_fmt[prev_base].format(length))
full_length += length
prev_base = None
length = len(c_data)
if length:
lengths.append(text_fmt.format(length))
full_length += length
length = 0
else:
cur_base = _get_base(item, self.preserve_base)
if cur_base == 'c':
cur_base = 'd'
if prev_base != cur_base and length:
lengths.append(byte_fmt[prev_base].format(length))
full_length += length
length = 0
length += 1
prev_base = cur_base
return full_length, ':'.join(lengths)
def _get_defw_length(self, operation):
if self.preserve_base:
word_fmt = FORMAT_PRESERVE_BASE
else:
word_fmt = FORMAT_NO_BASE
full_length = 0
lengths = []
length = 0
prev_base = None
for item in self.op_evaluator.split_operands(operation[5:]):
cur_base = _get_base(item, self.preserve_base)
if prev_base != cur_base and length:
lengths.append(word_fmt[prev_base].format(length))
full_length += length
length = 0
length += 2
prev_base = cur_base
lengths.append(word_fmt[prev_base].format(length))
full_length += length
return full_length, ':'.join(lengths)
def _get_defs_length(self, operation):
if self.preserve_base:
fmt = FORMAT_PRESERVE_BASE
else:
fmt = FORMAT_NO_BASE
items = self.op_evaluator.split_operands(operation[5:])[:2]
try:
size = self.op_evaluator.eval_int(items[0])
except ValueError:
raise SkoolParsingError("Invalid integer '{}': {}".format(items[0], operation))
size_base = _get_base(items[0], self.preserve_base)
try:
get_int_param(items[0])
size_fmt = fmt[size_base].format(items[0])
|
chop-dbhi/django-dicom-review
|
dicom_review/prioritizers.py
|
Python
|
bsd-2-clause
| 1,748
| 0.014302
|
from django.db.models import Count
from django.conf import settings
from solo.models import SingletonModel
import loader
MAX_REVIEWERS = settings.MAX_REVIEWERS
# Simple algorithm that checks to see the number of years the studies span and
# returns one study per year
def one_per_year(candidate_studies, user, annotation_class = None):
studies = []
years = candidate_studies.dates('study_date', 'year')
for period in years:
        this_year = candidate_studies.annotate(num_reviews=Count("radiologystudyreview"))\
.filter(study_date__year=period.year, num_reviews__lt=MAX_REVIEWERS)\
.exclude(radiologystudyreview__user_id=user.id).order_by("?")[:1]
for study in this_year:
studies.append(study)
return studies
# Whether the list method is the global default or set on the user object explicitly does not matter. The workflow will be the same.
# Check to see if the user object has an associated list object if so use that one
# If not check to see if there is a global list object setup, if so use that one
# Otherwise just pull from the candidate_studies
def lists(candidate_studies, user, annotation_class = None):
from models import Config
study_list = (hasattr(user, 'study_list') and user.study_list) or Config.get_solo().default_study_list
# if no lists are configured, just pass thru
if not study_list:
return candidate_studies
studies = study_list.studies.exclude(radiologystudyreview__user_id = user.id)
return studies
#TODO Cross Validate Algorithm that chooses studies and puts them on other users lists.
registry = loader.Registry(default=one_per_year, default_name = "one per year")
registry.register(lists, name = "lists")
loader.autodiscover()
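# A hedged sketch (function name assumed) of adding a third prioritizer: any
# callable with the same (candidate_studies, user, annotation_class) signature
# can be registered alongside the two above.
#
#   def newest_first(candidate_studies, user, annotation_class=None):
#       return candidate_studies.exclude(
#           radiologystudyreview__user_id=user.id).order_by('-study_date')[:10]
#
#   registry.register(newest_first, name="newest first")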
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/quinoa/package.py
|
Python
|
lgpl-2.1
| 2,175
| 0.00092
|
##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Quinoa(CMakePackage):
"""Quinoa is a set of computational tools that enables research and
numerical analysis in fluid dynamics. At this time it is a test-bed
to experiment with various algorithms using fully asynchronous runtime
systems.
"""
homepage = "http://quinoacomputing.org"
url = "https://github.com/quinoacomputing/quinoa/tarball/quinoa_v0.1"
version('develop', git='https://github.com/quinoacomputing/quinoa', branch='master')
depends_on('hdf5+mpi')
depends_on("charm backend=mpi")
depends_on("trilinos+exodus")
depends_on("boost")
depends_on("hypre~internal-superlu")
depends_on("random123"
|
)
depends_on("netlib-lapack+lapacke")
depends_on("mad-numdiff")
    depends_on("h5part")
depends_on("boostmplcartesianproduct")
depends_on("tut")
depends_on("pugixml")
depends_on("pstreams")
depends_on("pegtl")
root_cmakelists_dir = 'src'
|
ekesken/istatistikciadamlazim
|
allauth/socialaccount/helpers.py
|
Python
|
gpl-3.0
| 7,010
| 0.00271
|
from django.conf import settings
from django.contrib import messages
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import login, logout as auth_logout
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.http import urlencode
from django.template.defaultfilters import slugify
from allauth.utils import get_login_redirect_url, \
generate_unique_username, email_address_exists
from allauth.account.utils import send_email_confirmation, \
perform_login, complete_signup
from allauth.account import app_settings as account_settings
import app_settings
from allauth.socialaccount.models import SocialAccount
def _process_signup(request, data, account):
# If email is specified, check for duplicate and if so, no auto signup.
auto_signup = app_settings.AUTO_SIGNUP
email = data.get('email')
if auto_signup:
# Let's check if auto_signup is really possible...
if email:
if account_settings.UNIQUE_EMAIL:
if email_address_exists(email):
# Oops, another user already has this address. We
# cannot simply connect this social account to the
                    # existing user. Reason is that the email address may
# not be verified, meaning, the user may be a hacker
# that has added your email address to his account in
# the hope that you fall in his trap. We cannot check
# on 'email_address.verified' either, because
# 'email_address' is not guaranteed to be verified.
auto_signup = False
# FIXME: We redirect to signup form -- user will
# see email address conflict only after posting
# whereas we detected it here already.
elif account_settings.EMAIL_REQUIRED:
# Nope, email is required and we don't have it yet...
auto_signup = False
if not auto_signup:
request.session['socialaccount_signup'] = dict(data=data,
account=account)
url = reverse('socialaccount_signup')
next = request.REQUEST.get('next')
if next:
url = url + '?' + urlencode(dict(next=next))
ret = HttpResponseRedirect(url)
else:
        # FIXME: There is some duplication of logic in here
        # (create user, send email, inactive etc..)
username = generate_unique_username \
(data.get('username', email or 'user'))
u = User(username=username,
email=email or '',
last_name = data.get('last_name', '')[0:User._meta.get_field('last_name').max_length],
                 first_name = data.get('first_name', '')[0:User._meta.get_field('first_name').max_length])
u.set_unusable_password()
        u.is_active = not account_settings.EMAIL_VERIFICATION
u.save()
accountbase = SocialAccount()
accountbase.user = u
accountbase.save()
account.base = accountbase
account.sync(data)
send_email_confirmation(u, request=request)
ret = complete_social_signup(request, u, account)
return ret
def _login_social_account(request, account):
user = account.base.user
perform_login(request, user)
if not user.is_active:
ret = render_to_response(
'socialaccount/account_inactive.html',
{},
context_instance=RequestContext(request))
else:
ret = HttpResponseRedirect(get_login_redirect_url(request))
return ret
def render_authentication_error(request, extra_context={}):
return render_to_response(
"socialaccount/authentication_error.html",
extra_context, context_instance=RequestContext(request))
def complete_social_login(request, data, account):
if request.user.is_authenticated():
if account.pk:
# Existing social account, existing user
if account.user != request.user:
# Social account of other user. Simply logging in may
# not be correct in the case that the user was
# attempting to hook up another social account to his
# existing user account. For now, this scenario is not
# supported. Issue is that one cannot simply remove
# the social account from the other user, as that may
# render the account unusable.
pass
ret = _login_social_account(request, account)
else:
# New social account
account.base.user = request.user
account.sync(data)
messages.add_message \
(request, messages.INFO,
_('The social account has been connected to your existing account'))
return HttpResponseRedirect(reverse('socialaccount_connections'))
else:
if account.pk:
# Login existing user
ret = _login_social_account(request, account)
else:
# New social user
ret = _process_signup(request, data, account)
return ret
def _name_from_url(url):
"""
>>> _name_from_url('http://google.com/dir/file.ext')
u'file.ext'
>>> _name_from_url('http://google.com/dir/')
u'dir'
>>> _name_from_url('http://google.com/dir')
u'dir'
>>> _name_from_url('http://google.com/dir/..')
u'dir'
>>> _name_from_url('http://google.com/dir/../')
u'dir'
>>> _name_from_url('http://google.com')
u'google.com'
>>> _name_from_url('http://google.com/dir/subdir/file..ext')
u'file.ext'
"""
from urlparse import urlparse
p = urlparse(url)
for base in (p.path.split('/')[-1],
p.path,
p.netloc):
name = ".".join(filter(lambda s: s,
map(slugify, base.split("."))))
if name:
return name
def _copy_avatar(request, user, account):
import urllib2
from django.core.files.base import ContentFile
from avatar.models import Avatar
url = account.get_avatar_url()
if url:
ava = Avatar(user=user)
ava.primary = Avatar.objects.filter(user=user).count() == 0
try:
content = urllib2.urlopen(url).read()
name = _name_from_url(url)
ava.avatar.save(name, ContentFile(content))
except IOError, e:
            # Let's not make a big deal out of this...
pass
def complete_social_signup(request, user, account):
success_url = get_login_redirect_url(request)
if app_settings.AVATAR_SUPPORT:
_copy_avatar(request, user, account)
return complete_signup(request, user, success_url)
|
xlcteam/py-soccersim
|
soccersim/env.py
|
Python
|
apache-2.0
| 1,133
| 0
|
import pygame
import sys
import os
class Env:
def __init__(self, teamA, teamB, field_size, display, robots=None,
debug=False):
self.teamA = teamA
self.teamB = teamB
self.width = field_size[0]
self.height = field_size[1]
self.display = display
self.ball = None
self.robots = robots
        self.robots_out = {'A': [False, False], 'B': [False, False]}
        self.debug = debug
self.dir = os.path.dirname(os.path.realpath(__file__)) + os.sep
self.field = pygame.image.load(self.dir + 'img/field.png')
self.halftime = 1
self.teamAscore = 0
self.teamBscore = 0
def teamA_add_goal(self):
self.teamAscore += 1
def teamB_add_goal(self):
self.teamBscore += 1
def draw_field(self):
self.display.blit(self.field, [0, 0])
def reset_robots(self):
for robot in self.robots:
robot.stop()
robot.move_to_pos(robot.default_pos)
def set_ball(self, ball):
self.ball = ball
def set_robots(self, robots):
self.robots = robots
|
dsweet04/rekall
|
rekall-gui/manuskript/plugin.py
|
Python
|
gpl-2.0
| 836
| 0
|
import StringIO
class Plugin(object):
ANGULAR_MODULE = None
JS_FILES = []
CSS_FILES = []
@classmethod
def PlugIntoApp(cls, app):
pass
@classmethod
def GenerateHTML(cls, root_url="/"):
out = StringIO.StringIO()
for js_file in cls.JS_FILES:
js_file = js_file.lstrip("/")
            out.write('<script src="%s%s"></script>\n' % (root_url, js_file))
for css_file in cls.CSS_FILES:
css_file = css_file.lstrip("/")
out.write('<link rel="stylesheet" href="%s%s"></link>\n' % (
root_url, css_file))
if cls.ANGULAR_MODULE:
out.write("""
<script>var manuskriptPluginsList = manuskriptPluginsList || [];\n
manuskriptPluginsList.push("%s");</script>\n""" % cls.ANGULAR_MODULE)
return out.getvalue()
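# A hedged example subclass (module name and file paths assumed, not part of
# manuskript) showing how the class attributes above drive GenerateHTML():
#
#   class HelloPlugin(Plugin):
#       ANGULAR_MODULE = "manuskript.hello"
#       JS_FILES = ["static/js/hello.js"]
#       CSS_FILES = ["static/css/hello.css"]
#
#   html = HelloPlugin.GenerateHTML(root_url="/app/")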
|
DigitalSkills-fr/Docs
|
docs/conf.py
|
Python
|
apache-2.0
| 8,474
| 0.006136
|
# -*- coding: utf-8 -*-
#
# Read the Docs Template documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 26 14:19:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from recommonmark.parser import CommonMarkParser
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
## Add parser for Markdown
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Documentation DigitalSkills'
copyright = u'2017, DigitalSkills'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'sphinx_rtd_theme_digitalskills'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes',]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ReadtheDocsTemplate.tex', u'Read the Docs Template Documentation',
u'Read the Docs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'readthedocstemplate', u'Read the Docs Template Documentation',
[u'Read the Docs'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ReadtheDocsTemplate', u'Read the Docs Template Documentation',
u'Read the Docs', 'ReadtheDocsTemplate', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
|
steffenschroeder/tarnow
|
tests/conftest.py
|
Python
|
mit
| 290
| 0
|
import pytest
import os
@pytest.fixture(autouse=True)
def change_tempory_directory(tmpdir):
tmpdir.chdir()
yield
if os.path.exists("tarnow.tmp"):
        os.remove("tarnow.tmp")
@pytest.fixture(autouse=True)
def patch_subprocess(mocker):
mocker.patch("subprocess.call")
|