| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| viiru-/pytrainer | pytrainer/gui/windowmain.py | Python | gpl-2.0 | 103,483 | 0.011026 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) Fiz Vazquez [email protected]
# Modified by dgranda
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import gobject
import sys
import logging
import datetime
import matplotlib
import dateutil.parser
from dateutil.tz import * # for tzutc()
from SimpleGladeApp import *
from popupmenu import PopupMenu
from aboutdialog import About
import pytrainer.record
from pytrainer.lib.date import Date, second2time
from pytrainer.lib.xmlUtils import XMLParser
#from pytrainer.lib.gpx import Gpx
from pytrainer.extensions.googlemaps import Googlemaps
from pytrainer.extensions.osm import Osm
from pytrainer.lib.unitsconversor import *
from pytrainer.recordgraph import RecordGraph
from pytrainer.daygraph import DayGraph
from pytrainer.weekgraph import WeekGraph
from pytrainer.monthgraph import MonthGraph
from pytrainer.yeargraph import YearGraph
from pytrainer.totalgraph import TotalGraph
from pytrainer.heartrategraph import HeartRateGraph
from pytrainer.extensions.mapviewer import MapViewer
from pytrainer.extensions.waypointeditor import WaypointEditor
from pytrainer.core.equipment import EquipmentService
from pytrainer.gui.drawGraph import DrawGraph
from pytrainer.gui.windowcalendar import WindowCalendar
from pytrainer.lib.listview import ListSearch
from pytrainer.lib.uc import UC
class Main(SimpleGladeApp):
def __init__(self, sport_service, data_path = None, parent = None, version = None, gpxDir = None):
self._sport_service = sport_service
def url_hook(dialog, url):
pytrainer.lib.webUtils.open_url_in_browser(url)
# Available in PyGTK 2.6 and above
gtk.about_dialog_set_url_hook(url_hook)
self.version = version
self.parent = parent
self.pytrainer_main = parent
self.data_path = data_path
glade_path="glade/pytrainer.glade"
root = "window1"
domain = None
SimpleGladeApp.__init__(self, self.data_path+glade_path, root, domain)
self.uc = UC()
self.popup = PopupMenu(data_path,self)
self.block = False
self.activeSport = None
self.gpxDir = gpxDir
self.record_list = None
self.laps = None
#Setup graph
self.grapher = DrawGraph(self, self.pytrainer_main)
        self.y1_limits = None
self.y1_color = None
self.y1_linewidth = 1
# setup Search ListView
self.listsearch = ListSearch(sport_service, self, self.pytrainer_main)
self.aboutwindow = None
def new(self):
self.menublocking = 0
self.selected_view="day"
self.window1.set_title ("pytrainer %s" % self.version)
try:
            width, height = self.pytrainer_main.profile.getValue("pytraining","window_size").split(',')
self.window1.resize(int(width), int(height))
except:
pass
self.record_list = []
#create the columns for the listdayrecord
if self.pytrainer_main.profile.prf_us_system:
distance_unit = _("Miles")
else:
distance_unit = _("Km")
columns = [{'name':_("id"), 'visible':False},{'name':_("Start"), }, {'name':_("Sport")},{'name':distance_unit}]
self.create_treeview(self.recordTreeView,columns)
#create the columns for the listarea
        # different encodings exist for the mean sign, see e.g. http://de.wikipedia.org/wiki/%C3%98#Kodierung
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Title")},
{'name':_("Date")},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity': 'distance'},
{'name':_("Sport")},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_(u"\u2300 HR"), 'xalign':1.0},
{'name':_(u"\u2300 Speed"), 'xalign':1.0, 'format_float':'%.1f', 'quantity': 'speed'},
{'name':_("Calories"), 'xalign':1.0}
]
self.create_treeview(self.allRecordTreeView,columns)
self.create_menulist(columns)
#create the columns for the waypoints treeview
columns=[{'name':_("id"), 'visible':False},{'name':_("Waypoint")}]
self.create_treeview(self.waypointTreeView,columns)
#create the columns for the athlete history treeview
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Date")},
{'name':_("Weight"), 'xalign':1.0},
{'name':_("Body Fat %"), 'xalign':1.0},
{'name':_("Resting HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0}
]
self.create_treeview(self.athleteTreeView,columns)
#create the columns for the stats treeview
columns=[ {'name':_("id"), 'visible':False},
{'name':_("Sport")},
{'name':_("Records"), 'xalign':1.0},
{'name':_("Total duration"), 'xalign':1.0, 'format_duration':True},
{'name':_("Total distance"), 'xalign':1.0, 'format_float':'%.1f', 'quantity':'distance'},
{'name':_("Avg speed"), 'format_float':'%.2f', 'quantity':'maxspeed', 'xalign':1.0},
{'name':_("Max speed"), 'format_float':'%.2f', 'quantity':'maxspeed', 'xalign':1.0},
{'name':_("Avg HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_("Max duration"), 'xalign':1.0, 'format_duration':True},
{'name':_("Max distance"), 'xalign':1.0, 'format_float':'%.1f', 'quantity':'distance'},
]
self.create_treeview(self.statsTreeView,columns)
#create the columns for the laps treeview
columns=[
{'name':_("Lap")},
{'name':_("Trigger"), 'xalign':0, 'pixbuf':True},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
{'name':_("Avg speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Max speed"), 'format_float':'%.2f', 'quantity':'speed'},
{'name':_("Avg pace"), 'xalign':1.0, 'quantity':'pace'},
{'name':_("Max pace"), 'xalign':1.0, 'quantity':'pace'},
{'name':_("Avg HR"), 'xalign':1.0},
{'name':_("Max HR"), 'xalign':1.0},
{'name':_("Calories"), 'xalign':1.0},
{'name':_("Intensity"), 'visible':False},
{'name':_("Comments"), 'xalign':0.0},
]
self.create_treeview(self.lapsTreeView,columns)
#create the columns for the projected times treeview
columns=[
{'name':_("id"), 'visible':False},
{'name':_("Race"), 'xalign':1.0},
{'name':_("Distance"), 'xalign':1.0, 'format_float':'%.2f', 'quantity':'distance'},
{'name':_("Time"), 'xalign':1.0, 'format_duration':True},
]
self.create_treeview(self.analyticsTreeView,columns,sortable=False)
#create the columns for the rank treeview
columns=[
{'name':_("id"), 'visible':False},
{'name':_("Rank"), 'visible':True},
{'name':_(
|
| pudo/aleph | aleph/logic/aggregator.py | Python | mit | 378 | 0 |
import logging
from ftmstore import get_dataset
log = logging.getLogger(__name__)
MODEL_ORIGIN = "model"
def get_aggregator_name(collection):
return "collection_%s" % collection.id
def get_aggregator(collection, origin="aleph"):
"""Connect to a followthemoney dataset."""
dataset = get_aggregator_name(collection)
    return get_dataset(dataset, origin=origin)
|
| MarionPiEnsg/RaspiModel | Application/Raspberry_Pi/scripts_python/1-activeRobotHaut.py | Python | gpl-3.0 | 910 | 0.015402 |
import RPi.GPIO as GPIO
import time
import sys
# set the pin that the command cable of the upper (up-down) servo motor is connected to
servo_pin = 12
# read the movement value to send to the servo
duty_cycle = float(sys.argv[1])
GPIO.setmode(GPIO.BOARD)
GPIO.setup(servo_pin, GPIO.OUT)
# create the PWM channel on the servo pin with a 50 Hz frequency
pwm_servo = GPIO.PWM(servo_pin, 50)
pwm_servo.start(duty_cycle)
try:
    while True:
        pwm_servo.ChangeDutyCycle(duty_cycle) # the servo rotates to the value given as input
        time.sleep(0.01) # wait a short moment for the servo to finish its motion
    GPIO.cleanup() # exit GPIO cleanly and leave the function with exit()
    exit()
except KeyboardInterrupt:
    print("CTRL-C: Terminating program.") # if the program is used standalone, this allows an emergency shutdown
|
| DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/special/_precompute/setup.py | Python | mit | 396 | 0 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
def configuration(parent_name='special', top_path=None):
    from numpy.distutils.misc_util import Configuration
config = Configuration('_precompute', parent_name, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration().todict())
|
| tensorflow/tpu | models/experimental/dcgan/mnist_model.py | Python | apache-2.0 | 3,215 | 0.002799 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple generator and discriminator models.
Based on the convolutional and "deconvolutional" models presented in
"Unsupervised Representation Learning with Deep Convolutional Generative
Adversarial Networks" by A. Radford et al.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
def _leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.2)
def _batch_norm(x, is_training, name):
return tf.layers.batch_normalization(
x, momentum=0.9, epsilon=1e-5, training=is_training, name=name)
def _dense(x, channels, name):
return tf.layers.dense(
x, channels,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _conv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def _deconv2d(x, filters, kernel_size, stride, name):
return tf.layers.conv2d_transpose(
x, filters, [kernel_size, kernel_size],
strides=[stride, stride], padding='same',
kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
name=name)
def discriminator(x, is_training=True, scope='Discriminator'):
# conv64-lrelu + conv128-bn-lrelu + fc1024-bn-lrelu + fc1
  with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _conv2d(x, 64, 4, 2, name='d_conv1')
x = _leaky_relu(x)
x = _conv2d(x, 128, 4, 2, name='d_conv2')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn2'))
x = tf.reshape(x, [-1, 7 * 7 * 128])
x = _dense(x, 1024, name='d_fc3')
x = _leaky_relu(_batch_norm(x, is_training, name='d_bn3'))
x = _dense(x, 1, name='d_fc4')
return x
def generator(x, is_training=True, scope='Generator'):
# fc1024-bn-relu + fc6272-bn-relu + deconv64-bn-relu + deconv1-tanh
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
x = _dense(x, 1024, name='g_fc1')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn1'))
x = _dense(x, 7 * 7 * 128, name='g_fc2')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn2'))
x = tf.reshape(x, [-1, 7, 7, 128])
x = _deconv2d(x, 64, 4, 2, name='g_dconv3')
x = tf.nn.relu(_batch_norm(x, is_training, name='g_bn3'))
x = _deconv2d(x, 1, 4, 2, name='g_dconv4')
x = tf.tanh(x)
return x
# TODO(chrisying): objective score (e.g. MNIST score)
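# A shape sanity-check sketch (editor's addition, not part of the original
# file; assumes the TF1-style graph mode used above):
#   z = tf.placeholder(tf.float32, [None, 100])
#   fake = generator(z)           # 7x7 -> 14x14 -> 28x28 via two stride-2 deconvs
#   logits = discriminator(fake)  # fake: [None, 28, 28, 1], logits: [None, 1]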
|
| yinzishao/NewsScrapy | thepaper/thepaper/spiders/donews_spider.py | Python | lgpl-3.0 | 3,445 | 0.02148 |
# -*- coding: utf-8 -*-
__author__ = 'k'
import re
import scrapy
from bs4 import BeautifulSoup
import logging
from thepaper.items import NewsItem
import json
logger = logging.getLogger("NbdSpider")
from thepaper.settings import *
from thepaper.util import judge_news_crawl
import time
class DonewsSpider(scrapy.spiders.Spider):
domain = "http://www.donews.com/net/"
name = "donews"
allowed_domains = ["donews.com",]
flag = {}
start_urls = [
"http://www.donews.com/net/",
"http://www.donews.com/original/",
]
def parse(self,response):
origin_url = response.url
topic_url = origin_url[:-1]
self.flag.setdefault(topic_url,0)
yield scrapy.Request(origin_url,callback=self.parse_topic)
def parse_topic(self,response):
origin_url = response.url
temp = origin_url.rsplit("/",1)
topic_url = temp[0]
if temp[1] == "":
pageindex = 1
else:
pageindex = temp[1].split("_",1)[-1].split(".",1)[0]
soup = BeautifulSoup(response.body,"lxml")
catalogue = soup.find("div",class_ ="arttitle").text.strip()
news_list = soup.find("ul",class_ = "art_list mt11").find_all("li")
for news in news_list:
title_info = news.find("h5",class_= "title")
text_info = news.find("div",class_ = "text")
news_date = text_info.find("span",class_ = "time").text
news_date = "%s-%s-%s %s:00" % (time.strftime("%Y"),int(news_date[0:2]),int(news_date[3:5]),news_date[7:])
author = text_info.find("span",class_ = "place").text.strip()
if author == "":
author = None
abstract = text_info.find("p",class_ = "info").text.strip()
pic = text_info.find("img").get("src") if text_info.find("img") else None
title = title_info.find("a").text.strip()
news_url = title_info.find("a").get("href")
temp = news_url.split("/")
news_no = temp[-2] + "_" + temp[-1].split(".")[0]
item = NewsItem(
news_url =news_url,
news_date = news_date,
title = title,
abstract = abstract,
author = author,
news_no = news_no,
catalogue = catalogue,
pic = pic,
)
item = judge_news_crawl(item)
if item:
yield scrapy.Request(item["news_url"],callback=self.parse_news,meta={'item':item})
else:
self.flag[topic_url] = pageindex
if not self.flag[topic_url]:
next_url = "%s/index_%s.html" % (topic_url,int(pageindex) + 1)
yield scrapy.Request(next_url,callback=self.parse_topic)
def parse_news(self,response):
item = response.meta.get("item",NewsItem())
soup = BeautifulSoup(response.body,"lxml")
referer_web = soup.find("span", id= "source_baidu").text if soup.find("span", id= "source_baidu") else None
temp = soup.find("div",id = "arttext")
if item["pic"] == None:
item["pic"] = temp.find("img").get("src") if temp.find("img") else None
        content = "\n\n".join([ t.text.strip() for t in temp.find_all("p")])
        item['referer_web'] = referer_web
item['content'] = content
item['crawl_date'] = NOW
yield item
|
| pyparallel/numpy | numpy/ma/tests/test_core.py | Python | bsd-3-clause | 161,025 | 0.000689 |
# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
__author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
from functools import reduce
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import TestCase, run_module_suite, assert_raises
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
assert_mask_equal,
)
from numpy.ma.core import (
MAError, MaskError, MaskType, MaskedArray, abs, absolute, add, all,
allclose, allequal, alltrue, angle, anom, arange, arccos, arctan2,
arcsin, arctan, argsort, array, asarray, choose, concatenate,
conjugate, cos, cosh, count, default_fill_value, diag, divide, empty,
empty_like, equal, exp, flatten_mask, filled, fix_invalid,
flatten_structured_array, fromflex, getmask, getmaskarray, greater,
greater_equal, identity, inner, isMaskedArray, less, less_equal, log,
log10, make_mask, make_mask_descr, mask_or, masked, masked_array,
masked_equal, masked_greater, masked_greater_equal, masked_inside,
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
pi = np.pi
class TestMaskedArray(TestCase):
# Base test class for MaskedArrays.
def setUp(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = masked_array(x, mask=m1)
ym = masked_array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = masked_array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
def test_basicattributes(self):
# Tests some basic array attributes.
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
assert_equal(a.ndim, 1)
assert_equal(b.ndim, 1)
assert_equal(a.size, 3)
assert_equal(b.size, 3)
assert_equal(a.shape, (3,))
assert_equal(b.shape, (3,))
def test_basic0d(self):
# Checks masking a scalar
x = masked_array(0)
assert_equal(str(x), '0')
x = masked_array(0, mask=True)
assert_equal(str(x), str(masked_print_option))
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
self.assertTrue(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertTrue((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.dtype, x.dtype)
assert_equal(zm.dtype, z.dtype)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_array_equal(xm, xf)
assert_array_equal(filled(xm, 1.e20), xf)
assert_array_equal(x, xm)
def test_basic2d(self):
# Test of basic array creation and properties in 2 dimensions.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
for s in [(4, 3), (6, 2)]:
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertTrue(not isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
assert_equal(xm, xf)
assert_equal(filled(xm, 1.e20), xf)
assert_equal(x, xm)
def test_concatenate_basic(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# basic concatenation
assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))
assert_equal(np.concatenate((x, y)), concatenate((x, y)))
assert_equal(np.concatenate((x, y)), concatenate((xm, y)))
assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))
def test_concatenate_alongaxis(self):
# Tests concatenations.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
# Concatenation along an axis
s = (3, 4)
x.shape = y.shape = xm.shape = ym.shape = s
assert_equal(xm.mask, np.reshape(m1, s))
assert_equal(ym.mask, np.reshape(m2, s))
xmym = concatenate((xm, ym), 1)
assert_equal(np.concatenate((x, y), 1), xmym)
assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)
x = zeros(2)
y = array(ones(2), mask=[False, True])
z = concatenate((x, y))
assert_array_equal(z, [0, 0, 1, 1])
assert_array_equal(z.mask, [False, False, False, True])
z = concatenate((y, x))
assert_array_equal(z, [1, 1, 0, 0])
assert_array_equal(z.mask, [False, True, False, False])
def test_concatenate_flexible(self):
# Tests the concatenation on flexible arrays.
data = masked_array(list(zip(np.random.rand(10),
np.arange(10))),
dtype=[('a', float), ('b', int)])
test = concatenate([data[:5], data[5:]])
assert_equal_records(test, data)
def test_creation_ndmin(self):
# Check the use of ndmin
x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
assert_equal(x.shape, (1, 3))
assert_equal(x._data, [[1, 2, 3]])
assert_equal(x._mask, [[1, 0, 0]])
def test_creation_ndmin_from_maskedarray(self):
# Make sure we're not losing the original mask w/ ndmin
x = array([1, 2, 3])
x[-1] = masked
xx = array(x, ndmin=2, dtype=float)
assert_equal(x.shape, x._mask.shape)
assert_equal(xx.shape, xx._mask.shape)
def test_creation_maskcreation(self):
# Tests how masks are initialized at the creation of Maskedarrays.
data = arange(24, dtype=float)
data[[3, 6, 15]] = masked
dma_1 = MaskedArray(data)
assert_equal(dma_1.mask, data.mask)
dma_2 = MaskedArray(dma_1)
assert_equal(dma_2.mask, dma_1.mask)
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
def test_creation_with_list_of_maskedarrays(self):
        # Tests creating a masked array from a list of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
self.assertTrue(data.mask is nomask)
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
xm.fill_value = -9999
xm._hardmask = True
|
| schrockn/graphscale | graphscale/grapple/graphql_impl.py | Python | mit | 2,294 | 0.000872 |
from typing import cast, List, TypeVar, Any, Type, Optional
from uuid import UUID
from graphscale import check
from graphscale.pent import (
create_pent,
delete_pent,
update_pent,
Pent,
PentContext,
PentMutationData,
PentMutationPayload,
)
T = TypeVar('T')
def typed_or_none(obj: Any, cls: Type[T]) -> Optional[T]:
return obj if isinstance(obj, cls) else None
async def gen_pent_dynamic(context: PentContext, out_cls_name: str, obj_id: UUID) -> Pent:
out_cls = context.cls_from_name(out_cls_name)
pent = await out_cls.gen(context, obj_id)
return cast(Pent, pent)
async def gen_delete_pent_dynamic(
context: PentContext, pent_cls_name: str, payload_cls_name: str, obj_id: UUID
) -> PentMutationPayload:
pent_cls = context.cls_from_name(pent_cls_name)
payload_cls = context.cls_from_name(payload_cls_name)
deleted_id = await delete_pent(context, pent_cls, obj_id)
return cast(PentMutationPayload, payload_cls(deleted_id))
async def gen_create_pent_dynamic(
context: PentContext,
pent_cls_name: str,
data_cls_name: str,
payload_cls_name: str,
data: PentMutationData
) -> PentMutationPayload:
data_cls = context.cls_from_name(data_cls_name)
check.isinst(data, data_cls)
pent_cls = context.cls_from_name(pent_cls_name)
payload_cls = context.cls_from_name(payload_cls_name)
out_pent = await create_pent(context, pent_cls, data)
return cast(PentMutationPayload, payload_cls(out_pent))
async def gen_update_pent_dynamic(
context: PentContext,
obj_id: UUID,
pent_cls_name: str,
data_cls_name: str,
payload_cls_name: str,
data: PentMutationData
) -> PentMutationPayload:
data_cls = context.cls_from_name(data_cls_name)
check.isinst(data, data_cls)
pent_cls = context.cls_from_name(pent_cls_name)
payload_cls = context.cls_from_name(payload_cls_name)
pent = await update_pent(context, pent_cls, obj_id, data)
return cast(PentMutationPayload, payload_cls(pent))
async def gen_browse_pents_dynamic(
context: PentContext, after: UUID, first: int, out_cls_name: str
) -> List[Pent]:
out_cls = context.cls_from_name(out_cls_name)
pents = await out_cls.gen_browse(context, after, first)
return cast(List[Pent], pents)
|
| tidepool-org/dfaker | dfaker/tools.py | Python | bsd-2-clause | 3,495 | 0.009442 |
import pytz
from datetime import datetime, timedelta
def is_dst(zonename, date):
local_tz = pytz.timezone(zonename)
localized_time = local_tz.localize(date)
return localized_time.dst() != timedelta(0)
def get_offset(zonename, date):
local_tz = pytz.timezone(zonename)
if zonename == 'UTC':
return 0
elif is_dst(zonename, date):
return local_tz.utcoffset(date, is_dst=True).total_seconds() / 60
else:
return local_tz.utcoffset(date, is_dst=False).total_seconds() / 60
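# Example (editor's sketch, not in the original file): during northern-summer
# DST, America/New_York sits at UTC-4, and the offset is returned in minutes.
#   >>> get_offset('America/New_York', datetime(2015, 7, 1))
#   -240.0
#   >>> get_offset('UTC', datetime(2015, 7, 1))
#   0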
def convert_to_mmol(iterable):
conversion_factor = 18.01559
if isinstance(iterable, float) or isinstance(iterable, int):
return iterable / conversion_factor
return [reading / conversion_factor for reading in iterable]
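# Example (editor's sketch): 18.01559 mg/dL per mmol/L, so a 99 mg/dL glucose
# reading comes out near 5.495 mmol/L; lists are converted element-wise.
#   >>> round(convert_to_mmol(99), 3)
#   5.495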
def round_to(n, precision=0.005):
""" The round function can take positive or negative values
and round them to a certain precision.
In the fake data generator, only positive values are being passed into it
"""
if n >= 0:
correction = 0.5
else:
correction = -0.5
    result = int(n / precision + correction) * precision
return round(result, 3)
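# Example (editor's sketch): with the default 0.005 precision, positive values
# snap to the nearest multiple of 0.005.
#   >>> round_to(0.8437)
#   0.845
#   >>> round_to(5.123, precision=0.05)
#   5.1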
def make_timesteps(start_time, offset, timelist):
""" Convert list of floats representing time into epoch time
    start_time -- a timezone naive datetime object
offset -- offset in minutes
timelist -- a list of incrementing floats representing time increments
"""
timesteps = []
epoch_ts = convert_ISO_to_epoch(str(start_time), '%Y-%m-%d %H:%M:%S')
local_timestamp = epoch_ts - offset*60
for time_item in timelist:
new_time = int(local_timestamp) + int(time_item * 60)
timesteps.append(new_time)
return timesteps
def convert_ISO_to_epoch(datetime_string, date_format):
""" Takes a datetime string and returns an epoch time in seconds
Only works when datetime_string is in UTC
"""
datetime_object = datetime.strptime(datetime_string, date_format)
epoch = datetime.utcfromtimestamp(0)
delta = datetime_object - epoch
return int(delta.total_seconds())
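# Example (editor's sketch): one day past the UTC epoch is 86400 seconds.
#   >>> convert_ISO_to_epoch('1970-01-02 00:00:00', '%Y-%m-%d %H:%M:%S')
#   86400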
def get_rate_from_settings(schedule, time, name):
"""Obtains a rate or amount from settings based on time of day
If name is basalSchedules, returns rate as well as start and stop times
Otherwise, if name is carbRatio or insulinSensitivity, returns just amount
Returned results are in mmol/L.
"""
t = datetime.strptime(time, '%Y-%m-%dT%H:%M:%S')
if name == "basalSchedules": #account for variation in naming
        value_name = "rate" #basal schedules store their value under "rate"
else:
value_name = "amount"
ms_since_midnight = t.hour*60*60*1000 + t.minute*60*1000 + t.second*1000
last_segment = schedule[len(schedule)-1]
full_day = 86400000 #24 hours in ms
rate = schedule[0][value_name] #set initial rate
initial_start = ms_since_midnight #set initial start time
for segment in schedule:
end = segment["start"]
if ms_since_midnight < segment["start"]:
break
elif ms_since_midnight >= last_segment["start"]:
start = last_segment["start"]
end = full_day
rate = last_segment[value_name]
break
start = segment["start"]
rate = segment[value_name] #update rate to next segment rate
if name == "basalSchedules":
return rate, start, initial_start, end
return rate #only rate needed for insulin sensitivity/carb ratio events
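# Example (editor's sketch with a hypothetical schedule): segment starts are in
# ms since midnight, so at 01:00 the midnight segment's amount still applies.
#   >>> schedule = [{"start": 0, "amount": 0.5}, {"start": 21600000, "amount": 0.9}]
#   >>> get_rate_from_settings(schedule, '2015-01-01T01:00:00', 'carbRatio')
#   0.5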
|
| serathius/elasticsearch-raven | tests/test_utils.py | Python | mit | 838 | 0.001193 |
import time
from unittest import TestCase
from unittest import mock
from elasticsearch_raven import utils
class RetryLoopTest(TestCase):
@mock.patch('time.sleep')
def test_delay(self, sleep):
retry_generator = utils.retry_loop(1)
for i in range(4):
retry = next(retry_generator)
retry(Exception('test'))
self.assertEqual([mock.call(1), mock.call(1), mock.call(1)],
                         sleep.mock_calls)
@mock.patch('time.sleep')
def test_back_off(self, sleep):
retry_generator = utils.retry_loop(1, max_delay=4, back_off=2)
for i in range(5):
retry = next(retry_generator)
retry(Exception('test'))
self.assertEqual([mock.call(1), mock.call(2), mock.call(4), mock.call(4)],
sleep.mock_calls)
|
| sjl767/woo | gui/qt4/ExceptionDialog.py | Python | gpl-2.0 | 1,473 | 0.017651 |
# encoding: utf-8
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class ExceptionDialog(QMessageBox):
def __init__(self,parent,exc,t1=None,t2=None):
QMessageBox.__init__(self,parent)
if t1==None:
t1=(exc.args[0] if len(exc.args)>0 else None)
self.setText(u'<b>'+exc.__class__.__name__+':</b><br>\n'+unicode(t1))
#QMessageBox.setTitle(self,xc.__class__.__name__)
import traceback
tbRaw=traceback.format_exc()
# newlines are already <br> after Qt.convertFromPlainText, discard to avoid empty lines
tb='<small><pre>'+Qt.convertFromPlainText(tbRaw).replace('\n','')+'</pre></small>'
self.setInformativeText(t2 if t2 else tb)
self.setDetailedText(tbRaw)
self.setIcon(QMessageBox.Critical)
self.setStandardButtons(QMessageBox.Ok)
self.setDefaultButton(QMessageBox.Ok)
self.setEscapeButton(QMessageBox.Ok)
def showExceptionDialog(parent,exc,t1=None,t2=None):
    # event loop broken, modal dialogs won't work
# just show and don't care anymore
ExceptionDialog(parent,exc).show()
# import traceback
# QMessageBox.critical(parent,exc.__class__.__name__,'<b>'+exc.__class__.__name__+':</b><br>'+exc.args[0]+'+<br><small><pre>'+Qt.convertFromPlainText((traceback.format_exc()))+'</pre></small>')
if __name__=='__main__':
import sys
qapp=QApplication(sys.argv)
e=ValueError('123, 234, 345','asdsd')
showExceptionDialog(None,e)
|
| mluo613/osf.io | admin/common_auth/admin.py | Python | apache-2.0 | 2,230 | 0.000897 |
from __future__ import absolute_import
from django.contrib import admin
from django.contrib.admin.models import DELETION
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.utils.html import escape
from admin.common_auth.logs import OSFLogEntry
from admin.common_auth.forms import UserRegistrationForm
from osf.models.user import OSFUser
class PermissionAdmin(admin.ModelAdmin):
search_fields = ['name', 'codename']
class CustomUserAdmin(UserAdmin):
add_form = UserRegistrationForm
list_display = ['username', 'given_name', 'is_active']
admin.site.register(OSFUser, CustomUserAdmin)
admin.site.register(Permission, PermissionAdmin)
class LogEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'action_time'
readonly_fields = [f.name for f in OSFLogEntry._meta.get_fields()]
list_filter = [
'user',
'action_flag'
]
search_fields = [
'object_repr',
'change_message'
]
list_display = [
'action_time',
'user',
'object_link',
'object_id',
'message',
]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
return request.user.is_superuser and request.method != 'POST'
def has_delete_permission(self, request, obj=None):
return False
def object_link(self, obj):
if obj.action_flag == DELETION:
            link = escape(obj.object_repr)
elif obj.content_type is None:
link = escape(obj.object_repr)
else:
ct = obj.content_type
link = u'<a href="%s">%s</a>' % (
reverse('admin:%s_%s_change' % (ct.app_label, ct.model), args=[obj.object_id]),
escape(obj.object_repr),
)
return link
object_link.allow_tags = True
object_link.admin_order_field = 'object_repr'
object_link.short_description = u'object'
def queryset(self, request):
return super(LogEntryAdmin, self).queryset(request) \
.prefetch_related('content_type')
admin.site.register(OSFLogEntry, LogEntryAdmin)
|
| karacos/karacos-wsgi | lib/wsgioauth/request.py | Python | lgpl-3.0 | 3,175 | 0.004409 |
# -*- coding: utf-8 -*-
import oauth2 # XXX pumazi: factor this out
from webob.multidict import MultiDict, NestedMultiDict
from webob.request import Request as WebObRequest
__all__ = ['Request']
class Request(WebObRequest):
"""The OAuth version of the WebOb Request.
Provides an easier way to obtain OAuth request parameters
(e.g. oauth_token) from the WSGI environment."""
def _checks_positive_for_oauth(self, params_var):
"""Simple check for the presence of OAuth parameters."""
        checks = [ p.find('oauth_') >= 0 for p in params_var ]
return True in checks
@property
def str_oauth_header(self):
extracted = {}
# Check for OAuth in the Header
if 'authorization' in self.headers:
auth_header = self.headers['authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header.lstrip('OAuth ')
try:
# Extract the parameters from the header.
extracted = oauth2.Request._split_header(auth_header)
except:
raise Error('Unable to parse OAuth parameters from '
'the Authorization header.')
return extracted
@property
def str_oauth_POST(self):
extracted = {}
if self._checks_positive_for_oauth(self.str_POST):
extracted = dict([ (k, v,) for k, v in self.str_POST.iteritems()
if (k.find('oauth_') >= 0) ])
return extracted
@property
def str_oauth_GET(self):
extracted = {}
if self._checks_positive_for_oauth(self.str_GET):
extracted = dict([ (k, v,) for k, v in self.str_GET.iteritems()
if (k.find('oauth_') >= 0) ])
return extracted
def params(self):
params = WebObRequest.params.fget(self)
return NestedMultiDict(params, self.str_oauth_header)
params = property(params, doc=WebObRequest.params.__doc__)
@property
def oauth_params(self):
"""Simple way to get the OAuth parameters without sifting through
the entire stack of parameters.
We check the header first, because it is low hanging fruit.
However, it would be more efficient to check for the POSTed
parameters, because the specification defines the POST method as the
recommended request type before using GET or the Authorization
header."""
extracted = {}
# OAuth in the Header
extracted.update(self.str_oauth_header)
# OAuth in a GET or POST method
extracted.update(self.str_oauth_GET)
extracted.update(self.str_oauth_POST)
# Return the extracted oauth variables
return MultiDict(extracted)
@property
def nonoauth_params(self):
"""Simple way to get the non-OAuth parameters from the request."""
oauth_param_keys = self.oauth_params.keys()
return dict([(k, v) for k, v in self.params.iteritems() if k not in oauth_param_keys])
|
| nnadeau/pybotics | pybotics/geometry.py | Python | mit | 4,716 | 0.000848 |
"""Geometry functions and utilities."""
from enum import Enum
from typing import Sequence, Union
import numpy as np # type: ignore
from pybotics.errors import PyboticsError
class OrientationConvention(Enum):
"""Orientation of a body with respect to a fixed coordinate system."""
EULER_XYX = "xyx"
EULER_XYZ = "xyz"
EULER_XZX = "xzx"
EULER_XZY = "xzy"
EULER_YXY = "yxy"
EULER_YXZ = "yxz"
EULER_YZX = "yzx"
EULER_YZY = "yzy"
EULER_ZXY = "zxy"
EULER_ZXZ = "zxz"
EULER_ZYX = "zyx"
EULER_ZYZ = "zyz"
FIXED_XYX = "xyx"
FIXED_XYZ = "zyx"
FIXED_XZX = "xzx"
FIXED_XZY = "yzx"
FIXED_YXY = "yxy"
FIXED_YXZ = "zxy"
FIXED_YZX = "xzy"
FIXED_YZY = "yzy"
FIXED_ZXY = "yxz"
FIXED_ZXZ = "zxz"
FIXED_ZYX = "xyz"
FIXED_ZYZ = "zyz"
def vector_2_matrix(
vector: Sequence[float],
convention: Union[OrientationConvention, str] = OrientationConvention.EULER_ZYX,
) -> np.ndarray:
"""
Calculate the pose from the position and euler angles.
:param convention:
:param vector: transform vector
:return: 4x4 transform matrix
"""
# get individual variables
translation_component = vector[:3]
rotation_component = vector[-3:]
# validate and extract orientation info
if isinstance(convention, OrientationConvention):
convention = convention.value
try:
OrientationConvention(convention)
except ValueError as e:
raise PyboticsError(str(e))
# iterate through rotation order
# build rotation matrix
transform_matrix = np.eye(4)
for axis, value in zip(convention, rotation_component): # type: ignore
current_rotation = globals()[f"rotation_matrix_{axis}"](value)
transform_matrix = np.dot(transform_matrix, current_rotation)
# add translation component
transform_matrix[:-1, -1] = translation_component
return transform_matrix
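# Example (editor's sketch): with zero rotation angles only the translation
# survives, whichever convention is selected.
#   >>> vector_2_matrix([1.0, 2.0, 3.0, 0.0, 0.0, 0.0])
#   array([[1., 0., 0., 1.],
#          [0., 1., 0., 2.],
#          [0., 0., 1., 3.],
#          [0., 0., 0., 1.]])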
def position_from_matrix(matrix: np.ndarray) -> np.ndarray:
"""Get the position values from a 4x4 transform matrix."""
return matrix[:-1, -1]
def matrix_2_vector(
matrix: np.ndarray,
convention: OrientationConvention = OrientationConvention.EULER_ZYX,
) -> np.ndarray:
"""Convert 4x4 matrix to a vector."""
# call function
try:
return globals()[f"_matrix_2_{convention.name.lower()}"](matrix)
except KeyError: # pragma: no cover
raise NotImplementedError
def _matrix_2_euler_zyx(matrix: np.ndarray) -> np.ndarray:
"""
Calculate the equivalent position and euler angles of the given pose.
From: Craig, John J. Introduction to robotics: mechanics and control, 2005
:param matrix: 4x4 transform matrix
:return: transform vector
"""
# solution degenerates near ry = +/- 90deg
sb = -matrix[2, 0]
cb = np.sqrt(matrix[0, 0] ** 2 + matrix[1, 0] ** 2)
if np.isclose(cb, 0):
a = 0.0
b = np.sign(sb) * np.pi / 2
sc = matrix[0, 1]
cc = matrix[1, 1]
c = np.sign(sb) * np.arctan2(sc, cc)
else:
b = np.arctan2(sb, cb)
sa = matrix[1, 0] / cb
ca = matrix[0, 0] / cb
a = np.arctan2(sa, ca)
sc = matrix[2, 1] / cb
cc = matrix[2, 2] / cb
c = np.arctan2(sc, cc)
vector = np.hstack((matrix[:-1, -1], [a, b, c]))
return vector
def wrap_2_pi(angle: float) -> float:
"""
Wrap given angle to +/- PI.
:param angle: angle to wrap
:return: wrapped angle
"""
# FIXME: remove float() cast when numpy is supported in mypy
result = float((angle + np.pi) % (2 * np.pi) - np.pi)
return result
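# Example (editor's sketch): 3*pi/2 lies outside (-pi, pi] and wraps to -pi/2.
#   >>> bool(np.isclose(wrap_2_pi(3 * np.pi / 2), -np.pi / 2))
#   True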
def rotation_matrix_x(angle: float) -> np.ndarray:
"""Generate a basic 4x4 rotation matrix about the X axis."""
s = np.sin(angle)
c = np.cos(angle)
matrix = np.array([1, 0, 0, 0, 0, c, -s, 0, 0, s, c, 0, 0, 0, 0, 1]).reshape((4, 4))
return matrix
def rotation_matrix_y(angle: float) -> np.ndarray:
"""Generate a basic 4x4 rotation matrix about the Y axis."""
s = np.sin(angle)
c = np.cos(angle)
matrix = np.array([c, 0, s, 0, 0, 1, 0, 0, -s, 0, c, 0, 0, 0, 0, 1]).reshape((4, 4))
return matrix
def rotation_matrix_z(angle: float) -> np.ndarray:
"""Generate a basic 4x4 rotation matrix about the Z axis."""
s = np.sin(angle)
c = np.cos(angle)
matrix = np.array([c, -s, 0, 0, s, c, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape((4, 4))
return matrix
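# Example (editor's sketch): a 90-degree rotation about Z maps the X axis onto
# the Y axis (homogeneous coordinates, hence the trailing 0 and 1).
#   >>> np.allclose(rotation_matrix_z(np.pi / 2) @ np.array([1, 0, 0, 0]), [0, 1, 0, 0])
#   True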
def translation_matrix(xyz: Sequence[float]) -> np.ndarray:
"""Generate a basic 4x4 translation matrix."""
# validate
if len(xyz) != 3:
        raise PyboticsError("len(xyz) must be 3")
matrix = np.eye(4)
matrix[:-1, -1] = xyz
return matrix
|
| jdfekete/progressivis | progressivis/core/changemanager_literal.py | Python | bsd-2-clause | 2,053 | 0.000487 |
"Change Manager for literal values (supporting ==)"
from __future__ import annotations
from .bitmap import bitmap
from .index_update import IndexUpdate
from .changemanager_base import BaseChangeManager
from typing import (
Any,
TYPE_CHECKING,
)
if TYPE_CHECKING:
from .slot import Slot
class LiteralChangeManager(BaseChangeManager):
"""
    Manage changes that occurred in a literal value between runs.
"""
VALUE = bitmap([0])
def __init__(
self,
slot: Slot,
buffer_created: bool = True,
buffer_updated: bool = False,
buffer_deleted: bool = True,
buffer_exposed: bool = False,
buffer_masked: bool = False,
) -> None:
super(LiteralChangeManager, self).__init__(
slot,
buffer_created,
buffer_updated,
buffer_deleted,
buffer_exposed,
buffer_masked,
)
self._last_value: Any = None
def reset(self, mid: str) -> None:
super(LiteralChangeManager, self).reset(mid)
self._last_value = None
def compute_updates(self, data: Any) -> IndexUpdate:
last_value = self._last_value
changes = IndexUpdate()
if last_value == data:
return changes
if last_value is None:
if self.created.buffer:
changes.created.update(self.VALUE)
elif data is None:
if self.deleted.buffer:
changes.deleted.update(self.VALUE)
elif self.updated.buffer:
            changes.updated.update(self.VALUE)
self._last_value = data
return changes
def update(self, run_number: int, data: Any, mid: str) -> None:
# pylint: disable=unused-argument
if run_number != 0 and run_number <= self._last_update:
return
changes = self.compute_updates(data)
self._last_update = run_number
self._row_changes.combine(
            changes, self.created.buffer, self.updated.buffer, self.deleted.buffer
)
|
| lawsie/guizero | examples/alert.py | Python | bsd-3-clause | 205 | 0.004878 |
from guizero import App
app = App()
app.info("Info", "This is a guizero app")
app.error("Error", "Try and keep these out your code...")
app.warn("Warning", "These are helpful to alert users")
app.display()
|
| aldebaran/strong_typing | strong_typing/__init__.py | Python | bsd-3-clause | 4,043 | 0.004116 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Softbank Robotics Europe
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
**strong_typing** is a Python package containing some classes to create strongly
typed structures in Python
``strong_typing`` in a few words
--------------------------------
In Python, all variables are weakly typed, which means that a variable can take
all values of any type. The Python interpreter will then infer at runtime which
operations this variable can undergo depending on what it contains. This is
called "type inference".
This can be a problem in different situations:
- A function that does not receive the expected type as input
- A variable or a class attribute whose type is changed through assignment
To avoid functions being called with bad arguments, you can use Python's
`typing module <https://docs.python.org/3/library/typing.html>`_ (however only
with Python 3). To check that a variable is not incorrectly used, you can
install and run the `mypy module <http://mypy.readthedocs.io/en/latest/>`_.
But while the latter is great for static checking (without running the code),
it does not work on code you don't own.
If, for instance, you design a class expecting attributes of a certain type,
``mypy`` can very easily detect when you mistakenly override these
attributes with wrongly typed data.
But if you put this class in a Python package and that someone else uses it,
there is no way to be sure they will respect your attribute's type.
To make sure they do, you would need to define a descriptor's class for each
attribute and define a setter function protecting your value against abusive
set. That's what we did :)
In the end, your class could look like this:
::
class MyTypedStruct(Struct):
__ATTRIBUTES__ = [IntegerParameter(name="my_int"),
FloatParameter(name="my_float")]
__DESCRIPTION__ = "A sample of class with typed attributes"
"""
def load_version():
import os
CONTAINING_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
return open(os.path.join(CONTAINING_DIRECTORY,"VERSION")).read().split()[0]
__VERSION__ = load_version()
from . import typed_parameters
from . import typed_containers
from ._struct import *
from ._versioned_struct import *
from ._display_widget import *
# Remove symbols that must not be exported
del load_version
#––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––#
|
| johren/RackHD | test/tests/rackhd20/test_rackhd20_api_config.py | Python | apache-2.0 | 3,739 | 0.006419 |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class rackhd20_api_config(fit_common.unittest.TestCase):
def test_api_20_config(self):
api_data = fit_common.rackhdapi('/api/2.0/config')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
# check required fields
self.assertIn('PATH', api_data['json'], 'PATH field error')
self.assertIn('amqp', api_data['json'], 'amqp field error')
self.assertIn('apiServerAddress', api_data['json'], 'apiServerAddress field error')
self.assertIn('apiServerPort', api_data['json'], 'apiServerPort field error')
self.assertIn('broadcastaddr', api_data['json'], 'broadcastaddr field error')
self.assertIn('subnetmask', api_data['json'], 'subnetmask field error')
self.assertIn('mongo', api_data['json'], 'mongo field error')
def test_api_20_config_httpendpoints(self):
api_data = fit_common.rackhdapi('/api/2.0/config')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
self.assertIn('httpEndpoints', api_data['json'], 'httpEndpoints field list error')
# verify both northbound and southbound endpoints are configured (as a minimum)
for endpoint in api_data['json']['httpEndpoints']:
            self.assertIn('address', endpoint, 'missing httpEndpoints address field')
            self.assertIn('authEnabled', endpoint, 'missing httpEndpoints authEnabled field')
self.assertIn('httpsEnabled', endpoint, 'missing httpEndpoints httpsEnabled field')
self.assertIn('proxiesEnabled', endpoint, 'missing httpEndpoints proxiesEnabled field')
self.assertIn('routers', endpoint, 'missing httpEndpoints routers field')
self.assertIn(endpoint['routers'], ['northbound-api-router', 'southbound-api-router'], 'unexpected httpEndpoints routers field')
def test_api_20_config_patch(self):
api_data_save = fit_common.rackhdapi('/api/2.0/config')['json']
if ("logColorEnable" not in api_data_save):
api_data_save['logColorEnable'] = False
if (api_data_save['logColorEnable'] is True):
data_payload = {"logColorEnable": False}
else:
data_payload = {"logColorEnable": True}
api_data = fit_common.rackhdapi("/api/2.0/config", action="patch", payload=data_payload)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']:
self.assertNotEqual(item, '', 'Empty JSON Field:' + item)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
if ("logColorEnable" in api_data_save and api_data_save['logColorEnable'] is True):
self.assertEqual(api_data['json']['logColorEnable'], False, "Incorrect value for 'logColorEnable', should be False")
else:
self.assertEqual(api_data['json']['logColorEnable'], True, "Incorrect value 'logColorEnable', should be True")
api_data = fit_common.rackhdapi("/api/2.0/config", action="patch", payload=api_data_save)
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
api_data = fit_common.rackhdapi('/api/2.0/config')
self.assertEqual(api_data['json'], api_data_save, "Patch failure, config not returned to default.")
if __name__ == '__main__':
fit_common.unittest.main()
|
| moonbury/notebooks | github/Numpy/Chapter6/lognormaldist.py | Python | gpl-3.0 | 563 | 0.008881 |
import numpy as np
import matplotlib.pyplot as plt
N=10000
np.random.seed(34)
lognormal_values = np.random.lognormal(size=N)
_, bins, _ = plt.hist(lognormal_values, np.sqrt(N), normed=True, lw=1, label="Histogram")
sigma = 1
mu = 0
x = np.linspace(min(bins), max(bins), len(bins))
pdf = np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))/ (x * sigma * np.sqrt(2 * np.pi))
plt.xlim([0, 15])
plt.plot(x, pdf,'--', lw=3, label="PDF")
plt.title('Lognormal distribution')
plt.xlabel('Value')
plt.ylabel('Normalized frequency')
plt.grid()
plt.legend(loc='best')
plt.show()
|
| lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/test_common.py | Python | mit | 4,870 | 0.000205 |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas import Series, Timestamp
from pandas.compat import range, lmap
import pandas.core.common as com
import pandas.util.testing as tm
def test_mut_exclusive():
msg = "mutually exclusive arguments: '[ab]' and '[ab]'"
with tm.assert_raises_regex(TypeError, msg):
com._mut_exclusive(a=1, b=2)
assert com._mut_exclusive(a=1, b=None) == 1
assert com._mut_exclusive(major=None, major_axis=None) is None
def test_get_callable_name():
from functools import partial
getname = com._get_callable_name
def fn(x):
return x
lambda_ = lambda x: x
part1 = partial(fn)
part2 = partial(part1)
class somecall(object):
def __call__(self):
return x # noqa
assert getname(fn) == 'fn'
assert getname(lambda_)
assert getname(part1) == 'fn'
assert getname(part2) == 'fn'
assert getname(somecall()) == 'somecall'
assert getname(1) is None
def test_any_none():
assert (com._any_none(1, 2, 3, None))
assert (not com._any_none(1, 2, 3, 4))
def test_all_not_none():
assert (com._all_not_none(1, 2, 3, 4))
assert (not com._all_not_none(1, 2, 3, None))
assert (not com._all_not_none(None, None, None, None))
def test_iterpairs():
data = [1, 2, 3, 4]
expected = [(1, 2), (2, 3), (3, 4)]
result = list(com.iterpairs(data))
assert (result == expected)
def test_split_ranges():
def _bin(x, width):
"return int(x) as a base2 string of given width"
return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1))
def test_locs(mask):
nfalse = sum(np.array(mask) == 0)
remaining = 0
for s, e in com.split_ranges(mask):
remaining += e - s
assert 0 not in mask[s:e]
# make sure the total items covered by the ranges are a complete cover
assert remaining + nfalse == len(mask)
# exhaustively test all possible mask sequences of length 8
ncols = 8
for i in range(2 ** ncols):
cols = lmap(int, list(_bin(i, ncols))) # count up in base2
mask = [cols[i] == 1 for i in range(len(cols))]
test_locs(mask)
# base cases
test_locs([])
test_locs([0])
test_locs([1])
def test_map_indices_py():
data = [4, 3, 2, 1]
expected = {4: 0, 3: 1, 2: 2, 1: 3}
result = com.map_indices_py(data)
assert (result == expected)
def test_union():
a = [1, 2, 3]
b = [4, 5, 6]
union = sorted(com.union(a, b))
assert ((a + b) == union)
def test_difference():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.difference(b, a))
assert ([4, 5, 6] == inter)
def test_intersection():
a = [1, 2, 3]
b = [1, 2, 3, 4, 5, 6]
inter = sorted(com.intersection(a, b))
assert (a == inter)
def test_groupby():
values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3']
expected = {'f': ['foo', 'foo3'],
'b': ['bar', 'baz', 'baz2'],
'q': ['qux']}
grouped = com.groupby(values, lambda x: x[0])
for k, v in grouped:
assert v == expected[k]
def test_random_state():
import numpy.random as npr
# Check with seed
state = com._random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert (com._random_state(state2).uniform() ==
npr.RandomState(10).uniform())
# check with no arg random state
assert com._random_state() is np.random
# Error for floats or strings
with pytest.raises(ValueError):
com._random_state('test')
with pytest.raises(ValueError):
com._random_state(5.5)
def test_maybe_match_name():
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='x'))
assert (matched == 'x')
matched = com._maybe_match_name(
Series([1], name='x'), Series(
[2], name='y'))
assert (matched is None)
matched = com._maybe_match_name(Series([1]), Series([2], name='x'))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), Series([2]))
assert (matched is None)
matched = com._maybe_match_name(Series([1], name='x'), [2])
assert (matched == 'x')
matched = com._maybe_match_name([1], Series([2], name='y'))
assert (matched == 'y')
def test_dict_compat():
data_datetime64 = {np.datetime64('1990-03-15'): 1,
np.datetime64('2015-03-15'): 2}
data_unchanged = {1: 2, 3: 4, 5: 6}
expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2}
assert (com._dict_compat(data_datetime64) == expected)
assert (com._dict_compat(expected) == expected)
assert (com._dict_compat(data_unchanged) == data_unchanged)
|
| onedox/selenium | py/test/selenium/webdriver/firefox/ff_profile_tests.py | Python | apache-2.0 | 8,146 | 0.002701 |
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import unittest
import zipfile
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
unicode
except NameError:
unicode = str
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
class TestFirefoxProfile:
def setup_method(self, method):
self.driver = webdriver.Firefox()
self.webserver = SimpleWebServer()
self.webserver.start()
def test_that_we_can_accept_a_profile(self):
profile1 = webdriver.FirefoxProfile()
profile1.set_preference("startup.homepage_welcome_url",
self.webserver.where_is('simpleTest.html'))
profile1.update_preferences()
profile2 = webdriver.FirefoxProfile(profile1.path)
driver = webdriver.Firefox(firefox_profile=profile2)
title = driver.title
driver.quit()
assert "Hello WebDriver" == title
def test_that_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith("user.js"):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference",'):
assert True == line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_unicode_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference('sample.preference.2', unicode('hi there'))
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference.2"]
encoded = profile.encoded
decoded = base64.decodestring(encoded)
fp = BytesIO(decoded)
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith('user.js'):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference.2",'):
assert True == line.endswith(b'hi there");')
# there should be only one user.js
break
fp.close()
def test_that_integer_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.int.preference", 12345)
profile.update_preferences()
assert 12345 == profile.default_preferences["sample.int.preference"]
def test_that_boolean_prefs_are_written_in_the_correct_format(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.bool.preference", True)
profile.update_preferences()
assert True == profile.default_preferences["sample.bool.preference"]
def test_that_we_delete_the_profile(self):
path = self.driver.firefox_profile.path
self.driver.quit()
assert not os.path.exists(path)
def test_profiles_do_not_share_preferences(self):
self.profile1 = webdriver.FirefoxProfile()
self.profile1.accept_untrusted_certs = False
self.profile2 = webdriver.FirefoxProfile()
# Default is true. Should remain so.
assert self.profile2.default_preferences["webdriver_accept_untrusted_certs"] == True
def test_none_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = None
try:
self.profile.set_proxy(proxy)
assert False, "exception after passing empty proxy is expected"
except ValueError as e:
pass
assert "network.proxy.type" not in self.profile.default_preferences
def test_unspecified_proxy_is_set(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
self.profile.set_proxy(proxy)
assert "network.proxy.type" not in self.profile.default_preferences
def test_manual_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.no_proxy = 'localhost, foo.localhost'
proxy.http_proxy = 'some.url:1234'
proxy.ftp_proxy = None
proxy.sslProxy = 'some2.url'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.MANUAL['ff_value']
assert self.profile.default_preferences["network.proxy.no_proxies_on"] == 'localhost, foo.localhost'
assert self.profile.default_preferences["network.proxy.http"] == 'some.url'
assert self.profile.default_preferences["network.proxy.http_port"] == 1234
assert self.profile.default_preferences["network.proxy.ssl"] == 'some2.url'
assert "network.proxy.ssl_port" not in self.profile.default_preferences
assert "network.proxy.ftp" not in self.profile.default_preferences
def test_pac_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.proxy_autoconfig_url = 'http://some.url:12345/path'
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.PAC['ff_value']
assert self.profile.default_preferences["network.proxy.autoconfig_url"] == 'http://some.url:12345/path'
def test_autodetect_proxy_is_set_in_profile(self):
        # The setup gave us a browser but we don't need it
self.driver.quit()
self.profile = webdriver.FirefoxProfile()
proxy = Proxy()
proxy.auto_detect = True
self.profile.set_proxy(proxy)
assert self.profile.default_preferences["network.proxy.type"] == ProxyType.AUTODETECT['ff_value']
def teardown_method(self, method):
try:
self.driver.quit()
except:
pass #don't care since we may have killed the browser above
self.webserver.stop()
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
def teardown_module(module):
try:
TestFirefoxProfile.driver.quit()
except:
        pass  # don't care since we may have killed the browser above
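# Illustrative sketch (editorial, not part of the original test file; assumes
# selenium with the legacy FirefoxProfile API): the same preference round-trip
# the tests above exercise, without the web-server fixture.
import base64
import zipfile
from io import BytesIO
from selenium import webdriver
profile = webdriver.FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
archive = zipfile.ZipFile(BytesIO(base64.b64decode(profile.encoded)), "r")
for entry in archive.namelist():
    if entry.endswith("user.js"):
        # preferences are serialized as user_pref("name", value); lines
        print(archive.read(entry))
        break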
|
jimberlage/servo
|
tests/wpt/web-platform-tests/tools/ci/taskcluster-run.py
|
Python
|
mpl-2.0
| 3,009
| 0.000665
|
#!/usr/bin/env python
import argparse
import gzip
import logging
import os
import shutil
import subprocess
browser_specific_args = {
"firefox": ["--install-browser"]
}
def tests_affected(commit_range):
output = subprocess.check_output([
"python", "./wpt", "tests-affected", "--null", commit_range
], stderr=open(os.devnull, "w"))
tests = output.split("\0")
# Account for trailing null byte
if tests and not tests[-1]:
tests.pop()
return tests
def find_wptreport(args):
parser = argparse.ArgumentParser()
parser.add_argument('--log-wptreport', action='store')
return parser.parse_known_args(args)[0].log_wptreport
def gzip_file(filename, delete_original=True):
with open(filename, 'rb') as f_in:
with gzip.open('%s.gz' % filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
if delete_original:
os.unlink(filename)
def main(product, commit_range, wpt_args):
"""Invoke the `wpt run` command according to the needs of the TaskCluster
continuous integration service."""
logger = logging.getLogger("tc-run")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
logger.addHandler(handler)
child = subprocess.Popen(['python', './wpt', 'manifest-download'])
child.wait()
if commit_range:
logger.info(
"Identifying tests affected in range '%s'..." % commit_range
)
tests = tests_affected(commit_range)
logger.info("Identified %s affected tests" % len(tests))
if not tests:
logger.info("Quitting because no tests were affected.")
return
else:
tests = []
logger.info("Running all tests")
wpt_args += [
"--log-tbpl-level=info",
"--log-tbpl=-",
"-y",
"--no-pause",
"--no-restart-on-unexpected",
"--install-fonts",
"--no-headless"
]
wpt_args += browser_specific_args.get(product, [])
command = ["python", "./wpt", "run"] + wpt_args + [product] + tests
logger.info("Executing command: %s" % " ".join(command))
subprocess.check_call(command)
wptreport = find_wptreport(wpt_args)
if wptreport:
gzip_file(wptreport)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument("--commit-range", action="store",
help="""Git commit range. If specified, this will be
supplied to the `wpt tests-affected` command to
                            determine the list of tests to execute""")
parser.add_argument("product", action="store",
help="Browser to run tests in")
parser.add_argument("wpt_args", nargs="*",
help="Arguments to forward to `wpt run` command")
main(**vars(parser.parse_args()))
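# Illustrative check (editorial, not part of the original script):
# find_wptreport() above peeks at the forwarded wpt arguments for a
# --log-wptreport value and ignores everything else.
assert find_wptreport(["--log-wptreport", "report.json", "-y"]) == "report.json"
assert find_wptreport(["-y"]) is None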
|
stpettersens/Packager
|
travis.py
|
Python
|
mit
| 114
| 0
|
#!/usr/bin/env python
from subprocess import call
call(["bick
|
le", "builds", "stpettersens/Packager",
|
"-n", "5"])
|
prataprc/CouchPy
|
couchpy/.Attic/attachment.py
|
Python
|
gpl-3.0
| 7,105
| 0.024208
|
"""Module provides provides a convinient class :class:`Attachment` to access (Create,
Read, Delete) document attachments."""
import base64, logging
from os.path import basename
from copy import deepcopy
from mimetypes import guess_type
from httperror import *
from httpc import HttpSession, ResourceNotFound, OK, CREATED
from couchpy import CouchPyError
# TODO :
# 1. URL-encoding for attachment file-names
log = logging.getLogger( __name__ )
def _readattach( conn, paths=[], hthdrs={} ) :
"""
GET /<db>/<doc>/<attachment>
GET /<db>/_design/<design-doc>/<attachment>
"""
s, h, d = conn.get( paths, hthdrs, None )
if s == OK :
return s, h, d
else :
return (None, None, None)
def _writeattach( conn, paths=[], body='', hthdrs={}, **query ) :
"""
PUT /<db>/<doc>/<attachment>
PUT /<db>/_design/<design-doc>/<attachment>
query,
rev=<_rev>
"""
if 'Content-Length' not in hthdrs :
raise CouchPyError( '`Content-Length` header field not supplied' )
if 'Content-Type' not in hthdrs :
raise CouchPyError( '`Content-Type` header field not supplied' )
s, h, d = conn.put( paths, hthdrs, body, _query=query.items() )
if s == OK and d['ok'] == True :
return s, h, d
else :
return (None, None, None)
def _deleteattach( conn, paths=[], hthdrs={}, **query ) :
"""
DELETE /<db>/<doc>/<attachment>
DELETE /<db>/_design/<design-doc>/<attachment>
query,
rev=<_rev>
"""
s, h, d = conn.delete( paths, hthdrs, None, _query=query.items() )
if s == OK and d['ok'] == True :
return s, h, d
else :
return (None, None, None)
class Attachment( object ) :
    def __init__( self, doc, filename, hthdrs={} ) :
"""Class instance object represents a single attachment in a document,
use the :class:`Document` object and attachment `filename` to create
the instance.
"""
self.doc = doc
self.db = doc.db
self.filename = filename
self.conn = doc.conn
self.hthdrs = self.conn.mixinhdrs( self.doc.hthdrs, hthdrs )
def __eq__( self, other ) :
"""Compare whether the attachment info and data are same"""
cond = self.doc._id == other.doc._id and self.doc._rev == self.doc._rev
cond = cond and self.attachinfo() == other.attachinfo()
return cond
def attachinfo( self, field=None ) :
"""Information from attachment stub in the document. If `field`
key-word argument is provided, value of that particular field is
returned, otherwise, entire dictionary of information is returned
"""
a = self.doc.doc.get( '_attachments', {} ).get( self.filename, {} )
val = a if field == None else a.get( field, None )
return val
def data( self, hthdrs={} ) :
"""Returns the content of the file attached to the document. Can
optionally take a dictionary of http headers.
"""
hthdrs = self.conn.mixinhdrs( self.hthdrs, hthdrs )
data, content_type = self.getattachment(
self.db, self.doc, self.filename, hthdrs=hthdrs
)
return data, content_type
content_type = property( lambda self : self.attachinfo('content_type') )
length = property( lambda self : self.attachinfo('length') )
revpos = property( lambda self : self.attachinfo('revpos') )
stub = property( lambda self : self.attachinfo('stub') )
content = property( lambda self : self.data() )
@classmethod
def getattachment( cls, db, doc, filename, hthdrs={} ) :
"""Returns a tuple of, ( <filedata>, <content_type> )
for attachment `filename` in `doc` stored in database `db`
"""
id_ = doc if isinstance(doc, basestring) else doc._id
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( hthdrs )
s, h, d = _readattach( db.conn, paths, hthdrs=hthdrs )
content_type = h.get( 'Content-Type', None )
return (d.getvalue(), content_type)
@classmethod
def putattachment( cls, db, doc, filepath, data, content_type=None,
hthdrs={}, **query ) :
"""Upload the supplied content (data) as attachment to the specified
document (doc). `filepath` provided must be a URL encoded string.
If `doc` is document-id, then `rev` keyword parameter should be
present in query.
"""
from couchpy.doc import Document
from couchpy.designdoc import DesignDocument
filename = basename( filepath )
id_ = doc if isinstance(doc, basestring) else doc._id
rev = query['rev'] if 'rev' in query else doc._rev
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( hthdrs )
(ctype, enc) = guess_type(filepath)
hthdrs.update(
{ 'Content-Type' : content_type
} if content_type != None else { 'Content-Type' : ctype }
)
hthdrs.update( {'Content-Length' : len(data)} if data else {} )
s, h, d = _writeattach( db.conn, paths, data, hthdrs=hthdrs, rev=rev )
if isinstance( doc, (Document,DesignDocument) ) and d != None :
doc.update({ '_rev' : d['rev'] })
return d
@classmethod
def delattachment( cls, db, doc, filename, hthdrs={}, **query ) :
"""Deletes the attachment form the specified doc. You must
supply the rev argument with the current revision to delete the
attachment."""
id_ = doc if isinstance(doc, basestring) else doc._id
rev = query['rev'] if 'rev' in query else doc._rev
paths = db.paths + [ id_, filename ]
        hthdrs = db.conn.mixinhdrs( hthdrs )
s, h, d = _deleteattach( db.conn, paths, hthdrs=hthdrs, rev=rev )
if isinstance(doc, Document) and d != None :
doc.update({ '_rev' : d['rev'] })
return d
@classmethod
def files2attach( cls, fnames=[] ) :
"""Helper method that will convert specified files `fnames` into
attachment structures in document format (key, value) pairs that is
suitable for writing into CouchDB.
"""
fnames = ( isinstance(fnames, basestring) and [fnames] ) or fnames
attachs = {}
for f in fnames :
if isinstance(f, (list,tuple)) :
ctype, fname = f
fdata = base64.encodestring( open(fname).read() )
attachs.setdefault(
                    basename(fname), { 'content_type' : ctype, 'data' : fdata }
)
elif isinstance(f, basestring) :
(ctype, enc) = guess_type(f)
fname, data = f, base64.encodestring( open(f).read() )
attachs.setdefault(
basename(fname), { 'content_type' : ctype, 'data' : data }
)
return attachs
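# Illustrative note (editorial; the file name is hypothetical): files2attach()
# above turns plain file names into CouchDB "_attachments" stubs, e.g.
#   Attachment.files2attach([ 'notes.txt' ])
#   => { 'notes.txt' : { 'content_type' : 'text/plain', 'data' : '<base64>' } }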
|
JazzeYoung/VeryDeepAutoEncoder
|
theano/misc/strutil.py
|
Python
|
bsd-3-clause
| 1,620
| 0
|
from __future__ import absolute_import, print_function, division
from six.moves import xrange
def render_string(string, sub):
"""
string: a string, containing formatting instructions
sub: a dictionary containing keys and values to substitute for
them.
returns: string % sub
The only difference between this function and the % operator
is that it raises an exception with a more informative error
message than the % operator does.
"""
try:
finalCode = string % sub
except Exception as E:
        # If unable to render the string, render longer and longer
# initial substrings until we find the minimal initial substring
# that causes an error
i = 0
while i <= len(string):
try:
finalCode = string[0:i] % sub
except Exception as F:
if str(F) == str(E):
raise Exception(
string[0:i] + "<<<< caused exception " + str(F))
i += 1
assert False
return finalCode
def pretty_format(string):
lines = string.split('\n')
lines = [strip_leading_white_space(line) for line in lines]
indent = 0
for i in xrange(len(lines)):
indent -= lines[i].count('}')
if indent < 0:
indent = 0
#
lines[i] = (' ' * indent) + lines[i]
indent += lines[i].count('{')
#
rval = '\n'.join(lines)
return rval
def strip_leading_white_space(line):
while len(line) > 0 and (line[0] == ' ' or line[0] == '\t'):
line = line[1:]
return line
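# Illustrative usage (editorial, not part of the original module):
# render_string() behaves like the % operator on success; on failure it
# re-raises with a "<<<< caused exception" marker at the smallest failing
# prefix of the template.
print(render_string("%(a)s + %(b)s", {"a": 1, "b": 2}))  # -> 1 + 2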
|
emsrc/pycornetto
|
lib/cornetto/argparse.py
|
Python
|
gpl-3.0
| 77,238
| 0.000531
|
# -*- coding: utf-8 -*-
# Copyright © 2006 Steven J. Bethard <[email protected]>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted under the terms of the 3-clause BSD
# license. No warranty expressed or implied.
# For details, see the accompanying file LICENSE.txt.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
* handles both optional and positional arguments
* produces highly informative usage messages
* supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file:
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
HelpFormatter, RawDescriptionHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default, while
RawDescriptionHelpFormatter tells the parser not to perform any
line-wrapping on description text.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '0.9.0'
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = '==PARSER=='
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format:
ClassName(attr=name, attr=name, ...)
    The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join(func(*args) for func, args in self.items)
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocation
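# Editorial sketch (not part of the original file, which is truncated above by
# the dataset split): a runnable version of the module docstring's example,
# with the file-logging option omitted.
parser = ArgumentParser(description='sum the integers at the command line')
parser.add_argument('integers', metavar='int', nargs='+', type=int,
                    help='an integer to be summed')
args = parser.parse_args(['1', '2', '3'])
print(sum(args.integers))  # -> 6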
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_diax.py
|
Python
|
mit
| 426
| 0.049296
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
	result.template = "object/mobile/shared_dressed_diax.iff"
result.attribute_template_id = 9
result.stfName("npc_name","diax")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
suraj-jayakumar/lstm-rnn-ad
|
src/testdata/random_data_15min_ts/point_to_batch_data_conversion.py
|
Python
|
apache-2.0
| 762
| 0.018373
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 10:21:20 2016
@author: suraj
"""
import pickle
import numpy as np
X = pickle.load(open('x_att.p'))
y = pickle.load(open('y_att.p'))
batchX = []
batchy = []
def convertPointsToBatch(day_of_week,data1,data2):
for i in range(5):
batchX.extend(data1[((i*672)+((day_of_week)*96)):((i*672)+((day_of_week)*96))+96])
batchy.extend(data2[((i*672)+((day_of_week)*96)):((i*672)+((day_of_week)*96))+96])
pass
for i in range(7):
convertPointsToBatch(i,X,y)
batchX = np.array(batchX)
batchy = np.array(batchy)
print batchX.shape
print batchy.shape
print batchX[0]
print batchy[0]
pickle.dump(batchX,open('batch_x_att.p','wb'))
pickle.dump(batchy,open('batch_y_att.p','wb'))
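# Editorial note (not part of the original script): the slicing in
# convertPointsToBatch() assumes 15-minute samples, i.e. 96 points per day and
# 7*96 = 672 per week, so for week i and weekday d the block is
# data[(i*672 + d*96) : (i*672 + d*96) + 96]; e.g. i=1, d=2 gives data[864:960].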
|
jscontreras/learning-gae
|
pgae-examples-master/2e/python/clock/clock4/prefs.py
|
Python
|
lgpl-3.0
| 541
| 0.003697
|
import webapp2
import models
class PrefsPage(webapp2.RequestHandler):
def post(self):
userprefs = models.get_userprefs()
try:
tz_offset = int(self.request.get('tz_offset'))
userprefs.tz_offset = tz_offset
userprefs.put()
except ValueError:
# User entered a value that wasn't an integer. Ignore for now.
pass
self.redirect('/')
application = webapp2.WSGIApplication([('/prefs', PrefsPage)],
                                      debug=True)
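# Editorial companion sketch (not part of the original handler; assumes the
# same models module): a GET handler that would render the stored offset.
class PrefsView(webapp2.RequestHandler):
    def get(self):
        self.response.write(str(models.get_userprefs().tz_offset))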
|
3v1n0/pywws
|
src/pywws/device_pyusb.py
|
Python
|
gpl-2.0
| 5,584
| 0.002328
|
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-15 Jim Easterbrook [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Low level USB interface to weather station, using PyUSB v0.4.
Introduction
============
This module handles low level communication with the weather station
via the `PyUSB <http://sourceforge.net/apps/trac/pyusb/>`_ library. It
is one of several USB device modules, each of which uses a different
USB library interface. See :ref:`Installation - USB
library<dependencies-usb>` for details.
Testing
=======
Run :py:mod:`pywws.testweatherstation` with increased verbosity so it
reports which USB device access module is being used::
python -m pywws.testweatherstation -vv
18:28:09:pywws.weatherstation.CUSBDrive:using pywws.device_pyusb
0000 55 aa ff ff ff ff ff ff ff ff ff ff ff ff ff ff 05 20 01 41 11 00 00 00 81 00 00 0f 05 00 e0 51
0020 03 27 ce 27 00 00 00 00 00 00 00 12 02 14 18 27 41 23 c8 00 00 00 46 2d 2c 01 64 80 c8 00 00 00
0040 64 00 64 80 a0 28 80 25 a0 28 80 25 03 36 00 05 6b 00 00 0a 00 f4 01 12 00 00 00 00 00 00 00 00
0060 00 00 49 0a 63 12 05 01 7f 00 36 01 60 80 36 01 60 80 bc 00 7b 80 95 28 12 26 6c 28 25 26 c8 01
0080 1d 02 d8 00 de 00 ff 00 ff 00 ff 00 00 11 10 06 01 29 12 02 01 19 32 11 09 09 05 18 12 01 22 13
00a0 14 11 11 04 15 04 11 12 17 05 12 11 09 02 15 26 12 02 11 07 05 11 09 02 15 26 12 02 11 07 05 11
00c0 09 10 09 12 12 02 02 12 38 12 02 07 19 00 11 12 16 03 27 12 02 03 11 00 11 12 16 03 27 11 12 26
00e0 21 32 11 12 26 21 32 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57
API
===
"""
__docformat__ = "restructuredtext en"
import platform
import usb
class USBDevice(object):
def __init__(self, idVendor, idProduct):
"""Low level USB device access via PyUSB library.
:param idVendor: the USB "vendor ID" number, for example 0x1941.
:type idVendor: int
:param idProduct: the USB "product ID" number, for example 0x8021.
:type idProduct: int
"""
        dev = self._find_device(idVendor, idProduct)
if not dev:
raise IOError("Weather station device not found")
self.devh = dev.open()
if not self.devh:
raise IOError("Open device failed")
self.devh.reset()
## if platform.system() is 'Windows':
## self.devh.setConfiguration(1)
try:
self.devh.claimInterface(0)
except usb.USBError:
# claim interface failed, try detaching kernel driver first
if not hasattr(self.devh, 'detachKernelDriver'):
raise RuntimeError(
"Please upgrade pyusb (or python-usb) to 0.4 or higher")
try:
self.devh.detachKernelDriver(0)
self.devh.claimInterface(0)
except usb.USBError:
raise IOError("Claim interface failed")
# device may have data left over from an incomplete read
for i in range(4):
try:
self.devh.interruptRead(0x81, 8, 1200)
except usb.USBError:
break
def __del__(self):
if self.devh:
try:
self.devh.releaseInterface()
except usb.USBError:
# interface was not claimed. No problem
pass
def _find_device(self, idVendor, idProduct):
"""Find a USB device by product and vendor id."""
for bus in usb.busses():
for device in bus.devices:
if (device.idVendor == idVendor and
device.idProduct == idProduct):
return device
return None
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.devh.interruptRead(0x81, size, 1200)
if result is None or len(result) < size:
raise IOError('pywws.device_libusb.USBDevice.read_data failed')
return list(result)
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
result = self.devh.controlMsg(
usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,
usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
if result != len(buf):
raise IOError('pywws.device_libusb.USBDevice.write_data failed')
return True
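# Illustrative usage (editorial, not part of the original module): the vendor
# and product IDs below are the examples from the constructor docstring above.
dev = USBDevice(0x1941, 0x8021)
block = dev.read_data(8)  # raises IOError if the station does not answer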
|
bwesterb/germain
|
exact.py
|
Python
|
gpl-3.0
| 437
| 0.004577
|
""" Generates a list of [b, N, n] where N is the amount of b-bit primes
    and n is the amount of b-bit safe primes. """
import gmpy
import json
for b in xrange(1,33):
N = 0
n = 0
p = gmpy.mpz(2**b)
while True:
p = gmpy.next_prime(p)
        if p > 2**(b+1):
break
if gmpy.is_prime(2*p + 1):
n += 1
N += 1
d = n/float(N)
print json.dumps([b, N, n])
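# Worked example (editorial, not part of the original script): for b=4 the
# loop visits the primes 17, 19, 23, 29, 31 (N=5); only 23 and 29 make 2*p+1
# prime (47 and 59), so the emitted row is [4, 5, 2].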
|
appuio/ansible-role-openshift-zabbix-monitoring
|
vendor/openshift-tools/ansible/roles/lib_gcloud/build/ansible/gcloud_iam_sa_keys.py
|
Python
|
apache-2.0
| 2,395
| 0.00334
|
# pylint: skip-file
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud iam service-account keys'''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
service_account_name=dict(required=True, type='str'),
key_format=dict(type='str', choices=['p12', 'json']),
key_id=dict(default=None, type='str'),
display_name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
gcloud = GcloudIAMServiceAccountKeys(module.params['service_account_name'],
key_format=module.params['key_format'])
state = module.params['state']
#####
# Get
#####
if state == 'list':
api_rval = gcloud.list_service_account_keys()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval, state="list")
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gcloud.delete_service_account_key(module.params['key_id'])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="absent")
if state == 'present':
########
# Create
########
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
# Create it here
outputfile = '/tmp/glcoud_iam_sa_keys'
api_rval = gcloud.create_service_account_key(outputfile)
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
        module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
macioosch/dynamo-hard-spheres-sim
|
to_csv_pretty.py
|
Python
|
gpl-3.0
| 3,184
| 0.005653
|
#!/usr/bin/env python2
# encoding=utf-8
from __future__ import division, print_function
from math import ceil, floor, log10, pi
from sys import argv, stdout
from xml.dom import minidom
import bz2
import csv
# local imports
from my_helper_functions_bare import *
def pretty_mean_std(data):
return uncertain_number_string(my_mean(data), my_means_std(data))
varying_parameters = ["pressures_virial", "pressures_collision", "msds_val",
"msds_diffusion", "times"]
data = { i:[] for i in varying_parameters }
data = dict(data.items() + {"packings": [], "collisions": [], "n_atoms": []}.items())
for input_file in argv[1:]:
xmldoc = minidom.parse(bz2.BZ2File(input_file))
packing = float(xmldoc.getElementsByTagName('PackingFraction')[0].attributes['val'].value)
n_atoms = int(xmldoc.getElementsByTagName('ParticleCount')[0].attributes['val'].value)
if len(data["packings"]) == 0 or packing != data["packings"][-1] \
or n_atoms != data["n_atoms"][-1]:
data["packings"].append(packing)
data["n_atoms"].append(n_atoms)
data["collisions"].append(int(xmldoc.getElementsByTagName(
'Duration')[0].attributes['TwoParticleEvents'].value))
for parameter in varying_parameters:
data[parameter].append([])
data["times"][-1].append(float(
xmldoc.getElementsByTagName('Duration')[0].attributes['Time'].value))
data["pressures_virial"][-1].append(float(
        xmldoc.getElementsByTagName('Pressure')[0].attributes['Avg'].value))
data["pressures_collision"][-1].append(my_pressure(data["n_atoms"][-1],
data["collisions"][-1], data["times"][-1][-1]))
try:
data["msds_val"][-1].append(float(
xmldoc.getElementsByTagName('Species')[0].attributes['val'].value))
data["msds_diffusion"][-1].append(float(
            xmldoc.getElementsByTagName('Species')[0].attributes['diffusionCoeff'].value))
except:
data["msds_val"][-1].append(None)
data["msds_diffusion"][-1].append(None)
stdout_writer = csv.writer(stdout, delimiter='\t')
"""
stdout.write("### Data format: packings\tdensities\tcollisions\tn_atoms\t"
"pressures_virial\tpressures_collision\tmsds_val\tmsds_diffusion\t"
"times\n")
"""
stdout.write("\multicolumn{1}{c}{$\zeta$}\t\multicolumn{1}{c}{$Z_{MD}$}\t"
"\multicolumn{1}{c}{$\Delta Z_{MD}$}\n")
for i in xrange(len(data["packings"])):
if data["msds_diffusion"][i][0] is None:
continue
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(data["packings"][i]*6.0/pi),
data["collisions"][i],
data["n_atoms"][i],
pretty_mean_std(data["pressures_virial"][i]),
pretty_mean_std(data["pressures_collision"][i]),
pretty_mean_std(data["msds_val"][i]),
pretty_mean_std(data["msds_diffusion"][i]),
pretty_mean_std(data["times"][i])
])
"""
stdout_writer.writerow([
"{:.9f}".format(data["packings"][i]),
"{:.9f}".format(my_mean(data["pressures_collision"][i])),
"{:.9f}".format(my_means_std(data["pressures_collision"][i]))
])
|
petertodd/python-opentimestamps
|
opentimestamps/core/notary.py
|
Python
|
lgpl-3.0
| 10,936
| 0.002195
|
# Copyright (C) 2016 The OpenTimestamps developers
#
# This file is part of python-opentimestamps.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-opentimestamps including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
"""Timestamp signature verification"""
import opentimestamps.core.serialize
class VerificationError(Exception):
"""Attestation verification errors"""
class TimeAttestation:
"""Time-attesting signature"""
TAG = None
TAG_SIZE = 8
# FIXME: What should this be?
MAX_PAYLOAD_SIZE = 8192
"""Maximum size of a attestation payload"""
def _serialize_payload(self, ctx):
raise NotImplementedError
def serialize(self, ctx):
ctx.write_bytes(self.TAG)
payload_ctx = opentimestamps.core.serialize.BytesSerializationContext()
self._serialize_payload(payload_ctx)
ctx.write_varbytes(payload_ctx.getbytes())
def __eq__(self, other):
"""Implementation of equality operator
WARNING: The exact behavior of this isn't yet well-defined enough to be
used for consensus-critical applications.
"""
if isinstance(other, TimeAttestation):
assert self.__class__ is not other.__class__ # should be implemented by subclass
return False
else:
return NotImplemented
def __lt__(self, other):
"""Implementation of less than operator
WARNING: The exact behavior of this isn't yet well-defined enough to be
used for consensus-critical applications.
"""
if isinstance(other, TimeAttestation):
assert self.__class__ is not other.__class__ # should be implemented by subclass
return self.TAG < other.TAG
else:
return NotImplemented
@classmethod
def deserialize(cls, ctx):
tag = ctx.read_bytes(cls.TAG_SIZE)
serialized_attestation = ctx.read_varbytes(cls.MAX_PAYLOAD_SIZE)
import opentimestamps.core.serialize
payload_ctx = opentimestamps.core.serialize.BytesDeserializationContext(serialized_attestation)
# FIXME: probably a better way to do this...
import opentimestamps.core.dubious.notary
if tag == PendingAttestation.TAG:
r = PendingAttestation.deserialize(payload_ctx)
elif tag == BitcoinBlockHeaderAttestation.TAG:
r = BitcoinBlockHeaderAttestation.deserialize(payload_ctx)
elif tag == opentimestamps.core.dubious.notary.EthereumBlockHeaderAttestation.TAG:
r = opentimestamps.core.dubious.notary.EthereumBlockHeaderAttestation.deserialize(payload_ctx)
else:
            return UnknownAttestation(tag, serialized_attestation)
# If attestations want to have unspecified fields for future
        # upgradability they should do so explicitly.
payload_ctx.assert_eof()
return r
class UnknownAttestation(TimeAttestation):
"""Placeholder for attestations that don't support"""
def __init__(self, tag, payload):
if tag.__class__ != bytes:
raise TypeError("tag must be bytes instance; got %r" % tag.__class__)
elif len(tag) != self.TAG_SIZE:
raise ValueError("tag must be exactly %d bytes long; got %d" % (self.TAG_SIZE, len(tag)))
if payload.__class__ != bytes:
raise TypeError("payload must be bytes instance; got %r" % tag.__class__)
elif len(payload) > self.MAX_PAYLOAD_SIZE:
raise ValueError("payload must be <= %d bytes long; got %d" % (self.MAX_PAYLOAD_SIZE, len(payload)))
# FIXME: we should check that tag != one of the tags that we do know
# about; if it does the operators < and =, and hash() will likely act
# strangely
self.TAG = tag
self.payload = payload
def __repr__(self):
return 'UnknownAttestation(%r, %r)' % (self.TAG, self.payload)
def __eq__(self, other):
if other.__class__ is UnknownAttestation:
return self.TAG == other.TAG and self.payload == other.payload
else:
super().__eq__(other)
def __lt__(self, other):
if other.__class__ is UnknownAttestation:
            return (self.TAG, self.payload) < (other.TAG, other.payload)
else:
super().__eq__(other)
def __hash__(self):
return hash((self.TAG, self.payload))
def _serialize_payload(self, ctx):
# Notice how this is write_bytes, not write_varbytes - the latter would
# incorrectly add a length header to the actual payload.
ctx.write_bytes(self.payload)
# Note how neither of these signatures actually has the time...
class PendingAttestation(TimeAttestation):
"""Pending attestation
Commitment has been recorded in a remote calendar for future attestation,
and we have a URI to find a more complete timestamp in the future.
Nothing other than the URI is recorded, nor is there provision made to add
    extra metadata (other than the URI) in future upgrades. The rationale here
is that remote calendars promise to keep commitments indefinitely, so from
the moment they are created it should be possible to find the commitment in
the calendar. Thus if you're not satisfied with the local verifiability of
a timestamp, the correct thing to do is just ask the remote calendar if
additional attestations are available and/or when they'll be available.
    While we could add additional metadata, like what types of attestations the
remote calendar expects to be able to provide in the future, that metadata
can easily change in the future too. Given that we don't expect timestamps
to normally have more than a small number of remote calendar attestations,
it'd be better to have verifiers get the most recent status of such
information (possibly with appropriate negative response caching).
"""
TAG = bytes.fromhex('83dfe30d2ef90c8e')
MAX_URI_LENGTH = 1000
"""Maximum legal URI length, in bytes"""
ALLOWED_URI_CHARS = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._/:"
"""Characters allowed in URI's
Note how we've left out the characters necessary for parameters, queries,
or fragments, as well as IPv6 [] notation, percent-encoding special
characters, and @ login notation. Hopefully this keeps us out of trouble!
"""
@classmethod
def check_uri(cls, uri):
"""Check URI for validity
Raises ValueError appropriately
"""
if len(uri) > cls.MAX_URI_LENGTH:
raise ValueError("URI exceeds maximum length")
for char in uri:
if char not in cls.ALLOWED_URI_CHARS:
raise ValueError("URI contains invalid character %r" % bytes([char]))
def __init__(self, uri):
if not isinstance(uri, str):
raise TypeError("URI must be a string")
self.check_uri(uri.encode())
self.uri = uri
def __repr__(self):
return 'PendingAttestation(%r)' % self.uri
def __eq__(self, other):
if other.__class__ is PendingAttestation:
return self.uri == other.uri
else:
super().__eq__(other)
def __lt__(self, other):
if other.__class__ is PendingAttestation:
return self.uri < other.uri
else:
super().__eq__(other)
def __hash__(self):
return hash(self.uri)
def _serialize_payload(self, ctx):
ctx.write_varbytes(self.uri.encode())
@classmethod
def deserialize(cls, ctx):
utf8_uri = ctx.read_varbytes(cls.MAX_URI_LENGTH)
try:
cls.check_uri(utf8_uri)
except ValueError as exp:
raise opentimestamps.core.serialize.DeserializationError("Invalid URI: %r" % exp)
return PendingAttestation(utf8_uri.decode())
class BitcoinBlockHeaderAttestation(TimeAttestation):
"""Signed by the Bitcoin blockchain
The commitment digest will be the merkleroot of the blockhea
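# Editorial round-trip sketch (not part of the original module, which is
# truncated above by the dataset split; the calendar URI is hypothetical):
from opentimestamps.core.serialize import BytesSerializationContext, BytesDeserializationContext
ctx = BytesSerializationContext()
PendingAttestation("example.calendar.org").serialize(ctx)
parsed = TimeAttestation.deserialize(BytesDeserializationContext(ctx.getbytes()))
assert parsed == PendingAttestation("example.calendar.org")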
|
engla/kupfer
|
kupfer/textutils.py
|
Python
|
gpl-3.0
| 2,681
| 0.001617
|
# encoding: utf-8
def _unicode_truncate(ustr, length, encoding="UTF-8"):
"Truncate @ustr to specific encoded byte length"
bstr = ustr.encode(encoding)[:length]
return bstr.decode(encoding, 'ignore')
def extract_title_body(text, maxtitlelen=60):
"""Prepare @text: Return a (title, body) tuple
@text: A user-submitted paragraph or otherwise snippet of text. We
try to detect an obvious title and then return the title and the
following body. Otherwise we extract a title from the first words,
and return the full text as body.
    @maxtitlelen: A unitless measure of approximate length of title.
The default value yields a resulting title of approximately 60 ascii
characters, or 20 asian characters.
>>> extract_title_body("Short Text")
    ('Short Text', '')
>>> title, body = extract_title_body(u"執筆方針については、項目名の付け方、"
... "フォーマットや表記上の諸問題に関して多くの方針が存在している。")
>>> print(title)
執筆方針については、項目名の付け方、フォ
>>> print(body) # doctest: +ELLIPSIS
執筆方針については、項目名の付け方、フォ...して多くの方針が存在している。
"""
    # if you don't make real tests, it's not worth doing it at all.
if not text.strip():
return text, ""
def split_first_line(text):
"""Take first non-empty line of text"""
lines = iter(text.splitlines())
for l in lines:
l = l.strip()
if not l:
continue
rest = "\n".join(lines)
return l, rest
# We use the UTF-8 encoding and truncate due to it:
# this is a good heuristic for ascii vs "wide characters"
# it results in taking fewer characters if they are asian, which
# is exactly what we want
def split_first_words(text, maxlen):
text = text.lstrip()
first_text = _unicode_truncate(text, maxlen)
words = first_text.split()
if len(words) > 3:
words = words[:-1]
first_words = " ".join(words[:-1])
if text.startswith(first_words):
first_text = first_words
rest_text = text[len(first_text):]
return first_text, rest_text
firstline, rest = split_first_line(text)
if len(firstline.encode("UTF-8")) > maxtitlelen:
firstline, rest = split_first_words(text, maxtitlelen)
else:
return firstline, rest
if rest.strip():
return firstline, text
else:
return text, ""
if __name__ == '__main__':
import doctest
doctest.testmod()
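# Editorial doctest-style note (not part of the original module):
# _unicode_truncate() clips on encoded bytes, so a multi-byte character cut at
# the boundary is dropped rather than mangled:
#     >>> _unicode_truncate("naïve", 4)  # "ï" is two bytes in UTF-8
#     'naï'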
|
Goodideax/CS249
|
new.py
|
Python
|
bsd-3-clause
| 14,678
| 0.014784
|
#! /usr/bin/env python
import re
import math
import collections
import numpy as np
import time
import operator
from scipy.io import mmread, mmwrite
from random import randint
from sklearn import cross_validation
from sklearn import linear_model
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing as pp
from sklearn.svm import SVR
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.decomposition import ProbabilisticPCA, KernelPCA
from sklearn.decomposition import NMF
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet
import scipy.stats as stats
from sklearn import tree
from sklearn.feature_selection import f_regression
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, f1_score
from sklearn.gaussian_process import GaussianProcess
import features
# working directory
dir = '.'
label_index = 770
# load train data
def load_train_fs():
    # In the validation process, the training data was randomly shuffled first.
    # For the prediction process, there is no need to shuffle the dataset.
    # Owing to an out-of-memory problem, the Gaussian process only uses part of
    # the training data, so its predictions may differ slightly from a model
    # whose training data was shuffled.
train_fs = np.genfromtxt(open(dir + '/train_v2_1000.csv','rb'), delimiter=',', skip_header=1)
col_mean = stats.nanmean(train_fs, axis=0)
inds = np.where(np.isnan(train_fs))
train_fs[inds] = np.take(col_mean, inds[1])
train_fs[np.isinf(train_fs)] = 0
return train_fs
# load test data
def load_test_fs():
test_fs = np.genfromtxt(open(dir + '/test_v2_1000.csv','rb'), delimiter=',', skip_header = 1)
col_mean = stats.nanmean(test_fs, axis=0)
inds = np.where(np.isnan(test_fs))
test_fs[inds] = np.take(col_mean, inds[1])
test_fs[np.isinf(test_fs)] = 0
return test_fs
# extract features from test data
def test_type(test_fs):
x_Test = test_fs[:,range(1, label_index)]
return x_Test
# extract features from train data
def train_type(train_fs):
train_x = train_fs[:,range(1, label_index)]
train_y= train_fs[:,-1]
return train_x, train_y
# transform the loss to the binary form
def toLabels(train_y):
labels = np.zeros(len(train_y))
labels[train_y>0] = 1
return labels
# generate the output file based to the predictions
def output_preds(preds):
out_file = dir + '/output_1000.csv'
fs = open(out_file,'w')
fs.write('id,loss\n')
for i in range(len(preds)):
if preds[i] > 100:
preds[i] = 100
elif preds[i] < 0:
preds[i] = 0
strs = str(i+105472) + ',' + str(np.float(preds[i]))
fs.write(strs + '\n');
fs.close()
return
# get the top feature indexes by invoking f_regression
def getTopFeatures(train_x, train_y, n_features=100):
f_val, p_val = f_regression(train_x,train_y)
f_val_dict = {}
p_val_dict = {}
for i in range(len(f_val)):
if math.isnan(f_val[i]):
f_val[i] = 0.0
f_val_dict[i] = f_val[i]
if math.isnan(p_val[i]):
p_val[i] = 0.0
p_val_dict[i] = p_val[i]
sorted_f = sorted(f_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
sorted_p = sorted(p_val_dict.iteritems(), key=operator.itemgetter(1),reverse=True)
feature_indexs = []
for i in range(0,n_features):
feature_indexs.append(sorted_f[i][0])
return feature_indexs
# generate the new data, based on which features are generated, and used
def get_data(train_x, feature_indexs, feature_minus_pair_list=[], feature_plus_pair_list=[],
feature_mul_pair_list=[], feature_divide_pair_list = [], feature_pair_sub_mul_list=[],
feature_pair_plus_mul_list = [],feature_pair_sub_divide_list = [], feature_minus2_pair_list = [],feature_mul2_pair_list=[],
feature_sub_square_pair_list=[], feature_square_sub_pair_list=[],feature_square_plus_pair_list=[]):
sub_train_x = train_x[:,feature_indexs]
for i in range(len(feature_minus_pair_list)):
ind_i = feature_minus_pair_list[i][0]
ind_j = feature_minus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i]-train_x[:,ind_j]))
for i in range(len(feature_plus_pair_list)):
ind_i = feature_plus_pair_list[i][0]
ind_j = feature_plus_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] + train_x[:,ind_j]))
for i in range(len(feature_mul_pair_list)):
ind_i = feature_mul_pair_list[i][0]
ind_j = feature_mul_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] * train_x[:,ind_j]))
for i in range(len(feature_divide_pair_list)):
ind_i = feature_divide_pair_list[i][0]
ind_j = feature_divide_pair_list[i][1]
sub_train_x = np.column_stack((sub_train_x, train_x[:,ind_i] / train_x[:,ind_j]))
for i in range(len(feature_pair_sub_mul_list)):
ind_i = feature_pair_sub_mul_list[i][0]
ind_j = feature_pair_sub_mul_list[i][1]
ind_k = feature_pair_sub_mul_list[i][2]
sub_train_x = np.column_stack((sub_train_x, (train_x[:,ind_i]-train_x[:,ind_j]) * train_x[:,ind_k]))
return sub_train_x
# use gbm classifier to predict whether the loan defaults or not
def gbc_classify(train_x, train_y):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], features.feature_pair_sub_list
,features.feature_pair_plus_list, features.feature_pair_mul_list, features.feature_pair_divide_list[:20],
features.feature_pair_sub_mul_list[:20])
labels = toLabels(train_y)
gbc = GradientBoostingClassifier(n_estimators=3000, max_depth=8)
gbc.fit(sub_x_Train, labels)
return gbc
# use svm to predict the loss, based on the result of gbm classifier
def gbc_svr_predict_part(gbc, train_x, train_y, test_x, feature_pair_sub_list,
feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list,
feature_pair_sub_mul_list, feature_pair_sub_list_sf, feature_pair_plus_list2):
feature_indexs = getTopFeatures(train_x, train_y)
sub_x_Train = get_data(train_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
sub_x_Test = get_data(test_x, feature_indexs[:16], feature_pair_sub_list
,feature_pair_plus_list, feature_pair_mul_list, feature_pair_divide_list[:20], feature_pair_sub_mul_list[:20])
pred_labels = gbc.predict(sub_x_Test)
pred_probs = gbc.predict_proba(sub_x_Test)[:,1]
ind_test = np.where(pred_probs>0.55)[0]
ind_train = np.where(train_y > 0)[0]
ind_train0 = np.where(train_y == 0)[0]
preds_all = np.zeros([len(sub_x_Test)])
flag = (sub_x_Test[:,16] >= 1)
ind_tmp0 = np.where(flag)[0]
ind_tmp = np.where(~flag)[0]
sub_x_Train = get_data(train_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Test = get_data(test_x, feature_indexs[:100], feature_pair_sub_list_sf
,feature_pair_plus_list2[:100], feature_pair_mul_list[:40], feature_pair_divide_list, feature_pair_sub_mul_list)
sub_x_Train[:,101] = np.log(1-sub_x_Train[:,101])
sub_x_Test[ind_tmp,101] = np.log(1-sub_x_Test[ind_tmp,101])
scaler = pp.StandardScaler()
scaler.fit(sub_x_Train)
sub_x_Train = scaler.transform(sub_x_Train)
sub_x_Test[ind_tmp] = scaler.transform(sub_x_Test[ind_tmp])
svr = SVR(C=16, kernel='rbf', gamma = 0.000122)
svr.fit(sub_x_Train[ind_train], np.log(train_y
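# Editorial note (the original record is truncated mid-call above by the
# dataset split): toLabels() defined earlier binarizes the loss target, e.g.
#     toLabels(np.array([0., 3.5, 0., 12.]))  ->  array([ 0.,  1.,  0.,  1.])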
|
gltn/stdm
|
stdm/ui/lookup_value_selector.py
|
Python
|
gpl-2.0
| 4,591
| 0
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : lookup value selector
Description : Enables the selection of lookup values from a
lookup entity.
Date : 09/February/2017
copyright : (C) 2017 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import (
QStandardItem,
QStandardItemModel
)
from qgis.PyQt.QtWidgets import (
QApplication,
QDialog
)
from stdm.settings import current_profile
from stdm.ui.gui_utils import GuiUtils
from stdm.ui.notification import NotificationBar
WIDGET, BASE = uic.loadUiType(
GuiUtils.get_ui_file_path('ui_lookup_value_selector.ui'))
class LookupValueSelector(WIDGET, BASE):
"""
A dialog that enables to select a value and code from a lookup.
.. versionadded:: 1.5
"""
def __init__(self, parent, lookup_entity_name, profile=None):
"""
Initializes LookupValueSelector.
:param parent: The parent of the dialog.
:type parent: QWidget
:param lookup_entity_name: The lookup entity name
:type lookup_entity_name: String
:param profile: The current profile object
:type profile: Object
"
|
""
QDialog.__init__(self, parent, Qt.WindowTitleHint |
Qt.WindowCloseButtonHint)
self.setupUi(self)
self.value_and_code = None
if profile is None:
self._profile = current_profile()
else:
self._profile = profile
self.lookup_entity = self._profile.entity_by_name(
            '{}_{}'.format(self._profile.prefix, lookup_entity_name)
)
self.notice = NotificationBar(self.notice_bar)
self._view_model = QStandardItemModel()
self.value_list_box.setModel(self._view_model)
header_item = QStandardItem(lookup_entity_name)
self._view_model.setHorizontalHeaderItem(0, header_item)
self.populate_value_list_view()
self.selected_code = None
self.selected_value_code = None
self.value_list_box.clicked.connect(self.validate_selected_code)
def populate_value_list_view(self):
"""
Populates the lookup values and codes.
"""
self.value_and_code = self.lookup_entity.values
for value, code in self.value_and_code.items():
u_value = str(value)
code_value = self.lookup_entity.values[u_value]
value_code = QStandardItem('{} ({})'.format(
code_value.value, code.code
)
)
value_code.setData(code.code)
self._view_model.appendRow(value_code)
def validate_selected_code(self):
"""
Validate the selected code for the presence of Code or not.
"""
self.notice.clear()
self.selected_code_value()
if self.selected_code == '':
notice = QApplication.tr(self, 'The selected value has no code.')
self.notice.insertWarningNotification(notice)
def selected_code_value(self):
"""
Get the selected lookup value.
"""
index = self.value_list_box.currentIndex()
item = self._view_model.itemFromIndex(index)
self.selected_code = item.data()
self.selected_value_code = item.text()
def accept(self):
"""
Overridden QDialog accept method.
"""
self.selected_code_value()
self.done(1)
def reject(self):
"""
        Overridden QDialog reject method.
"""
self.selected_code = None
self.selected_value_code = None
self.done(0)
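# Editorial usage sketch (not part of the original module; the parent widget
# and lookup name are hypothetical):
#     dlg = LookupValueSelector(parent_widget, 'check_gender')
#     if dlg.exec_():
#         print(dlg.selected_value_code, dlg.selected_code)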
|
trustedanalytics/spark-tk
|
python/sparktk/frame/ops/box_cox.py
|
Python
|
apache-2.0
| 2,662
| 0.003096
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def box_cox(self, column_name, lambda_value=0.0, box_cox_column_name=None):
"""
Calculate the box-cox transformation for each row on a given column of the current frame
Parameters
----------
:param column_name: Name of the column to perform the transformation on
:param lambda_value: Lambda power parameter. Default is 0.0
:param box_cox_column_name: Optional column name for the box_cox value
:return: (Frame) returns a frame with a new column storing the box-cox transformed value
Calculate the box-cox transformation for each row in column 'column_name' of a frame using the lambda_value.
Box-cox transformation is computed by the following formula:
boxcox = log(y); if lambda=0,
boxcox = (y^lambda -1)/lambda ; else
where log is the natural log
Examples
--------
>>> data = [[7.7132064326674596],[0.207519493594015],[6.336482349262754],[7.4880388253861181],[4.9850701230259045]]
>>> schema = [("input", float)]
>>> my_frame = tc.frame.create(data, schema)
>>> my_frame.inspect()
[#] input
===================
[0] 7.71320643267
[1] 0.207519493594
[2] 6.33648234926
[3] 7.48803882539
[4] 4.98507012303
Compute the box-cox transformation on the 'input' column
>>> my_frame.box_cox('input',0.3)
A new column gets added to the frame which stores the box-cox transformation for each row
>>> my_frame.inspect()
[#] input input_lambda_0.3
=====================================
[0] 7.71320643267 2.81913279907
[1] 0.207519493594 -1.25365381375
[2] 6.33648234926 2.46673638752
[3] 7.48803882539 2.76469126003
[4] 4.98507012303 2.06401101556
"""
self._scala.boxCox(column_name, lambda_value, self._tc.jutils.convert.to_scala_option(box_cox_column_name))
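# Editorial sanity check (not part of the original module) of the docstring
# example against the formula boxcox = (y**lam - 1)/lam for lam != 0:
y, lam = 7.7132064326674596, 0.3
print((y ** lam - 1) / lam)  # ~2.81913279907, matching row [0] above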
|
asedunov/intellij-community
|
python/testData/inspections/PyRedeclarationInspection/localVariable.py
|
Python
|
apache-2.0
| 46
| 0.021739
|
def test_local_variable():
x = 1
x = 2
|
mbj4668/pyang
|
pyang/plugins/ietf.py
|
Python
|
isc
| 7,093
| 0.001551
|
"""IETF usage guidelines plugin
See RFC 8407
"""
import optparse
import sys
import re
from pyang import plugin
from pyang import statements
from pyang import error
from pyang.error import err_add
from pyang.plugins import lint
def pyang_plugin_init():
plugin.register_plugin(IETFPlugin())
class IETFPlugin(lint.LintPlugin):
def __init__(self):
self.found_2119_keywords = False
self.found_8174 = False
self.found_tlp = False
self.mmap = {}
lint.LintPlugin.__init__(self)
self.namespace_prefixes = ['urn:ietf:params:xml:ns:yang:']
self.modulename_prefixes = ['ietf', 'iana']
def add_opts(self, optparser):
optlist = [
optparse.make_option("--ietf",
dest="ietf",
action="store_true",
help="Validate the module(s) according to " \
"IETF rules."),
optparse.make_option("--ietf-help",
dest="ietf_help",
action="store_true",
help="Print help on the IETF checks and exit"),
]
optparser.add_options(optlist)
def setup_ctx(self, ctx):
if ctx.opts.ietf_help:
print_help()
sys.exit(0)
if not ctx.opts.ietf:
return
self._setup_ctx(ctx)
statements.add_validation_fun(
'grammar', ['description'],
lambda ctx, s: self.v_chk_description(ctx, s))
# register our error codes
error.add_error_code(
'IETF_MISSING_RFC8174', 4,
'the module seems to use RFC 2119 keywords, but the required'
+ ' text from RFC 8174 is not found or is not correct'
+ ' (see pyang --ietf-help for details).')
error.add_error_code(
'IETF_MISSING_TRUST_LEGAL_PROVISIONING', 4,
'RFC 8407: 3.1: '
+ 'The IETF Trust Copyright statement seems to be'
+ ' missing or is not correct'
+ ' (see pyang --ietf-help for details).')
error.add_error_code(
'IETF_MISSING_RFC_TEXT', 4,
'RFC 8407: Appendix B: '
+ 'The text about which RFC this module is part of seems to be'
+ ' missing or is not correct'
+ ' (see pyang --ietf-help for details).')
def pre_validate_ctx(self, ctx, modules):
for mod in modules:
self.mmap[mod.arg] = {
'found_2119_keywords': False,
'found_8174': False}
def v_chk_description(self, ctx, s):
if s.i_module.arg not in self.mmap:
return
arg = re.sub(r'\s+', ' ', s.arg)
if s.parent.keyword == 'module' or s.parent.keyword == 'submodule':
m = re_rfc8174.search(arg)
if m is not None:
self.mmap[s.i_module.arg]['found_8174'] = True
arg = arg[:m.start()] + arg[m.end():]
m = re_tlp.search(arg)
if m is None:
err_add(ctx.errors, s.pos,
'IETF_MISSING_TRUST_LEGAL_PROVISIONING', ())
else:
                # The statement was changed to "Revised BSD License" in
                # September 2021. Allow both for old docs; require "Revised"
                # for new ones.
y = int(m.group(1))
if y >= 2022 and arg.find("Simplified") > 0:
err_add(ctx.errors, s.pos,
'IETF_MISSING_TRUST_LEGAL_PROVISIONING', ())
if s.parent.arg.startswith('ietf-'):
m = re_ietf_rfc.search(arg)
if m is None:
err_add(ctx.errors, s.pos,
'IETF_MISSING_RFC_TEXT', ())
if not self.mmap[s.i_module.arg]['found_2119_keywords']:
if re_2119_keywords.search(arg) is not None:
self.mmap[s.i_module.arg]['found_2119_keywords'] = True
self.mmap[s.i_module.arg]['description_pos'] = s.pos
def post_validate_ctx(self, ctx, modules):
if not ctx.opts.ietf:
return
for mod in modules:
if (self.mmap[mod.arg]['found_2119_keywords']
and not self.mmap[mod.arg]['found_8174']):
pos = self.mmap[mod.arg]['description_pos']
err_add(ctx.errors, pos, 'IETF_MISSING_RFC8174', ())
def print_help():
print("""
Validates the module or submodule according to the IETF rules found
in RFC 8407.
The module's or submodule's description statement must contain the
following text:
Copyright (c) <year> IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject to
the license terms contained in, the Revised BSD License set
forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(https://trustee.ietf.org/license-info).
An IETF module (but not an IANA module) must also contain the
following text:
This version of this YANG module is part of RFC XXXX
(https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself
for full legal notices.
If any description statement in the module or submodule contains
RFC 2119 key words, the module's or submodule's description statement
must contain the following text:
The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL
NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED',
'MAY', and 'OPTIONAL' in this document are to be interpreted as
described in BCP 14 (RFC 2119) (RFC 8174) when, and only when,
they appear in all capitals, as shown here.
""")
rfc8174_str = \
r"""The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL
NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED',
'MAY', and 'OPTIONAL' in this document are to be interpreted as
described in BCP 14 \(RFC 2119\) \(RFC 8174\) when, and only when,
they appear in all capitals, as shown here."""
re_rfc8174 = re.compile(re.sub(r'\s+', ' ', rfc8174_str))
tlp_str = \
r"""Copyright \(c\) ([0-9]+) IETF Trust and the persons identified as
authors of the code\. All rights reserved\.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the (Revised|Simplified) BSD License
set forth in Section 4\.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
\(https?://trustee.ietf.org/license-info\)\."""
re_tlp = re.compile(re.sub(r'\s+', ' ', tlp_str))
ietf_rfc_str = \
r"""This version of this YANG module is part of
RFC .+(\s+\(https?://www.rfc-editor.org/info/rfc.+\))?; see
the RFC itself for full legal notices\."""
re_ietf_rfc = re.compile(re.sub(r'\s+', ' ', ietf_rfc_str))
re_2119_keywords = re.compile(
r"\b(MUST|REQUIRED|SHOULD|SHALL|RECOMMENDED|MAY|OPTIONAL)\b")
|
|
android-art-intel/Nougat
|
art-extension/tools/checker/common/archs.py
|
Python
|
apache-2.0
| 663
| 0
|
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
|
cyyber/QRL
|
src/qrl/grpcProxy.py
|
Python
|
mit
| 9,400
| 0.002766
|
# coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import simplejson as json
import grpc
from google.protobuf.json_format import MessageToJson
from qrl.core import config
from qrl.core.AddressState import AddressState
from qrl.crypto.xmss import XMSS
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.TransferTransaction import TransferTransaction
from pyqrllib.pyqrllib import hstr2bin, bin2hstr
from qrl.generated import qrl_pb2_grpc, qrl_pb2, qrlmining_pb2, qrlmining_pb2_grpc
from flask import Flask, Response, request
from jsonrpc.backend.flask import api
app = Flask(__name__)
def read_slaves(slaves_filename):
with open(slaves_filename, 'r') as f:
slave_data = json.load(f)
slave_data[0] = bytes(hstr2bin(slave_data[0]))
return slave_data
def get_addr_state(addr: bytes) -> AddressState:
stub = get_public_stub()
response = stub.GetAddressState(request=qrl_pb2.GetAddressStateReq(address=addr))
return AddressState(response.state)
def set_unused_ots_key(xmss, addr_state, start=0):
for i in range(start, 2 ** xmss.height):
if not addr_state.ots_key_reuse(i):
xmss.set_ots_index(i)
return True
return False
def valid_payment_permission(public_stub, master_address_state, payment_xmss, json_slave_txn):
access_type = master_address_state.get_slave_permission(payment_xmss.pk)
if access_type == -1:
tx = Transaction.from_json(json_slave_txn)
public_stub.PushTransaction(request=qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata))
return None
if access_type == 0:
return True
return False
def get_unused_payment_xmss(public_stub):
global payment_slaves
global payment_xmss
master_address = payment_slaves[0]
master_address_state = get_addr_state(master_address)
if payment_xmss:
addr_state = get_addr_state(payment_xmss.address)
if set_unused_ots_key(payment_xmss, addr_state, payment_xmss.ots_index):
if valid_payment_permission(public_stub, master_address_state, payment_xmss, payment_slaves[2]):
return payment_xmss
else:
payment_xmss = None
if not payment_xmss:
unused_ots_found = False
for slave_seed in payment_slaves[1]:
xmss = XMSS.from_extended_seed(slave_seed)
addr_state = get_addr_state(xmss.address)
if set_unused_ots_key(xmss, addr_state): # Unused ots_key_found
payment_xmss = xmss
unused_ots_found = True
break
        if not unused_ots_found:  # no unused OTS key was found
return None
if not valid_payment_permission(public_stub, master_address_state, payment_xmss, payment_slaves[2]):
return None
return payment_xmss
@app.route('/api/<api_method_name>')
def api_proxy(api_method_name):
"""
Proxy JSON RPC requests to the gRPC server as well as converts back gRPC response
to JSON.
:param api_method_name:
:return:
"""
stub = qrl_pb2_grpc.PublicAPIStub(grpc.insecure_channel('{}:{}'.format(config.user.public_api_host,
config.user.public_api_port)))
public_api = qrl_pb2.DESCRIPTOR.services_by_name['PublicAPI']
api_method = public_api.FindMethodByName(api_method_name)
api_request = getattr(qrl_pb2, api_method.input_type.name)()
for arg in request.args:
if arg not in api_method.input_type.fields_by_name:
            raise Exception('Invalid args %s' % arg)
data_type = type(getattr(api_request, arg))
if data_type == bool and request.args[arg].lower() == 'false':
continue
value = data_type(request.args.get(arg, type=data_type))
setattr(api_request, arg, value)
resp = getattr(stub, api_method_name)(api_request, timeout=10)
return Response(response=MessageToJson(resp, sort_keys=True), status=200, mimetype='application/json')
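# Illustrative request (an assumption, not taken from the QRL docs): with the
# proxy running, a PublicAPI method -- say GetNodeState, if exposed -- can be
# invoked as a plain HTTP GET:
#
#   GET /api/GetNodeState
#
# Query-string arguments are matched by name against the fields of the
# corresponding *Req protobuf message; unknown names raise the exception above.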
def get_mining_stub():
global mining_stub
return mining_stub
def get_public_stub():
global public_stub
return public_stub
@api.dispatcher.add_method
def getlastblockheader(height=0):
stub = get_mining_stub()
request = qrlmining_pb2.GetLastBlockHeaderReq(height=height)
grpc_response = stub.GetLastBlockHeader(request=request, timeout=10)
block_header = {
'difficulty': grpc_response.difficulty,
'height': grpc_response.height,
'timestamp': grpc_response.timestamp,
'reward': grpc_response.reward,
'hash': grpc_response.hash,
'depth': grpc_response.depth
}
resp = {
"block_header": block_header,
"status": "OK"
}
return resp
@api.dispatcher.add_method
def getblockheaderbyheight(height):
return getlastblockheader(height)
@api.dispatcher.add_method
def getblocktemplate(reserve_size, wallet_address):
stub = get_mining_stub()
request = qrlmining_pb2.GetBlockToMineReq(wallet_address=wallet_address.encode())
grpc_response = stub.GetBlockToMine(request=request, timeout=10)
resp = {
'blocktemplate_blob': grpc_response.blocktemplate_blob,
'difficulty': grpc_response.difficulty,
'height': grpc_response.height,
'reserved_offset': grpc_response.reserved_offset,
'seed_hash': grpc_response.seed_hash,
'status': 'OK'
}
return resp
@api.dispatcher.add_method
def submitblock(blob):
stub = get_mining_stub()
request = qrlmining_pb2.SubmitMinedBlockReq(blob=bytes(hstr2bin(blob)))
response = stub.SubmitMinedBlock(request=request, timeout=10)
if response.error:
raise Exception # Mining pool expected exception when block submission fails
return MessageToJson(response, sort_keys=True)
@api.dispatcher.add_method
def getblockminingcompatible(height):
stub = get_mining_stub()
request = qrlmining_pb2.GetBlockMiningCompatibleReq(height=height)
response = stub.GetBlockMiningCompatible(request=request, timeout=10)
return MessageToJson(response, sort_keys=True)
@api.dispatcher.add_method
def transfer(destinations, fee, mixin, unlock_time):
if len(destinations) > config.dev.transaction_multi_output_limit:
        raise Exception('Payment Failed: Amount exceeds the allowed limit')
addrs_to = []
amounts = []
for tx in destinations:
addrs_to.append(bytes(hstr2bin(tx['address'][1:]))) # Skipping 'Q'
amounts.append(tx['amount'])
stub = get_public_stub()
xmss = get_unused_payment_xmss(stub)
if not xmss:
        raise Exception('Payment Failed: No Unused Payment XMSS found')
tx = TransferTransaction.create(addrs_to=addrs_to,
amounts=amounts,
message_data=None,
fee=fee,
xmss_pk=xmss.pk,
master_addr=payment_slaves[0])
tx.sign(xmss)
response = stub.PushTransaction(request=qrl_pb2.PushTransactionReq(transaction_signed=tx.pbdata))
if response.error_code != 3:
        raise Exception('Transaction Submission Failed, Response Code: %s' % response.error_code)
response = {'tx_hash': bin2hstr(tx.txhash)}
return response
app.add_url_rule('/json_rpc', 'api', api.as_view(), methods=['POST'])
def parse_arguments():
parser = argparse.ArgumentParser(description='QRL node')
parser.add_argument('--qrldir', '-d', dest='qrl_dir', default=config.user.qrl_dir,
help="Use a different directory for node data/configuration")
parser.add_argument('--network-type', dest='network_type', choices=['mainnet', 'testnet'],
default='mainnet', required=False, help="Runs QRL Testnet Node")
return parser.parse_args()
def main():
args = parse_arguments()
qrl_dir_post_fix = ''
copy_files = []
if args.network_type == 'testnet':
qrl_dir_post_fix = '-testnet'
|
jcrudy/glm-sklearn
|
glmsklearn/glm.py
|
Python
|
bsd-3-clause
| 9,972
| 0.017048
|
import statsmodels.api
import statsmodels.genmod.families.family
import numpy as np
from sklearn.metrics import r2_score
class GLM(object):
'''
A scikit-learn style wrapper for statsmodels.api.GLM. The purpose of this class is to
make generalized linear models compatible with scikit-learn's Pipeline objects.
    Parameters
    ----------
    family : instance of subclass of statsmodels.genmod.families.family.Family
The family argument determines the distribution family to use for GLM fitting.
xlabels : iterable of strings, optional (empty by default)
The xlabels argument can be used to assign names to data columns. This argument is not
generally needed, as names can be captured automatically from most standard data
structures. If included, must have length n, where n is the number of features. Note
that column order is used to compute term values and make predictions, not column names.
'''
def __init__(self, family, add_constant=True):
self.family = family
self.add_constant = add_constant
def _scrub_x(self, X, offset, exposure, **kwargs):
'''
Sanitize input predictors and extract column names if appropriate.
'''
no_labels = False
if 'xlabels' not in kwargs and 'xlabels' not in self.__dict__:
#Try to get xlabels from input data (for example, if X is a pandas DataFrame)
try:
self.xlabels = list(X.columns)
except AttributeError:
try:
self.xlabels = list(X.design_info.column_names)
except AttributeError:
try:
self.xlabels = list(X.dtype.names)
except TypeError:
no_labels = True
elif 'xlabels' not in self.__dict__:
self.xlabels = kwargs['xlabels']
#Convert to internally used data type
X = np.asarray(X,dtype=np.float64)
m,n = X.shape
if offset is not None:
offset = np.asarray(offset,dtype=np.float64)
offset = offset.reshape(offset.shape[0])
if exposure is not None:
exposure = np.asarray(exposure,dtype=np.float64)
exposure = exposure.reshape(exposure.shape[0])
#Make up labels if none were found
if no_labels:
self.xlabels = ['x'+str(i) for i in range(n)]
return X, offset, exposure
def _scrub(self, X, y, offset, exposure, **kwargs):
'''
Sanitize input data.
'''
#Check whether X is the output of patsy.dmatrices
if y is None and type(X) is tuple:
y, X = X
#Handle X separately
X, offset, exposure = self._scrub_x(X, offset, exposure, **kwargs)
#Convert y to internally used data type
y = np.asarray(y,dtype=np.float64)
y = y.reshape(y.shape[0])
#Make sure dimensions match
if y.shape[0] != X.shape[0]:
raise ValueError('X and y do not have compatible dimensions.')
return X, y, offset, exposure
def fit(self, X, y = None, offset = None, exposure = None, xlabels = None):
'''
Fit a GLM model to the input data X and y.
Parameters
----------
        X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, a patsy
DesignMatrix, or a tuple of patsy DesignMatrix objects as output by patsy.dmatrices.
y : array-like, optional (default=None), shape = [m] where m is the number of samples
The training response. The y parameter can be a numpy array, a pandas DataFrame with one
            column, a Patsy DesignMatrix, or can be left as None (default) if X was the output of a
call to patsy.dmatrices (in which case, X contains the response).
xlabels : iterable of strings, optional (default=None)
Convenient way to set the xlabels parameter while calling fit. Ignored if None (default).
See the GLM class for an explanation of the xlabels parameter.
'''
#Format and label the data
if xlabels is not None:
self.set_params(xlabels=xlabels)
X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__)
#Add a constant column
if self.add_constant:
X = statsmodels.api.add_constant(X, prepend=True)
#Do the actual work
model = statsmodels.api.GLM(y, X, self.family, offset=offset, exposure=exposure)
result = model.fit()
self.coef_ = result.params
return self
def predict(self, X, offset = None, exposure = None):
'''
Predict the response based on the input data X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a
patsy DesignMatrix.
'''
#Format the data
X, offset, exposure = self._scrub_x(X, offset, exposure)
#Linear transformation
eta = self.transform(X, offset, exposure)
#Nonlinear transformation
y_hat = self.family.fitted(eta)
return y_hat
def transform(self, X, offset = None, exposure = None):
'''
Perform a linear transformation of X.
Parameters
----------
X : array-like, shape = [m, n] where m is the number of samples and n is the number of features
The training predictors. The X parameter can be a numpy array, a pandas DataFrame, or a
patsy DesignMatrix.
'''
#Format the data
X, offset, exposure = self._scrub_x(X, offset, exposure)
#Add a constant column
if self.add_constant:
X = statsmodels.api.add_constant(X, prepend=True)
#Compute linear combination
eta = np.dot(X,self.coef_)
#Apply offset and exposure
if offset is not None:
eta += offset
if exposure is not None:
eta += np.log(exposure)
return eta
def score(self, X, y = None, offset = None, exposure = None, xlabels = None):
X, y, offset, exposure = self._scrub(X,y,offset,exposure,**self.__dict__)
y_pred = self.predict(X, offset=offset, exposure=exposure)
return r2_score(y, y_pred)
def get_params(self, deep = False):
return {}
def __repr__(self):
return self.__class__.__name__ + '()'
def __str__(self):
return self.__class__.__name__ + '()'
class GLMFamily(GLM):
family = NotImplemented
def __init__(self, add_constant=True):
super(GLMFamily,self).__init__(family=self.__class__.family(), add_constant=add_constant)
class BinomialRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Binomial
class GammaRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Gamma
class GaussianRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Gaussian
class InverseGaussianRegressor(GLMFamily):
family = statsmodels.genmod.families.family.InverseGaussian
class NegativeBinomialRegressor(GLMFamily):
family = statsmodels.genmod.families.family.NegativeBinomial
class PoissonRegressor(GLMFamily):
family = statsmodels.genmod.families.family.Poisson
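# Minimal usage sketch (illustrative, with made-up data): the family-specific
# wrappers expose the fit/predict/score trio that scikit-learn tooling expects.
#
#   import numpy as np
#   X = np.random.rand(100, 3)
#   y = np.random.poisson(lam=np.exp(X.sum(axis=1)))
#   model = PoissonRegressor().fit(X, y)
#   y_hat = model.predict(X)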
# def fit(self, X, y = None, exposure = None, xlabels = None):
# '''
# Fit a GLM model to the input data X and y.
#
#
# Parameters
# ----------
# X : array-like, shape = [m, n] where m is the number of samples and n is the number of feature
|
seertha/WSN_XBee
|
Software/RPI/Display_lcd/nodos_conectados.py
|
Python
|
mit
| 1,363
| 0.02788
|
# Test to show the nodes connected to the network
from base_datos import db
import time
from datetime import timedelta, datetime,date
dir_base="/media/CasaL/st/Documentos/proyectoXbee/WSN_XBee/basesTest/xbee_db02.db"
d=timedelta(minutes=-10)
#now=datetime.now()
#calculo=now+d
#print(calculo.strftime("%H:%M:%S"))
#hoy=datetime.now()
#miFecha=date(hoy.year,7,13)
#miHoraFecha=datetime(2017,7,13,20,13)
#print(miFecha.strftime("%Y/%m/%d"))
#print(miHoraFecha)
conFechaHora='''SELECT fecha_hora FROM datos ORDER BY fecha_hora DESC LIMIT 1'''
base=db(dir_base)
ultimoRegistro=base.consultaSimp(conFechaHora)[0][0]
aux1=ultimoRegistro.split(" ")
horaReg=aux1[0].split(":")
fechaReg=aux1[1].split("/")
aux_ini=datetime(int(fechaReg[2]),int(fechaReg[1]),int(fechaReg[0]),int(horaReg[0]),int(horaReg[1]),int(horaReg[2]))
aux_final=aux_ini+d
hora_inicio=aux_ini.strftime("%H:%M:%S %d/%m/%Y")
hora_final=aux_final.strftime("%H:%M:%S %d/%m/%Y")
print (hora_final)
#print("Hora inicio: {} Hora final: {}".format(ref_ini,ref_ini+d))
respuesta=base.consultaDat('''SELECT nodo_id FROM datos WHERE fecha_hora
BETWEEN ? and ?''',(hora_final,hora_inicio))
lista_nodos=[]
for e in respuesta:
if e[0] not in lista_nodos:
lista_nodos.append(e[0])
nodos_conn=len(lista_nodos)
print("Existen {} nodos conectados a la red".format(nodos_conn))
|
gofed/gofed-ng
|
common/service/storageService.py
|
Python
|
gpl-3.0
| 1,413
| 0.001415
|
#!/bin/python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
from threading import Lock
from service import Service
class StorageService(Service):
@classmethod
def on_startup(cls, config, system_json):
# TODO: config is not accessible when local
cls._system = None # We don't want to use system
cls._config = config
cls._lock = Lock()
cls.signal_startup(config.get(cls.get_service_name()))
if __name__ == "__main__":
sys.exit(1)
|
oblique-labs/pyVM
|
rpython/rlib/rmd5.py
|
Python
|
mit
| 14,169
| 0.019409
|
# -*- coding: iso-8859-1 -*-
"""
RPython implementation of MD5 checksums.
See also the pure Python implementation in lib_pypy/md5.py, which might
or might not be faster than this one on top of CPython.
This is an implementation of the MD5 hash function,
as specified by RFC 1321. It was implemented using Bruce Schneier's
excellent book "Applied Cryptography", 2nd ed., 1996.
This module tries to follow the API of the CPython md5 module.
Long history:
By Dinu C. Gherman. BEWARE: this comes with no guarantee whatsoever
about fitness and/or other properties! Specifically, do not use this
in any production code! License is Python License! (Re-licensing
under the MIT would be great, though)
Special thanks to Aurelian Coman who fixed some nasty bugs!
Modernised by J. Hallén and L. Creighton for Pypy.
Converted to RPython by arigo.
"""
from rpython.rlib.rarithmetic import r_uint, r_ulonglong
if r_uint.BITS == 32:
def _rotateLeft(x, n):
"Rotate x (32 bit) left n bits circularly."
return (x << n) | (x >> (32-n))
else:
def _rotateLeft_emulator(x, n):
x &= 0xFFFFFFFF
return (x << n) | (x >> (32-n))
# ----- start of custom code, think about something better... -----
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
eci = ExternalCompilationInfo(post_include_bits=["""
static unsigned long pypy__rotateLeft(unsigned long x, long n) {
unsigned int x1 = x; /* arithmetic directly on int */
int n1 = n;
return (x1 << n1) | (x1 >> (32-n1));
}
"""])
_rotateLeft = rffi.llexternal(
"pypy__rotateLeft", [lltype.Unsigned, lltype.Signed], lltype.Unsigned,
_callable=_rotateLeft_emulator, compilation_info=eci,
_nowrapper=True, elidable_function=True)
# we expect the function _rotateLeft to be actually inlined
def _state2string(a, b, c, d):
return ''.join([
chr(a&0xFF), chr((a>>8)&0xFF), chr((a>>16)&0xFF), chr((a>>24)&0xFF),
chr(b&0xFF), chr((b>>8)&0xFF), chr((b>>16)&0xFF), chr((b>>24)&0xFF),
chr(c&0xFF), chr((c>>8)&0xFF), chr((c>>16)&0xFF), chr((c>>24)&0xFF),
chr(d&0xFF), chr((d>>8)&0xFF), chr((d>>16)&0xFF), chr((d>>24)&0xFF),
])
def _state2hexstring(a, b, c, d):
hx = '0123456789abcdef'
return ''.join([
hx[(a>>4)&0xF], hx[a&0xF], hx[(a>>12)&0xF], hx[(a>>8)&0xF],
hx[(a>>20)&0xF], hx[(a>>16)&0xF], hx[(a>>28)&0xF], hx[(a>>24)&0xF],
hx[(b>>4)&0xF], hx[b&0xF], hx[(b>>12)&0xF], hx[(b>>8)&0xF],
hx[(b>>20)&0xF], hx[(b>>16)&0xF], hx[(b>>28)&0xF], hx[(b>>24)&0xF],
hx[(c>>4)&0xF], hx[c&0xF], hx[(c>>12)&0xF], hx[(c>>8)&0xF],
hx[(c>>20)&0xF], hx[(c>>16)&0xF], hx[(c>>28)&0xF], hx[(c>>24)&0xF],
hx[(d>>4)&0xF], hx[d&0xF], hx[(d>>12)&0xF], hx[(d>>8)&0xF],
hx[(d>>20)&0xF], hx[(d>>16)&0xF], hx[(d>>28)&0xF], hx[(d>>24)&0xF],
])
def _string2uintlist(s, start, count, result):
"""Build a list of count r_uint's by unpacking the string
s[start:start+4*count] in little-endian order.
"""
for i in range(count):
p = start + i * 4
x = r_uint(ord(s[p]))
x |= r_uint(ord(s[p+1])) << 8
x |= r_uint(ord(s[p+2])) << 16
x |= r_uint(ord(s[p+3])) << 24
result[i] = x
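# Added example: the four little-endian bytes '\x78\x56\x34\x12' unpack to the
# single 32-bit word 0x12345678:
#
#   buf = [r_uint(0)]
#   _string2uintlist('\x78\x56\x34\x12', 0, 1, buf)
#   assert buf[0] == r_uint(0x12345678)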
# ======================================================================
# The real MD5 meat...
#
# Implemented after "Applied Cryptography", 2nd ed., 1996,
# pp. 436-441 by Bruce Schneier.
# ======================================================================
# F, G, H and I are basic MD5 functions.
def F(x, y, z):
return (x & y) | ((~x) & z)
def G(x, y, z):
return (x & z) | (y & (~z))
def H(x, y, z):
return x ^ y ^ z
def I(x, y, z):
return y ^ (x | (~z))
def XX(func, a, b, c, d, x, s, ac):
"""Wrapper for call distribution to functions F, G, H and I.
This replaces functions FF, GG, HH and II from "Appl. Crypto."
Rotation is separate from addition to prevent recomputation
    (now summed-up in one function).
"""
res = a + func(b, c, d)
res = res + x
res = res + ac
res = _rotateLeft(res, s)
res = res + b
return res
XX._annspecialcase_ = 'specialize:arg(0)' # performance hint
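# Added note: spelled out, XX(F, a, b, c, d, x, s, ac) computes
#   b + rotl32(a + F(b, c, d) + x + ac, s)
# i.e. one MD5 sub-round; the specialize:arg(0) hint makes RPython emit a
# separate copy of XX for each of F, G, H and I.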
class RMD5(object):
"""RPython-level MD5 object.
"""
def __init__(self, initialdata=''):
self._init()
self.update(initialdata)
def _init(self):
"""Set this object to an initial empty state.
"""
        self.count = r_ulonglong(0)   # total number of bytes
self.input = "" # pending unprocessed data, < 64 bytes
self.uintbuffer = [r_uint(0)] * 16
# Load magic initialization constants.
self.A = r_uint(0x67452301L)
self.B = r_uint(0xefcdab89L)
self.C = r_uint(0x98badcfeL)
self.D = r_uint(0x10325476L)
def _transform(self, inp):
"""Basic MD5 step transforming the digest based on the input.
Note that if the Mysterious Constants are arranged backwards
in little-endian order and decrypted with the DES they produce
OCCULT MESSAGES!
"""
# 'inp' is a list of 16 r_uint values.
a, b, c, d = A, B, C, D = self.A, self.B, self.C, self.D
# Round 1.
S11, S12, S13, S14 = 7, 12, 17, 22
a = XX(F, a, b, c, d, inp[ 0], S11, r_uint(0xD76AA478L)) # 1
d = XX(F, d, a, b, c, inp[ 1], S12, r_uint(0xE8C7B756L)) # 2
c = XX(F, c, d, a, b, inp[ 2], S13, r_uint(0x242070DBL)) # 3
b = XX(F, b, c, d, a, inp[ 3], S14, r_uint(0xC1BDCEEEL)) # 4
a = XX(F, a, b, c, d, inp[ 4], S11, r_uint(0xF57C0FAFL)) # 5
d = XX(F, d, a, b, c, inp[ 5], S12, r_uint(0x4787C62AL)) # 6
c = XX(F, c, d, a, b, inp[ 6], S13, r_uint(0xA8304613L)) # 7
b = XX(F, b, c, d, a, inp[ 7], S14, r_uint(0xFD469501L)) # 8
a = XX(F, a, b, c, d, inp[ 8], S11, r_uint(0x698098D8L)) # 9
d = XX(F, d, a, b, c, inp[ 9], S12, r_uint(0x8B44F7AFL)) # 10
c = XX(F, c, d, a, b, inp[10], S13, r_uint(0xFFFF5BB1L)) # 11
b = XX(F, b, c, d, a, inp[11], S14, r_uint(0x895CD7BEL)) # 12
a = XX(F, a, b, c, d, inp[12], S11, r_uint(0x6B901122L)) # 13
d = XX(F, d, a, b, c, inp[13], S12, r_uint(0xFD987193L)) # 14
c = XX(F, c, d, a, b, inp[14], S13, r_uint(0xA679438EL)) # 15
b = XX(F, b, c, d, a, inp[15], S14, r_uint(0x49B40821L)) # 16
# Round 2.
S21, S22, S23, S24 = 5, 9, 14, 20
a = XX(G, a, b, c, d, inp[ 1], S21, r_uint(0xF61E2562L)) # 17
d = XX(G, d, a, b, c, inp[ 6], S22, r_uint(0xC040B340L)) # 18
c = XX(G, c, d, a, b, inp[11], S23, r_uint(0x265E5A51L)) # 19
b = XX(G, b, c, d, a, inp[ 0], S24, r_uint(0xE9B6C7AAL)) # 20
a = XX(G, a, b, c, d, inp[ 5], S21, r_uint(0xD62F105DL)) # 21
d = XX(G, d, a, b, c, inp[10], S22, r_uint(0x02441453L)) # 22
c = XX(G, c, d, a, b, inp[15], S23, r_uint(0xD8A1E681L)) # 23
b = XX(G, b, c, d, a, inp[ 4], S24, r_uint(0xE7D3FBC8L)) # 24
a = XX(G, a, b, c, d, inp[ 9], S21, r_uint(0x21E1CDE6L)) # 25
d = XX(G, d, a, b, c, inp[14], S22, r_uint(0xC33707D6L)) # 26
c = XX(G, c, d, a, b, inp[ 3], S23, r_uint(0xF4D50D87L)) # 27
b = XX(G, b, c, d, a, inp[ 8], S24, r_uint(0x455A14EDL)) # 28
a = XX(G, a, b, c, d, inp[13], S21, r_uint(0xA9E3E905L)) # 29
d = XX(G, d, a, b, c, inp[ 2], S22, r_uint(0xFCEFA3F8L)) # 30
c = XX(G, c, d, a, b, inp[ 7], S23, r_uint(0x676F02D9L)) # 31
b = XX(G, b, c, d, a, inp[12], S24, r_uint(0x8D2A4C8AL)) # 32
# Round 3.
S31, S32, S33, S34 = 4, 11, 16, 23
a = XX(H, a, b, c, d, inp[ 5], S31, r_uint(0xFFFA3942L)) # 33
d = XX(H, d, a, b, c, inp[ 8], S32, r_uint(0x8771F681L)) # 34
c = XX(H, c, d, a, b, inp[11], S33, r_uint(0x6D9D6122L)) # 35
b = XX(H, b, c, d, a, inp[14], S34, r_uint(0xFDE5380CL)) # 36
a = XX(H, a, b, c, d, inp[ 1], S31, r_uint(0xA4BEEA44L)) # 37
d =
|
arkadoel/directORM
|
python/salida/directORM/forProveedores.py
|
Python
|
gpl-2.0
| 3,696
| 0.002976
|
import sqlite3
import directORM
class Proveedor:
def __init__(self):
self.idProveedor = -1
self.nombre = ''
self.email = ''
self.tlf_fijo = ''
self.tlf_movil = ''
self.tlf_fijo2 = ''
self.tlf_movil2 = ''
self.banco = ''
self.cuenta_bancaria = ''
self.direccion = ''
self.foto_logo = ''
class TbProveedores:
    INSERT = '''
        insert into Proveedores
( nombre, email, tlf_fijo, tlf_movil, tlf_fijo2, tlf_movil2, banco, cuenta_bancaria, direccion, foto_logo)
values (?,?,?,?,?,?,?,?,?,?)
'''
DELETE = 'delete from Proveedores where idProveedor = ?'
SELECT = 'select * from Proveedores'
UPDATE = '''
update Proveedores set
nombre = ?,
email = ?,
tlf_fijo = ?,
tlf_movil = ?,
tlf_fijo2 = ?,
tlf_movil2 = ?,
banco = ?,
cuenta_bancaria = ?,
direccion = ?,
foto_logo = ?
where idProveedor = ?
'''
def __init__(self):
self.gestorDB = directORM.Db()
def remove(self, proveedor ):
sql = self.DELETE
self.gestorDB.ejecutarSQL(sql, (proveedor.idProveedor))
def get_proveedor(self, idProveedor=None):
sql = self.SELECT + " where idProveedor=" + str(idProveedor) +";"
fila = self.gestorDB.consultaUnicaSQL(sql)
if fila is None:
return None
else:
o = self.mapear_objeto(fila)
return o
def save(self, proveedor=None):
if proveedor is not None:
if self.get_proveedor(proveedor.idProveedor) is None:
sql = self.INSERT
self.gestorDB.ejecutarSQL(sql, (
proveedor.nombre,
proveedor.email,
proveedor.tlf_fijo,
proveedor.tlf_movil,
proveedor.tlf_fijo2,
proveedor.tlf_movil2,
proveedor.banco,
proveedor.cuenta_bancaria,
proveedor.direccion,
proveedor.foto_logo))
else:
sql = self.UPDATE
self.gestorDB.ejecutarSQL(sql, (
proveedor.nombre,
proveedor.email,
proveedor.tlf_fijo,
proveedor.tlf_movil,
proveedor.tlf_fijo2,
proveedor.tlf_movil2,
proveedor.banco,
proveedor.cuenta_bancaria,
proveedor.direccion,
proveedor.foto_logo,
proveedor.idProveedor))
def mapear_objeto(self, fila=None):
if fila is None:
return None
else:
o = Proveedor()
o.idProveedor = fila['idProveedor']
o.nombre = fila['nombre']
o.email = fila['email']
o.tlf_fijo = fila['tlf_fijo']
o.tlf_movil = fila['tlf_movil']
o.tlf_fijo2 = fila['tlf_fijo2']
o.tlf_movil2 = fila['tlf_movil2']
o.banco = fila['banco']
o.cuenta_bancaria = fila['cuenta_bancaria']
o.direccion = fila['direccion']
o.foto_logo = fila['foto_logo']
return o
def get_proveedores(self, filtro=None):
if filtro is None:
sql = self.SELECT
else:
sql = self.SELECT + " where " + filtro
filas = self.gestorDB.consultaSQL(sql)
objetos = list()
for fila in filas:
o = self.mapear_objeto(fila)
objetos.append(o)
return objetos
|
hnikolov/pihdf
|
examples/hsd_inc/src/hsd_inc_beh.py
|
Python
|
mit
| 340
| 0.002941
|
def hsd_inc_beh(rxd, txd):
'''|
| Specify the behavior, describe data processing; there is no notion
| of clock. Access the in/out interfaces via get() and append()
| methods. The "hsd_inc_beh" function does not return values.
|________'''
if rxd.hasPacket():
data = rxd.get() + 1
        txd.append(data)
|
miracl/amcl
|
version3/c/config64.py
|
Python
|
apache-2.0
| 19,998
| 0.069207
|
import os
import subprocess
import sys
deltext=""
if sys.platform.startswith("linux") :
deltext="rm"
copytext="cp"
if sys.platform.startswith("darwin") :
deltext="rm"
copytext="cp"
if sys.platform.startswith("win") :
deltext="del"
copytext="copy"
def run_in_shell(cmd):
subprocess.check_call(cmd, shell=True)
def replace(namefile,oldtext,newtext):
f = open(namefile,'r')
filedata = f.read()
f.close()
newdata = filedata.replace(oldtext,newtext)
f = open(namefile,'w')
f.write(newdata)
f.close()
def rsaset(tb,tff,nb,base,ml) :
bd=tb+"_"+base
fnameh="config_big_"+bd+".h"
run_in_shell(copytext+" config_big.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"@NB@",nb)
replace(fnameh,"@BASE@",base)
fnameh="config_ff_"+tff+".h"
run_in_shell(copytext+" config_ff.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"WWW",tff)
replace(fnameh,"@ML@",ml)
fnamec="big_"+bd+".c"
fnameh="big_"+bd+".h"
run_in_shell(copytext+" big.c "+fnamec)
run_in_shell(copytext+" big.h "+fnameh)
replace(fnamec,"XXX",bd)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ff_"+tff+".c"
fnameh="ff_"+tff+".h"
run_in_shell(copytext+" ff.c "+fnamec)
run_in_shell(copytext+" ff.h "+fnameh)
replace(fnamec,"WWW",tff)
replace(fnamec,"XXX",bd)
replace(fnameh,"WWW",tff)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="rsa_"+tff+".c"
fnameh="rsa_"+tff+".h"
run_in_shell(copytext+" rsa.c "+fnamec)
run_in_shell(copytext+" rsa.h "+fnameh)
replace(fnamec,"WWW",tff)
replace(fnamec,"XXX",bd)
replace(fnameh,"WWW",tff)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
def curveset(tb,tf,tc,nb,base,nbt,m8,mt,ct,pf,stw,sx,ab,cs) :
bd=tb+"_"+base
fnameh="config_big_"+bd+".h"
    run_in_shell(copytext+" config_big.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"@NB@",nb)
replace(fnameh,"@BASE@",base)
fnameh="config_field_"+tf+".h"
run_in_shell(copytext+" config_field.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"@NBT@",nbt)
replace(fnameh,"@M8@",m8)
replace(fnameh,"@MT@",mt)
ib=int(base)
inb=int(nb)
inbt=int(nbt)
    sh=ib*(1+((8*inb-1)//ib))-inbt
if sh > 30 :
sh=30
replace(fnameh,"@SH@",str(sh))
fnameh="config_curve_"+tc+".h"
run_in_shell(copytext+" config_curve.h "+fnameh)
replace(fnameh,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"@CT@",ct)
replace(fnameh,"@PF@",pf)
replace(fnameh,"@ST@",stw)
replace(fnameh,"@SX@",sx)
replace(fnameh,"@CS@",cs)
replace(fnameh,"@AB@",ab)
fnamec="big_"+bd+".c"
fnameh="big_"+bd+".h"
run_in_shell(copytext+" big.c "+fnamec)
run_in_shell(copytext+" big.h "+fnameh)
replace(fnamec,"XXX",bd)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp_"+tf+".c"
fnameh="fp_"+tf+".h"
run_in_shell(copytext+" fp.c "+fnamec)
run_in_shell(copytext+" fp.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
run_in_shell("gcc -O3 -std=c99 -c rom_field_"+tf+".c")
fnamec="ecp_"+tc+".c"
fnameh="ecp_"+tc+".h"
run_in_shell(copytext+" ecp.c "+fnamec)
run_in_shell(copytext+" ecp.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecdh_"+tc+".c"
fnameh="ecdh_"+tc+".h"
run_in_shell(copytext+" ecdh.c "+fnamec)
run_in_shell(copytext+" ecdh.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
run_in_shell("gcc -O3 -std=c99 -c rom_curve_"+tc+".c")
if pf != "NOT" :
fnamec="fp2_"+tf+".c"
fnameh="fp2_"+tf+".h"
run_in_shell(copytext+" fp2.c "+fnamec)
run_in_shell(copytext+" fp2.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp4_"+tf+".c"
fnameh="fp4_"+tf+".h"
run_in_shell(copytext+" fp4.c "+fnamec)
run_in_shell(copytext+" fp4.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
if cs == "128" :
fnamec="fp12_"+tf+".c"
fnameh="fp12_"+tf+".h"
run_in_shell(copytext+" fp12.c "+fnamec)
run_in_shell(copytext+" fp12.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecp2_"+tc+".c"
fnameh="ecp2_"+tc+".h"
run_in_shell(copytext+" ecp2.c "+fnamec)
run_in_shell(copytext+" ecp2.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="pair_"+tc+".c"
fnameh="pair_"+tc+".h"
run_in_shell(copytext+" pair.c "+fnamec)
run_in_shell(copytext+" pair.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="mpin_"+tc+".c"
fnameh="mpin_"+tc+".h"
run_in_shell(copytext+" mpin.c "+fnamec)
run_in_shell(copytext+" mpin.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="bls_"+tc+".c"
fnameh="bls_"+tc+".h"
run_in_shell(copytext+" bls.c "+fnamec)
run_in_shell(copytext+" bls.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
if cs == "192" :
fnamec="fp8_"+tf+".c"
fnameh="fp8_"+tf+".h"
run_in_shell(copytext+" fp8.c "+fnamec)
run_in_shell(copytext+" fp8.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="fp24_"+tf+".c"
fnameh="fp24_"+tf+".h"
run_in_shell(copytext+" fp24.c "+fnamec)
run_in_shell(copytext+" fp24.h "+fnameh)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnamec,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
replace(fnameh,"ZZZ",tc)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="ecp4_"+tc+".c"
fnameh="ecp4_"+tc+".h"
run_in_shell(copytext+" ecp4.c "+fnamec)
run_in_shell(copytext+" ecp4.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="pair192_"+tc+".c"
fnameh="pair192_"+tc+".h"
run_in_shell(copytext+" pair192.c "+fnamec)
run_in_shell(copytext+" pair192.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="mpin192_"+tc+".c"
fnameh="mpin192_"+tc+".h"
run_in_shell(copytext+" mpin192.c "+fnamec)
run_in_shell(copytext+" mpin192.h "+fnameh)
replace(fnamec,"ZZZ",tc)
replace(fnamec,"YYY",tf)
replace(fnamec,"XXX",bd)
replace(fnameh,"ZZZ",tc)
replace(fnameh,"YYY",tf)
replace(fnameh,"XXX",bd)
run_in_shell("gcc -O3 -std=c99 -c "+fnamec)
fnamec="bls192_"+tc+".
|
GooogIe/VarasTG
|
plugins/btc.py
|
Python
|
gpl-3.0
| 751
| 0.050599
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Btc plugin for Varas
Author: Neon & A Sad Loner
Last modified: November 2016
"""
import urllib2
from plugin import Plugin
name = 'Bitcoin'
class Bitcoin(Plugin):
def __init__(self):
Plugin.__init__(self,"bitcoin","<wallet> Return current balance from a Bitcoin wallet",
"A Sad Loners",1.0)
def run(self,address):
#1btc = 100000000satoshi
print "https://blockchain.info/it/q/addressbalance/"+address
try:
api = urllib2.urlopen("https://blockchain.info/it/q/addressbalance/"+address)
except:
return "Unknown Error"
resp = api.read()
        satoshi = float(resp)
btc = satoshi/100000000
return "Balance: " + str(btc)
|
erigones/esdc-ce
|
vms/middleware.py
|
Python
|
apache-2.0
| 1,676
| 0.00537
|
from logging import getLogger
from vms.models import Dc, DummyDc
logger = getLogger(__name__)
class DcMiddleware(object):
"""
Attach dc attribute to each request.
"""
# noinspection PyMethodMayBeStatic
def process_request(self, request):
dc = getattr(request, 'dc', None)
if not dc or dc.is_dummy:
if request.path.startswith('/api/'):
return # Managed by ExpireTokenAuthentication and request_data decorator
if request.user.is_authenticated():
# Set request.dc for logged in user
request.dc = Dc.objects.get_by_id(request.user.current_dc_id)
# Whenever we set a DC we have to set request.dc_user_permissions right after request.dc is available
request.dc_user_permissions = request.dc.get_user_permissions(request.user)
# Log this request only for authenticated users
logger.debug('"%s %s" user="%s" dc="%s" permissions=%s', request.method, request.path,
request.user.username, request.dc.name, request.dc_user_permissions)
else:
try:
# This will get DC also for external views to login and registration pages according to URL
                    request.dc = Dc.objects.get_by_site(request.META['HTTP_HOST'])
except (KeyError, Dc.DoesNotExist):
request.dc = DummyDc()
# Whenever we set a DC we have to set request.dc_user_permissions right after request.dc is available
                request.dc_user_permissions = frozenset()  # External users have no permissions
|
cheelee/ChannelWorm
|
channelworm/fitter/examples/EGL-19-2.py
|
Python
|
mit
| 6,529
| 0.013019
|
"""
Example of using cwFitter to generate a HH model for EGL-19 Ca2+ ion channel
Based on experimental data from doi:10.1083/jcb.200203055
"""
import os.path
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../..')
from channelworm.fitter import *
if __name__ == '__main__':
userData = dict()
cwd=os.getcwd()
csv_path = os.path.dirname(cwd)+'/examples/egl-19-data/egl-19-IClamp-IV.csv'
ref = {'fig':'2B','doi':'10.1083/jcb.200203055'}
x_var = {'type':'Voltage','unit':'V','toSI':1}
    y_var = {'type':'Current','unit':'A/F','toSI':75e-12}
IV = {'ref':ref,'csv_path':csv_path,'x_var':x_var,'y_var':y_var}
userData['samples'] = {'IV':IV}
# csv_path_IC_100 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-100pA.csv'
# csv_path_IC_200 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-200pA.csv'
# csv_path_IC_300 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-300pA.csv'
# csv_path_IC_400 = os.path.dirname(cwd)+'egl-19-data//egl-19-IClamp-400pA.csv'
# x_var_IC = {'type':'Time','unit':'s','toSI':1}
# y_var_IC = {'type':'Voltage','unit':'V','toSI':1}
# traces_IC = [{'amp':100e-12,'csv_path':csv_path_IC_100,'x_var':x_var_IC,'y_var':y_var_IC},
# {'amp':200e-12,'csv_path':csv_path_IC_200,'x_var':x_var_IC,'y_var':y_var_IC},
# {'amp':300e-12,'csv_path':csv_path_IC_300,'x_var':x_var_IC,'y_var':y_var_IC},
# {'amp':400e-12,'csv_path':csv_path_IC_400,'x_var':x_var_IC,'y_var':y_var_IC}]
# ref_IC = {'fig':'3B','doi':'10.1083/jcb.200203055'}
# IClamp = {'ref':ref_IC,'traces':traces_IC}
# userData['samples'] = {'IClamp':IClamp,'IV':IV}
myInitiator = initiators.Initiator(userData)
sampleData = myInitiator.get_sample_params()
bio_params = myInitiator.get_bio_params()
sim_params = myInitiator.get_sim_params()
myEvaluator = evaluators.Evaluator(sampleData,sim_params,bio_params)
# bio parameters for SLO-2
bio_params['cell_type'] = 'ADAL'
bio_params['channel_type'] = 'EGL-19'
bio_params['ion_type'] = 'Ca'
bio_params['val_cell_params'][0] = 75e-12 # C_mem DOI: 10.1074/jbc.M605814200
bio_params['val_cell_params'][1] = 75e-10 # area DOI: 10.1101/pdb.top066308
bio_params['gate_params'] = {'vda': {'power': 2}}
bio_params['channel_params'] = ['g_dens','e_rev']
bio_params['unit_chan_params'] = ['S/m2','V']
bio_params['min_val_channel'] = [1, 40e-3]
bio_params['max_val_channel'] = [10, 70e-3]
bio_params['channel_params'].extend(['v_half_a','k_a','T_a'])
bio_params['unit_chan_params'].extend(['V','V','s'])
bio_params['min_val_channel'].extend([-10e-3, 4e-3, 0.0001])
bio_params['max_val_channel'].extend([ 30e-3, 20e-3, 2e-3])
# Simulation parameters for EGL-19 I/V
sim_params['v_hold'] = -70e-3
sim_params['I_init'] = 0
sim_params['pc_type'] = 'VClamp'
sim_params['deltat'] = 1e-5
sim_params['duration'] = 0.03
sim_params['start_time'] = 0.002
sim_params['end_time'] = 0.022
sim_params['protocol_start'] = -40e-3
sim_params['protocol_end'] = 80e-3
sim_params['protocol_steps'] = 10e-3
opt = '-pso'
# opt = '-ga'
# opt = None
if len(sys.argv) == 2:
opt = sys.argv[1]
if 'IV' in sampleData and opt is not None:
while True:
q = raw_input("\n\nTry fitting curves (y,n):")
if q == "n":
break # stops the loop
elif q == "y":
# Find initial guess for parameters using curve_fit, leastsq
popt = None
                best_candidate = (np.asarray(bio_params['min_val_channel']) + np.asarray(bio_params['max_val_channel'])) / 2  # midpoint of the parameter bounds
best_candidate_params = dict(zip(bio_params['channel_params'],best_candidate))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
if 'IV' in sampleData:
popt , p0 = mySimulator.optim_curve(params= bio_params['channel_params'],
best_candidate= best_candidate,
target= [sampleData['IV']['V'],sampleData['IV']['I']])
print 'Params after IV minimization:'
print p0
IV_fit_cost = myEvaluator.iv_cost(popt)
print 'IV cost:'
print IV_fit_cost
if 'VClamp' in sampleData:
VClamp_fit_cost = myEvaluator.vclamp_cost(popt)
print 'VClamp cost:'
print VClamp_fit_cost
vData = np.arange(-0.040, 0.080, 0.001)
Iopt = mySimulator.iv_act(vData,*popt)
plt.plot([x*1 for x in bestSim['V_ss']],bestSim['I_ss'], label = 'Initial parameters', color='y')
plt.plot([x*1 for x in sampleData['IV']['V']],sampleData['IV']['I'], '--ko', label = 'sample data')
plt.plot([x*1 for x in vData],Iopt, color='r', label = 'Fitted to IV curve')
plt.legend()
plt.title("IV Curve Fit")
                plt.xlabel('V (V)')  # values plotted are SI volts (scaled by x*1 above), not mV
plt.ylabel('I (A)')
plt.show()
if popt is not None:
if opt == '-pso':
bio_params['min_val_channel'][0:4] = popt[0:4] - abs(popt[0:4]/2)
bio_params['max_val_channel'][0:4] = popt[0:4] + abs(popt[0:4]/2)
else:
bio_params['min_val_channel'][0:4] = popt[0:4]
bio_params['max_val_channel'][0:4] = popt[0:4]
best_candidate_params = dict(zip(bio_params['channel_params'],popt))
cell_var = dict(zip(bio_params['cell_params'],bio_params['val_cell_params']))
mySimulator = simulators.Simulator(sim_params,best_candidate_params,cell_var,bio_params['gate_params'])
bestSim = mySimulator.patch_clamp()
myModelator = modelators.Modelator(bio_params,sim_params)
myModelator.compare_plots(sampleData,bestSim,show=True)
myModelator.ss_plots(bestSim,show=True)
|
hayderimran7/tempest
|
tempest/api/baremetal/admin/test_chassis.py
|
Python
|
apache-2.0
| 3,455
| 0
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest_lib import exceptions as lib_exc
from tempest.api.baremetal.admin import base
from tempest.common.utils import data_utils
from tempest import test
class TestChassis(base.BaseBaremetalTest):
"""Tests for chassis."""
@classmethod
def resource_setup(cls):
super(TestChassis, cls).resource_setup()
_, cls.chassis = cls.create_chassis()
def _assertExpected(self, expected, actual):
        # Check that each expected key/value (timestamps excluded) exists in the actual response body
for key, value in six.iteritems(expected):
if key not in ('created_at', 'updated_at'):
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
@test.idempotent_id('7c5a2e09-699c-44be-89ed-2bc189992d42')
def test_create_chassis(self):
descr = data_utils.rand_name('test-chassis')
_, chassis = self.create_chassis(description=descr)
self.assertEqual(chassis['description'], descr)
@test.idempotent_id('cabe9c6f-dc16-41a7-b6b9-0a90c212edd5')
def test_create_chassis_unicode_description(self):
# Use a unicode string for testing:
# 'We ♡ OpenStack in Ukraine'
descr = u'В Україні ♡ OpenStack!'
_, chassis = self.create_chassis(description=descr)
        self.assertEqual(chassis['description'], descr)
@test.idempotent_id('c84644df-31c4-49db-a307-8942881f41c0')
def test_show_chassis(self):
_, chassis = self.client.show_chassis(self.chassis['uuid'])
self._assertExpected(self.chassis, chassis)
@test.idempotent_id('29c9cd3f-19b5-417b-9864-99512c3b33b3')
def test_list_chassis(self):
        _, body = self.client.list_chassis()
self.assertIn(self.chassis['uuid'],
[i['uuid'] for i in body['chassis']])
@test.idempotent_id('5ae649ad-22d1-4fe1-bbc6-97227d199fb3')
def test_delete_chassis(self):
_, body = self.create_chassis()
uuid = body['uuid']
self.delete_chassis(uuid)
self.assertRaises(lib_exc.NotFound, self.client.show_chassis, uuid)
@test.idempotent_id('cda8a41f-6be2-4cbf-840c-994b00a89b44')
def test_update_chassis(self):
_, body = self.create_chassis()
uuid = body['uuid']
new_description = data_utils.rand_name('new-description')
_, body = (self.client.update_chassis(uuid,
description=new_description))
_, chassis = self.client.show_chassis(uuid)
self.assertEqual(chassis['description'], new_description)
@test.idempotent_id('76305e22-a4e2-4ab3-855c-f4e2368b9335')
def test_chassis_node_list(self):
_, node = self.create_node(self.chassis['uuid'])
_, body = self.client.list_chassis_nodes(self.chassis['uuid'])
self.assertIn(node['uuid'], [n['uuid'] for n in body['nodes']])
|
huran2014/huran.github.io
|
wot_gateway/usr/lib/python2.7/urlparse.py
|
Python
|
gpl-2.0
| 14,414
| 0.002081
|
"""Parse (absolute and relative) URLs.
urlparse module is based upon the following RFC specifications.
RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.
RFC 2732 : "Format for Literal IPv6 Addresses in URL's by R.Hinden, B.Carpenter
and L.Masinter, December 1999.
RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.
RFC 2368: "The mailto URL scheme", by P.Hoffman , L Masinter, J. Zwinski, July 1998.
RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.
RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994
RFC 3986 is considered the current standard and any future changes to
urlparse module should conform with it. The urlparse module is
currently not entirely compliant with this RFC due to defacto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained. The testcases in
test_urlparse.py provides a good indicator of parsing behavior.
"""
__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
"urlsplit", "urlunsplit", "parse_qs", "parse_qsl"]
# A classification of schemes ('' means apply by default)
uses_relative = ['ftp', 'http', 'gopher', 'nntp', 'imap',
'wais', 'file', 'https', 'shttp', 'mms',
'prospero', 'rtsp', 'rtspu', '', 'sftp',
'svn', 'svn+ssh']
uses_netloc = ['ftp', 'http', 'gopher', 'nntp', 'telnet',
'imap', 'wais', 'file', 'mms', 'https', 'shttp',
'snews', 'prospero', 'rtsp', 'rtspu', 'rsync', '',
'svn', 'svn+ssh', 'sftp','nfs','git', 'git+ssh']
non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']
uses_params = ['ftp', 'hdl', 'prospero', 'http', 'imap',
'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', '', 'sftp']
uses_query = ['http', 'wais', 'imap', 'https', 'shttp', 'mms',
'gopher', 'rtsp', 'rtspu', 'sip', 'sips', '']
uses_fragment = ['ftp', 'hdl', 'http', 'gopher', 'news',
'nntp', 'wais', 'https', 'shttp', 'snews',
'file', 'prospero', '']
# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789'
'+-.')
MAX_CACHE_SIZE = 20
_parse_cache = {}
def clear_cache():
"""Clear the parse cache."""
_parse_cache.clear()
class ResultMixin(object):
"""Shared methods for the parsed result objects."""
@property
def username(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
userinfo = userinfo.split(":", 1)[0]
return userinfo
return None
@property
def password(self):
netloc = self.netloc
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)[1]
return None
@property
def hostname(self):
netloc = self.netloc.split('@')[-1]
if '[' in netloc and ']' in netloc:
return netloc.split(']')[0][1:].lower()
elif ':' in netloc:
return netloc.split(':')[0].lower()
elif netloc == '':
return None
else:
return netloc.lower()
@property
def port(self):
netloc = self.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
return int(port, 10)
else:
return None
from collections import namedtuple
class SplitResult(namedtuple('SplitResult', 'scheme netloc path query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
class ParseResult(namedtuple('ParseResult', 'scheme netloc path params query fragment'), ResultMixin):
__slots__ = ()
def geturl(self):
return urlunparse(self)
def urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
tuple = urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = tuple
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
return ParseResult(scheme, netloc, url, params, query, fragment)
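# Added example:
#
#   >>> urlparse('http://user:[email protected]:80/path;params?q=1#frag')
#   ParseResult(scheme='http', netloc='user:[email protected]:80',
#               path='/path', params='params', query='q=1', fragment='frag')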
def _splitparams(url):
if '/' in url:
i = url.find(';', url.rfind('/'))
if i < 0:
return url, ''
else:
i = url.find(';')
return url[:i], url[i+1:]
def _splitnetloc(url, start=0):
delim = len(url) # position of end of domain part of url, default is end
for c in '/?#': # look for delimiters; the order is NOT important
wdelim = url.find(c, start) # find first of this delim
if wdelim >= 0: # if found
delim = min(delim, wdelim) # use earliest delim position
return url[start:delim], url[delim:] # return (domain, rest)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return cached
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
for c in url[:i]:
if c not in scheme_chars:
break
else:
try:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path
_testportnum = int(url[i+1:])
except ValueError:
scheme, url = url[:i].lower(), url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and scheme in uses_fragment and '#' in url:
url, fragment = url.split('#', 1)
if scheme in uses_query and '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
def urlunparse(data):
"""Put a parsed URL back together again. This may result in a
slightly different, but equivalent URL, if the URL that was parsed
originally had redundant delimiters, e.g. a ? with an empty query
(the draft states that these are equivalent)."""
scheme, netloc, url, params, query, fragment = data
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, n
|
amsn/amsn2
|
amsn2/plugins/core.py
|
Python
|
gpl-2.0
| 2,184
| 0.009615
|
# plugins module for amsn2
"""
Plugins with amsn2 will be a subclass of the aMSNPlugin() class.
When this module is initially imported it should load the plugins from the last session. Done in the init() proc.
Then the GUI should call plugins.loadPlugin(name) or plugins.unLoadPlugin(name) in order to deal with plugins.
"""
# init()
# Called when the plugins module is imported (only for the first time).
# Should find plugins and populate a list ready for getPlugins().
# Should also auto-update all plugins.
def init(): pass
# loadPlugin(plugi
|
n_name)
# Called (by the GUI or from init()) to load a plugin. plugin_name as set in plugin's XML (or from getPlugins()).
# This loads the module for the plugin. The module is then responsible for calling plugins.registerPlugin(instance).
def loadPlugin(plugin_name):
"""
@type plugin_name: str
"""
pass
# unLoadPlugin(plugin_name)
# Called to unload a plugin. Name is name as set in plugin's XML.
def unLoadPlugin(plugin_name):
"""
@type plugin_name: str
"""
pass
# registerPlugin(plugin_instance)
# Saves the instance of the plugin, and registers it in the loaded list.
def registerPlugin(plugin_instance):
"""
@type plugin_instance: L{amsn2.plugins.developers.aMSNPlugin}
"""
pass
# getPlugins()
# Returns a list of all available plugins, as in ['Plugin 1', 'Plugin 2']
def getPlugins(): pass
# getPluginsWithStatus()
# Returns a list with an entry for each plugin giving the plugin's name and either Loaded or NotLoaded.
# IE: [['Plugin 1', 'Loaded'], ['Plugin 2', 'NotLoaded']]
def getPluginsWithStatus(): pass
# getLoadedPlugins()
# Returns a list of loaded plugins. as in ['Plugin 1', 'Plugin N']
def getLoadedPlugins(): pass
# findPlugin(plugin_name)
# Returns the running instance of the plugin with name plugin_name, or None if not found.
def findPlugin(plugin_name):
"""
@type plugin_name: str
"""
pass
# saveConfig(plugin_name, data)
def saveConfig(plugin_name, data):
"""
@type plugin_name: str
@type data: object
"""
pass
# Calls the init procedure.
# Will only be called on the first import (thanks to python).
init()
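# --------------------------------------------------------------------------
# Editor's sketch (not amsn2 code): one minimal way the registry above could
# be backed. Kept commented out so the stub API stays authoritative; the
# "_loaded" dict and the plugin instance's ".name" attribute are assumptions
# not stated in the comments above.
#
# _loaded = {}
#
# def registerPlugin(plugin_instance):
#     _loaded[plugin_instance.name] = plugin_instance
#
# def getLoadedPlugins():
#     return list(_loaded.keys())
#
# def findPlugin(plugin_name):
#     return _loaded.get(plugin_name)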
|
sam-m888/gprime
|
gprime/plug/menu/__init__.py
|
Python
|
gpl-2.0
| 1,579
| 0
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; ei
|
ther version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
#
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen/plug/menu/__init__.py
"""
The menu package for allowing plugins to specify options in a generic way.
"""
from ._menu import Menu
from ._option import Option
from ._string import StringOption
from ._color import ColorOption
from ._number import NumberOption
from ._text import TextOption
from ._boolean import BooleanOption
from ._enumeratedlist import EnumeratedListOption
from ._filter import FilterOption
from ._person import PersonOption
from ._family import FamilyOption
from ._note import NoteOption
from ._media import MediaOption
from ._personlist import PersonListOption
from ._placelist import PlaceListOption
from ._surnamecolor import SurnameColorOption
from ._destination import DestinationOption
from ._style import StyleOption
from ._booleanlist import BooleanListOption
|
joaormatos/anaconda
|
mmfparser/data/chunkloaders/actions/__init__.py
|
Python
|
gpl-3.0
| 749
| 0.001335
|
# Copyright (c) Mathias Kaerlev 2012.
# This file is part of Anaconda.
# Anaconda is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Anaconda is distributed in th
|
e hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNE
|
SS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Anaconda. If not, see <http://www.gnu.org/licenses/>.
from mmfparser.data.chunkloaders.actions.names import *
|
bobbyphilip/learn_python
|
google-python-exercises/basic/wordcount.py
|
Python
|
apache-2.0
| 3,007
| 0.006651
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import s
|
ys
def parse_file(filename):
word_dict= {}
file = open(filename)
for f in file:
words = f.split()
for word in words:
word = word.lower()
|
if word in word_dict:
value = word_dict.get(word)
word_dict[word] = value+1
else:
word_dict[word] = 1
file.close()
return word_dict
def print_words(filename):
word_dict = parse_file(filename)
keys = sorted(word_dict.keys())
for key in keys:
print key,word_dict[key]
def print_top(filename):
word_dict = parse_file(filename)
top_list = sorted(word_dict.items(),key=value_sort,reverse=True)
count =20
if len(top_list)<count:
count = len(top_list)
for word_tuple in top_list[0:count]:
print word_tuple[0],word_tuple[1]
def value_sort(word_tuple):
return word_tuple[1]
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
if len(sys.argv) != 3:
print 'usage: ./wordcount.py {--count | --topcount} file'
sys.exit(1)
option = sys.argv[1]
filename = sys.argv[2]
if option == '--count':
print_words(filename)
elif option == '--topcount':
print_top(filename)
else:
print 'unknown option: ' + option
sys.exit(1)
if __name__ == '__main__':
main()
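# Illustrative run (editor's note; "small.txt" is a hypothetical file
# containing the text "We came we saw"):
#   $ ./wordcount.py --count small.txt
#   came 1
#   saw 1
#   we 2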
|
yezune/kicomav
|
Engine/plugins/macro.py
|
Python
|
gpl-2.0
| 19,013
| 0.017935
|
# -*- coding:utf-8 -*-
"""
Copyright (C) 2013 Nurilab.
Author: Kei Choi([email protected])
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
__revision__ = '$LastChangedRevision: 2 $'
__author__ = 'Kei Choi'
__version__ = '1.0.0.%d' % int( __revision__[21:-2] )
__contact__ = '[email protected]'
import os # imported for file deletion
import zlib
import hashlib
import struct, mmap
import kernel
import kavutil
import glob
# ¸ÅÅ©·Î ŸÀÔ
X95M = 1
X97M = 2
W95M = 3
W97M = 4
SIGTOOL = False
def IsPrint(char) :
c = ord(char)
if c > 0x20 and c < 0x80 :
return True
else :
return False
def ExtractMacroData_W95M(data) :
mac_data = None
data_size = len(data)
try :
if data_size < 0x200 : raise SystemError
version = struct.unpack('<H', data[2:2+2])[0]
if version > 0xc0 : raise SystemError
exist_macro = struct.unpack('<L', data[0x11C:0x11C+4])[0]
if exist_macro <= 2 : raise SystemError
mac_pos = struct.unpack('<L', data[0x118:0x118+4])[0]
if ord(data[mac_pos]) != 0xFF : raise SystemError
while ord(data[mac_pos + 1]) != 0x01 : # chHplmcd
ch = ord(data[mac_pos + 1])
val = struct.unpack('<H', data[mac_pos+2:mac_pos+4])[0]
if ch == 0x02 : mac_pos += val * 0x4 # chHplacd
elif ch == 0x03 : mac_pos += val * 0xE # chHplkme
elif ch == 0x04 : mac_pos += val * 0xE # chHplkmeBad
elif ch == 0x05 : mac_pos += val * 0xC # chHplmud
elif ch == 0x12 : mac_pos += 2 # chUnnamedToolbar
elif ch == 0x40 : raise SystemError # chTcgEnd
else : raise SystemError
mac_pos += 3
mac_num = struct.unpack('<H', data[mac_pos+2:mac_pos+4])[0]
mac_pos += 4
        # print mac_num # number of macros
        mac_info = 0 # count of key macro info entries
all_code = []
for i in range(mac_num) :
if ord(data[mac_pos + (mac_info * 0x18)]) == 0x55 :
pos = mac_pos + (mac_info * 0x18)
w95m_key = ord(data[pos + 1])
w95m_len = struct.unpack('<L', data[pos+0x0C:pos+0x0C+4])[0]
w95m_pos = struct.unpack('<L', data[pos+0x14:pos+0x14+4])[0]
# print hex(w95m_key), hex(w95m_len), hex(w95m_pos)
if w95m_key != 0 :
w95m_code = ''
for j in range(w95m_len) :
ch = ord(data[w95m_pos + j]) ^ w95m_key
w95m_code += chr(ch)
else :
w95m_code = data[w95m_pos:w95m_pos + w95m_len]
all_code.append(w95m_code)
mac_info += 1
mac_data = all_code
except :
pass
return mac_data
def ExtractMacroData_X95M(data) :
mac_data = None
data_size = len(data)
try :
if data_size < 0x200 : raise SystemError
if ord(data[0]) != 0x01 : raise SystemError
mac_pos = struct.unpack('<L', data[10:10+4])[0]
mac_pos += ( 14L + 14L )
if data_size < mac_pos : raise SystemError
t = struct.unpack('<L', data[mac_pos:mac_pos+4])[0]
        mac_pos += t + 28L + 18L - 14L
if data_size < mac_pos : raise SystemError
mac_pos = struct.unpack('<L', data[mac_pos:mac_pos+4])[0]
mac_pos += 0x3C
if data_size < mac_pos : raise SystemError
        # reached the macro info location
if ord(data[mac_pos]) != 0xFE or ord(data[mac_pos+1]) != 0xCA :
raise SystemError
        # get the number of lines of the macro source code
mac_lines = struct.unpack('<H', data[mac_pos+4:mac_pos+6])[0]
if mac_lines == 0 : raise SystemError
mac_pos = mac_pos + 4L + (mac_lines * 12L)
if data_size < mac_pos : raise SystemError
mac_len = struct.unpack('<L', data[mac_pos+6:mac_pos+10])[0]
mac_pos += 10
# print 'ok :', hex(mac_pos), mac_lines, mac_len
        # extract the region containing the macro
if data_size < (mac_pos + mac_len) : raise SystemError
mac_data = data[mac_pos:mac_pos + mac_len]
except :
pass
return mac_data
def ExtractMacroData_Macro97(data) :
mac_data = None
data_size = len(data)
try :
if data_size < 0x200 : raise SystemError
        if ord(data[0]) != 0x01 : raise SystemError # not a mac
|
ro
if ord(data[9]) == 0x01 and ord(data[10]) == 0x01 :
            # Excel 97 or Word 97
mac_pos = struct.unpack('<L', data[0xB:0xB+4])[0] + 0x4F
mac_pos += (struct.unpack('<H', data[ma
|
c_pos:mac_pos+2])[0] * 16) + 2
mac_pos += struct.unpack('<L', data[mac_pos:mac_pos+4])[0] + 10
mac_pos += struct.unpack('<L', data[mac_pos:mac_pos+4])[0] + 81
mac_pos = struct.unpack('<L', data[mac_pos:mac_pos+4])[0] + 60
else :
            # Excel 2000 or Word 2000 or later
mac_pos = struct.unpack('<L', data[25:25+4])[0]
mac_pos = (mac_pos - 1) + 0x3D
if ord(data[mac_pos]) != 0xFE or ord(data[mac_pos+1]) != 0xCA :
raise SystemError
mac_lines = struct.unpack('<H', data[mac_pos+4:mac_pos+6])[0]
if mac_lines == 0 : raise SystemError
        mac_pos = mac_pos + 6L + (mac_lines * 12L)
Len = struct.unpack('<L', data[mac_pos+6:mac_pos+10])[0]
Off = mac_pos + 10
'''
print 'Macro off :', hex(Off)
print 'Macro len :', Len
fp = open('w97m.dmp', 'wb')
fp.write(data[Off:Off+Len])
fp.close()
'''
mac_data = data[Off:Off+Len]
except :
pass
return mac_data
def GetMD5_Macro(data, target_macro) :
global SIGTOOL
ret = None
try :
max = 0
buf = ''
for i in range(len(data)) :
c = data[i]
if IsPrint(c) :
max += 1
else :
if max > 3 :
if SIGTOOL == True :
                        print data[i-max:i] # for reference when building patterns (sigtool)
buf += data[i-max:i]
max = 0
md5 = hashlib.md5()
md5.update(buf)
fmd5 = md5.hexdigest().decode('hex')
if SIGTOOL == True :
str_macro = ['', 'x95m', 'x97m', 'w95m', 'w97m']
            print '[%s] %s:%s:%s:' % (str_macro[target_macro], len(buf), md5.hexdigest(), len(data)) # pattern extraction (sigtool)
ret = (len(buf), fmd5, len(data))
except :
pass
return ret
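# Worked example (editor's note, hypothetical input): for
# data = 'ABCD\x00EFGHI\x00hi\x00', the printable runs longer than 3 chars are
# 'ABCD' and 'EFGHI', so buf == 'ABCDEFGHI' and GetMD5_Macro returns
# (9, <md5 digest of buf as raw bytes>, 14). Note that a printable run at the
# very end of data is never flushed into buf, and that IsPrint() treats the
# space character (0x20) as non-printable.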
#---------------------------------------------------------------------
# KavMain class
# The class that marks this module as a KICOM Anti-Virus engine module.
# Without this class, the engine kernel will not load the module.
#---------------------------------------------------------------------
class KavMain :
    #-----------------------------------------------------------------
    # init(self, plugins)
    # Performs the initialization work for the engine module.
    #-----------------------------------------------------------------
    def init(self, plugins) : # initialize the engine module
try :
self.plugins = plugins
self.x95m_ptn = []
self.x95m_iptn = {}
self.x97m_ptn = []
self.x97m_iptn = {}
self.w95m_ptn = []
self.w95m_iptn = {}
self.w97m_ptn = []
self.w97m_iptn = {}
self.__signum__ = 0
self.__date__ = 0
self.__time__ = 0
self.max_date = 0
if self.__LoadDB__(X95M) == 1 : raise SystemError
|
rebolinho/liveit.repository
|
script.video.F4mProxy/lib/f4mUtils/datefuncs.py
|
Python
|
gpl-2.0
| 2,355
| 0.005096
|
# Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
import os
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
year, month, day = s.split("-")
day, tail = day[:2], day[2:]
hour, minute, second = tail[1:].split(":")
second = second[:2]
year, month, day = int(year), int(month), int(day)
hour, minute, second = int(hour), int(minute), int(second)
return createDateClass(year, month, day, hour, minute, second)
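# Illustrative round-trip (editor's note), valid on either branch below:
#   printDateClass(parseDateClass("2013-05-01T12:30:45Z"))
#     == "2013-05-01T12:30:45Z"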
if os.name != "java":
from datetime import datetime, timedelta
#Helper functions for working with a date/time class
def createDateClass(year, month, day, hour, minute, second):
return datetime(year, month, day, hour, minute, second)
def printDateClass(d):
#Split off fractional seconds, append 'Z'
return d.isoformat().split(".")[0]+"Z"
def getNow():
return datetime.utcnow()
def getHoursFromNow(hours):
return datetime.utcnow() + timedelta(hours=hours)
def getMinutesFromNow(minutes):
return datetime.utcnow() + timedelta(minutes=minutes)
def isDateClassExpired(d):
return d < datetime.utcnow()
def isDateClassBefore(d1, d2):
return d1 < d2
else:
#Jython 2.1 is missing lots of python 2.3 stuff,
#which we have to emulate here:
import java
import
|
jarray
|
def createDateClass(year, month, day, hour, minute, second):
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.set(year, month-1, day, hour, minute, second)
return c
def printDateClass(d):
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \
(d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \
d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND))
def getNow():
c = java.util.Calendar.getInstance()
c.setTimeZone(java.util.TimeZone.getTimeZone("UTC"))
c.get(c.HOUR) #force refresh?
return c
def getHoursFromNow(hours):
d = getNow()
d.add(d.HOUR, hours)
return d
def isDateClassExpired(d):
n = getNow()
return d.before(n)
def isDateClassBefore(d1, d2):
return d1.before(d2)
|
mimepp/umspx
|
htdocs/umsp/plugins/eyetv/eyetv-controller.py
|
Python
|
gpl-3.0
| 1,293
| 0.037123
|
#!/usr/bin/env python
#-*- coding: UTF-8 -*-
from BaseHTTPServer import BaseHTTPRe
|
questHandler, HTTPServer
import subprocess, time
last_ch = 0
class TvServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
global last_ch
cmd = self.path.split('/')
if 'favicon.ico' in cmd:
return
ch = int(cmd[1])
if not ch or ch < 1:
ch = 1
if ch == last_ch:
return
last_ch = ch
p = subprocess.Popen("killall VLC",shell=True)
time.sleep(0.5)
cmd = "/Applications/VLC.app/Contents/MacOS/VLC -
|
I dummy eyetv:// --sout='#std{access=http,mux=ts,dst=<your ip>:8484}' --sout-keep --autocrop --intf dummy --eyetv-channel=%s" % ch
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,close_fds=True)
time.sleep(0.5)
self.send_response(301)
self.send_header("Location", "http://<your ip>:8484?t=%f" % time.time())
self.end_headers()
return
def do_POST(self):
pass
return
def main():
try:
server = HTTPServer(('',8485),TvServerHandler)
print 'server started'
server.serve_forever()
except KeyboardInterrupt:
print 'shutting down'
server.socket.close()
if __name__ == '__main__':
main()
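# Illustrative use (editor's note): a GET request such as
# http://<host>:8485/5 kills any running VLC, restarts it streaming EyeTV
# channel 5 on port 8484, and answers with a 301 redirect to that stream.
# The <your ip> placeholders above must be filled in first.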
|
ddimensia/RaceCapture_App
|
autosportlabs/uix/track/racetrackview.py
|
Python
|
gpl-3.0
| 1,499
| 0.007338
|
import kivy
kivy.require('1.9.1')
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.app import Builder
from kivy.metrics import dp
from kivy.graphics import Color, Line
from autosportlabs.racecapture.geo.geopoint import GeoPoint
from autosportlabs.uix.track.trackmap import TrackMapView
from utils import *
Builder.load_file('autosportlabs/uix/track/racetrackview.kv')
class RaceTrackView(BoxLayout):
def __init__(self, **kwargs):
super(RaceTrackView, self).__init__(**kwargs)
def loadTrack(self, track):
self.initMap(track)
def initMap(self, track):
self.ids.trackmap.setTrackPoints(track.map_points)
def remove_reference_mark(self, key):
self.ids.trackmap.remove_marker(key)
def add_reference_mark(self, key, color):
trackmap = self.ids.trackmap
if trackmap.get_marker(key) is None:
trackmap.add_marker(key, color)
def update_reference_mark(self, key, geo_point):
self.ids.trackmap.update_marker(key, geo_point)
def add_map_path(self, key, path, color):
self.ids.trackmap.add_path(key, path, color)
def remove_map_p
|
ath(self, key):
self.ids.trackmap.remove_path(key)
def add_heat_va
|
lues(self, key, heat_values):
self.ids.trackmap.add_heat_values(key, heat_values)
def remove_heat_values(self, key):
self.ids.trackmap.remove_heat_values(key)
|
bx5974/sikuli
|
sikuli-script/src/test/python/test_hotkey.py
|
Python
|
mit
| 1,110
| 0.034234
|
import unittest
from sikuli import *
from java.awt.event import KeyEvent
from javax.swing
|
import JFrame
not_pressed = True
WAIT_TIME = 4
def pressed(event):
global not_pressed
not_pressed = False
print "hotkey pressed! %d %d" %(event.modifiers,event.keyCode)
class TestHotkey(unittest.TestCase):
def testAddHotkey(self):
self.assertTrue(Env.addHotkey(Key.F6, 0, pressed))
def testAddHotkeyReal(self):
#f = JFrame("hello")
global no
|
t_pressed
Env.addHotkey(Key.F6, 0, pressed)
self.assertTrue(not_pressed)
count = 0
while not_pressed and count < WAIT_TIME:
count += 1
wait(1)
keyDown(Key.F6)
keyUp(Key.F6)
self.assertFalse(not_pressed)
#f.dispose()
def testRemoveHotkey(self):
self.assertFalse(Env.removeHotkey(Key.F7, 0))
self.assertTrue(Env.addHotkey(Key.F7, 0, pressed))
self.assertTrue(Env.removeHotkey(Key.F7, 0))
def setUp(self):
global not_pressed
not_pressed = True
@classmethod
def tearDownClass(self):
print "clean up"
Env.cleanUp()
|
Capricoinofficial/Capricoin
|
contrib/bitrpc/bitrpc.py
|
Python
|
mit
| 7,842
| 0.038128
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:22713")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:22713")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Capricoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Capricoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmati
|
ons (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
prin
|
t access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
NicovincX2/Python-3.5
|
Physique/Physique quantique/Mécanique quantique/principe_de_superposition_lineaire.py
|
Python
|
gpl-3.0
| 1,519
| 0.003333
|
# -*- coding: utf-8 -*-
import os
"""
Illustration d'un exercice de TD visant à montrer l'évolution t
|
emporelle de la
densité de probabilité pour la superposition équiprobable d'un état n=1 et
d'un état n quelconque (à fixer) pour le puits quantique infini.
Par souci de simplicité, on se débrouille pour que E_1/hbar = 1
"""
import numpy as np # Boîte à outils numériques
import matplotlib.pyplot as plt # Boîte à outils graphiques
from matplotlib import animation # Pour l'animation progressive
# Second état n obser
|
ver (à fixer)
n = 2
# On met tous les paramètres à 1 (ou presque)
t0 = 0
dt = 0.1
L = 1
hbar = 1
h = hbar * 2 * np.pi
m = (2 * np.pi)**2
E1 = h**2 / (8 * m * L**2)
En = n * E1
x = np.linspace(0, L, 1000)
def psi1(x, t):
return np.sin(np.pi * x / L) * np.exp(1j * E1 * t / hbar)
def psin(x, t):
return np.sin(n * np.pi * x / L) * np.exp(1j * En * t / hbar)
def psi(x, t):
return 1 / L**0.5 * (psi1(x, t) + psin(x, t))
fig = plt.figure()
line, = plt.plot(x, abs(psi(x, t0))**2)
plt.title('$t={}$'.format(t0))
plt.ylabel('$|\psi(x,t)|^2$')
plt.xlabel('$x$')
plt.plot(x, abs(psi1(x, t0))**2, '--', label='$|\psi_1|^2$')
plt.plot(x, abs(psin(x, t0))**2, '--', label='$|\psi_{}|^2$'.format(n))
plt.legend()
def init():
pass
def animate(i):
t = i * dt + t0
line.set_ydata(abs(psi(x, t))**2)
plt.title('$t={}$'.format(t))
anim = animation.FuncAnimation(fig, animate, frames=1000, interval=20)
plt.show()
os.system("pause")
|
gazpachoking/Flexget
|
flexget/components/managed_lists/lists/regexp_list/cli.py
|
Python
|
mit
| 5,302
| 0.003584
|
from __future__ import unicode_literals, division, absolute_import
import re
from argparse import ArgumentParser, ArgumentTypeError
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import options
from flexget.event import event
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console
from flexget.utils.database import Session
from . import db
def do_cli(manager, options):
"""Handle regexp-list cli"""
action_map = {
'all': action_all,
'list': action_list,
'add': action_add,
'del': action_del,
'purge': action_purge,
}
action_map[options.regexp_action](options)
def action_all(options):
""" Show all regexp lists """
lists = db.get_regexp_lists()
header = ['#', 'List Name']
table_data = [header]
for regexp_list in lists:
table_data.append([regexp_list.id, regexp_list.name])
table = TerminalTable(options.table_type, table_data)
try:
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
def action_list(options):
"""List regexp list"""
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
header = ['Regexp']
table_data = [header]
regexps = db.get_regexps_by_list_id(
regexp_list.id, order_by='added', descending=True, session=session
)
for regexp in regexps:
regexp_row = [regexp.regexp or '']
table_data.append(regexp_row)
try:
table = TerminalTable(options.table_type, table_data)
console(table.output)
except TerminalTableError as e:
console('ERROR: %s' % str(e))
def action_add(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}, creating'.format(options.list_name))
regexp_list = db.create_list(options.list_name, session=session)
regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
if not regexp:
console("Adding regexp {} to list {}".format(options.regexp, regexp_list.name))
db.add_to_list_by_name(regexp_list.name, options.regexp, session=session)
console(
'Successfully added regexp {} to regexp list {} '.format(
options.regexp, regexp_list.name
)
)
else:
console("Regexp {} already exists in list {}".format(options.regexp, regexp_list.name))
def action_del(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
if regexp:
console('Removing regexp {} from list {}'.format(options.regexp, options.list_name))
session.delete(regexp)
else:
console(
'Could not find regexp {} in list {}'.format(
                    options.regexp, options.list_name
)
)
return
def action_purge(options):
with Session() as session:
regexp_list = db.get_list_by_exact_name(options.list_name)
if not regexp_list:
console('Could not find regexp list with name {}'.format(options.list_name))
return
console('Deleting list %s' % options.list_name)
session.delete(regexp_list)
def regexp_type(regexp):
try:
re.compile(regexp)
return regexp
except re.error as e:
raise ArgumentTypeError(e)
@event('options.register')
def register_parser_arguments():
# Common option to be used in multiple subparsers
regexp_parser = ArgumentParser(add_help=False)
regexp_parser.add_argument('regexp', type=regexp_type, help="The regexp")
list_name_parser = ArgumentParser(add_help=False)
list_name_parser.add_argument(
'list_name', nargs='?', help='Name o
|
f regexp list to operate on', default='regexps'
)
# Register subcommand
parser = options.register_command('regexp-list', do_cli, help='View and manage regexp lists')
# Set up our subparsers
subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='regexp_action')
|
subparsers.add_parser('all', parents=[table_parser], help='Shows all existing regexp lists')
subparsers.add_parser(
'list', parents=[list_name_parser, table_parser], help='List regexp from a list'
)
subparsers.add_parser(
'add', parents=[list_name_parser, regexp_parser], help='Add a regexp to a list'
)
subparsers.add_parser(
'del', parents=[list_name_parser, regexp_parser], help='Remove a regexp from a list'
)
subparsers.add_parser(
'purge', parents=[list_name_parser], help='Removes an entire list. Use with caution!'
)
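# Illustrative invocations (editor's note; assumes a working FlexGet install;
# the list name and pattern are hypothetical):
#   flexget regexp-list add mylist '^Example\.Show\..*720p'
#   flexget regexp-list list mylist
#   flexget regexp-list del mylist '^Example\.Show\..*720p'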
|
luzeduardo/antonov225
|
flyer/flyerapp/migrations/0008_auto_20150630_1859.py
|
Python
|
gpl-2.0
| 924
| 0.002165
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('flyerapp', '0007_auto_20150629_1135'),
]
operations = [
|
migrations.AddField(
model_name='schedule',
name='logic_delete',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='flight',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 30, 18, 59, 57, 180047), null=True, verbose_name=b'da
|
te published'),
),
migrations.AlterField(
model_name='schedule',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2015, 6, 30, 18, 59, 57, 180807), null=True, verbose_name=b'date published'),
),
]
|
hackupc/backend
|
applications/apps.py
|
Python
|
mit
| 506
| 0.001976
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ApplicationsConfig(AppConfig):
name = 'applications'
def ready(self):
super(ApplicationsConfig, self).ready()
from applications.signals import create_draft_application, clean_draft_application, \
auto_delete_file_on_change, auto_delete_
|
file_on_delete
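        # Editor's note: the bare names below are deliberate no-ops; they keep
        # linters from flagging the signal imports as unused while the import
        # itself registers the receivers.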
create_draft_application
clean_draft_application
auto_delete_f
|
ile_on_change
auto_delete_file_on_delete
|
pyfa-org/eos
|
eos/eve_obj/custom/self_skillreq/__init__.py
|
Python
|
lgpl-3.0
| 3,165
| 0.000632
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from logging import getLogger
from eos.const.eos import EffectBuildStatus
from e
|
os.const.eve import AttrId
from eos.const.eve import EffectId
from eos.eve_obj.effect import EffectFactory
from .modifier import make_drone_dmg_modifiers
from .modifier import make_missile_dmg_modifiers
from .modifier import make_missile_rof_modifiers
logger = getLogger(__name__)
def add_missile_rof_modifiers(effect):
if effect.modifiers:
msg = 'missile self skillreq rof effect has modifiers, overwriting them'
logger.warning(msg)
|
effect.modifiers = make_missile_rof_modifiers()
effect.build_status = EffectBuildStatus.custom
def _add_missile_dmg_modifiers(effect, attr_id):
if effect.modifiers:
msg = f'missile self skillreq damage effect {effect.id} has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_missile_dmg_modifiers(attr_id)
effect.build_status = EffectBuildStatus.custom
def add_missile_dmg_modifiers_em(effect):
_add_missile_dmg_modifiers(effect, AttrId.em_dmg)
def add_missile_dmg_modifiers_therm(effect):
_add_missile_dmg_modifiers(effect, AttrId.therm_dmg)
def add_missile_dmg_modifiers_kin(effect):
_add_missile_dmg_modifiers(effect, AttrId.kin_dmg)
def add_missile_dmg_modifiers_expl(effect):
_add_missile_dmg_modifiers(effect, AttrId.expl_dmg)
def add_drone_dmg_modifiers(effect):
if effect.modifiers:
msg = 'drone self skillreq dmg effect has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_drone_dmg_modifiers()
effect.build_status = EffectBuildStatus.custom
EffectFactory.register_instance_by_id(
add_missile_rof_modifiers,
EffectId.self_rof)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_em,
EffectId.missile_em_dmg_bonus)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_therm,
EffectId.missile_therm_dmg_bonus)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_kin,
EffectId.missile_kin_dmg_bonus2)
EffectFactory.register_instance_by_id(
add_missile_dmg_modifiers_expl,
EffectId.missile_expl_dmg_bonus)
EffectFactory.register_instance_by_id(
add_drone_dmg_modifiers,
EffectId.drone_dmg_bonus)
|
andrefbsantos/Tuxemon
|
tuxemon/core/components/log.py
|
Python
|
gpl-3.0
| 1,644
| 0.000608
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY W
|
ARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <[email protected]>
#
#
# core.components.log Logging module.
#
#
import sys
import logging
from . import config as Config
# read the configuration file
c
|
onfig = Config.Config()
loggers = {}
# Set up logging if the configuration has it enabled
if config.debug_logging == "1":
for logger_name in config.loggers:
# Enable logging
logger = logging.getLogger(logger_name)
logger.setLevel(int(config.debug_level))
log_hdlr = logging.StreamHandler(sys.stdout)
log_hdlr.setLevel(logging.DEBUG)
log_hdlr.setFormatter(logging.Formatter("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
logger.addHandler(log_hdlr)
loggers[logger_name] = logger
|
intuinno/vistalk
|
yelpvis/models.py
|
Python
|
mit
| 881
| 0.026107
|
from django.db import models
from django.core.urlresolvers import reverse
from jsonfield import JSONField
import collections
# Create your models here.
class YelpvisState(models.Model):
title=models.CharField(max_length=255)
slug=models.SlugField(unique=True,max_length=255)
description = models.CharField(max_length=255)
|
content=models.TextField()
published=models.BooleanField(default=True)
created=models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['-created']
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return reverse('blog:post', args=[self.slug])
class Y
|
elpvisCommentState(models.Model):
content=models.TextField()
pub_date = models.DateTimeField(auto_now_add=True)
vis_state = JSONField()
class Meta:
ordering = ['-pub_date']
def __unicode__(self):
return self.content
|
fernandog/Medusa
|
tests/legacy/db_tests.py
|
Python
|
gpl-3.0
| 1,788
| 0.001119
|
# coding=UTF-8
# Author: Dennis Lutter <[email protected]>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
"""Test show database functionality."""
from __future__ import print_function
import threading
from tests.legacy import test_lib as test
class DBBasicTests(test.AppTestDBCase):
"""Perform basic database tests."""
def setUp(self):
"""Unittest set up."""
super(DBBasicTests, self).setUp()
self.db = test.db.DBConnection()
def test_select(self):
self.db.select("SELECT * FROM tv_episodes WHERE showid
|
= ? AND location != ''", [0000])
class DBMultiTests(test.AppTestDBCase):
"""Perform multi-threaded test of the database."""
def setUp(self):
"""Unittest set up."""
super(DBMultiTests, self).setUp()
self.db = test.db.DBConnection()
def select(self):
"""Select from the database."""
self.db.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [0000])
def test_threaded(self):
"""Test multi-threaded selection from th
|
e database."""
for _ in range(4):
thread = threading.Thread(target=self.select)
thread.start()
|
dstrockis/outlook-autocategories
|
lib/unit_tests/test__helpers.py
|
Python
|
apache-2.0
| 6,951
| 0
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_PropertyMixin(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage._helpers import _PropertyMixin
return _PropertyMixin
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _derivedClass(self, path=None):
class Derived(self._get_target_class()):
client = None
@property
def path(self):
return path
return Derived
def test_path_is_abstract(self):
mixin = self._make_o
|
ne()
self.assertRaises(NotImplementedError, lambda: mixin.path)
def test_client_is_abstract(self):
mixin = self._make_one()
self.assertRaises(NotImplementedError, lambda: mixin.client)
def test_reload(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure cha
|
nges is not a set, so we can observe a change.
derived._changes = object()
derived.reload(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'noAcl'})
# Make sure changes get reset by reload.
self.assertEqual(derived._changes, set())
def test__set_properties(self):
mixin = self._make_one()
self.assertEqual(mixin._properties, {})
VALUE = object()
mixin._set_properties(VALUE)
self.assertEqual(mixin._properties, VALUE)
def test__patch_property(self):
derived = self._derivedClass()()
derived._patch_property('foo', 'Foo')
self.assertEqual(derived._properties, {'foo': 'Foo'})
def test_patch(self):
connection = _Connection({'foo': 'Foo'})
client = _Client(connection)
derived = self._derivedClass('/path')()
# Make sure changes is non-empty, so we can observe a change.
BAR = object()
BAZ = object()
derived._properties = {'bar': BAR, 'baz': BAZ}
derived._changes = set(['bar']) # Ignore baz.
derived.patch(client=client)
self.assertEqual(derived._properties, {'foo': 'Foo'})
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/path')
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
# Since changes does not include `baz`, we don't see it sent.
self.assertEqual(kw[0]['data'], {'bar': BAR})
# Make sure changes get reset by patch().
self.assertEqual(derived._changes, set())
class Test__scalar_property(unittest.TestCase):
def _call_fut(self, fieldName):
from google.cloud.storage._helpers import _scalar_property
return _scalar_property(fieldName)
def test_getter(self):
class Test(object):
def __init__(self, **kw):
self._properties = kw.copy()
do_re_mi = self._call_fut('solfege')
test = Test(solfege='Latido')
self.assertEqual(test.do_re_mi, 'Latido')
def test_setter(self):
class Test(object):
def _patch_property(self, name, value):
self._patched = (name, value)
do_re_mi = self._call_fut('solfege')
test = Test()
test.do_re_mi = 'Latido'
self.assertEqual(test._patched, ('solfege', 'Latido'))
class Test__base64_md5hash(unittest.TestCase):
def _call_fut(self, bytes_to_sign):
from google.cloud.storage._helpers import _base64_md5hash
return _base64_md5hash(bytes_to_sign)
def test_it(self):
from io import BytesIO
BYTES_TO_SIGN = b'FOO'
BUFFER = BytesIO()
BUFFER.write(BYTES_TO_SIGN)
BUFFER.seek(0)
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(SIGNED_CONTENT, b'kBiQqOnIz21aGlQrIp/r/w==')
def test_it_with_stubs(self):
import mock
class _Buffer(object):
def __init__(self, return_vals):
self.return_vals = return_vals
self._block_sizes = []
def read(self, block_size):
self._block_sizes.append(block_size)
return self.return_vals.pop()
BASE64 = _Base64()
DIGEST_VAL = object()
BYTES_TO_SIGN = b'BYTES_TO_SIGN'
BUFFER = _Buffer([b'', BYTES_TO_SIGN])
MD5 = _MD5(DIGEST_VAL)
patch = mock.patch.multiple(
'google.cloud.storage._helpers',
base64=BASE64, md5=MD5)
with patch:
SIGNED_CONTENT = self._call_fut(BUFFER)
self.assertEqual(BUFFER._block_sizes, [8192, 8192])
self.assertIs(SIGNED_CONTENT, DIGEST_VAL)
self.assertEqual(BASE64._called_b64encode, [DIGEST_VAL])
self.assertEqual(MD5._called, [None])
self.assertEqual(MD5.hash_obj.num_digest_calls, 1)
self.assertEqual(MD5.hash_obj._blocks, [BYTES_TO_SIGN])
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _MD5Hash(object):
def __init__(self, digest_val):
self.digest_val = digest_val
self.num_digest_calls = 0
self._blocks = []
def update(self, block):
self._blocks.append(block)
def digest(self):
self.num_digest_calls += 1
return self.digest_val
class _MD5(object):
def __init__(self, digest_val):
self.hash_obj = _MD5Hash(digest_val)
self._called = []
def __call__(self, data=None):
self._called.append(data)
return self.hash_obj
class _Base64(object):
def __init__(self):
self._called_b64encode = []
def b64encode(self, value):
self._called_b64encode.append(value)
return value
class _Client(object):
def __init__(self, connection):
self._connection = connection
|
DreamerBear/awesome-py3-webapp
|
www/biz/__init__.py
|
Python
|
gpl-3.0
| 181
| 0
|
#!/usr/bin/env python
|
3
# -*- coding: utf-8 -*-
# @Date : 2017/10/18 17:13
# @Author : xxc727xxc ([email protected])
# @Version : 1.0.0
i
|
f __name__ == '__main__':
pass
|
globocom/database-as-a-service
|
dbaas/physical/migrations/0025_auto__add_field_diskoffering_available_size_kb.py
|
Python
|
bsd-3-clause
| 11,926
| 0.00763
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DiskOffering.available_size_kb'
db.add_column(u'physical_diskoffering', 'available_size_kb',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DiskOffering.available_size_kb'
db.delete_column(u'physical_diskoffering', 'available_size_kb')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.enginetype': {
'Meta': {'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'equivalent_environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.relat
|
ed.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
|
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_arbiter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_n
|
erwan-lemonnier/klue-microservice
|
pymacaron/resources.py
|
Python
|
bsd-2-clause
| 1,638
| 0.003053
|
from pymacaron.log import pymlogger
import multiprocessing
from math import ceil
from pymacaron.config import get_config
log = pymlogger(__name__)
# Calculate resources available on this container hardware.
# Used by pymacaron-async, pymacaron-gcp and pymacaron-docker
def get_gunicorn_worker_count(cpu_count=None):
"""Return the number of gunicorn worker to
|
run on this container hardware"""
if cpu_count:
return cpu_count * 2 + 1
return multiprocessing.cpu_count() * 2 + 1
def get_celery_worker_count(cpu_count=None):
"""Return the number of celery workers to run on this container hardware"""
conf = get_config()
if hasattr(conf, 'worker_count'):
# Start worker_count parrallel celery workers
return conf.worker_count
if cpu_count:
return cpu_count * 2
c = multiproc
|
essing.cpu_count() * 2
# Minimum worker count == 2
if c < 2:
        c = 2
return c
# Memory required, in Mb, by one gunicorn or celery worker:
GUNICORN_WORKER_MEM = 400
CELERY_WORKER_MEM = 200
def get_memory_limit(default_celery_worker_count=None, cpu_count=None):
"""Return the memory in Megabytes required to run pymacaron on this container hardware"""
# Let's calculate how much memory this pymacaron config requires for 1 container
celery_count = default_celery_worker_count
if not celery_count:
celery_count = get_celery_worker_count(cpu_count=cpu_count)
return ceil(get_gunicorn_worker_count(cpu_count=cpu_count) * GUNICORN_WORKER_MEM + celery_count * CELERY_WORKER_MEM)
def get_celery_worker_memory_limit():
return CELERY_WORKER_MEM * 1024
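# Worked example (editor's note): on a 2-CPU container,
# get_gunicorn_worker_count(2) == 5 and get_celery_worker_count(2) == 4
# (absent a worker_count override in the config), so
# get_memory_limit(cpu_count=2) == ceil(5 * 400 + 4 * 200) == 2800 Mb.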
|
freedomboxtwh/Plinth
|
plinth/modules/help/help.py
|
Python
|
agpl-3.0
| 2,917
| 0
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Help module for Plinth.
"""
import os
from apt.cache import Cache
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _, ugettext_lazy
from stronghold.decorators import public
from plinth import cfg, __version__
def init():
"""Initialize the Help module"""
menu = cfg.main_menu.add_urlname(ugettext_lazy('Documentation'),
'glyphicon-book', 'help:index')
menu.add_urlname(ugettext_lazy('Where to Get Help'), 'glyphicon-search',
'help:index_explicit', 5)
menu.add_urlname(ugettext_lazy('Manual'), 'glyphicon-info-sign',
'help:manual', 10)
menu.add_urlname(ugettext_lazy('About'), 'glyphicon-star', 'help:about',
100)
@public
def index(request):
"""Serve the index page"""
return TemplateResponse(request, 'help_index.html',
{'title': _('Documentation and FAQ')})
@public
def about(request):
"""Serve the about page"""
cache = Cache()
plinth = cache['plinth']
context = {
'title': _('About {box_name}').format(box_name=_(cfg.box_name)),
'version': __version__,
'new_version': not plinth.candidate.is_installed
}
return TemplateResponse(request, 'help_about.html', context)
@public
def manual(request):
"""Serve the manual page from the 'doc' directory"""
try:
with open(os.path.join(cfg.doc_dir, 'freedombox-manual.part.html'),
                  'r', encoding='utf-8') as input_file:
            content = input_file.read()
except IOError:
raise Http404
return TemplateResponse(
request, 'help_manual.html',
        {'title': _('{box_name} Manual').format(box_name=_(cfg.box_name)),
         'content': content})
def status_log(request):
"""Serve the last 100 lines of plinth's status log"""
num_lines = 100
with open(cfg.status_log_file, 'r') as log_file:
data = log_file.readlines()
data = ''.join(data[-num_lines:])
context = {
'num_lines': num_lines,
'data': data
}
return TemplateResponse(request, 'statuslog.html', context)
|
jwvhewitt/dmeternal
|
old_game/container.py
|
Python
|
gpl-2.0
| 8,515
| 0.00916
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Anne Archibald <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
class ContainerError(ValueError):
"""Error signaling something went wrong with container handling"""
pass
class Container(object):
"""A container is an object that manages objects it contains.
The objects in a container each have a .container attribute that
points to the container. This attribute is managed by the container
itself.
This class is a base class that provides common container functionality,
to be used to simplify implementation of list and dict containers.
"""
def _set_container(self, item):
if hasattr( item, "container" ) and item.container not in (None,self):
# raise ContainerError("Item %s was added to container %s but was already in container %s" % (item, self, item.container))
item.container.remove( item )
item.container = self
def _unset_container(self, item):
if item.container is not self:
raise ContainerError("Item %s was removed from container %s but was not in it" % (item, self))
item.container = None
def _set_container_multi(self, items):
"""Put items in the container in an all-or-nothing way"""
r = []
try:
for i in items:
self._set_container(i)
r.append(i)
r = None
finally: # Make sure items don't get added to this if any fail
if r is not None:
for i in r:
try:
self._unset_container(i)
except ContainerError:
pass
def _unset_container_multi(self, items):
"""Remove items from the container in an all-or-nothing way"""
r = []
try:
for i in items:
self._unset_container(i)
r.append(i)
r = None
finally:
if r is not None:
for i in r:
try:
self._set_container(i)
except ContainerError:
pass
class ContainerList(list,Container):
"""A ContainerList is a list whose children know they're in it.
Each element in the ContainerList has a .container attribute which points
to the ContainerList itself. This container pointer is maintained automatically.
"""
def __init__(self, items=[], owner=None):
list.__init__(self, items)
self._set_container_multi(items)
self.owner = owner
def __repr__(self):
return "<CL %s>" % list.__repr__(self)
def append(self, item):
self._set_container(item)
list.append(self,item)
def extend(self, items):
self._set_container_multi(items)
list.extend(self,items)
def insert(self, i, item):
self._set_container(item)
list.insert(self,i,item)
def remove(self, item):
self._unset_container(item)
list.remove(self,item)
def pop(self, i=-1):
self._unset_container(self[i])
return list.pop(self,i)
# These don't work because they make the elements part of more than one list, or one list more than once
def __add__(self, other):
        raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __imul__(self,other):
raise NotImplementedError
    def __mul__(self, other):
raise NotImplementedError
def __rmul__(self,other):
raise NotImplementedError
# only works if other is not also a Container
def __iadd__(self, other):
self.extend(other)
return self
def __setitem__(self, key, value):
# FIXME: check slices work okay
if isinstance(key, slice):
self._unset_container_multi(self[key])
try:
self._set_container_multi(value)
except ContainerError:
self._set_container_multi(self[key])
raise
else:
self._unset_container(self[key])
try:
self._set_container(value)
except ContainerError:
self._set_container(self[key])
raise
list.__setitem__(self,key,value)
def __delitem__(self, key):
# FIXME: check slices work okay
if isinstance(key, slice):
self._unset_container_multi(self[key])
else:
self._unset_container(self[key])
list.__delitem__(self,key)
# Needed for python2, forbidden for python3
def __delslice__(self,i,j):
del self[slice(i,j,None)]
class ContainerDict(dict,Container):
"""A ContainerDict is a dict whose children know they're in it.
Each element in the ContainerDict has a .container attribute which points
to the ContainerDict itself. This container pointer is maintained automatically.
"""
def __init__(self, contents=None, **kwargs):
if contents is None:
dict.__init__(self, **kwargs)
else:
dict.__init__(self, contents, **kwargs)
self._set_container_multi(list(self.values()))
def __repr__(self):
return "<CD %s>" % dict.__repr__(self)
def __setitem__(self, key, value):
if key in self:
self._unset_container(self[key])
try:
self._set_container(value)
except ContainerError:
if key in self:
self._set_container(self[key])
raise
dict.__setitem__(self,key,value)
def __delitem__(self, key):
if key in self:
self._unset_container(self[key])
dict.__delitem__(self,key)
def pop(self, key):
if key in self:
self._unset_container(self[key])
return dict.pop(self,key)
def popitem(self):
key, value = dict.popitem(self)
self._unset_container(value)
return key, value
def setdefault(self, key, default=None):
if key not in self:
self._set_container(default)
        return dict.setdefault(self, key, default)
def update(self, other):
for (k,v) in list(other.items()):
self[k] = v
if __name__=='__main__':
class Gear(object):
def __init__(self, name, container=None):
self.name = name
self.container = container
def __repr__(self):
return "<G "+str(self.name)+">"
gears = [Gear(n) for n in range(10)]
a = Gear("A")
b = Gear("B")
c = Gear("C")
d = Gear("D")
e = Gear("E")
p = ContainerList([a,b,c])
print(p)
try:
p.append(a)
except ContainerError as err:
print(err)
else:
raise AssertionError
print(p[1])
print(p[::2])
p[1] = d
print(p)
p[1] = b
p[::2] = [d,e]
print(p)
del p[:]
p2 = ContainerList([a,b,c])
print(p2)
p2.extend([d,e])
print(p2)
print(p2.pop())
print(p2)
p2.remove(d)
print(p2)
p2 += [d,e]
print(p2)
try:
d = ContainerDict(a=a, b=b, c=c)
except ContainerError as err:
print(err)
else:
raise AssertionError
del p2[:]
d = ContainerDict(a=a, b=b, c=c)
print(d)
print(d["a"])
d["a"] = a
try:
|
MediffRobotics/DeepRobotics
|
DeepLearnMaterials/tutorials/tensorflowTUT/tf5_example2/for_you_to_practice.py
|
Python
|
gpl-3.0
| 640
| 0.010938
|
# View more python tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
# create data
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data*0.1 + 0.3
### create tensorflow structure start ###
### create tensorflow structure end ###
# Very important
for step in range(201):
pass
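# A possible solution sketch for the practice section above (classic TF1-style
# linear fit; the names Weights/biases are illustrative, not part of the stub):
#
#   Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
#   biases = tf.Variable(tf.zeros([1]))
#   y = Weights * x_data + biases
#   loss = tf.reduce_mean(tf.square(y - y_data))
#   train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
#   sess = tf.Session()
#   sess.run(tf.global_variables_initializer())
#   for step in range(201):
#       sess.run(train)
#       if step % 20 == 0:
#           print(step, sess.run(Weights), sess.run(biases))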
|
avicorp/firstLook
|
src/algorithms/check_input_fields.py
|
Python
|
apache-2.0
| 11,258
| 0.004264
|
# ---Libraries---
# Standard library
import os
import sys
import math
# Third-party libraries
import cv2
import numpy as np
import scipy.ndimage as ndimage
# Private libraries
import compute_OBIFs
import color_BIFs
sys.path.append(os.path.abspath("../"))
import utils
template_png='algorithms/inputFields/template.png'
amount_input_png='algorithms/inputFields/amount_template.png'
date_input_png='algorithms/inputFields/date_template.png'
def searchTemplateCenterPointIn(check, template, searchMap, step=1, threshold=-9999999):
fromIndex = [int(template.shape[0] / 2 + 1), int(template.shape[1] / 2 + 1)]
    toIndex = [int(searchMap.shape[0] - template.shape[0] / 2), int(searchMap.shape[1] - template.shape[1] / 2)]
radios = [int(template.shape[0] / 2), int(template.shape[1] / 2)]
maxConv = threshold
maxCenterConv = [0, 0]
for centerConvX in range(fromIndex[0], toIndex[0]):
for centerConvY in range(fromIndex[1], toIndex[1]):
if searchMap[centerConvX, centerConvY] == 1:
                convMatrix = check[centerConvX - radios[0]:centerConvX + radios[0] + template.shape[0]%2,
centerConvY - radios[1]:centerConvY + radios[1] + template.shape[1]%2] \
* template
conv = np.sum(convMatrix)
if maxConv < conv:
maxConv = conv
maxCenterConv = [centerConvX, centerConvY]
print maxConv
return maxCenterConv
def normalize(image):
binary = np.array(image, dtype=np.int8, copy=True)
binary[image == 0] = 1
binary[image == 255] = -1
return binary
def binaryTemplate():
img_template = cv2.imread(template_png)
return utils.sanitize(img_template)
def dateTemplate():
img_template = cv2.imread(date_input_png)
return utils.sanitize(img_template)
def amountTemplate():
img_template = cv2.imread(amount_input_png)
return utils.sanitize(img_template)
def binaryTemplateFix():
img_template = cv2.imread(template_png)
return utils.sanitize(img_template, False)
# Extract input fields, the Region Of Interest (ROI), from bank check.
def extract(check):
template = binaryTemplate()
templateRadios = [template.shape[0] / 2, template.shape[1] / 2]
checkMap = np.array(check, dtype=np.int8)
checkMap[check == 0] = 1
checkMap[check > 0] = -1
searchFrom = [check.shape[0] / 2 - 10, check.shape[1] / 2 - 10]
searchTo = [check.shape[0] / 2 + 100, check.shape[1] / 2 + 10]
searchMatrix = np.zeros(check.shape, np.uint8)
searchMatrix[int(searchFrom[0]):int(searchTo[0]), int(searchFrom[1]):int(searchTo[1])] = 1
center = searchTemplateCenterPointIn(checkMap, template, searchMatrix)
inputFieldsRectangle = [[int(center[0] - templateRadios[0] - 1), int(center[0] + templateRadios[0])],
[int(center[1] - templateRadios[1]), int(center[1] + templateRadios[1])]]
roi = check[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]]
return roi
def extractAmount(input_fields, clean = True):
template = amountTemplate()
template[template == -1] = 0
input_fields_map = normalize(input_fields)
amountX = 1018
amountY = 96
searchFrom = [amountY - 50, amountX - 50]
searchTo = [amountY + 50, amountX + 50]
searchMatrix = np.zeros(input_fields.shape, np.uint8)
searchMatrix[int(searchFrom[0]):int(searchTo[0]), int(searchFrom[1]):int(searchTo[1])] = 1
center = searchTemplateCenterPointIn(input_fields_map, template, searchMatrix)
inputFieldsRectangle = [[int(center[0] - template.shape[0]/2), int(center[0] + template.shape[0]/2)],
[int(center[1] - template.shape[1]/2), int(center[1] + template.shape[1]/2)]]
template[template == 0] = -1
template[template == 1] = 0
template[:,0:35] = 0
input_fields_clean = cleanBy(input_fields[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]], template)
inputFieldsRectangle[1][1] = input_fields.shape[1] if inputFieldsRectangle[1][1] + 50 > input_fields.shape[1] \
else inputFieldsRectangle[1][1] + 50
inputFieldsRectangle[0][0] -= 20
roi = np.copy(input_fields[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]])
if clean:
roi[20:roi.shape[0], 0:input_fields_clean.shape[1]] = input_fields_clean
return roi
def extractDate(input_fields):
template = dateTemplate()
input_fields_map = normalize(input_fields)
amountX = 683
amountY = 190
searchFrom = [amountY - 100, amountX - 100]
searchTo = [amountY + 100, amountX + 100]
searchMatrix = np.zeros(input_fields.shape, np.uint8)
searchMatrix[int(searchFrom[0]):int(searchTo[0]), int(searchFrom[1]):int(searchTo[1])] = 1
center = searchTemplateCenterPointIn(input_fields, template, searchMatrix)
inputFieldsRectangle = [[int(center[0] - 50), int(center[0] + 50)],
[int(center[1] - 113), int(center[1] + 113)]]
roi = input_fields[inputFieldsRectangle[0][0]:inputFieldsRectangle[0][1],
inputFieldsRectangle[1][0]:inputFieldsRectangle[1][1]]
return roi
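# A typical extraction pipeline sketch (assuming `check` is a sanitized,
# binarized check image as produced by utils.sanitize):
#
#   input_fields = extract(check)
#   amount_roi = extractAmount(input_fields)
#   date_roi = extractDate(input_fields)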
def clean(check):
input_fields = extract(check)
input_fields_OBIFs = compute_OBIFs.computeOBIFs(input_fields)
empty_input_fields = binaryTemplateFix()
empty_input_fields_OBIFs = compute_OBIFs.computeOBIFs(empty_input_fields)
# input_fields[diff_map_not] = 255
input_fields_clone = cleanBy(input_fields, empty_input_fields)
# clean_input_fields_OBIFs = compute_OBIFs.computeOBIFs(input_fields)
diff_map = np.equal(input_fields_OBIFs, empty_input_fields_OBIFs)
# diff_map_clean = np.equal(input_fields_OBIFs, clean_input_fields_OBIFs)
# diff_map_not = np.not_equal(input_fields_OBIFs, empty_input_fields_OBIFs)
# input_fields_OBIFs[diff_map] = 30
# empty_input_fields_OBIFs[diff_map] = 30
if_obifs_color = color_BIFs.bifs_to_color_image(input_fields_OBIFs)
eif_obifs_color = color_BIFs.bifs_to_color_image(empty_input_fields_OBIFs)
# cif_obifs_color = color_BIFs.bifs_to_color_image(clean_input_fields_OBIFs)
if_obifs_color[diff_map] = 30
if_obifs_color[empty_input_fields_OBIFs == 0] = 30
eif_obifs_color[diff_map] = 30
# cif_obifs_color[diff_map_clean] = 30
cv2.imwrite("obifInput.png", if_obifs_color)
cv2.imwrite("obifEmptyInput.png", eif_obifs_color)
# cv2.imwrite("obifCleanInput.png", cif_obifs_color)
# diff_map[empty_input_fields != 0] = False
return input_fields_clone
def cleanBy(image, template_image):
image_clone = np.copy(image)
image_clone[template_image == 0] = 255
# kernel = np.zeros((5, 5), np.float16)
# kernel[1][1] = 1/6.
# kernel[1][2] = 1/6.
# kernel[1][3] = 1/6.
# kernel[3][2] = 1/6.
# kernel[3][2] = 1/6.
# kernel[3][3] = 1/6.
#
#
# pixel_matrix = ndimage.filters.convolve(image_clone, kernel, mode='constant')
# cv2.imwrite('test1.png', pixel_matrix)
#
# pixel_matrix[template_image != 0] = 255
return image_clone
# Test
# img_template = cv2.imread('inputFields/templateFix1.png')
#
# image = np.array(img_template, dtype=np.uint8)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# ret3, invers1 = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret, invers2 = cv2.threshold(invers1, 127, 255, cv2.THRESH_BINARY_INV)
# blur1 = cv2.GaussianBlur(image, (11, 11), 0)
# blur2 = cv2.GaussianBlur(image, (21, 21), 0)
# blur3 = cv2.GaussianBlur(image, (31, 31), 0)
# blur4 = cv2.GaussianBlur(image, (41, 41), 0)
#
#
# blur1 = np.array(blur1, dtype=np.uint8)
# blur2 = np.array(blur2, dtype=np.uint8)
# blur3 = np.array(blur3, dtype=np.uint8)
# blur4 = np.array(blur4, dtype=np.uint8)
#
# blur1 = cv2.cvtColor(blur1, cv2.COLOR_BGR2GRAY)
# blur2 = cv2.cvtColor(blur2, cv2.COLOR_BGR2GRAY)
# blur3 = cv2.cvtColor(blur3, cv2.COLOR_BGR2GRAY)
# blur4 = cv2.cvtColor(blur4, cv2.COLOR_BGR2GRAY)
|
wbinventor/openmc
|
docs/sphinxext/notebook_sphinxext.py
|
Python
|
mit
| 3,717
| 0.001345
|
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
try:
from docutils.utils.error_reporting import ErrorString # the new way
except ImportError:
from docutils.error_reporting import ErrorString # the old way
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from nbconvert import html
class Notebook(Directive):
"""Use nbconvert to insert a notebook into the environment.
This is based on the Raw directive in docutils
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
has_content = False
def run(self):
# check if raw html is supported
if not self.state.document.settings.raw_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
# set up encoding
attributes = {'format': 'html'}
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler = self.state.document.settings.input_encoding_error_handler
# get path to notebook
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
nb_path = os.path.normpath(os.path.join(source_dir,
self.arguments[0]))
nb_path = utils.relative_path(None, nb_path)
# convert notebook to html
exporter = html.HTMLExporter(template_file='full')
output, resources = exporter.from_filename(nb_path)
header = output.split('<head>', 1)[1].split('</head>',1)[0]
body = output.split('<body>', 1)[1].split('</body>',1)[0]
# add HTML5 scoped attribute to header style tags
header = header.replace('<style', '<style scoped="scoped"')
header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n',
'')
header = header.replace("code,pre{", "code{")
# Filter out styles that conflict with the sphinx theme.
filter_strings = [
'navbar',
'body{',
'alert{',
'uneditable-input{',
'collapse{',
]
filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
line_begin = [
'pre{',
'p{margin'
]
filterfunc = lambda x: not any([s in x for s in filter_strings])
header_lines = filter(filterfunc, header.split('\n'))
filterfunc = lambda x: not any([x.startswith(s) for s in line_begin])
header_lines = filter(filterfunc, header_lines)
header = '\n'.join(header_lines)
# concatenate raw html lines
lines = ['<div class="ipynotebook">']
lines.append(header)
lines.append(body)
lines.append('</div>')
text = '\n'.join(lines)
# add dependency
self.state.document.settings.record_dependencies.add(nb_path)
attributes['source'] = nb_path
# create notebook node
nb_node = notebook('', text, **attributes)
        (nb_node.source, nb_node.line) = \
self.state_machine.get_source_and_line(self.lineno)
return [nb_node]
class notebook(nodes.raw):
    pass
def visit_notebook_node(self, node):
self.visit_raw(node)
def depart_notebook_node(self, node):
self.depart_raw(node)
def setup(app):
app.add_node(notebook,
html=(visit_notebook_node, depart_notebook_node))
app.add_directive('notebook', Notebook)
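# A hypothetical reST usage of the directive registered above (the notebook
# path is illustrative and is resolved relative to the including document):
#
#   .. notebook:: ../examples/pincell.ipynb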
|
chriscallan/Euler
|
Probs_1_to_50/028_NumberSpiralDiagonals.py
|
Python
|
gpl-3.0
| 3,489
| 0.004013
|
# Problem 28
# Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:
#
# 21 22 23 24 25
# 20 7 8 9 10
# 19 6 1 2 11
# 18 5 4 3 12
# 17 16 15 14 13
#
# It can be verified that the sum of the numbers on the diagonals is 101.
#
# What is the sum of the numbers on the diagonals in a 1001 by 1001 spiral formed in the same way?
from math import floor
from enum import Enum
# constants for this problem
grid_rows = 1001 # 1001, for the final problem
grid_cols = grid_rows # make column/row distinction later on easier
max_idx = grid_rows - 1 #just to make life easier later
grid_print = True # use this to print the grid after setting it up, later on
#first setup the grid
my_grid = [[0 for x in range(grid_rows)] for y in range(grid_cols)]
# find the starting point
starting_point = int(grid_rows / 2) # integer division gives the zero-based center index of an odd-sized grid
# these are "grid maintenance" types of things
grid_filled = False
fill_directions = Enum('direction', 'right down left up')
current_direction = fill_directions.right
grid_value = 1 # the spiral starts at 1; this value seeds the center cell below
# start the offsets off with a minimal value
row_offset = 0
col_offset = 0
# working variables, these will be used to keep track of the current position
candidate_col = starting_point
candidate_row = starting_point
my_grid[candidate_row][candidate_col] = grid_value
grid_value += 1 # this seeds the center of our spiral
while not grid_filled:
try:
if current_direction == fill_directions.right and my_grid[candidate_row][candidate_col + 1] == 0:
candidate_col += 1 # offset by one column
if my_grid[candidate_row + 1][candidate_col] == 0:
current_direction = fill_directions.down
elif current_direction == fill_directions.down and my_grid[candidate_row + 1][candidate_col] == 0:
            candidate_row += 1
if my_grid[candidate_row][candidate_col - 1] == 0:
current_direction = fill_directions.left
elif current_direction == fill_directions.left and my_grid[candidate_row][candidate_col - 1] == 0:
candidate_col -= 1
if my_grid[candidate_row - 1][candidate_col] == 0:
current_direction = fill_directions.up
elif current_direction == fill_directions.up and my_grid[candidate_row - 1][candidate_col] == 0:
            candidate_row -= 1
if my_grid[candidate_row][candidate_col + 1] == 0:
current_direction = fill_directions.right
else:
raise Exception("current_direction wasn't in the enum of possible directions: {0}".format(current_direction))
except IndexError as idxExc:
break
if candidate_row == grid_cols and candidate_row == grid_rows:
grid_filled = True
else:
my_grid[candidate_row][candidate_col] = grid_value
grid_value += 1
if grid_print:
for x in range(grid_rows):
row_val = ""
for y in range(grid_cols):
row_val += "{0}\t".format(my_grid[x][y])
print("row {0}: {1}".format(x, row_val))
current_row = 0
running_total = 0
for i in range(grid_cols):
running_total += my_grid[current_row][i]
running_total += my_grid[current_row][(-i - 1) if i > 0 else -1] if my_grid[current_row][i] != 1 else 0
current_row += 1
print("running_total is: {0}".format(running_total))
|
openatv/enigma2
|
lib/python/Plugins/Extensions/DVDBurn/Process.py
|
Python
|
gpl-2.0
| 37,027
| 0.02536
|
from __future__ import print_function
from __future__ import absolute_import
from Components.Task import Task, Job, DiskspacePrecondition, Condition, ToolExistsPrecondition
from Components.Harddisk import harddiskmanager
from Screens.MessageBox import MessageBox
from .Project import iso639language
import Tools.Notifications
class png2yuvTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Creating menu video")
self.setTool("png2yuv")
self.args += ["-n1", "-Ip", "-f25", "-j", inputfile]
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
def processStderr(self, data):
print("[png2yuvTask]", data[:-1])
class mpeg2encTask(Task):
def __init__(self, job, inputfile, outputfile):
Task.__init__(self, job, "Encoding menu video")
self.setTool("mpeg2enc")
self.args += ["-f8", "-np", "-a2", "-o", outputfile]
self.inputFile = inputfile
self.weighting = 25
def run(self, callback):
Task.run(self, callback)
self.container.readFromFile(self.inputFile)
def processOutputLine(self, line):
print("[mpeg2encTask]", line[:-1])
class spumuxTask(Task):
def __init__(self, job, xmlfile, inputfile, outputfile):
Task.__init__(self, job, "Muxing buttons into menu")
self.setTool("spumux")
self.args += [xmlfile]
self.inputFile = inputfile
self.dumpFile = outputfile
self.weighting = 15
def run(self, callback):
Task.run(self, callback)
self.container.stdoutAvail.remove(self.processStdout)
self.container.dumpToFile(self.dumpFile)
self.container.readFromFile(self.inputFile)
def processStderr(self, data):
print("[spumuxTask]", data[:-1])
class MakeFifoNode(Task):
def __init__(self, job, number):
Task.__init__(self, job, "Make FIFO nodes")
self.setTool("mknod")
nodename = self.job.workspace + "/dvd_title_%d" % number + ".mpg"
self.args += [nodename, "p"]
self.weighting = 10
class LinkTS(Task):
def __init__(self, job, sourcefile, link_name):
Task.__init__(self, job, "Creating symlink for source titles")
self.setTool("ln")
self.args += ["-s", sourcefile, link_name]
self.weighting = 10
class CopyMeta(Task):
def __init__(self, job, sourcefile):
Task.__init__(self, job, "Copy title meta files")
self.setTool("cp")
from os import listdir
path, filename = sourcefile.rstrip("/").rsplit("/", 1)
tsfiles = listdir(path)
for file in tsfiles:
if file.startswith(filename + "."):
self.args += [path + '/' + file]
self.args += [self.job.workspace]
self.weighting = 15
class DemuxTask(Task):
def __init__(self, job, inputfile):
Task.__init__(self, job, "Demux video into ES")
title = job.project.titles[job.i]
self.global_preconditions.append(DiskspacePrecondition(title.estimatedDiskspace))
self.setTool("projectx")
self.args += [inputfile, "-demux", "-set", "ExportPanel.Streamtype.Subpicture=0", "-set", "ExportPanel.Streamtype.Teletext=0", "-out", self.job.workspace]
self.end = 300
self.prog_state = 0
self.weighting = 1000
self.cutfile = self.job.workspace + "/cut_%d.Xcl" % (job.i + 1)
self.cutlist = title.cutlist
self.currentPID = None
self.relevantAudioPIDs = []
self.getRelevantAudioPIDs(title)
self.generated_files = []
self.mplex_audiofiles = {}
self.mplex_videofile = ""
self.mplex_streamfiles = []
if len(self.cutlist) > 1:
self.args += ["-cut", self.cutfile]
def prepare(self):
self.writeCutfile()
def getRelevantAudioPIDs(self, title):
for audiotrack in title.properties.audiotracks:
if audiotrack.active.getValue():
self.relevantAudioPIDs.append(audiotrack.pid.getValue())
def processOutputLine(self, line):
line = line[:-1]
#print "[DemuxTask]", line
MSG_NEW_FILE = "---> new File: "
MSG_PROGRESS = "[PROGRESS] "
MSG_NEW_MP2 = "++> Mpg Audio: PID 0x"
MSG_NEW_AC3 = "++> AC3/DTS Audio: PID 0x"
if line.startswith(MSG_NEW_FILE):
file = line[len(MSG_NEW_FILE):]
if file[0] == "'":
file = file[1:-1]
self.haveNewFile(file)
elif line.startswith(MSG_PROGRESS):
progress = line[len(MSG_PROGRESS):]
self.haveProgress(progress)
elif line.startswith(MSG_NEW_MP2) or line.startswith(MSG_NEW_AC3):
try:
self.currentPID = str(int(line.split(': PID 0x', 1)[1].split(' ', 1)[0], 16))
except ValueError:
print("[DemuxTask]
|
ERROR: couldn't detect Audio PID (projectx too old?)")
def haveNewFile(self, file):
print("[DemuxTask] produced file:", file, self.currentPID)
self.generated_files.append(file)
if self.currentPID in self.relevantAudioPIDs:
self.mplex_audiofiles[self.currentPID] = file
elif file.endswith("m2v"):
self.mplex_videofile = file
    def haveProgress(self, progress):
#print "PROGRESS [%s]" % progress
MSG_CHECK = "check & synchronize audio file"
MSG_DONE = "done..."
if progress == "preparing collection(s)...":
self.prog_state = 0
elif progress[:len(MSG_CHECK)] == MSG_CHECK:
self.prog_state += 1
else:
try:
p = int(progress)
p = p - 1 + self.prog_state * 100
if p > self.progress:
self.progress = p
except ValueError:
pass
def writeCutfile(self):
f = open(self.cutfile, "w")
f.write("CollectionPanel.CutMode=4\n")
for p in self.cutlist:
            s = p // 90000
            m = s // 60
            h = m // 60
m %= 60
s %= 60
f.write("%02d:%02d:%02d\n" % (h, m, s))
f.close()
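        # (worked example) a cut point of p = 90000 * 3661 pts, i.e. 3661 seconds,
        # is written to the cutfile as "01:01:01"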
def cleanup(self, failed):
print("[DemuxTask::cleanup]")
self.mplex_streamfiles = [self.mplex_videofile]
for pid in self.relevantAudioPIDs:
if pid in self.mplex_audiofiles:
self.mplex_streamfiles.append(self.mplex_audiofiles[pid])
print(self.mplex_streamfiles)
if failed:
import os
for file in self.generated_files:
try:
os.remove(file)
except OSError:
pass
class MplexTaskPostcondition(Condition):
def check(self, task):
if task.error == task.ERROR_UNDERRUN:
return True
return task.error is None
def getErrorMessage(self, task):
return {
task.ERROR_UNDERRUN: ("Can't multiplex source video!"),
task.ERROR_UNKNOWN: ("An unknown error occurred!")
}[task.error]
class MplexTask(Task):
ERROR_UNDERRUN, ERROR_UNKNOWN = list(range(2))
def __init__(self, job, outputfile, inputfiles=None, demux_task=None, weighting=500):
Task.__init__(self, job, "Mux ES into PS")
self.weighting = weighting
self.demux_task = demux_task
self.postconditions.append(MplexTaskPostcondition())
self.setTool("mplex")
self.args += ["-f8", "-o", outputfile, "-v1"]
if inputfiles:
self.args += inputfiles
def setTool(self, tool):
self.cmd = tool
self.args = [tool]
self.global_preconditions.append(ToolExistsPrecondition())
        # We don't want the ReturncodePostcondition here: for now we simply ignore the fact that mplex fails with a buffer under-run error on some streams (this always happens at the very end).
def prepare(self):
self.error = None
if self.demux_task:
self.args += self.demux_task.mplex_streamfiles
def processOutputLine(self, line):
print("[MplexTask] ", line[:-1])
if line.startswith("**ERROR:"):
if line.find("Frame data under-runs detected") != -1:
self.error = self.ERROR_UNDERRUN
else:
self.error = self.ERROR_UNKNOWN
class RemoveESFiles(Task):
def __init__(self, job, demux_task):
Task.__init__(self, job, "Remove temp. files")
self.demux_task = demux_task
self.setTool("rm")
self.weighting = 10
def prepare(self):
self.args += ["-f"]
self.args += self.demux_task.generated_files
self.args += [self.demux_task.cutfile]
class DVDAuthorTask(Task):
def __init__(self, job):
Task.__init__(self, job, "Authoring DVD")
self.weighting = 20
self.setTool("dvdauthor")
self.CWD = self.job.workspace
self.args += ["-x", self.job.workspace + "/dvdauthor.xml"]
self.menupreview = job.menupreview
def processOutputLine(self, line):
print("[DVDAuthorTask] ", line[:-1])
if not self.menupreview and line.startswith("STAT: Processing"):
self.callback(self, [], stay_resident=True)
elif line.startswith("STAT: VOBU"):
try:
progress = int(line.split("MB")[0].spli
|
magenta/magenta
|
magenta/models/improv_rnn/improv_rnn_create_dataset.py
|
Python
|
apache-2.0
| 2,205
| 0.004082
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a dataset of SequenceExamples from NoteSequence protos.
This script will extract melodies and chords from NoteSequence protos and save
them to TensorFlow's SequenceExample protos for input to the improv RNN models.
"""
import os
from magenta.models.improv_rnn import improv_rnn_config_flags
from magenta.models.improv_rnn import improv_rnn_pipeline
from magenta.pipelines import pipeline
import tensorflow.compat.v1 as tf
flags = tf.app.flags
FLAGS = tf.app.flags.FLAGS
flags.DEFINE_string(
'input', None,
'TFRecord to read NoteSequence protos from.')
flags.DEFINE_string(
'output_dir', None,
    'Directory to write training and eval TFRecord files. The TFRecord files '
    'are populated with SequenceExample protos.')
flags.DEFINE_float(
'eval_ratio', 0.1,
'Fraction of input to set aside for eval set. Partition is randomly '
'selected.')
flags.DEFINE_string(
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
def main(unused_argv):
tf.logging.set_verbosity(FLAGS.log)
config = improv_rnn_config_flags.config_from_flags()
pipeline_instance = improv_rnn_pipeline.get_pipeline(
config, FLAGS.eval_ratio)
FLAGS.input = os.path.expanduser(FLAGS.input)
FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)
pipeline.run_pipeline_serial(
pipeline_instance,
pipeline.tf_record_iterator(FLAGS.input, pipeline_instance.input_type),
FLAGS.output_dir)
def console_entry_point():
tf.disable_v2_behavior()
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
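# A hypothetical command-line invocation (paths and config name are illustrative):
#
#   improv_rnn_create_dataset \
#       --config=attention_improv \
#       --input=/tmp/notesequences.tfrecord \
#       --output_dir=/tmp/improv_rnn/sequence_examples \
#       --eval_ratio=0.10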
|
ucldc/harvester
|
harvester/post_processing/run_transform_on_couchdb_docs.py
|
Python
|
bsd-3-clause
| 3,271
| 0.002446
|
'''This allows running a bit of code on couchdb docs.
code should take a json python object, modify it and hand back to the code
Not quite that slick yet, need way to pass in code or make this a decorator
'''
import importlib
from harvester.collection_registry_client import Collection
from harvester.couchdb_init import get_couchdb
COUCHDB_VIEW = 'all_provider_docs/by_provider_name'
def run_on_couchdb_by_collection(func, collection_key=None):
'''If collection_key is none, trying to grab all of docs and modify
func is a function that takes a couchdb doc in and returns it modified.
(can take long time - not recommended)
Function should return new document or None if no changes made
'''
_couchdb = get_couchdb()
v = _couchdb.view(COUCHDB_VIEW, include_docs='true', key=collection_key) \
if collection_key else _couchdb.view(COUCHDB_VIEW,
include_docs='true')
doc_ids = []
n = 0
for r in v:
n += 1
doc_new = func(r.doc)
        if doc_new and doc_new != r.doc:
_couchdb.save(doc_new)
doc_ids.append(r.doc['_id'])
if n % 100 == 0:
print '{} docs ran. Last doc:{}\n'.format(n, r.doc['_id'])
return doc_ids
def run_on_couchdb_doc(docid, func):
'''Run on a doc, by doc id'''
_couchdb = get_couchdb()
doc = _couchdb[docid]
mod_name, func_name = func.rsplit('.', 1)
fmod = importlib.import_module(mod_name)
ffunc = getattr(fmod, func_name)
doc_new = ffunc(doc)
if doc_new and doc_new != doc:
_couchdb.save(doc_new)
return True
return False
C_CACHE = {}
def update_collection_description(doc):
cjson = doc['originalRecord']['collection'][0]
# get collection description
if 'description' not in cjson:
if cjson['@id'] in C_CACHE:
c = C_CACHE[cjson['@id']]
else:
c = Collection(url_api=cjson['@id'])
C_CACHE[cjson['@id']] = c
description = c['description'] if c['description'] else c['name']
print('DOC: {} DESCRIP: {}'.format(
doc['_id'], c['description'].encode('utf8')))
doc['originalRecord']['collection'][0]['description'] = description
doc['sourceResource']['collection'][0]['description'] = description
return doc
def add_rights_and_type_to_collection(doc):
cjson = doc['originalRecord']['collection'][0]
# get collection description
if cjson['@id'] in C_CACHE:
c = C_CACHE[cjson['@id']]
else:
c = Collection(url_api=cjson['@id'])
        C_CACHE[cjson['@id']] = c
doc['originalRecord']['collection'][0]['rights_status'] = c['rights_status']
doc['originalRecord']['collection'][0]['rights_statement'] = c['rights_statement']
doc['originalRecord']['collection'][0]['dcmi_type']=c['dcmi_type']
if 'collection' in doc['sourceResource']:
doc['sourceResource']['collection'][0]['rights_status'] = c['rights_status']
doc['sourceResource']['collection'][0]['rights_statement'] = c['rights_statement']
doc['sourceResource']['collection'][0]['dcmi_type'] = c['dcmi_type']
else:
doc['sourceResource']['collection'] = doc['originalRecord']['collection']
return doc
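# A hypothetical invocation sketch: functions are passed to run_on_couchdb_doc
# as dotted paths and resolved with importlib, e.g.
#
#   run_on_couchdb_doc(
#       'some-doc-id',
#       'harvester.post_processing.run_transform_on_couchdb_docs.update_collection_description')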
|
anttttti/Wordbatch
|
wordbatch/pipelines/apply.py
|
Python
|
gpl-2.0
| 2,258
| 0.030558
|
#!python
from __future__ import with_statement
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import pandas as pd
import wordbatch.batcher
def decorator_apply(func, batcher=None, cache=None, vectorize=None):
def wrapper_func(*args, **kwargs):
return Apply(func, args=args[1:], kwargs=kwargs, batcher=batcher, cache=cache, vectorize=vectorize)\
.transform(args[0])
return wrapper_func
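# A hypothetical decorator sketch: wrapping a per-row function makes a call on a
# whole minibatch dispatch through a Batcher (the values below are illustrative):
#
#   import wordbatch.batcher
#   double = decorator_apply(lambda x: x * 2, batcher=wordbatch.batcher.Batcher(minibatch_size=2))
#   print(double([1, 2, 3, 4]))  # -> [2, 4, 6, 8]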
def batch_transform(args):
f= args[1]
f_args= args[2]
f_kwargs= args[3]
if args[5] is not None:
from numba import vectorize
return vectorize(args[5], fastmath=True)(f)(*zip(*args[0]))
if args[4] is not None:
        from functools import lru_cache
        f= lru_cache(maxsize=args[4])(f)
    #Applying per DataFrame row is very slow, use ApplyBatch instead
if isinstance(args[0], pd.DataFrame): return args[0].apply(lambda x: f(x, *f_args, **f_kwargs), axis=1)
return [f(row, *f_args, **f_kwargs) for row in args[0]]
class Apply(object):
#Applies a function to each row of a minibatch
def __init__(self, function, batcher=None, args=[], kwargs={}, cache=None, vectorize=None):
if batcher is None: self.batcher= wordbatch.batcher.Batcher()
else: self.batcher= batcher
self.function= function
self.args= [args]
self.kwargs= [kwargs]
self.cache = [cache]
self.vectorize = [vectorize]
def fit(self, data, input_split= False, batcher= None):
return self
def fit_transform(self, data, input_split=False, merge_output=True, minibatch_size=None, batcher=None):
return self.transform(data, input_split, merge_output, minibatch_size, batcher)
def transform(self, data, input_split=False, merge_output=True, minibatch_size=None, batcher=None):
if batcher is None: batcher = self.batcher
return batcher.process_batches(batch_transform, data,
[self.function] + self.args + self.kwargs + self.cache + self.vectorize,
input_split=input_split, merge_output=merge_output,
minibatch_size= minibatch_size)
# import wordbatch.batcher as batcher
# b= batcher.Batcher(minibatch_size=2)#, method="serial")
# import numpy as np
# a= Apply(np.power, b, [2],{})
# print(a.transform([1, 2, 3, 4]))
|
jszymon/pacal
|
tests/examples/makerep.py
|
Python
|
gpl-3.0
| 5,081
| 0.011415
|
import os
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l -s --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\functions.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l -s --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\how_to_use.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l -s --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\problems.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter4_products.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter4_products.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter5_functions.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapter5_functions.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapters678.py")
#os.system("python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\m_\\ecPro\pacal\\trunk\pacal\\examples\\springer_book\\Chapters678.py")
os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal_multiprocess\\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter3_sums.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter4_products.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter5_functions.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapters678.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\springer_book\\Chapter9_applications.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\noncentral.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type pdf D:\\m_\\ecPro\\pacal\\trunk\\pacal\\e
|
xamples\\dependent\\depvars_demo.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\order_stats.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\regression.py")
#os.system("D:\\prog\\Python27\\python D:\prog\pyt
|
hon_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\resistors.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\two_variables\\sum_dependent.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\linreg.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\diffeq_noise.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\dependent\\kalman.py")
#os.system("D:\\prog\\Python27\\python D:\prog\python_packages\pyreport-0.3.4c\pyreport\pyreport.py -e -l --type html D:\\m_\\ecPro\\pacal\\trunk\\pacal\\examples\\noncentral.py")
|
tombstone/models
|
research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor_tf1_test.py
|
Python
|
apache-2.0
| 8,728
| 0.001948
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v1_fpn_feature_extractor.
By using parameterized test decorator, this test serves for both Slim-based and
Keras-based Mobilenet V1 FPN feature extractors in SSD.
"""
import unittest
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v1_fpn_feature_extractor
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf2(), 'Skipping TF1.X only test.')
class SsdMobilenetV1FpnFeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
is_training=True, use_explicit_padding=False,
use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
is_training: whether the network is in training mode.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
del use_keras
min_depth = 32
return (ssd_mobilenet_v1_fpn_feature_extractor.
SSDMobileNetV1FpnFeatureExtractor(
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_depthwise=True,
use_explicit_padding=use_explicit_padding))
def test_extract_features_returns_correct_shapes_256(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_384(self):
image_height = 320
image_width = 320
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_with_dynamic_image_shape(self):
image_height = 256
image_width = 256
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 256), (2, 16, 16, 256),
(2, 8, 8, 256), (2, 4, 4, 256),
(2, 2, 2, 256)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
    expected_feature_map_shape = [(2, 40, 40, 256), (2, 20, 20, 256),
(2, 10, 10, 256), (2, 5, 5, 256),
(2, 3, 3, 256)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
    self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self):
image_height = 256
image_width = 256
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 32, 32, 32), (2, 16, 16, 32),
(2, 8, 8, 32), (2, 4, 4, 32),
(2, 2, 2, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=False,
use_keras=False)
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=False)
def test_extract_features_raises_error_with_invalid_image_size(
self):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=False)
def test_preprocess_returns_correct_value_range(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(2, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=False)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV1'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=False)
def test_variable_count(self):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=False)
self.assertEqual(len(variables), 153)
def test_fused_batchnorm(self):
image_height = 256
image_width = 256
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
                                       [1, image_height, image_width, 3])
|
soasme/rio
|
rio/blueprints/api_1.py
|
Python
|
mit
| 139
| 0
|
# -*- coding: utf-8 -*-
"""
rio.blueprints.api_1
    ~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Blueprint
bp = Blueprint('api_1', __name__)
|
greven/vagrant-django
|
project_name/settings/dev.py
|
Python
|
bsd-3-clause
| 233
| 0.012876
|
from .base import *
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import *
except ImportError:
pass
MIDDLEWARE_CLASSES += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
|
jordanemedlock/psychtruths
|
temboo/core/Library/Utilities/Encoding/Base64Encode.py
|
Python
|
apache-2.0
| 3,302
| 0.00424
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Base64Encode
# Returns the specified text or file as a Base64 encoded string.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Base64Encode(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Base64Encode Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Base64Encode, self).__init__(temboo_session, '/Library/Utilities/Encoding/Base64Encode')
def new_input_set(self):
return Base64EncodeInputSet()
def _make_result_set(self, result, path):
return Base64EncodeResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return Base64EncodeChoreographyExecution(session, exec_id, path)
class Base64EncodeInputSet(InputSet):
"""
    An InputSet with methods appropriate for specifying the inputs to the Base64Encode
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Text(self, value):
"""
Set the value of the Text input for this Choreo. ((conditional, string) The text that should be Base64 encoded. Required unless providing a value for the URL input.)
"""
super(Base64EncodeInputSet, self)._set_input('Text', value)
def set_URL(self, value):
"""
Set the value of the URL input for this Choreo. ((conditional, string) A URL to a hosted file that should be Base64 encoded. Required unless providing a value for the Text input.)
"""
super(Base64EncodeInputSet, self)._set_input('URL', value)
class Base64EncodeResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Base64Encode Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Base64EncodedText(self):
"""
Retrieve the value for the "Base64EncodedText" output from this Choreo execution. ((string) The Base64 encoded text.)
"""
return self._output.get('Base64EncodedText', None)
class Base64EncodeChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return Base64EncodeResultSet(response, path)
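# A minimal usage sketch (hypothetical credentials; assumes the standard Temboo
# session/execution flow):
#
#   from temboo.core.session import TembooSession
#   choreo = Base64Encode(TembooSession('ACCOUNT', 'APP_NAME', 'APP_KEY'))
#   inputs = choreo.new_input_set()
#   inputs.set_Text('hello world')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Base64EncodedText())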
|
marhar/cx_OracleTools
|
cx_PyOracleLib/cx_OracleObject/Statements.py
|
Python
|
bsd-3-clause
| 5,193
| 0.000193
|
"""Define statements for retrieving the data for each of the types."""
CONSTRAINTS = """
select
o.owner,
o.constraint_name,
o.constraint_type,
o.table_name,
o.search_condition,
o.r_owner,
o.r_constraint_name,
o.delete_rule,
o.deferred,
o.deferrable
from %(p_ViewPrefix)s_constraints o
%(p_WhereClause)s
and exists
( select 1
from %(p_ViewPrefix)s_tables
where owner = o.owner
and table_name = o.table_name
)
and (o.generated = 'USER NAME' or o.constraint_type in ('P', 'U'))
order by decode(o.constraint_type, 'P', 1, 'U', 2, 'R', 3, 'C', 4),
o.owner, o.constraint_name"""
CONTEXTS = """
select
namespace,
schema,
package,
type
from dba_context o
%(p_WhereClause)s
order by namespace"""
INDEXES_ANY = """
select
o.owner,
o.index_name,
o.table_name,
o.tablespace_name,
o.uniqueness,
o.initial_extent,
o.next_extent,
o.min_extents,
o.max_extents,
o.pct_increase,
o.index_type,
o.partitioned,
o.temporary,
o.compression,
o.prefix_length,
o.ityp_owner,
o.ityp_name,
o.parameters
from %(p_ViewPrefix)s_indexes o
%(p_WhereClause)s
and o.index_type in ('NORMAL', 'NORMAL/REV', 'IOT - TOP', 'BITMAP',
'FUNCTION-BASED NORMAL', 'FUNCTION-BASED NORMAL/REV',
'DOMAIN')"""
INDEXES = INDEXES_ANY + """
and not exists
( select 1
from %(p_ViewPrefix)s_constraints
where owner = o.owner
and constraint_name = o.index_name
)
order by o.owner, o.index_name"""
INDEX_PARTITIONS = """
select
o.index_owner,
o.partition_name,
o.high_value,
o.tablespace_name,
o.initial_extent,
o.next_extent,
o.min_extent,
o.max_extent,
o.pct_increase
from %(p_ViewPrefix)s_ind_partitions o
%(p_WhereClause)s
order by o.partition_position"""
LIBRARIES = """
select
o.owner,
o.library_name,
o.file_spec
from %(p_ViewPrefix)s_libraries o
%(p_WhereClause)s
order by o.owner, o.library_name"""
LOBS = """
select
o.owner,
o.column_name,
o.table_name,
o.segment_name,
o.in_row
from %(p_ViewPrefix)s_lobs o
%(p_WhereClause)s
order by o.column_name"""
ROLES = """
select
o.role,
o.password_required
from dba_roles o
%(p_WhereClause)s
order by o.role"""
SEQUENCES = """
select
o.sequence_owner,
o.sequence_name,
to_char(min_value),
to_char(max_value),
to_char(increment_by),
cycle_flag,
order_flag,
to_char(cache_size),
to_char(last_number)
from %(p_ViewPrefix)s_sequences o
%(p_WhereClause)s
order by o.sequence_owner, o.sequence_name"""
SYNONYMS = """
select
o.owner,
o.synonym_name,
o.table_owner,
o.table_name,
o.db_link
from %(p_ViewPrefix)s_synonyms o
%(p_WhereClause)s
order by decode(o.owner, 'PUBLIC', 0, 1), o.owner, o.synonym_name"""
TABLES = """
select
o.owner,
o.table_name,
o.tablespace_name,
o.initial_extent,
o.next_extent,
o.min_extents,
o.max_extents,
o.pct_increase,
o.temporary,
o.partitioned,
o.duration,
o.iot_type
from %(p_ViewPrefix)s_tables o
%(p_WhereClause)s
and secondary = 'N'
order by o.owner, o.table_name"""
TABLE_PARTITIONS = """
select
o.table_owner,
o.partition_name,
o.high_value,
o.tablespace_name,
o.initial_extent,
o.next_extent,
o.min_extent,
o.max_extent,
o.pct_increase
from %(p_ViewPrefix)s_tab_partitions o
%(p_WhereClause)s
order by o.partition_position"""
TRIGGERS = """
select
o.owner,
o.trigger_name,
o.table_name,
o.description,
o.when_clause,
o.action_type,
o.trigger_body
from %(p_ViewPrefix)s_triggers o
%(p_WhereClause)s
order by o.owner, o.trigger_name"""
USERS = """
select
o.username,
o.default_tablespace,
o.temporary_tablespace
from dba_users o
%(p_WhereClause)s
order by o.username"""
VIEWS = """
select
o.owner,
o.view_name,
o.text
from %(p_ViewPrefix)s_views o
%(p_WhereClause)s
order by o.owner, o.view_name"""
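# Usage sketch (illustrative): each constant above is a %-style template that
# the surrounding cx_OracleObject code fills in with a view prefix ("all" or
# "dba") and a where clause before executing it. The cursor and bind value
# below are assumptions, not part of this module.
#
#   sql = CONSTRAINTS % {
#       "p_ViewPrefix": "all",
#       "p_WhereClause": "where o.owner = :owner",
#   }
#   cursor.execute(sql, owner="SCOTT")   # cursor: an open cx_Oracle cursor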
|
heibanke/python_do_something
|
Code/Chapter5/base_classic_new_class.py
|
Python
|
apache-2.0
| 596
| 0.028523
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class A:
#classic class
"""this is class A"""
pass
__slots__=('x','y')
def test(self):
# classic class test
"""this is A.test()"""
print "A class"
class B(object):
    #new class
"""this is class B"""
__slots__=('x','y')
pass
def test(self):
# new class test
"""this is B.test()"""
        print "B class"
if __name__ == '__main__':
a=A()
b=B()
print dir(a)
print dir(b)
#a.x=1
#b.x=1
#help(a)
#help(b)
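# Illustrative follow-up (Python 2): __slots__ only takes effect on new-style
# classes, which is the difference this file demonstrates.
#
#   a.z = 1   # works: classic class A simply ignores __slots__
#   b.x = 1   # works: 'x' is declared in B.__slots__
#   b.z = 1   # AttributeError: 'B' object has no attribute 'z'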
|
ukBaz/python-bluezero
|
bluezero/peripheral.py
|
Python
|
mit
| 6,857
| 0
|
"""Classes required to create a Bluetooth Peripheral."""
# python-bluezero imports
from bluezero import adapter
from bluezero import advertisement
from bluezero import async_tools
from bluezero import localGATT
from bluezero import GATT
from bluezero import tools
logger = tools.create_module_logger(__name__)
class Peripheral:
"""Create a Bluetooth BLE Peripheral"""
def __init__(self, adapter_address, local_name=None, appearance=None):
self.app = localGATT.Application()
self.srv_mng = GATT.GattManager(adapter_address)
self.services = []
        self.characteristics = []
self.descriptors = []
self.primary_services = []
self.dongle = adapter.Adapter(adapter_address)
self.local_name = local_name
self.appearance = appearance
self.advert = advertisement.Advertisement(1, 'peripheral')
        self.ad_manager = advertisement.AdvertisingManager(adapter_address)
self.mainloop = async_tools.EventLoop()
def add_service(self, srv_id, uuid, primary):
"""
Add the service information required
:param srv_id: integer between 0 & 9999 as unique reference
:param uuid: The Bluetooth uuid number for this service
:param primary: boolean for if this service should be advertised
"""
self.services.append(localGATT.Service(srv_id, uuid, primary))
if primary:
self.primary_services.append(uuid)
def add_characteristic(self, srv_id, chr_id, uuid, value,
notifying, flags,
read_callback=None, write_callback=None,
notify_callback=None):
"""
Add information for characteristic.
:param srv_id: integer of parent service that was added
:param chr_id: integer between 0 & 9999 as unique reference
:param uuid: The Bluetooth uuid number for this characteristic
:param value: Initial value. list of integers in little endian format
:param notifying: Boolean representing initial state of notifications
:param flags: Defines how the characteristic value can be used. See
            Core spec "Table 3.5: Characteristic Properties bit field", and
            "Table 3.8: Characteristic Extended Properties bit field".
Allowed values:
- "broadcast"
- "read"
- "write-without-response"
- "write"
- "notify"
- "indicate"
- "authenticated-signed-writes"
- "extended-properties"
- "reliable-write"
- "writable-auxiliaries"
- "encrypt-read"
- "encrypt-write"
- "encrypt-authenticated-read"
- "encrypt-authenticated-write"
- "secure-read" (Server only)
- "secure-write" (Server only)
- "authorize"
:param read_callback: function to be called when read_value is called
by client. function should return python list of integers
representing new value of characteristic
:param write_callback: function to be called when write_value is called
by client. Function should have two parameters value and options.
value is python list of integers with new value of characteristic.
:param notify_callback: function to be called when notify_start or
notify_stop is called by client. Function should have two
parameters notifying and characteristic. The `characteristic`
            is the instantiation of a localGATT.Characteristic class
"""
self.characteristics.append(localGATT.Characteristic(
srv_id, chr_id, uuid, value, notifying, flags,
read_callback, write_callback, notify_callback
))
def add_descriptor(self, srv_id, chr_id, dsc_id, uuid, value, flags):
"""
Add information for the GATT descriptor.
:param srv_id: integer of parent service that was added
:param chr_id: integer of parent characteristic that was added
:param dsc_id: integer between 0 & 9999 as unique reference
:param uuid: The Bluetooth uuid number for this characteristic
:param value: Initial value. list of integers in little endian format
:param flags: Defines how the descriptor value can be used.
Possible values:
- "read"
- "write"
- "encrypt-read"
- "encrypt-write"
- "encrypt-authenticated-read"
- "encrypt-authenticated-write"
- "secure-read" (Server Only)
- "secure-write" (Server Only)
- "authorize"
"""
self.descriptors.append(localGATT.Descriptor(
srv_id, chr_id, dsc_id, uuid, value, flags
))
def _create_advertisement(self):
self.advert.service_UUIDs = self.primary_services
if self.local_name:
self.advert.local_name = self.local_name
if self.appearance:
self.advert.appearance = self.appearance
def publish(self):
"""Create advertisement and make peripheral visible"""
for service in self.services:
self.app.add_managed_object(service)
for chars in self.characteristics:
self.app.add_managed_object(chars)
for desc in self.descriptors:
self.app.add_managed_object(desc)
self._create_advertisement()
if not self.dongle.powered:
self.dongle.powered = True
self.srv_mng.register_application(self.app, {})
self.ad_manager.register_advertisement(self.advert, {})
try:
self.mainloop.run()
except KeyboardInterrupt:
self.mainloop.quit()
self.ad_manager.unregister_advertisement(self.advert)
@property
def on_connect(self):
"""
Callback for when a device connects to the peripheral.
Callback can accept 0, 1, or 2 positional arguments
1: a device.Device instance of the connected target
2: the local adapter address followed by the remote address
"""
return self.dongle.on_connect
@on_connect.setter
def on_connect(self, callback):
self.dongle.on_connect = callback
@property
def on_disconnect(self):
"""
Callback for when a device disconnects from the peripheral.
Callback can accept 0, 1, or 2 positional arguments
1: a device.Device instance of the disconnected target
2: the local adapter address followed by the remote address
"""
return self.dongle.on_disconnect
@on_disconnect.setter
def on_disconnect(self, callback):
self.dongle.on_disconnect = callback
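# Usage sketch (illustrative): building and publishing a minimal peripheral
# with the API above. The adapter address, UUIDs and read_value helper are
# placeholders.
#
#   def read_value():
#       return [0x0a, 0x00]   # little-endian list returned to the client
#
#   periph = Peripheral('00:11:22:33:44:55', local_name='demo')
#   periph.add_service(srv_id=1, uuid='180F', primary=True)
#   periph.add_characteristic(srv_id=1, chr_id=1, uuid='2A19', value=[],
#                             notifying=False, flags=['read'],
#                             read_callback=read_value)
#   periph.publish()   # blocks in the event loop until KeyboardInterrupt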
|
axelleonhart/TrainingDjango
|
materiales/apps/clientes/forms.py
|
Python
|
lgpl-3.0
| 2,527
| 0.002777
|
from django import forms
from apps.clientes.models import Cliente
from apps.clientes.choices import SEXO_CHOICES
import re
class ClienteForm(forms.ModelForm):
"""
    Declare the fields and attributes that will be shown on the form.
"""
sexo = forms.ChoiceField(choices=SEXO_CHOICES, required=True)
class Meta:
model = Cliente
fields = [
'nombre',
'sexo',
'direccion',
'email',
'fecha_nac',
]
labels = {
'nombre': 'Nombre',
'sexo': 'Sexo',
'direccion': 'Dirección',
'email': 'Email',
'fecha_nac': 'Fecha de Nacimiento',
}
widgets = {
'nombre': forms.TextInput(attrs={'class': 'form-control'}),
'direccion': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.EmailInput(attrs={'class': 'form-control'}),
            'fecha_nac': forms.TextInput(attrs={'class': 'form-control'}),
}
def clean_nombre(self):
"""
        Validate that the name is between 5 and 15 characters.
"""
nombre_lim = self.cleaned_data
nombre = nombre_lim.get('nombre')
if len(nombre) < 5:
raise forms.ValidationError(
"Debe de tener un mínimo de 5 caracteres")
elif len(nombre) > 15:
            raise forms.ValidationError(
                "Debe de tener un máximo de 15 caracteres")
return nombre
def clean_email(self):
"""
        Validate that the email is not already registered.
"""
email = self.cleaned_data['email']
if Cliente.objects.filter(email=email).exists():
            raise forms.ValidationError("El Email ya está dado de alta")
        if not re.match(r'^[a-z0-9._-]+@[a-z0-9._-]+\.[a-z]{2,15}$',
                        email.lower()):
raise forms.ValidationError("No es un email correcto")
return email
def clean_direccion(self):
"""
        Validate that the address is between 5 and 15 characters.
"""
direccion = self.cleaned_data['direccion']
if len(direccion) < 5:
raise forms.ValidationError(
"Debe de tener un mínimo de 5 caracteres")
elif len(direccion) > 15:
            raise forms.ValidationError(
                "Debe de tener un máximo de 15 caracteres")
return direccion
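# Usage sketch (illustrative): how a view might drive this form. The view
# function and template path are placeholders, not part of this app.
#
#   from django.shortcuts import render
#
#   def cliente_create(request):
#       form = ClienteForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()   # ModelForm persists the Cliente instance
#       return render(request, 'clientes/form.html', {'form': form})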
|
lejenome/my_scripts
|
scan_wifi.py
|
Python
|
gpl-2.0
| 2,384
| 0.001678
|
#!/bin/python3
import sys
import os
import tempfile
import pprint
import logging
from logging import debug, info, warning, error
def process_info(line):
line = line.strip()
arr = line.split(':')
    if len(arr) < 2:
return None, None
key = arr[0]
val = None
if key == "freq":
val = "{}Hz".format(arr[1].strip())
elif key == "signal":
val = "{}%".format(100 + int(float(arr[1].split()[0])))
elif key == "SSID":
val = arr[1].strip()
elif key == 'WPA':
val = True
elif key == "RSN":
val = True
elif key == "capability" and "Privacy" in arr[1]:
key = "Privacy"
val = True
return key, val
def print_scan(bsss):
for bss in bsss:
info = bsss[bss]
print("{} {}: ssid : {}".format("*" if "associated" in info
else " ",
bss,
info["SSID"]))
print(" signal : {}".format(info["signal"]))
print(" freq : {}".format(info["freq"]))
print(" security : {}".format(
"WPA2" if info.get("RSN", False) else
"WPA" if info.get("WPA", False) else
"WEP" if info.get("Privacy", False) else
"None"))
def main():
wifi_if = "wlp8s0"
if len(sys.argv) == 2:
wifi_if = sys.argv[1]
iw_out = tempfile.mktemp(suffix="iw", prefix="scan_wifi")
debug("iw output file: {}".format(iw_out))
r = os.system("sudo iw dev {} scan > {}".format(wifi_if, iw_out))
if r:
error("Error when scanning {}".format(wifi_if))
sys.exit(1)
f = open(iw_out, 'r')
    bsss = dict()
    cur_bss = None
    for line in f.readlines():
if line.startswith("BSS "):
cur_bss = line[4:21]
bsss[cur_bss] = dict()
if line.endswith("associated\n"):
bsss[cur_bss]["associated"] = True
elif not cur_bss:
            error("No associated BSS for current line: {}".format(line))
continue
else:
key, val = process_info(line)
if key and val:
bsss[cur_bss][key] = val
print_scan(bsss)
os.remove(iw_out)
if __name__ == "__main__":
# logging.basicConfig(level=logging.DEBUG)
main()
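# Quick check (illustrative) of process_info's conversions: iw prints signal
# strength in dBm, which the script maps onto a rough percentage.
#
#   >>> process_info("\tsignal: -55.00 dBm")
#   ('signal', '45%')      # 100 + int(-55.0) == 45
#   >>> process_info("\tfreq: 2412")
#   ('freq', '2412Hz')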
|
Noirello/PyLDAP
|
src/bonsai/active_directory/acl.py
|
Python
|
mit
| 13,344
| 0.000749
|
import struct
import uuid
from enum import IntEnum
from typing import List, Optional, Set
from .sid import SID
class ACEFlag(IntEnum):
""" ACE type-specific control flags. """
OBJECT_INHERIT = 0x01
CONTAINER_INHERIT = 0x02
NO_PROPAGATE_INHERIT = 0x04
INHERIT_ONLY = 0x08
INHERITED = 0x10
SUCCESSFUL_ACCESS = 0x40
FAILED_ACCESS = 0x80
@property
def short_name(self) -> str:
""" The SDDL short name of the flag. """
        short_names = {
            "OBJECT_INHERIT": "OI",
            "CONTAINER_INHERIT": "CI",
            "NO_PROPAGATE_INHERIT": "NP",
            "INHERIT_ONLY": "IO",
"INHERITED": "ID",
"SUCCESSFUL_ACCESS": "SA",
"FAILED_ACCESS": "FA",
}
return short_names[self.name]
class ACEType(IntEnum):
""" Type of the ACE. """
    ACCESS_ALLOWED = 0
ACCESS_DENIED = 1
SYSTEM_AUDIT = 2
SYSTEM_ALARM = 3
ACCESS_ALLOWED_COMPOUND = 4
ACCESS_ALLOWED_OBJECT = 5
ACCESS_DENIED_OBJECT = 6
SYSTEM_AUDIT_OBJECT = 7
SYSTEM_ALARM_OBJECT = 8
ACCESS_ALLOWED_CALLBACK = 9
ACCESS_DENIED_CALLBACK = 10
ACCESS_ALLOWED_CALLBACK_OBJECT = 11
ACCESS_DENIED_CALLBACK_OBJECT = 12
SYSTEM_AUDIT_CALLBACK = 13
SYSTEM_ALARM_CALLBACK = 14
SYSTEM_AUDIT_CALLBACK_OBJECT = 15
SYSTEM_ALARM_CALLBACK_OBJECT = 16
SYSTEM_MANDATORY_LABEL = 17
SYSTEM_RESOURCE_ATTRIBUTE = 18
SYSTEM_SCOPED_POLICY_ID = 19
@property
def short_name(self) -> str:
""" The SDDL short name of the type. """
short_names = {
"ACCESS_ALLOWED": "A",
"ACCESS_DENIED": "D",
"SYSTEM_AUDIT": "AU",
"SYSTEM_ALARM": "AL",
"ACCESS_ALLOWED_COMPOUND": "",
"ACCESS_ALLOWED_OBJECT": "OA",
"ACCESS_DENIED_OBJECT": "OD",
"SYSTEM_AUDIT_OBJECT": "OU",
"SYSTEM_ALARM_OBJECT": "OL",
"ACCESS_ALLOWED_CALLBACK": "XA",
"ACCESS_DENIED_CALLBACK": "XD",
"ACCESS_ALLOWED_CALLBACK_OBJECT": "ZA",
"ACCESS_DENIED_CALLBACK_OBJECT": "ZD",
"SYSTEM_AUDIT_CALLBACK": "XU",
"SYSTEM_ALARM_CALLBACK": "XL",
"SYSTEM_AUDIT_CALLBACK_OBJECT": "ZU",
"SYSTEM_ALARM_CALLBACK_OBJECT": "ZL",
"SYSTEM_MANDATORY_LABEL": "ML",
"SYSTEM_RESOURCE_ATTRIBUTE": "RA",
"SYSTEM_SCOPED_POLICY_ID": "SP",
}
return short_names[self.name]
@property
def is_object_type(self) -> bool:
""" Flag for ACE types with objects. """
return self in (
ACEType.ACCESS_ALLOWED_OBJECT,
ACEType.ACCESS_DENIED_OBJECT,
ACEType.SYSTEM_AUDIT_OBJECT,
ACEType.SYSTEM_ALARM_OBJECT,
ACEType.ACCESS_ALLOWED_CALLBACK_OBJECT,
ACEType.ACCESS_DENIED_CALLBACK_OBJECT,
ACEType.SYSTEM_AUDIT_CALLBACK_OBJECT,
ACEType.SYSTEM_ALARM_CALLBACK_OBJECT,
)
class ACERight(IntEnum):
""" The rights of the ACE. """
GENERIC_READ = 0x80000000
    GENERIC_WRITE = 0x40000000
GENERIC_EXECUTE = 0x20000000
GENERIC_ALL = 0x10000000
MAXIMUM_ALLOWED = 0x02000000
ACCESS_SYSTEM_SECURITY = 0x01000000
SYNCHRONIZE = 0x00100000
WRITE_OWNER = 0x00080000
WRITE_DACL = 0x00040000
READ_CONTROL = 0x00020000
DELETE = 0x00010000
DS_CONTROL_ACCESS = 0x00000100
DS_CREATE_CHILD = 0x00000001
DS_DELETE_CHILD = 0x00000002
ACTRL_DS_LIST = 0x00000004
DS_SELF = 0x00000008
DS_READ_PROP = 0x00000010
DS_WRITE_PROP = 0x00000020
DS_DELETE_TREE = 0x00000040
DS_LIST_OBJECT = 0x00000080
@property
def short_name(self) -> str:
""" The SDDL short name of the access right. """
short_names = {
"GENERIC_READ": "GR",
"GENERIC_WRITE": "GW",
"GENERIC_EXECUTE": "GX",
"GENERIC_ALL": "GA",
"MAXIMUM_ALLOWED": "MA",
"ACCESS_SYSTEM_SECURITY": "AS",
"SYNCHRONIZE": "SY",
"WRITE_OWNER": "WO",
"WRITE_DACL": "WD",
"READ_CONTROL": "RC",
"DELETE": "SD",
"DS_CONTROL_ACCESS": "CR",
"DS_CREATE_CHILD": "CC",
"DS_DELETE_CHILD": "DC",
"ACTRL_DS_LIST": "LC",
"DS_SELF": "SW",
"DS_READ_PROP": "RP",
"DS_WRITE_PROP": "WP",
"DS_DELETE_TREE": "DT",
"DS_LIST_OBJECT": "LO",
}
return short_names[self.name]
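# Illustrative note: an ACE access mask is the bitwise OR of these values, so
# it can be decomposed back into ACERight members the same way from_binary
# below decomposes flags. The example mask is arbitrary.
#
#   mask = 0x00020094
#   {right for right in ACERight if mask & right}
#   # -> {READ_CONTROL, ACTRL_DS_LIST, DS_READ_PROP, DS_LIST_OBJECT}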
class ACLRevision(IntEnum):
""" The ACL revision. """
ACL_REVISION = 0x02
ACL_REVISION_DS = 0x04
class ACE:
"""
A class for the access control entry, that encodes the user rights
afforded to a principal.
:param ACEType ace_type: the type of the ACE.
:param Set[ACEFlag] flags: the set of flags for the ACE.
:param int mask: the access mask to encode the user rights as an int.
:param SID trustee_sid: the SID of the trustee.
:param uuid.UUID|None object_type: a UUID that identifies a property
set, property, extended right, or type of child object.
:param uuid.UUID|None inherited_object_type: a UUID that identifies the
type of child object that can inherit the ACE.
:param bytes application_data: optional application data.
"""
def __init__(
self,
ace_type: ACEType,
flags: Set[ACEFlag],
mask: int,
trustee_sid: SID,
object_type: Optional[uuid.UUID],
inherited_object_type: Optional[uuid.UUID],
application_data: bytes,
) -> None:
self.__type = ace_type
self.__flags = flags
self.__mask = mask
self.__object_type = object_type
self.__inherited_object_type = inherited_object_type
self.__trustee_sid = trustee_sid
self.__application_data = application_data
@classmethod
def from_binary(cls, data: bytes) -> "ACE":
"""
Create an ACE object from a binary blob.
:param bytes data: a little-endian byte ordered byte input.
:returns: A new ACE instance.
:rtype: ACE
:raises TypeError: when the parameter is not bytes.
:raises ValueError: when the input cannot be parsed as an ACE
object.
"""
try:
if not isinstance(data, bytes):
raise TypeError("The `data` parameter must be bytes")
object_type = None
inherited_object_type = None
application_data = None
ace_type, flags, size, mask = struct.unpack("<BBHL", data[:8])
pos = 8
if ACEType(ace_type).is_object_type:
obj_flag = struct.unpack("<I", data[8:12])[0]
pos += 4
if obj_flag & 0x00000001:
object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
pos += 16
if obj_flag & 0x00000002:
inherited_object_type = uuid.UUID(bytes_le=data[pos : pos + 16])
pos += 16
trustee_sid = SID(bytes_le=data[pos:])
pos += trustee_sid.size
application_data = data[pos:size]
this = cls(
ACEType(ace_type),
{flg for flg in ACEFlag if flags & flg},
mask,
trustee_sid,
object_type,
inherited_object_type,
application_data,
)
return this
except struct.error as err:
raise ValueError(f"Not a valid binary ACE, {err}")
def __str__(self):
""" Return the SDDL string representation of the ACE object. """
flags = "".join(
flg.short_name for flg in sorted(self.flags, key=lambda f: f.value)
)
rights = "".join(
rgt.short_name for rgt in sorted(self.rights, key=lambda r: r.value)
)
object_guid = self.object_type if self.object_type else ""
inherit_object_guid = (
self.inherited_object_type if self.inherited_object_type else ""
)
sid = (
|
jun-zhang/device-manager
|
src/lib/ydevicemanager/devices.py
|
Python
|
gpl-2.0
| 8,950
| 0.01676
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# StartOS Device Manager(ydm).
# Copyright (C) 2011 ivali, Inc.
# hechao <[email protected]>, 2011.
__author__="hechao"
__date__ ="$2011-12-20 16:36:20$"
import gc
from xml.parsers import expat
from hwclass import *
class Device:
def __init__(self, dev_xml):
self.description = ''
self.product = ''
self.vendor = ''
self.version = ''
self.businfo = ''
self.logicalname = ''
self.date = ''
self.serial = ''
self.capacity = ''
self.width = ''
self.clock = ''
self.slot = ''
self.size = ''
self.config = {}
self.capability = []
self.attr = {}
self.dev_type = {}
self.pcid = {}
self._parser = expat.ParserCreate()
self._parser.buffer_size = 102400
self._parser.StartElementHandler = self.start_handler
self._parser.CharacterDataHandler = self.data_handler
self._parser.EndElementHandler = self.end_handler
self._parser.returns_unicode = False
fd = file(dev_xml)
self._parser.ParseFile(fd)
fd.close()
def start_handler(self, tag, attrs):
self.flag = tag
if tag == "node":
self.attr = attrs
elif tag == "setting":
self.config.setdefault(attrs["id"], attrs["value"])
elif tag == "capability":
self.capability.append(attrs["id"])
def data_handler(self, data):
if(data == '\n'):
return
if(data.isspace()):
return
if self.flag == "description":
self.description = data.strip()
elif self.flag == "product":
self.product = data.strip()
elif self.flag == "vendor":
self.vendor = data.strip()
elif self.flag == "businfo":
self.businfo = data.strip()
elif self.flag == "logicalname":
self.logicalname = data.strip()
elif self.flag == "version":
self.version = data.strip()
elif self.flag == "date":
self.date = data.strip()
elif self.flag == "serial":
self.serial = data.strip()
elif self.flag == "capacity":
self.capacity = data.strip()
elif self.flag == "width":
self.width = data.strip()
elif self.flag == "clock":
self.clock = data.strip()
elif self.flag == "slot":
self.slot = data.strip()
elif self.flag == "size":
self.size = data.strip()
def end_handler(self, tag):
if tag == "node":
if self.attr["class"] == "system":
system = System(self.description, self.product, self.vendor, self.version, \
self.serial, self.width, self.config, self.capability)
self.dev_type.setdefault((0, "system"), []).append(system)
elif self.attr["id"].split(":")[0] == "cpu" and self.attr["class"] == "processor":
cpu = Cpu(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.slot, self.size, self.capacity, self.width, self.clock, self.config, self.capability)
self.dev_type.setdefault((1, "cpu"), []).append(cpu)
elif self.attr["id"].split(":")[0] == "cache" and self.attr["class"] == "memory":
cache = Cache(self.description, self.product, self.vendor, self.version, self.slot, self.size)
self.dev_type.setdefault((1, "cpu"), []).append(cache)
elif (self.attr["id"] == "core" or self.attr["id"] == "board") and self.attr["class"] == "bus":
                motherboard = Motherboard(self.description, self.product, self.vendor, self.version, self.serial)
self.dev_type.setdefault((2, "motherboard"), []).append(motherboard)
elif self.attr["id"] == "firmware" and self.attr["class"] == "memory":
                bios = Bios(self.description, self.product, self.vendor, self.version, \
self.date, self.size, self.capability)
self.dev_type.setdefault((2, "motherboard"), []).append(bios)
elif self.attr["id"].split(":")[0] == "memory" and self.attr["class"] == "memory":
memory = Memory(self.description, self.product, self.vendor, self.version, \
self.slot, self.size)
self.dev_type.setdefault((3, "memory"), []).append(memory)
elif self.attr["id"].split(":")[0] == "bank" and self.attr["class"] == "memory":
bank = Bank(self.description, self.product, self.vendor, self.version, \
self.serial, self.slot, self.size, self.width, self.clock)
self.dev_type.setdefault((3, "memory"), []).append(bank)
elif self.attr["id"].split(":")[0] == "display" and self.attr["class"] == "display":
display = Display(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((4, "display"), []).append(display)
self.pcid[display.pcid] = "display"
if get_monitor():
monitor = Monitor("", "", "", "")
self.dev_type.setdefault((5, "monitor"), [monitor])#.append(monitor)
elif self.attr["id"].split(":")[0] == "disk" and self.attr["class"] == "disk":
disk = Disk(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.size, self.config, self.capability)
self.dev_type.setdefault((6, "disk"), []).append(disk)
elif self.attr["id"].split(":")[0] == "cdrom" and self.attr["class"] == "disk":
cdrom = Cdrom(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.config, self.capability)
self.dev_type.setdefault((7, "cdrom"), []).append(cdrom)
elif self.attr["class"] == "storage" and self.attr["handle"]:
storage = Storage(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.config, self.capability)
self.dev_type.setdefault((8, "storage"), []).append(storage)
elif (self.attr["class"] == "network") or (self.attr["id"].split(":")[0] == "bridge" \
and self.attr["class"] == "bridge"):
network = Network(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.capacity, self.config, self.capability)
self.dev_type.setdefault((9, "network"), []).append(network)
self.pcid[network.pcid] = "network"
elif self.attr["class"] == "multimedia":
media = Multimedia(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((10, "multimedia"), []).append(media)
self.pcid[media.pcid] = "multimedia"
elif self.attr["class"] == "input":
imput = Imput(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((11, "input"), []).append(imput)
self.pcid[imput.pcid] = "input"
elif self.attr["id"].split(":")[0] != "generic" and self.attr["class"] == "generic":
generic = Generic(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.config, self.capability)
self.dev_type.setdefault((12, "generic"), []).append(generic)
self.pcid[generic.pcid] = "generic"
elif self.attr["id"].split(":")[0] != "communication" and self.attr["class"] == "communication":
modem = Modem(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.config, self.capability)
self.dev_type.setdefault((12, "generic"), []).append(modem)
elif self.attr["id"].split(":")[0] == "battery"
|
endlessm/chromium-browser
|
components/policy/tools/template_writers/writers/android_policy_writer_unittest.py
|
Python
|
bsd-3-clause
| 3,381
| 0.001479
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for writers.android_policy_writer'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../..'))
import unittest
from xml.dom import minidom
from writers import writer_unittest_common
from writers import android_policy_writer
class AndroidPolicyWriterUnittest(writer_unittest_common.WriterUnittestCommon):
'''Unit tests to test assumptions in Android Policy Writer'''
def testPolicyWithoutItems(self):
# Test an example policy without items.
policy = {
'name': '_policy_name',
'caption': '_policy_caption',
'desc': 'This is a long policy caption. More than one sentence '
'in a single line because it is very important.\n'
'Second line, also important'
}
writer = android_policy_writer.GetWriter({})
writer.Init()
writer.BeginTemplate()
writer.WritePolicy(policy)
self.assertEquals(
writer._resources.toxml(), '<resources>'
'<string name="_policy_nameTitle">_policy_caption</string>'
'<string name="_policy_nameDesc">This is a long policy caption. More '
'than one sentence in a single line because it is very '
'important.\nSecond line, also important'
'</string>'
'</resources>')
def testPolicyWithItems(self):
    # Test an example policy with items.
policy = {
'name':
'_policy_name',
'caption':
'_policy_caption',
'desc':
'_policy_desc_first.\nadditional line',
'items': [{
'caption': '_caption1',
'value': '_value1',
}, {
'caption': '_caption2',
'value': '_value2',
},
{
'caption': '_caption3',
'value': '_value3',
'supported_on': [{
'platform': 'win'
}, {
'platform': 'win7'
}]
},
{
'caption':
'_caption4',
'value':
'_value4',
'supported_on': [{
'platform': 'android'
}, {
'platform': 'win7'
}]
}]
}
writer = android_policy_writer.GetWriter({})
writer.Init()
writer.BeginTemplate()
writer.WritePolicy(policy)
self.assertEquals(
writer._resources.toxml(), '<resources>'
'<string name="_policy_nameTitle">_policy_caption</string>'
'<string name="_policy_nameDesc">_policy_desc_first.\n'
'additional line</string>'
'<string-array name="_policy_nameEntries">'
'<item>_caption1</item>'
'<item>_caption2</item>'
'<item>_caption4</item>'
'</string-array>'
'<string-array name="_policy_nameValues">'
'<item>_value1</item>'
'<item>_value2</item>'
'<item>_value4</item>'
'</string-array>'
'</resources>')
if __name__ == '__main__':
unittest.main()
|
dirtchild/weatherPi
|
weatherSensors/windDirection.py
|
Python
|
gpl-3.0
| 2,252
| 0.030195
|
#!/usr/bin/env python
# reads data from wind direction thingy (see README)
# labels follow those set out in the Wunderground PWS API:
# http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol
#
# SOURCES:
# RETURNS: a SensorReading object for wind direction
# CREATED: 2017-08-02
# ORIGINAL SOURCE: https://github.com/dirtchild/weatherPi [please do not remove this line]
# MODIFIED: see https://github.com/dirtchild/weatherPi
from SensorData import SensorReading
import time
import Adafruit_ADS1x15
import convertors
import windDirection
import sys
sys.path.append("../")
from config import *
def getReading():
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
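    # Illustrative conversion (assumption, not used below): a signed 16-bit
    # reading maps to volts as raw * (full_scale / 32767.0); at GAIN = 16 the
    # full scale is 0.256 V, so raw 16000 corresponds to roughly 0.125 V.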
GAIN = 16
# the channel on the ADC to use
CHANNEL = 0
    # Create an ADS1115 ADC (16-bit) instance and do stuff with it
adc = Adafruit_ADS1x15.ADS1115()
adc.start_adc(CHANNEL, gain=GAIN)
start = time.time()
value = 0
totalVoltage = 0
cnt = 0
#DEBUG
#print("[PRE]adc.get_last_result()[",adc.get_last_result(),"]")
while (time.time() - start) <= 5.0:
# will sometimes give negative results
thisRead = -1
while thisRead < 1:
thisRead = adc.get_last_result()
#DEBUG: finding they are about a decimal place out
#DEBUG: hacky
#DEBUG
#print(cnt,": thisRead[",thisRead,"]")
totalVoltage += thisRead / 10 #DEBUG: /10 to get it into a measurable range. this is bad and wrong
cnt += 1
time.sleep(0.5)
#DEBUG
#print("[POST]adc.get_last_result()[",adc.get_last_result(),"]")
# Stop continuous conversion. After this point you can't get data from get_last_result!
adc.stop_adc()
avgVoltage = totalVoltage / cnt
#DEBUG
#print("avgVoltage[",avgVoltage,"] = totalVoltage[",totalVoltage,"] / cnt[",cnt,"] (G:[",GAIN,"] C:[",CHANNEL,"])")
return(SensorReading("winddir", "winddir", convertors.voltToDeg(avgVoltage,WIND_READ_VOLT,WIND_DIR_MOUNT_ADJ), "degree angle"))
# for testing
def main():
print(windDirection.getReading())
if __name__ == "__main__": main()
|