Dataset columns:
  repo_name  string   length 5 to 100
  path       string   length 4 to 231
  language   string   1 distinct value
  license    string   15 distinct values
  size       int64    6 to 947k
  score      float64  0 to 0.34
  prefix     string   length 0 to 8.16k
  middle     string   length 3 to 512
  suffix     string   length 0 to 8.17k
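Each record below pairs that metadata with a Python source file split into prefix / middle / suffix slices, the usual layout for fill-in-the-middle training data. As a minimal, hypothetical sketch of consuming such records (the real dataset identifier is not given here, so "user/python-fim-corpus" is only a placeholder, and a Hugging Face `datasets`-style layout is assumed rather than confirmed):

# Hypothetical sketch: the dataset name below is a placeholder, and a
# Hugging Face `datasets`-style layout is assumed, not confirmed by this dump.
from datasets import load_dataset

ds = load_dataset("user/python-fim-corpus", split="train", streaming=True)

for record in ds:
    # Rebuild the original snippet from its three slices.
    source = record["prefix"] + record["middle"] + record["suffix"]
    print(record["repo_name"], record["path"], record["license"], record["score"])
    print(source)
    break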
luca-heltai/ePICURE
applications/lib/progress_bar.py
Python
gpl-2.0
1,682
0.030321
from lib.font import *
import sys
import fcntl
import termios
import struct


class progress_bar(object):
    def __init__(self, tot=100, lenght=10):
        self.cp = '/-\|'
        self.bar_lenght = lenght
        self.tot = tot

    def startprogress(self, title):
        """Creates a progress bar 40 chars long on the console
        and moves cursor back to beginning with BS character"""
        sys.stdout.write(title + ": [" + "-" * self.bar_lenght +
                         "]" + chr(8) * (self.bar_lenght + 1))
        sys.stdout.flush()

    def progress(self, x):
        """Sets progress bar to a certain percentage x.
        Progress is given as whole percentage, i.e. 50% done is given by x = 50"""
        y = int(x) % 4
        z = int((x / float(self.tot)) * self.bar_lenght)
        sys.stdout.write("#" * z + self.cp[y] + "-" * (self.bar_lenght - 1 - z) + "] " +
                         bold(str(int(x)) + "/" + str(self.tot)) +
                         chr(8) * (self.bar_lenght + 4 + len(str(int(x))) + len(str(self.tot))))
        sys.stdout.flush()

    def endprogress(self):
        """End of progress bar; Write full bar, then move to next line"""
        sys.stdout.write("#" * self.bar_lenght + "]\n")
        sys.stdout.flush()


class all_line_progress_bar(object):
    def __init__(self):
        self.COLS = struct.unpack('hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234'))[1]

    def progress(self, current, total):
        prefix = '%d / %d' % (current, total)
        bar_start = ' ['
        bar_end = '] '
        bar_size = self.COLS - len(prefix + bar_start + bar_end)
        amount = int(current / (total / float(bar_size)))
        remain = bar_size - amount
        bar = '#' * amount + ' ' * remain
        return bold(prefix) + bar_start + bar + bar_end

    def bar(self, current, total):
        sys.stdout.write(self.progress(current, total) + '\r')
        sys.stdout.flush()
regmi/codenode-unr
codenode/external/jsonrpc/types.py
Python
bsd-3-clause
1,860
0.013978
def _types_gen(T):
    yield T
    if hasattr(T, 't'):
        for l in T.t:
            yield l
            if hasattr(l, 't'):
                for ll in _types_gen(l):
                    yield ll


class Type(type):
    """
    A rudimentary extension to `type` that provides polymorphic
    types for run-time type checking of JSON data types. IE:

      assert type(u'') == String
      assert type('') == String
      assert type('') == Any
      assert Any.kind('') == String
      assert Any.decode('str') == String
      assert Any.kind({}) == Object
    """

    def __init__(self, *args, **kwargs):
        type.__init__(self, *args, **kwargs)

    def __eq__(self, other):
        for T in _types_gen(self):
            if isinstance(other, Type):
                if T in other.t:
                    return True
            if type.__eq__(T, other):
                return True
        return False

    def __str__(self):
        return getattr(self, '_name', 'unknown')

    def N(self, n):
        self._name = n
        return self

    def I(self, *args):
        self.t = list(args)
        return self

    def kind(self, t):
        if type(t) is Type:
            return t
        ty = lambda t: type(t)
        if type(t) is type:
            ty = lambda t: t
        return reduce(
            lambda L, R: R if (hasattr(R, 't') and ty(t) == R) else L,
            filter(lambda T: T is not Any, _types_gen(self)))

    def decode(self, n):
        return reduce(
            lambda L, R: R if (str(R) == n) else L,
            _types_gen(self))


# JSON primatives and data types
Object = Type('Object', (object,), {}).I(dict).N('obj')
Number = Type('Number', (object,), {}).I(int, long).N('num')
Boolean = Type('Boolean', (object,), {}).I(bool).N('bit')
String = Type('String', (object,), {}).I(str, unicode).N('str')
Array = Type('Array', (object,), {}).I(list, set, tuple).N('arr')
Nil = Type('Nil', (object,), {}).I(type(None)).N('nil')
Any = Type('Any', (object,), {}).I(
    Object, Number, Boolean, String, Array, Nil).N('any')
pwong-mapr/private-hue
apps/sqoop/src/sqoop/api/job.py
Python
apache-2.0
8,088
0.011375
#!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, so
ftware # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under th
e License. try: import json except ImportError: import simplejson as json import logging import socket from django.http import HttpResponse from django.utils.translation import ugettext as _ from sqoop import client, conf from sqoop.client.exception import SqoopException from decorators import get_job_or_exception from desktop.lib.exceptions import StructuredException from desktop.lib.rest.http_client import RestException from exception import handle_rest_exception from utils import list_to_dict from django.views.decorators.cache import never_cache __all__ = ['get_jobs', 'create_job', 'update_job', 'job', 'jobs', 'job_clone', 'job_delete', 'job_start', 'job_stop', 'job_status'] LOG = logging.getLogger(__name__) @never_cache def get_jobs(request): response = { 'status': 0, 'errors': None, 'jobs': [] } try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) jobs = c.get_jobs() response['jobs'] = list_to_dict(jobs) except RestException, e: response.update(handle_rest_exception(e, _('Could not get jobs.'))) return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache def create_job(request): if request.method != 'POST': raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'job': None } if 'job' not in request.POST: raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400) d = json.loads(request.POST['job']) job = client.Job.from_dict(d) try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) response['job'] = c.create_job(job).to_dict() except RestException, e: response.update(handle_rest_exception(e, _('Could not create job.'))) except SqoopException, e: response['status'] = 100 response['errors'] = e.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache def update_job(request, job): if request.method != 'POST': raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'job': None } if 'job' not in request.POST: raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving job'), data={'errors': 'job is missing.'}, error_code=400) job.update_from_dict(json.loads(request.POST['job'])) try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) response['job'] = c.update_job(job).to_dict() except RestException, e: response.update(handle_rest_exception(e, _('Could not update job.'))) except SqoopException, e: response['status'] = 100 response['errors'] = e.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache def jobs(request): if request.method == 'GET': return get_jobs(request) elif request.method == 'POST': return create_job(request) else: raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405) @never_cache @get_job_or_exception() def job(request, job): response = { 'status': 0, 'errors': None, 'job': None } if request.method == 'GET': response['job'] = job.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json") elif request.method == 'POST': return update_job(request, job) else: raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405) @never_cache @get_job_or_exception() def 
job_clone(request, job): if request.method != 'POST': raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'job': None } job.id = -1 job.name = '%s-copy' % job.name try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) response['job'] = c.create_job(job).to_dict() except RestException, e: response.update(handle_rest_exception(e, _('Could not clone job.'))) except SqoopException, e: response['status'] = 100 response['errors'] = e.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache @get_job_or_exception() def job_delete(request, job): if request.method != 'POST': raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'job': None } try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) c.delete_job(job) except RestException, e: response.update(handle_rest_exception(e, _('Could not delete job.'))) except SqoopException, e: response['status'] = 100 response['errors'] = e.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache @get_job_or_exception() def job_start(request, job): if request.method != 'POST': raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'submission': None } try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) response['submission'] = c.start_job(job).to_dict() except RestException, e: response.update(handle_rest_exception(e, _('Could not start job.'))) except SqoopException, e: response['status'] = 100 response['errors'] = [e.to_dict()] return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache @get_job_or_exception() def job_stop(request, job): if request.method != 'POST': raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'submission': None } try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) response['submission'] = c.stop_job(job).to_dict() except RestException, e: response.update(handle_rest_exception(e, _('Could not stop job.'))) except SqoopException, e: response['status'] = 100 response['errors'] = e.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json") @never_cache @get_job_or_exception() def job_status(request, job): if request.method != 'GET': raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405) response = { 'status': 0, 'errors': None, 'submission': None } try: c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE) response['submission'] = c.get_job_status(job).to_dict() except RestException, e: response.update(handle_rest_exception(e, _('Could not get job status.'))) except SqoopException, e: response['status'] = 100 response['errors'] = e.to_dict() return HttpResponse(json.dumps(response), mimetype="application/json")
Darkade/udlap
6to/simplex.py
Python
apache-2.0
14,976
0.049195
#!/usr/bin/env python # -*- coding: latin-1 -*- import sys #################### <FUNCIONES> #################################################################### ##<Obtener el vector C>################################################################################################### def GetVectorC(eq): C=[] j=0 for i in range(len(eq)): #Recorre toda la cadena de la restriccion if eq[i]=="+" or eq[i]=="-": #Indicador de que se ha encontrado un signo de adicion,_ #es decir acontinuacion hay un coeficiente j=i if eq[i]=="X" or eq[i]=="x": #Agrega al vector y el coeficiente que se encontro C.append(float(eq[j:i])) j=i return C ##</Obtener el vector C>################################################################################################### ##<Encontrar Zj-Cj>################################################################################################### def GetZjCj(A,C,Cb): ZC=[] #Inicializa un vector ZC que contendra los Zj - Cj for i in range(len(C)): #para cada valor en el vector C (al que ya se agregaron las A's y las h's) ZjCj=0 #inicializa una sumatoria for j in range(len(A)): #Multiplica Cb por Yi y resta el Ci correspondiente ZjCj=ZjCj + float(A[j][i]*Cb[j]) ZjCj=ZjCj-C[i] ZC.append(ZjCj) #agrega el resultado y obtenemos nuestro Zj-Cj inicial return ZC #regresa el vector Z-C ##</Encontrar Zj-Cj>################################################################################################### ##<Encontrar Vector Cb>################################################################################################### def GetCB(C,ColBase): Cb=[] #este vector contendra las posiciones for NColBase in ColBase : #en el vector ColBase estan las posiciones de los ei. for i in range(len(C)): #y para cada una de esas posiciones de la columna vamos a if i==NColBase : #recorrer el vector C Cb.append(C[i]) #si estamos en la posicion de e1, esa se agrega primero a CB y asi return Cb #sucesivamente ##</Encontrar Vector Cb>################################################################################################### ##<Creación de la matriz A>################################################################################################### def MatrixA(restricciones,C,M,min_max): A=[] XB=[] D=[] ##agrega a la matrix A los valores de los coeficientes de las retriscciones for rest in restricciones : y=[] j=0 for i in range(len(rest)): #Recorre toda la cadena de la restriccion if rest[i]=="+" or rest[i]=="-": #Indicador de que se ha encontrado un signo de adicion, es decir acontinuacion hay un coeficiente j=i if rest[i]=="X" or rest[i]=="x": #Agrega al vector y el coeficiente que se encontro y.append(float(rest[j:i])) j=i if rest[i]=="<" or rest[i]==">" or rest[i]=="=" : D.append(rest[i:i+2]) #agrega a D la restriccion para agregar las H, A XB.append(float(rest[i+2:])) #Hace arreglo con los valores de las restricciones break if y != [] : A.append(y) ##Agrega a A los coeficientes de las variables de holgura for i in range(len(D)): #Recorre las direcciones de las restricciones if D[i] == "<=": for j in range(len(D)): #Recorre las ecuaciones if j ==i : #Si es la ecuacion correspondiente a la restriccion A[j].append(1) #apendisa 1 C.append(0) else : A[j].append(0) #Otro caso apendiza 0 elif D[i] == ">=": #Análogo for j in range(len(D)): if j == i : A[j].append(-1) C.append(0) else : A[j].append(0) ##Agrega a A los coeficientes de las variables ARTIFICIALES for i in range(len(D)): #Recorre las direcciones de las restricciones if D[i] == "==": for j in range(len(D)): #Recorre las ecuaciones if j 
==i : #Si es la ecuacion correspondiente a la restriccion A[j].append(1) #apendisa 1 if min_max == "M" : C.append( -1 * M ) else : C.append( M ) else : A[j].append(0) #Otro caso apendiza 0 elif D[i] == ">=": #Análogo for j in range(len(D)): if j == i : A[j].append(1) if min_max == "M" : C.append( -1 * M ) else : C.append( M ) else : A[j].append(0) return A, XB, C ##</Creacion de la matrix A>###################################################### ##<Imprimir una matrix>########################################################### def MatrixPrint(Matrix): MatrixFormat="" for i in range(len(Matrix)) : for j in range(len(Matrix[i])) : MatrixFormat = MatrixFormat + str(Matrix[i][j]) + ' ' MatrixFormat = MatrixFormat + "\n" return MatrixFormat ##</Imprimir una matrix>########################################################### ##<Imprimir un vector>########################################################### def VectorPrint(Matrix): MatrixFormat="[ " for i in range(len(Matrix)) : if i == (len(Matrix) - 1) : MatrixFormat = MatrixFormat + str(Matrix[i]) else : MatrixFormat = MatrixFormat + str(Matrix[i]) + ', ' return MatrixFormat + " ]" ##</Imprimir un vector>########################################################### ##<Identificar la Base>############################################################ def Base(A): B=[] #inicializa la matriz B. esta matriz contiene las ORDENADAS posiciones de la base canonica CB=[] #Contiene las posiciones de la base ordenadas de derecha a izquierda for j in range(len(A[1])-1,0,-1) : #Recorremos las columnas de la matriz a del final al inicio Bt=[] #Bt contiene la columna i-esima de la matriz A for i in range(len(A)) : #Asignacion de Bt Bt.append(A[i][j]) if Bt.count(1) == 1 : #si en Bt solo se encuentra un 1, es decir Bt=ei se agrega suposicion a la lista CB CB.append(j) if len(CB)>=len(A) : #Condicion de parada, si de derecha a izquierda hay mas vectores ei que los que break #la base canonica del problema contine sale deja de buscar vectores ei for i in range(len(A)): #Recorre los renglores de A for j in CB : #^solo en las columnas en las que se encontraron vectores ei if A[i][j] == 1 : #y ordena para tener la matriz canonica B.append(j) return B ##</Identificar la Base>############################################################ ##<Identificar variables de entrada y salida>############################################################ def EntradaSalida(A,ZC,XB,min_max) : entrada = 0 #iniciamos la entrada, es decir el valor j en cero salida = 0 if min_max == "M" : for i in range(1,len(ZC)) : #recorre todo ZC empezando por la segunda posicion, variando i if ZC[ i ] <= ZC[ entrada ] : #compara i con la posicion de entrada,para la primera vuelta ZC[1] con ZC[0] entrada = i #si la posicion i es menor a la posicion anterior se reasigna la entrada else : for i in range(1,len(ZC)) : #recorre todo ZC empezando por la segunda posicion, variando i if ZC[ i ] >= ZC[ entrada ] : #compara i con la posicion de entrada,para la primera vuelta ZC[1] con ZC[0] entrada = i #si la posicion i es menor a la posicion anterior se reasigna for j in range(len(A)) : #protege de dividir por cero if A[ j ][ entrada ] > 0 : salida = j break for j in range(1,len(A)) : #analógo pero con la divicion de XB/Yij, cuando se encuentra el menor se leasigna a if A[ j ][ entrada ] > 0 : #Protege de dividir por cero if XB[ j ]/A[ j ][ entrada ] <= XB[ salida ]/A[ salida ][ entrada ] : salida = j return entrada, salida ##</Identificar variables de entrada y salida>#####
####################################################### ##<Calcular las ecuaciones de transformacion>######################################################
###### def Ecuaciones_Trans(A,XB,ZC,entrada,salida) : if wo == False : print "Entra: " + str(entrada) + " Sale: " +str(salida) +"\nYij:" else : output.write("\n\nEntra: " + str(entrada) + " Sale: " +str(salida) +"\nYij:\n") Yij=[] ##Calcular Y###### for i in range(l
antoinecarme/pyaf
tests/artificial/transf_None/trend_LinearTrend/cycle_12/ar_/test_artificial_128_None_LinearTrend_12__20.py
Python
bsd-3-clause
262
0.087786
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art


art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 12, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
google/flight-lab
controller/common/net.py
Python
apache-2.0
1,062
0.00565
# Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""

import socket


def get_ip():
  """Get primary IP (the one with a default route) of local machine.

  This works on both Linux and Windows platforms, and doesn't require
  working internet connection.
  """
  s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  try:
    # doesn't even have to be reachable
    s.connect(('10.255.255.255', 1))
    return s.getsockname()[0]
  except:
    return '127.0.0.1'
  finally:
    s.close()
petercable/mi-dataset
mi/dataset/parser/test/test_fuelcell_eng_dcl.py
Python
bsd-2-clause
9,992
0.001601
#!/usr/bin/env python """ @package mi.dataset.parser.test @file mi-dataset/mi/dataset/parser/test/test_fuelcell_eng_dcl.py @author Chris Goodrich @brief Test code for the fuelcell_eng_dcl parser Release notes: initial release """ __author__ = 'cgoodrich' import os from nose.plugins.attrib import attr from mi.core.exceptions import ConfigurationException from mi.dataset.dataset_parser import DataSetDriverConfigKeys from mi.dataset.driver.fuelcell_eng.dcl.resource import RESOURCE_PATH from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParser from mi.dataset.parser.fuelcell_eng_dcl import FuelCellEngDclParticleClassKey,\ FuelCellEngDclDataParticleRecovered,\ FuelCellEngDclDataParticleTelemetered from mi.dataset.test.test_parser import ParserUnitTestCase from mi.logging import log @attr('UNIT', group='mi') class FuelCellEngDclParserUnitTestCase(ParserUnitTestCase): """ fuelcell_eng_dcl Parser unit test suite """ def setUp(self): ParserUnitTestCase.setUp(self) self._recovered_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleRecovered } } self._telemetered_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: { FuelCellEngDclParticleClassKey.ENGINEERING_DATA_PARTICLE_CLASS: FuelCellEngDclDataParticleTelemetered } } self._incomplete_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None } self._bad_parser_config = { DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.fuelcell_eng_dcl', DataSetDriverConfigKeys.PARTICLE_CLASS: None, DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {}
} def test_simple(self): """ Read
file and verify that all expected particles can be read. Verify that the contents of the particles are correct. This is the happy path. """ log.debug('===== START TEST SIMPLE =====') num_particles_to_request = 25 num_expected_particles = 20 # Test the recovered version log.debug('------ RECOVERED ------') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) self.assert_particles(particles, 'recovered_20141207s.pwrsys.yml', RESOURCE_PATH) # Test the telemetered version log.debug('----- TELEMETERED -----') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._telemetered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) self.assert_particles(particles, 'telemetered_20141207s.pwrsys.yml', RESOURCE_PATH) log.debug('===== END TEST SIMPLE =====') def test_bigfile(self): """ Read file and verify that all expected particles can be read. Verify that the expected number of particles are produced. Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BIGFILE =====') num_particles_to_request = num_expected_particles = 870 with open(os.path.join(RESOURCE_PATH, '20141207.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) log.debug('===== END TEST BIGFILE =====') def test_bad_checksum(self): """ Read file and verify that all expected particles can be read. There are two lines with bad checksums in the file. The checksum after the colon is incorrect on lines 10 and 23 of the input file. Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BAD CHECKSUM =====') num_particles_to_request = num_expected_particles = 18 with open(os.path.join(RESOURCE_PATH, '20141207s_bcs.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) log.debug('===== END TEST BAD CHECKSUM =====') def test_badly_formed(self): """ Read file and verify that all expected particles can be read. 
Line 1 Improperly formatted - No particle generated Line 2 Improperly formatted - No particle generated Line 9 - Bad checksum - No particle generated No fuel cell data present on line 11 - No particle generated No fuel cell data present on line 12 - No particle generated No fuel cell data present on line 13 - No particle generated No fuel cell data present on line 14 - No particle generated No fuel cell data present on line 15 - No particle generated Line 20 - Bad checksum - No particle generated Line 24 Improperly formatted - No particle generated Line 26 Improperly formatted - No particle generated Line 27 Improperly formatted - No particle generated Line 28 Bad/Missing Timestamp - No particle generated Line 29 Bad/Missing Timestamp - No particle generated Line 30 No data found - No particle generated Line 31 No terminator found - No particle generated Line 32 Improper format - No particle generated Only one test is run as the content of the input files is the same for recovered or telemetered. """ log.debug('===== START TEST BADLY FORMED =====') num_particles_to_request = 33 num_expected_particles = 16 with open(os.path.join(RESOURCE_PATH, '20141207_badform.pwrsys.log'), 'rU') as file_handle: parser = FuelCellEngDclParser(self._recovered_parser_config, file_handle, self.exception_callback) particles = parser.get_records(num_particles_to_request) self.assertEquals(len(particles), num_expected_particles) log.debug('===== END TEST BADLY FORMED =====') def test_bad_configuration(self): """ Attempt to build a parser with a bad configuration. """ log.debug('===== START TEST BAD CONFIGURATION =====') with open(os.path.join(RESOURCE_PATH, '20141207s.pwrsys.log'), 'rU') as file_handle: with self.assertRaises(ConfigurationException): parser = FuelCellEngDclParser(self._bad_parser_config, file_handle,
benadida/helios-server
helios_auth/auth_systems/github.py
Python
apache-2.0
2,315
0.015983
""" Github Authentication """ import httplib2 from django.conf import settings from django.core.mail import send_mail from oauth2client.client import OAuth2WebServerFlow from helios_auth import utils # some parameters to indicate that status updating is not possible STATUS_UPDATES = False # display tweaks LOGIN_MESSAGE = "Log in with GitHub" def get_flow(redirect_url=None): return OAuth2WebServerFlow( client_id=settings.GH_CLIENT_ID, client_secret=settings.GH_CLIENT_SECRET, scope='read:user user:email', auth_uri="https://github.com/login/oauth/authorize", token_uri="https://github.com/login/oauth/access_token", redirect_uri=redirect_url, ) def get_auth_url(request, redirect_url): flow = get_flow(redirect_url) request.session['gh_redirect_uri'] = redirect_url return flow.step1_get_authorize_url() def get_user_info_after_auth(request): redirect_uri = request.session['gh_redirect_uri'] del request.session['gh_redirect_uri'] flow = get_flow(redirect_uri) if 'code' not in request.GET: return None code = request.GET['code'] credentials = flow.step2_exchange(code) http = httplib2.Http(".cache") http = credentials.authorize(http) (_, content) = http.request("https://api.github.com/user", "GET") response = utils.from_json(content.decode('utf-8')) user_id = response['login'] user_name = response['name'] (_, content) = http.request("https://api.github.com/user/emails", "GET") response = utils.from_json(content.decode('utf-8')) user_email = None for email in response: if email['verified'] and email['primary']: user_email = email['email'] break if not user_email: raise Exception("email address with GitHub not verified") return { 'type': 'github', 'user_id': user_id,
'name': '%s (%s)' % (user_id, user_name), 'info': {'email': user_email}, 'token': {}, } def do_logout(user): return None def update_status(token, message): pass def send_message(user_id, name, user_info, subject, body): send_mail(
subject, body, settings.SERVER_EMAIL, ["%s <%s>" % (user_id, user_info['email'])], fail_silently=False, ) def check_constraint(eligibility, user_info): pass # # Election Creation # def can_create_election(user_id, user_info): return True
DIRACGrid/VMDIRAC
VMDIRAC/Resources/Cloud/KeystoneClient.py
Python
gpl-3.0
9,771
0.007983
""" KeystoneClient class encapsulates the work with the keystone service interface """ from __future__ import print_function from __future__ import division from __future__ import absolute_import import requests from DIRAC import S_OK, S_ERROR, gLogger from DIRAC.Core.Utilities.Time import fromString, dateTime __RCSID__ = '$Id$' class KeystoneClient(): """ """ def __init__(self, url, parameters): self.log = gLogger.getSubLogger("Keystone") self.url = url self.apiVersion = None if "v3" in url: self.apiVersion = 3 if "v2" in url: self.apiVersion = 2 if self.apiVersion is None: # Assume v2.0 self.apiVersion = 2 self.url = self.url.rstrip('/') + "/v2.0" self.parameters = parameters self.token = None self.expires = None self.project = self.parameters.get('Tenant', self.parameters.get('Project')) self.projectID = self.parameters.get('ProjectID') self.computeURL = None self.imageURL = None self.networkURL = None self.caPath = self.parameters.get('CAPath', True) self.valid = False result = self.initialize() if result['OK']: self.valid = True else: gLogger.error("Keystone initialization failed: %s" % result['Message']) def initialize(self): """ Initialize the Keystone object obtaining the corresponding token :return: S_OK/S_ERROR """ self.log.debug("Initializing for API version %d" % self.apiVersion) result = self.getToken() if not result['OK']: return result # If the tenant is not specified, try to get it and obtain the tenant specific token if not self.project: result = self.getTenants() if not result['OK']: return result if result['Value']: self.project, self.projectID = result['Value'][0] result = self.getToken(f
orce=True) if not result['OK']: return result return S_OK() def getToken(self, force=False): """Get the Keystone token :param force: flag to force getting the token if even there
is one in the cache :return: S_OK(token) or S_ERROR """ if self.token is not None and not force: if self.expires and (self.expires - dateTime()).seconds > 300: return S_OK(self.token) if self.apiVersion == 2: result = self.__getToken2() else: result = self.__getToken3() return result def __getToken2(self): """Get the Keystone token for the version v2 of the keystone service :return: S_OK(token) or S_ERROR """ user = self.parameters.get('User') password = self.parameters.get('Password') authArgs = {} if user and password: authDict = {'auth': {"passwordCredentials": {"username": user, "password": password} } } if self.project: authDict['auth']['tenantName'] = self.project elif self.parameters.get('Auth') == "voms": authDict = {'auth': {'voms': True}} if self.project: authDict['auth']['tenantName'] = self.project if self.parameters.get('Proxy'): authArgs['cert'] = self.parameters.get('Proxy') try: result = requests.post("%s/tokens" % self.url, headers={"Content-Type": "application/json"}, json=authDict, verify=self.caPath, **authArgs) except Exception as exc: return S_ERROR('Exception getting keystone token: %s' % str(exc)) output = result.json() if result.status_code in [400, 401]: message = "None" if 'error' in output: message = output['error'].get('message') return S_ERROR('Authorization error: %s' % message) self.token = str(output['access']['token']['id']) expires = fromString(str(output['access']['token']['expires']).replace('T', ' ').replace('Z', '')) issued = fromString(str(output['access']['token']['issued_at']).replace('T', ' ').replace('Z', '')) self.expires = dateTime() + (expires - issued) self.projectID = output['access']['token']['tenant']['id'] for endpoint in output['access']['serviceCatalog']: if endpoint['type'] == 'compute': self.computeURL = str(endpoint['endpoints'][0]['publicURL']) elif endpoint['type'] == 'image': self.imageURL = str(endpoint['endpoints'][0]['publicURL']) elif endpoint['type'] == 'network': self.networkURL = str(endpoint['endpoints'][0]['publicURL']) return S_OK(self.token) def __getToken3(self): """Get the Keystone token for the version v3 of the keystone service :return: S_OK(token) or S_ERROR """ domain = self.parameters.get('Domain', "Default") user = self.parameters.get('User') password = self.parameters.get('Password') appcred_file = self.parameters.get('Appcred') authDict = {} authArgs = {} if user and password: authDict = {'auth': {"identity": {"methods": ["password"], "password": {"user": {"name": user, "domain": {"name": domain}, "password": password } } } } } elif self.parameters.get('Auth') == "voms": authDict = {"auth": {"identity": {"methods": ["mapped"], "mapped": {'voms': True, 'identity_provider': 'egi.eu', "protocol": 'mapped'}}}} if self.parameters.get('Proxy'): authArgs['cert'] = self.parameters.get('Proxy') elif appcred_file: # The application credentials are stored in a file of the format: # id secret ac_fd = open(appcred_file, 'r') auth_info = ac_fd.read() auth_info = auth_info.strip() ac_id, ac_secret = auth_info.split(" ", 1) ac_fd.close() authDict = {'auth': {"identity": {"methods": ["application_credential"], "application_credential": {"id": ac_id, "secret": ac_secret}}}} else: return S_ERROR("No valid credentials provided") # appcred includes the project scope binding in the credential itself if self.project and not appcred_file: authDict['auth']['scope'] = {"project": {"domain": {"name": domain}, "name": self.project } } gLogger.debug('Request token with auth arguments: %s and body %s' % (str(authArgs), str(authDict))) url 
= "%s/auth/tokens" % self.url try: result = requests.post(url, headers={"Content-Type": "application/json", "Accept": "application/json", }, json=authDict, verify=self.caPath, **authArgs) except Exception as exc: return S_ERROR('Exception getting keystone token: %s' % str(exc)) if result.status_code not in [200, 201, 202, 203, 204]: return S_ERROR('Failed to get keystone token: %s' % result.text) try: self.token = result.headers['X-Subject-Token'] except Exception as exc: return S_ERROR('Failed to get keystone token: %s' % str(exc)) output = result.json() expires = fromString(str(output['token']['expires_at']).replace('T', ' ').replace('Z', '')) issued = fromString(str(output['token']['issued_at']).replace('T', ' ').replace('Z', '')) self.expires = dateTime() + (expires - issued) if 'project' in output['token']: if output['token']['project']['name'] == self.projec
bwalks/pymemcache
pymemcache/test/test_client_hash.py
Python
apache-2.0
7,403
0
from pymemcache.client.hash import HashClient from pymemcache.client.base import Client, PooledClient from pymemcache.exceptions import MemcacheError, MemcacheUnknownError from pymemcache import pool from .test_client import ClientTestMixin, MockSocket import unittest import pytest import mock import socket class TestHashClient(ClientTestMixin, unittest.TestCase): def make_client_pool(self, hostname, mock_socket_values, serializer=None, **kwargs): mock_client = Client(hostname, serializer=serializer, **kwargs) mock_client.sock = MockSocket(mock_socket_values) client = PooledClient(hostname, serializer=serializer) client.client_pool = pool.ObjectPool(lambda: mock_client) return mock_client def make_client(self, *mock_socket_values, **kwargs): current_port = 11012 client = HashClient([], **kwargs) ip = '127.0.0.1' for vals in mock_socket_values: s = '%s:%s' % (ip, current_port) c = self.make_client_pool( (ip, current_port), vals, **kwargs ) client.clients[s] = c client.hasher.add_node(s) current_port += 1 return client def test_setup_client_without_pooling(self): with mock.patch('pymemcache.client.hash.Client') as internal_client: client = HashClient([], timeout=999, key_prefix='foo_bar_baz') client.add_server('127.0.0.1', '11211') assert internal_client.call_args[0][0] == ('127.0.0.1', '11211') kwargs = internal_client.call_args[1] assert kwargs['timeout'] == 999 assert kwargs['key_prefix'] == 'foo_bar_baz' def test_get_many_all_found(self): client = self.make_client(*[ [b'STORED\r\n', b'VALUE key3 0 6\r\nvalue2\r\nEND\r\n', ], [b'STORED\r\n', b'VALUE key1 0 6\r\nvalue1\r\nEND\r\n', ], ]) def get_clients(key): if key == b'key3': return client.clients['127.0.0.1:11012'] else: return client.clients['127.0.0.1:11013'] client._get_client = get_clients result = client.set(b'key1', b'value1', noreply=False) result = client.set(b'key3', b'value2', noreply=False) result = client.get_many([b'key1', b'key3']) assert result == {b'key1': b'value1', b'key3': b'value2'} def test_get_many_some_found(self): client = self.make_client(*[ [b'END\r\n', ], [b'STORED\r\n', b'VALUE key1 0 6\r\nvalue1\r\nEND\r\n', ], ]) def get_clients(key): if key == b'key3': return client.clients['127.0.0.1:11012'] else: return client.clients['127.0.0.1:11013'] client._get_client = get_clients result = client.set(b'key1', b'value1', noreply=False) result = client.get_many([b'key1', b'key3']) assert result == {b'key1': b'value1'} def test_get_many_bad_server_data(self): client = self.make_client(*[ [b'STORED\r\n', b'VAXLUE key3 0 6\r\nvalue2\r\nEND\r\n', ], [b'STORED\r\n', b'VAXLUE key1 0 6\r\nvalue1\r\nEND\r\n', ], ]) def get_clients(key): if key == b'key3': return client.clients['127.0.0.1:11012'] else: return client.clients['127.0.0.1:11013'] client._get_client = get_clients with pytest.raises(MemcacheUnknownError): client.set(b'key1', b'value1', noreply=False) client.set(b'key3', b'value2', noreply=False) client.get_many([b'key1', b'key3']) def test_get_many_bad_server_data_ignore(self): client = self.make_client(*[ [b'STORED\r\n', b'VAXLUE key3 0 6\r\nvalue2\r\nEND\r\n', ], [b'STORED\r\n', b'VAXLUE key1 0 6\r\nvalue1\r\nEND\r\n', ], ], ignore_exc=True) def get_clients(key): if key == b'key3': return client.clients['127.0.0.1:11012'] else: return client.clients['127.0.0.1:11013'] client._get_client = get_clients client.set(b'key1', b'value1', noreply=False) client.set(b'key3', b'value2', noreply=False) result = client.get_many([b
'key1', b'key3']) assert result == {} def test_gets_many(self): client = self.make_client(*[ [b'STORED\r\n', b'VALUE key3 0 6 1\r\nvalue2\r\nEND\r\n', ], [b'STORED\r\n', b'VALUE key1 0 6 1\r\nvalue1\r\nEND\r\n', ], ]) def get_clients(key): if key == b'key3': return client.clients['127.0.0.1:11012'] else: return client.clients['127.0.0.1:11013'] client._get_client = get_clients
assert client.set(b'key1', b'value1', noreply=False) is True assert client.set(b'key3', b'value2', noreply=False) is True result = client.gets_many([b'key1', b'key3']) assert (result == {b'key1': (b'value1', b'1'), b'key3': (b'value2', b'1')}) def test_no_servers_left(self): from pymemcache.client.hash import HashClient client = HashClient( [], use_pooling=True, ignore_exc=True, timeout=1, connect_timeout=1 ) hashed_client = client._get_client('foo') assert hashed_client is None def test_no_servers_left_raise_exception(self): from pymemcache.client.hash import HashClient client = HashClient( [], use_pooling=True, ignore_exc=False, timeout=1, connect_timeout=1 ) with pytest.raises(MemcacheError) as e: client._get_client('foo') assert str(e.value) == 'All servers seem to be down right now' def test_unavailable_servers_zero_retry_raise_exception(self): from pymemcache.client.hash import HashClient client = HashClient( [('example.com', 11211)], use_pooling=True, ignore_exc=False, retry_attempts=0, timeout=1, connect_timeout=1 ) with pytest.raises(socket.error): client.get('foo') def test_no_servers_left_with_commands_return_default_value(self): from pymemcache.client.hash import HashClient client = HashClient( [], use_pooling=True, ignore_exc=True, timeout=1, connect_timeout=1 ) result = client.get('foo') assert result is None result = client.set('foo', 'bar') assert result is False def test_no_servers_left_with_set_many(self): from pymemcache.client.hash import HashClient client = HashClient( [], use_pooling=True, ignore_exc=True, timeout=1, connect_timeout=1 ) result = client.set_many({'foo': 'bar'}) assert result is False def test_no_servers_left_with_get_many(self): from pymemcache.client.hash import HashClient client = HashClient( [], use_pooling=True, ignore_exc=True, timeout=1, connect_timeout=1 ) result = client.get_many(['foo', 'bar']) assert result == {'foo': False, 'bar': False} # TODO: Test failover logic
bitmazk/django-people
people/south_migrations/0005_auto__add_field_persontranslation_roman_first_name__add_field_persontr.py
Python
mit
15,072
0.007564
# flake8: noqa # -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'PersonTranslation.roman_first_name' db.add_column('people_persontranslation', 'roman_first_name', self.gf('django.db.models.fields.CharField')(default=' ', max_length=256), keep_default=False) # Adding field 'PersonTranslation.roman_last_name' db.add_column('people_persontranslation', 'roman_last_name', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True), keep_default=False) # Adding field 'PersonTranslation.non_roman_first_name' db.add_column('people_persontranslation', 'non_roman_first_name', self.gf('django.db.models.fields.CharField')(default=' ', max_length=256), keep_default=False) # Adding field 'PersonTranslation.non_roman_last_name' db.add_column('people_persontranslation', 'non_roman_last_name', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'PersonTranslation.roman_first_name' db.delete_column('people_persontranslation', 'roman_first_name') # Deleting field 'PersonTranslation.roman_last_name' db.delete_column('people_persontranslation', 'roman_last_name') # Deleting field 'PersonTranslation.non_roman_first_name' db.delete_column('people_persontranslation', 'non_roman_first_name') # Deleting field 'PersonTranslation.non_roman_last_name' db.delete_column('people_persontranslation', 'non_roman_last_name') models
= { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'cms.cmsplugin': { 'Meta': {'object_name': 'CMSPlugin'}, 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 9, 0, 0)'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}), 'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}), 'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}), 'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}), 'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}), 'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}) }, 'cms.placeholder': { 'Meta': {'object_name': 'Placeholder'}, 'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'filer.file': { 'Meta': {'object_name': 'File'}, '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}), 'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 't
Azulinho/sunflower-file-manager-with-tmsu-tagging-support
application/plugin_base/mount_manager_extension.py
Python
gpl-3.0
1,433
0.032798
import gtk


class ExtensionFeatures:
    SYSTEM_WIDE = 0


class MountManagerExtension:
    """Base class for mount manager extensions.

    Mount manager has only one instance and is created on program startup.
    Methods defined in this class are called automatically by the mount manager
    so you need to implement them.
    """

    # features extension supports
    features = ()

    def __init__(self, parent, window):
        self._parent = parent
        self._window = window
        self._application = self._parent._application

        # create user interface
        self._container = gtk.VBox(False, 5)
        self._controls = gtk.HBox(False, 5)

        separator = gtk.HSeparator()

        # pack interface
        self._container.pack_end(separator, False, False, 0)
        self._container.pack_end(self._controls, False, False, 0)

    def can_handle(self, uri):
        """Returns boolean denoting if specified URI can be handled by this extension"""
        return False

    def get_container(self):
        """Return container widget"""
        return self._container

    def get_information(self):
        """Returns information about extension"""
        icon = None
        name = None

        return icon, name

    def unmount(self, uri):
        """Method called by the mount manager for unmounting the selected URI"""
        pass

    def focus_object(self):
        """Method called by the mount manager for focusing main object"""
        pass

    @classmethod
    def get_features(cls):
        """Returns set of features supported by extension"""
        return cls.features
DeltaEpsilon-HackFMI2/FMICalendar-REST
venv/lib/python2.7/site-packages/rest_framework/mixins.py
Python
mit
6,556
0.000915
""" Basic building blocks for generic class based views. We don't bind behaviour to http method handlers yet, which allows mixin classes to be composed in interesting ways. """ from __future__ import unicode_literals from django.http import Http404 from rest_framework import status from rest_framework.response import Response from rest_framework.request import clone_request import warnings def _get_validation_exclusions(obj, pk=None, slug_field=None, lookup_field=None): """ Given a model instance, and an optional pk and slug field, return the full list of all other field names on that model. For use when performing full_clean on a model instance, so we only clean the required fields. """ include = [] if pk: # Pending deprecation pk_field = obj._meta.pk while pk_field.rel: pk_field = pk_field.rel.to._meta.pk include.append(pk_field.name) if slug_field: # Pending deprecation include.append(slug_field) if lookup_field and lookup_field != 'pk': include.append(lookup_field) return [field.name for field in obj._meta.fields if field.name not in include] class CreateModelMixin(object): """ Create a model instance. """ def create(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.DATA, files=request.FILES) if serializer.is_valid(): self.pre_save(serializer.object) self.object = serializer.save(force_insert=True) self.post_save(self.object, created=True) headers = self.get_success_headers(serializer.data) return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) def get_success_headers(self, data): try: return {'Location': data['url']} except (TypeError, KeyError): return {} class ListModelMixin(object): """ List a queryset. """ empty_error = "Empty list and '%(class_name)s.allow_empty' is False." def list(self, request, *args, **kwargs): self.object_list = self.filter_queryset(self.get_queryset()) # Default is to allow empty querysets. This can be altered by setting # `.allow_empty = False`, to raise 404 errors on empty querysets. if not self.allow_empty and not self.object_list: warnings.warn( 'The `allow_empty` parameter is due to be deprecated. ' 'To use `allow_empty=False` style behavior, You should override ' '`get_queryset()` and explicitly raise a 404 on empty querysets.', PendingDeprecationWarning ) class_name = self.__class__.__name__ error_msg = self.empty_error % {'class_name': class_name} raise Http404(error_msg) # Switch between paginated or standard style responses page = self.paginate_queryset(self.object_list) if page is not None: serializer = self.get_pagination_serializer(page) else: serializer = self.get_serializer(self.object_list, many=True) return Response(serializer.data) class RetrieveModelMixin(object): """ Retrieve a model instance. """ def retrieve(self, request, *args, **kwargs): self.object = self.get_object() serializer = self.get_serializer(self.object) return Response(serializer.data) class UpdateModelMixin(object): """ Update a model instance. 
""" def update(self, request, *args, **kwargs): partial = kwargs.pop('partial', False) self.object = self.get_object_or_none() if self.object is None: created = True save_kwargs = {'force_insert': True} success_status_code = status.HTTP_201_CREATED else: created = False save_kwargs = {'force_update': True} success_status_code = status.HTTP_200_OK serializer = self.get_serializer(self.object, data=request.DATA, files=request.FILES, partial=partial) if serializer.is_valid(): self.pre_save(serializer.object) self.object = serializer.save(**save_kwargs) self.post_save(self.object, created=created) return Response(serializer.data, s
tatus=success_status_code) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def partial_update(self, request, *args, **kwargs): kwargs['partial'] = True return self.update(request, *args, **kwargs) def get_object_or_none(self): try: return self.get_object() except Http404: if self.request.method == 'PUT': # For PUT-as-create operation, we need to ensure that we have # relevant permissions, as if this was a POST request. This # will either raise a PermissionDenied exception, or simply # return None. self.check_permissions(clone_request(self.request, 'POST')) else: # PATCH requests where the object does not exist should still # return a 404 response. raise def pre_save(self, obj): """ Set any attributes on the object that are implicit in the request. """ # pk and/or slug attributes are implicit in the URL. lookup = self.kwargs.get(self.lookup_field, None) pk = self.kwargs.get(self.pk_url_kwarg, None) slug = self.kwargs.get(self.slug_url_kwarg, None) slug_field = slug and self.slug_field or None if lookup: setattr(obj, self.lookup_field, lookup) if pk: setattr(obj, 'pk', pk) if slug: setattr(obj, slug_field, slug) # Ensure we clean the attributes so that we don't eg return integer # pk using a string representation, as provided by the url conf kwarg. if hasattr(obj, 'full_clean'): exclude = _get_validation_exclusions(obj, pk, slug_field, self.lookup_field) obj.full_clean(exclude) class DestroyModelMixin(object): """ Destroy a model instance. """ def destroy(self, request, *args, **kwargs): obj = self.get_object() obj.delete() return Response(status=status.HTTP_204_NO_CONTENT)
dakot/vilay-detect
vilay/detectors/FaceDetector.py
Python
gpl-3.0
1,782
0.014029
import cv2
import numpy as np
import os

from vilay.core.Descriptor import MediaTime, Shape
from vilay.detectors.IDetector import IDetector
from vilay.core.DescriptionScheme import DescriptionScheme


class FaceDetector(IDetector):
    def getName(self):
        return "Face Detector"

    def initialize(self):
        # define haar-detector file
        print os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml'
        self.cascade = cv2.CascadeClassifier(os.getcwd() + '/vilay/detectors/FaceDetector/haarcascade_frontalface_default.xml')

    def detect(self, mediaTimes, tgtDS, film, rootDS, mainGUI):
        for mediaTime in mediaTimes:
            for frameIdx in range(mediaTime.startTime, mediaTime.startTime + mediaTime.duration):
                actFrame = film.getFrame(frameIdx)

                # preprocessing
                actFrame = cv2.cvtColor(actFrame, cv2.cv.CV_BGR2GRAY)
                actFrame = cv2.equalizeHist(actFrame)

                # detect faces
                faces = self.cascade.detectMultiScale(actFrame, 1.2, 3, 0, (5,5))

                # create ds and add time and shape descriptor
                for faceIdx in range(len(faces)):
                    [x,y,width,height] = faces[faceIdx,:]
                    ds = DescriptionScheme('RTI', 'Face Detector')
                    region = Shape('Face Detector','rect', np.array([[x, y], [x + width, y + height]]))
                    mediaTime = MediaTime('Face Detector', frameIdx, 1)
                    tgtDS.addDescriptionScheme(ds)
                    ds.addDescriptor(region)
                    ds.addDescriptor(mediaTime)
SickGear/SickGear
tests/common_tests.py
Python
gpl-3.0
235,694
0.007259
import unittest import warnings warnings.filterwarnings('ignore', module=r'.*fuz.*', message='.*Sequence.*') import sys import os.path sys.path.insert(1, os.path.abspath('..')) from sickbeard import common from sickbeard.common import Quality, WantedQualities from sickbeard.name_parser.parser import NameParser from six import iteritems quality_tests = { common.Quality.SDTV: [ 'Test.Show.S01E02.PDTV.XViD-GROUP', 'Test.Show.S01E02.PDTV.x264-GROUP', 'Test.Show.S01E02.HDTV.XViD-GROUP', 'Test.Show.S01E02.HDTV.x264-GROUP', 'Test.Show.S01E02.DSR.XViD-GROUP', 'Test.Show.S01E02.DSR.x264-GROUP', 'Test.Show.S01E02.TVRip.XViD-GROUP', 'Test.Show.S01E02.TVRip.x264-GROUP', 'Test.Show.S01E02.WEBRip.XViD-GROUP', 'Test.Show.S01E02.WEBRip.x264-GROUP', 'Test.Show.S01E02.Web-Rip.x264.GROUP', 'Test.Show.S01E02.WEB-DL.x264-GROUP', 'Test.Show.S01E02.WEB-DL.AAC2.0.H.264-GROUP', 'Test.Show.S01E02 WEB-DL H 264-GROUP', 'Test.Show.S01E02_WEB-DL_H_264-GROUP', 'Test.Show.S01E02.WEB-DL.AAC2.0.H264-GROUP', 'Test.Show.S01E02.HDTV.AAC.2.0.x264-GROUP', 'Test.Show.S01E02.HDTV.DD5.1.XViD-GROUP', 'Test.Show.S01E02.HDTV.DD7.1.h.264-GROUP', 'Test.Show.S01E02.WEB-DL.DD5.1.h.264-GROUP', 'Test.Show.S01E02.WEB.h264-GROUP', 'Test.Show.S01E02.WEB.x264-GROUP', 'Test.Show.S01E02.WEB.h265-GROUP', 'Test.Show.S01E02.WEB.x265-GROUP', 'Test.Show.S01E02.WEB.VP9-GROUP', 'Test.Show.S01E02.WEB.AV1-GROUP', 'Test.Show.S01E02.WEBRip.h264-GROUP', 'Test.Show.S01E02.WEBRip.x264-GROUP'], common.Quality.SDDVD: [ 'Test.Show.S01E02.DVDRiP.XViD-GROUP', 'Test.Show.S01E02.DVDRiP.DiVX-GROUP', 'Test.Show.S01E02.DVDRiP.x264-GROUP', 'Test.Show.S01E02.DVDRip.WS.XViD-GROUP', 'Test.Show.S01E02.DVDRip.WS.DiVX-GROUP', 'Test.Show.S01E02.DVDRip.WS.x264-GROUP', 'Test.Show-S01E02-Test.Dvd Rip', 'Test.Show.S01E02.BDRIP.XViD-GROUP', 'Test.Show.S01E02.BDRIP.DiVX-GROUP', 'Test.Show.S01E02.BDRIP.x264-GROUP', 'Test.Show.S01E02.BDRIP.VP9-GROUP', 'Test.Show.S01E02.BDRIP.WS.XViD-GROUP', 'Test.Show.S01E02.BDRIP.WS.DiVX-GROUP', 'Test.Show.S01E02.BDRIP.WS.x264-GROUP'], common.Quality.HDTV: [ 'Test.Show.S01E02.720p.HDTV.x264-GROUP', 'Test.Show.S01E02.720p.HDTV.VP9-GROUP', 'Test.Show.S01E02.HR.WS.PDTV.x264-GROUP', 'Test.Show.S01E02.720p.AHDTV.x264-GROUP'], common.Quality.RAWHDTV: [ 'Test.Show.S01E02.720p.HDTV.DD5.1.MPEG2-GROUP', 'Test.Show.S01E02.1080i.HDTV.DD2.0.MPEG2-GROUP', 'Test.Show.S01E02.1080i.HDTV.H.264.DD2.0-GROUP', 'Test Show - S01E02 - 1080i HDTV MPA1.0 H.264 - GROUP', 'Test.Show.S01E02.1080i.HDTV.DD.5.1.h264-GROUP'], common.Quality.FULLHDTV: [ 'Test.Show.S01E02.1080p.HDTV.x264-GROUP', 'Test.Show.S01E02.1080p.HDTV.vp9-GROUP', 'Test.Show.S01E02.1080p.AHDTV.x264-GROUP'], common.Quality.HDWEBDL: [ 'Test.Show.S01E02.720p.WEB-DL-GROUP', 'Test.Show.S01E02.720p.WEBRip-GROUP', 'Test.Show.S01E02.WEBRip.720p.H.264.AAC.2.0-GROUP', 'Test.Show.S01E02.720p.WEB-DL.AAC2.0.H.264-GROUP', 'Test Show S01E02 720p WEB-DL AAC2 0 H 264-GROUP', 'Test_Show.S01E02_720p_WEB-DL_AAC2.0_H264-GROUP', 'Test.Show.S01E02.720p.WEB-DL.AAC2.0.H264-GROUP', 'Test.Show.S01E02.720p.iTunes.Rip.H264.AAC-GROUP', 'Test.Show.s01e02.WEBDL.720p.GROUP', 'Test Show s01e02 WEBDL 720p GROUP', 'Test Show S01E02 720p WEB-DL AVC-GROUP', 'Test.Show.S01E02.WEB-RIP.720p.GROUP', 'Test.Show.S01E02.720p.WEB.h264-GROUP', 'Test.Show.S01E02.720p.WEB.x264-GROUP', 'Test.Show.S01E02.720p.WEB.h265-GROUP', 'Test.Show.S01E02.720p.WEB.x265-GROUP', 'Test.Show.S01E02.720p.WEB.vp9-GROUP', 'Test.Show.S01E02.720p.WEBRip.h264-GROUP', 'Test.Show.S01E02.720p.WEBRip.x264-GROUP'], common.Quality.FULLHDWEBDL: [ 'Test.Show.S01E02.1080p.WEB-DL-GROUP', 
'Test.Show.S01E02.1080p.WEBRip-GROUP', 'Test.Show.S01E02.WEBRip.1080p.H.264.AAC.2.0-GROUP', 'Test.Show.S01E02.WEBRip.1080p.H264.AAC.2.0-GROUP', 'Test.Show.S01E02.1080p.iTunes.H.264.AAC-GROUP', 'Test Show S01E02 1080p iTunes H 264 AAC-GROUP', 'Test_Show_S01E02_1080p_iTunes_H_264_AAC-GROUP', 'Test.Show.s01e02.WEBDL.1080p.GROUP', 'Test Show s01e02 WEBDL 1080p GROUP', 'Test Show S01E02 1080p WEB-DL AVC-GROUP', 'Test.Show.S01E02.WEB-RIP.1080p.GROUP', 'Test.Show.S01E02.1080p.WEB.h264-GROUP', 'Test.Show.S01E02.1080p.WEB.x264-GROUP', 'Test.Show.S01E02.1080p.WEB.h265-GROUP', 'Test.Show.S01E02.1080p.WEB.x265-GROUP', 'Test.Show.S01E02.1080p.WEB.VP9-GROUP', 'Test.Show.S01E02.1080p.WEBRip.h264-GROUP', 'Test.Show.S01E02.1080p.WEBRip.x264-GROUP'], common.Quality.HDBLURAY: [ 'Test.Show.S01E02.720p.BluRay.x264-GROUP', 'Test.Show.S01E02.720p.BluRay.vp9-GROUP', 'Test.Show.S01E02.720p.HDDVD.x264-GROUP', 'Test.Show.S01E02.720p.Blu-ray.x264-GROUP'], common.Quality.FULLHDBLURAY: [ 'Test.Show.S01E02.1080p.BluRay.x264-GROUP', 'Test.Show.S01E02.1080p.HDDVD.x264-GROUP', 'Test.Show.S01E02.1080p.Blu-ray.x264-GROUP', 'Test.Show.S01E02.1080p.Blu-ray.vp9-GROUP', 'Test Show S02 1080p Remux AVC FLAC 5.1'], common.Quality.UHD4KWEB: [ 'Test.Show.S01E02.2160p.WEBRip.h264-GROUP', 'Test.Show.S01E02.2160p.WEBRip.x264-GROUP', 'Test.Show.S01E02.2160p.WEBRip.x265-GROUP', 'Test.Show.S01E02.2160p.WEBRip.vp9-GROUP'], common.Quality.UNKNOWN: ['Test.Show.S01E02-SiCKGEAR'] } class QualityTests(unittest.TestCase): def check_quality_names(self, quality, cases): for fn in cases: second = common.Quality.nameQuality(fn) self.assertEqual(quality, second, msg='fail [%s] != [%s] for case: %s' % (Quality.qualityStrings[quality], Quality.qualityStrings[second], fn)) def check_proper_level(self, cases, is_anime=False): np = NameParser(False, indexer_lookup=False, testing=True) for case, level in cases: p = np.parse(case) second = common.Quality.get_proper_level(p.extra_info_no_name(), p.version, is_anime) self.assertEqual(level, second, 'fail %s != %s for case: %s' % (level, second, case)) def check_wantedquality_list(self, cases): for show_quality, result in cases: sq =
common.Quality.combine
Qualities(*show_quality) wd = common.WantedQualities() _ = wd.get_wantedlist(sq, False, common.Quality.NONE, common.UNAIRED, manual=True) for w, v in iteritems(wd): if w == sq: for u, o in sorted(iteritems(v)): self.assertEqual(o, result.get(u)) def check_wantedquality_get_wantedlist(self, cases): for show_quality, result in cases: sq = common.Quality.combineQualities(*show_quality) wd = common.WantedQualities() for case, wlist in result: ka = {'qualities': sq} ka.update(case) res = wd.get_wantedlist(**ka) self.assertEqual(res, wlist) def check_sceneQuality(self, cases): msg = 'Test case: "%s", actual: [%s] != expected: [%s]' for show_name, result in cases: sq = common.Quality.sceneQuality(show_name[0], show_name[1]) self.assertE
zentralopensource/zentral
zentral/core/events/utils.py
Python
apache-2.0
851
0
def decode_args(s, delimiter="|", escapechar="\\"): args = [] escaping = False current_arg = "" for c in s: if escaping: current_arg += c escaping = False elif c == escapechar: escaping = True elif c == delimiter: args.append(current_arg) current_arg = "" else: current_arg += c args.append(current_arg) return args def encode_args(args, delimiter="|", escapechar="
\\"): encoded_args = "" for idx, arg in enumerate(args): if idx > 0: encoded_args += delimiter if not isinstance(arg, str): arg = str(arg)
for c in arg: if c == delimiter or c == escapechar: encoded_args += escapechar encoded_args += c return encoded_args
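A round-trip sketch for the escaping scheme above. The import path mirrors this record's file location (zentral/core/events/utils.py) and is an assumption; note that non-string arguments come back as strings because encode_args() passes them through str():

from zentral.core.events.utils import decode_args, encode_args

args = ["machine|1", "tag\\A", 42]       # "|" and "\" are the two escaped characters
encoded = encode_args(args)              # single string, safe to store and split again
assert decode_args(encoded) == ["machine|1", "tag\\A", "42"]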
i-tek/inet_ncs
simulations/analysis_tools/python/omnet_vector.py
Python
gpl-3.0
3,886
0.008492
# # Python module to parse OMNeT++ vector files # # Currently only suitable for small vector files since # everything is loaded into RAM # # Authors: Florian Kauer <[email protected]> # # Copyright (c) 2015, Institute of Telematics, Hamburg University of Technology # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the Institute nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. import re import scipy.interpolate import numpy as np vectors = [] class OmnetVector: def __init__(self,file_input): self.vectors = {} self.dataTime = {} self.dataValues = {} self.maxtime = 0 self.attrs = {} for line in file_input: m = re.search("([0-9]+)\t([0-9]+)\t([0-9.e\-+]+)\t([0-9.e\-+na]+)",line) #m = re.search("([0-9]+)",line) if m: vector = int(m.group(1)) if not vector in self.dataTime: se
lf.dataTime[vector] = [] self.dataValues[vector] = [] time = float(m.group(3)) self.dataTime[vector].append(time) self.maxtime = max(self.maxtime,tim
e) self.dataValues[vector].append(float(m.group(4))) else: # vector 7 Net802154.host[0].ipApp[0] referenceChangeStat:vector ETV m = re.search("vector *([0-9]*) *([^ ]*) *(.*):vector",line) if m: number = int(m.group(1)) module = m.group(2) name = m.group(3) if not name in self.vectors: self.vectors[name] = {} self.vectors[name][module] = number else: m = re.search("attr ([^ ]*) ([^ ]*)\n",line) if m: self.attrs[m.group(1)] = m.group(2) def get_vector(self,name,module,resample=None): num = self.vectors[name][module] (time,values) = (self.dataTime[num],self.dataValues[num]) if resample != None: newpoints = np.arange(0,self.maxtime,resample) lastvalue = values[-1] return (newpoints, scipy.interpolate.interp1d(time,values,'zero',assume_sorted=True, bounds_error=False,fill_value=(0,lastvalue)).__call__(newpoints)) else: return (time,values) def get_attr(self,name): return self.attrs[name]
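A usage sketch for the parser above. The result file name is made up; the module path and vector name are taken from the example comment in the source ("Net802154.host[0].ipApp[0] referenceChangeStat"), and "configname" stands for whatever attribute lines the run actually recorded:

from omnet_vector import OmnetVector

with open("results/run0.vec") as f:          # hypothetical OMNeT++ vector file
    vec = OmnetVector(f)

# raw samples (time, value) for one recorded vector
time, values = vec.get_vector("referenceChangeStat",
                              "Net802154.host[0].ipApp[0]")

# same vector resampled onto a regular 0.1 s grid (zero-order hold)
grid, resampled = vec.get_vector("referenceChangeStat",
                                 "Net802154.host[0].ipApp[0]",
                                 resample=0.1)

print(vec.get_attr("configname"))            # any attr present in the file header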
quanticle/GorillaBot
gorillabot/plugins/settings.py
Python
mit
5,087
0.009043
# Copyright (c) 2013-2016 Molly White # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software # and associated documentation files (the "Software"), to deal in the Software without # restriction, including without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or # substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING # BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from plugins.util import admin, command, humanize_list @admin("set") def setcommand(m): """Adjust or view the settings on a command.""" #- !set setting value [#channel] #- #- ```irc #- < GorillaWarfare> !set link auto #- < GorillaBot> "link" set to "auto" in ##GorillaBot. #- ``` #- #- Change settings for a command. Allowed and default settings for a command are viewable in #- the command's documentation. Settings can only be edited for channels the bot is joined #- to, or has been joined to in the past. if len(m.line) > 4: m.bot.private_message(m.location, 'Too many arguments. Use "!set setting value [#channel]".') return # Check that this channel is in our config if len(m.line) <= 3: chan = m.location elif len(m.line) == 4: if m.line[3][0] != "#": m.bot.private_message(m.location, 'Poorly-formatted command. ' 'Use "!set setting value [#channel]".') return chan = m.line[3] if not chan in m.bot.configuration["chans"]: m.bot.private_message(m.location, "Cannot access settings for {0}. Do I know about the channel?".format( chan)) return # Respond to command settings = m.bot.configuration["chans"][chan]["settings"] if len(m.line) == 1: # Query all settings for channel if not settings: m.bot.private_message(m.location, "Nothing has been set for {0}.".format(chan)) else: m.bot.private_message(m.location, (" ".join( map(lambda s: ('"{0}" is set to "{1}".'.format(s[0], s[1])), iter(settings.items()))))) elif len(m.line) == 2: # Query value of a setting in a channel if not settings or m.line[1] not in settings: m.bot.private_message(m.location, '"{0}" has not been set for {1}.'.format(m.line[1], chan)) else: m.bot.private_message(m.location, '"{0}" set to "{1}" in {2}.'.format(m.line[1], settings[m.line[1]], chan)) else: setting = m.line[1].lower() value = m.line[2].lower() m.bot.configuration["chans"][chan]["settings"][setting] = value m.bot.update_con
figuration(m.bot.configuration) m.bo
t.logger.info( '"{0}" set to "{1}" in {2} by {3}.'.format(setting, value, chan, m.sender)) m.bot.private_message(m.location, '"{0}" set to "{1}" in {2}.'.format(setting, value, chan)) @admin() def unset(m): """Unset a given setting.""" #- !unset setting [#channel] #- #- ```irc #- < GorillaWarfare> !unset link #- < GorillaBot> "link" unset for ##GorillaBot. #- ``` #- #- Removes the setting for a channel. This will revert to the default value. Settings can only #- be edited for channels the bot is joined to, or has been joined to in the past. if len(m.line) != 2 and not (len(m.line) == 3 and m.line[2][0] == "#"): m.bot.private_message(m.location, 'Poorly-formatted command. Use "!unset setting [#channel]".') return chan = m.location if len(m.line) == 2 else m.line[2] if chan not in m.bot.configuration["chans"]: m.bot.private_message(m.location, "Cannot unset setting for {0}. Do I know about the channel?".format( chan)) return try: del m.bot.configuration["chans"][chan]["settings"][m.line[1]] m.bot.update_configuration(m.bot.configuration) except KeyError: # Doesn't matter if the value wasn't set to begin with pass m.bot.private_message(m.location, '"{0}" unset for {1}.'.format(m.line[1], chan))
ilastik/ilastik-0.5
ilastik/modules/unsupervised_decomposition/core/testModule.py
Python
bsd-2-clause
11,269
0.011625
from PyQt4 import QtCore import sys from ilastik.core.projectClass import Project from ilastik.core.testThread import TestThread from ilastik.modules.unsupervised_decomposition.core.unsupervisedMgr import UnsupervisedDecompositionModuleMgr from ilastik.modules.unsupervised_decomposition.core.algorithms.unsupervisedDecompositionPLSA import UnsupervisedDecompositionPLSA from ilastik.modules.unsupervised_decomposition.core.algorithms.unsupervisedDecompositionPCA import UnsupervisedDecompositionPCA import unittest from ilastik.core import jobMachine from ilastik import __path__ as ilastikpath from ilastik.core.testThread import setUp, tearDown # make sure that we have a recent numpy installation, the SVD used for PCA decomposition seems to have changed, resulting in a test failure! import numpy numpyversion = numpy.__version__.split('.') numpyTooOldMessage = str("Your current numpy version is too old. Is: " + numpy.__version__ + " Should Be: 1.4.0 or newer. Skipping some tests.") numpyRecentEnough = False if((int(numpyversion[0]) >= 1) & (int(numpyversion[1]) >= 4) & (int(numpyversion[2]) >= 0)): numpyRecentEnough = True #******************************************************************************* # U n s u p e r v i s e d D e c o m p o s i t i o n T e s t P r o j e c t * #******************************************************************************* class UnsupervisedDecompositionTestProject(object): # this class is used to set up a default project which is then used for testing functionality, # hopefully, this will reduced code redundancy def __init__(self, image_filename, unsupervisedMethod = None, numComponents = None): self.image_filename = image_filename self.tolerance = 0.01 # maximum derivation per pixel self.testdir = ilastikpath[0] + "/testdata/unsupervised_decomposition/" # create project self.project = Project('Project Name', 'Labeler', 'Description') self.dataMgr = self.project.dataMgr # create file list and load data path = str(self.testdir + self.image_filename) # the image is not really used since we load the threshold overlay from a file, however, we need it to set the correct dimensions fileList = [] fileList.append(path) self.project.addFile(fileList) # create automatic segmentation manager self.unsupervisedMgr = UnsupervisedDecompositionModuleMgr(self.dataMgr) # setup inputs self.inputOverlays = [] self.inputOverlays.append(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Raw Data"]) # use default decomposer
if unsupervisedMethod is None: self.unsupervisedMethod = self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod
else: self.unsupervisedMethod = self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod = unsupervisedMethod if numComponents is not None: self.unsupervisedMethod.setNumberOfComponents(numComponents) self.numIterations = numComponents else: self.numIterations = self.unsupervisedMethod.numComponents # overlay lists and filenames self.listOfResultOverlays = [] self.listOfFilenames = [] for i in range(self.numIterations): self.listOfResultOverlays.append(str("Unsupervised/" + self.unsupervisedMethod.shortname + " component %d" % (i+1))) filename = str(self.testdir + "gt_" + self.unsupervisedMethod.shortname + "_result_component_%d.h5" % (i+1)) print filename self.listOfFilenames.append(filename) #******************************************************************************* # T e s t W h o l e M o d u l e D e f a u l t D e c o m p o s e r * #******************************************************************************* class TestWholeModuleDefaultDecomposer(unittest.TestCase): # use default decomposer if not numpyRecentEnough: __test__ = False def setUp(self): self.app = QtCore.QCoreApplication(sys.argv) # we need a QCoreApplication to run, otherwise the thread just gets killed self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5") def test_WholeModule(self): t = QtCore.QTimer() t.setSingleShot(True) t.setInterval(0) self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction) t.start() self.app.exec_() def mainFunction(self): self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance) QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest) self.testThread.start(self.testProject.inputOverlays) self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) def finalizeTest(self): # results comparison self.assertEqual(self.testThread.passedTest, True) self.app.quit() #******************************************************************************* # T e s t W h o l e M o d u l e P C A D e c o m p o s e r * #******************************************************************************* class TestWholeModulePCADecomposer(unittest.TestCase): # use PCA decomposer with 3 components if not numpyRecentEnough: __test__ = False def setUp(self): #print "setUp" self.app = QtCore.QCoreApplication(sys.argv) # we need a QCoreApplication to run, otherwise the thread just gets killed self.numComponents = 3 self.testProject = UnsupervisedDecompositionTestProject("sims_aligned_s7_32.h5", UnsupervisedDecompositionPCA, self.numComponents) def test_WholeModule(self): t = QtCore.QTimer() t.setSingleShot(True) t.setInterval(0) self.app.connect(t, QtCore.SIGNAL('timeout()'), self.mainFunction) t.start() self.app.exec_() def mainFunction(self): self.testThread = TestThread(self.testProject.unsupervisedMgr, self.testProject.listOfResultOverlays, self.testProject.listOfFilenames, self.testProject.tolerance) QtCore.QObject.connect(self.testThread, QtCore.SIGNAL('done()'), self.finalizeTest) self.testThread.start(self.testProject.inputOverlays) self.numOverlaysBefore = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) def finalizeTest(self): '''for i in range(self.testProject.unsupervisedMethod.numComponents): print "*************************************" print self.testProject.listOfResultOverlays[i] obtained = 
self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr[self.testProject.listOfResultOverlays[i]] from ilastik.core import dataImpex dataImpex.DataImpex.exportOverlay(str("c:/gt_PCA_result_component_%d" % (i+1)), "h5", obtained)''' # results comparison self.assertEqual(self.testThread.passedTest, True) # other conditions # exactly self.numComponents computed overlays + self.numComponents ground truth overlays were added self.numOverlaysAfter = len(self.testProject.dataMgr[self.testProject.dataMgr._activeImageNumber].overlayMgr.keys()) self.assertEqual(self.numOverlaysAfter - self.numOverlaysBefore, self.numComponents*2) self.app.quit() #******************************************************************************* # T e s t W h o l e M o d u l e P L S A D e c o m p o s e r * #******************************************************************************* class TestWholeModulePLSADecomposer(unittest.TestCase): # pLSA with 5 components def setUp(self): #print "setUp"
letops/django-sendgrid-parse
django_sendgrid_parse/__init__.py
Python
mit
139
0
from django
.utils.translation import ugettext_lazy as _ugl default_app_config = 'django_sendgrid_parse.apps.Djang
oSendgridParseAppConfig'
kayhayen/Nuitka
nuitka/codegen/LineNumberCodes.py
Python
apache-2.0
2,550
0.000392
# Copyright 2021, Kay Hayen, mailto:[email protected] # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in
writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Generate code that updates the source code line. """
def getCurrentLineNumberCode(context): frame_handle = context.getFrameHandle() if frame_handle is None: return "" else: source_ref = context.getCurrentSourceCodeReference() if source_ref.isInternal(): return "" else: return str(source_ref.getLineNumber()) def getLineNumberUpdateCode(context): lineno_value = getCurrentLineNumberCode(context) if lineno_value: frame_handle = context.getFrameHandle() return "%s->m_frame.f_lineno = %s;" % (frame_handle, lineno_value) else: return "" def getErrorLineNumberUpdateCode(context): ( _exception_type, _exception_value, _exception_tb, exception_lineno, ) = context.variable_storage.getExceptionVariableDescriptions() lineno_value = getCurrentLineNumberCode(context) if lineno_value: return "%s = %s;" % (exception_lineno, lineno_value) else: return "" def emitErrorLineNumberUpdateCode(emit, context): update_code = getErrorLineNumberUpdateCode(context) if update_code: emit(update_code) def emitLineNumberUpdateCode(expression, emit, context): context.setCurrentSourceCodeReference(expression.getCompatibleSourceReference()) code = getLineNumberUpdateCode(context) if code: emit(code) def getSetLineNumberCodeRaw(to_name, emit, context): assert context.getFrameHandle() is not None emit("%s->m_frame.f_lineno = %s;" % (context.getFrameHandle(), to_name)) def getLineNumberCode(to_name, emit, context): assert context.getFrameHandle() is not None emit("%s = %s->m_frame.f_lineno;" % (to_name, context.getFrameHandle()))
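The helpers above only build C statement strings of the form "<frame>->m_frame.f_lineno = <lineno>;". A stub-driven sketch of what getLineNumberUpdateCode() returns; both stub classes are illustrative stand-ins for Nuitka's real code-generation context, not its actual API:

class StubSourceRef:
    def isInternal(self):
        return False
    def getLineNumber(self):
        return 42

class StubContext:
    def getFrameHandle(self):
        return "frame_object"
    def getCurrentSourceCodeReference(self):
        return StubSourceRef()

# With the functions from the record in scope:
# getLineNumberUpdateCode(StubContext()) == "frame_object->m_frame.f_lineno = 42;"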
edusegzy/pychemqt
lib/reaction.py
Python
gpl-3.0
9,477
0.001478
#!/usr/bin/python # -*- coding: utf-8 -*- ############################################################################### # Module to define chemical reaction functionality ############################################################################### from math import exp, log import sqlite3 from numpy import polyval from scipy.optimize import fsolve from PyQt4.QtGui import QApplication from lib import unidades from lib.sql import databank_name class Reaction(object): """Chemical reaction object""" status = 0 msg = QApplication.translate("pychemqt", "undefined") error = 0 kwargs = {"comp": [], "coef": [], "tipo": 0, "fase": 0, "key": 0, "base": 0, "customHr": False, "Hr": 0.0,
"formula": False, "conversion": None, "keq": None} kwargsValue = ("Hr",) kwargsList = ("tipo", "fase", "key", "base") kwargsCheck = ("customHr", "formula") calculateValue = ("DeltaP", "DeltaP_f", "DeltaP_ac", "DeltaP_h", "DeltaP_v", "DeltaP_100ft", "V", "f", "Re", "Tout") TEXT_TYPE = [QApplication.translate("pychemqt", "Estequiometric"), QApplication.transl
ate("pychemqt", "Equilibrium"), QApplication.translate("pychemqt", "Kinetic"), QApplication.translate("pychemqt", "Catalitic")] TEXT_PHASE = [QApplication.translate("pychemqt", "Global"), QApplication.translate("pychemqt", "Liquid"), QApplication.translate("pychemqt", "Gas")] TEXT_BASE = [QApplication.translate("pychemqt", "Mole"), QApplication.translate("pychemqt", "Mass"), QApplication.translate("pychemqt", "Partial pressure")] def __init__(self, **kwargs): """constructor, kwargs keys can be: comp: array with index of reaction components coef: array with stequiometric coefficient for each component fase: Phase where reaction work 0 - Global 1 - Liquid 2 - Gas key: Index of key component base 0 - Mol 1 - Mass 2 - Partial pressure Hr: Heat of reaction, calculate from heat of formation if no input formula: boolean to show compound names in formules tipo: Kind of reaction 0 - Stequiometric, without equilibrium or kinetic calculations 1 - Equilibrium, without kinetic calculation 2 - Equilibrium by minimization of Gibbs free energy 3 - Kinetic 4 - Catalytic conversion: conversion value for reaction with tipo=0 keq: equilibrium constant for reation with tipo=1 -it is float if it don't depend with temperature -it is array if it depends with temperature """ self.kwargs = Reaction.kwargs.copy() if kwargs: self.__call__(**kwargs) def __call__(self, **kwargs): oldkwargs = self.kwargs.copy() self.kwargs.update(kwargs) if oldkwargs != self.kwargs and self.isCalculable: self.calculo() @property def isCalculable(self): self.msg = "" self.status = 1 if not self.kwargs["comp"]: self.msg = QApplication.translate("pychemqt", "undefined components") self.status = 0 return if not self.kwargs["coef"]: self.msg = QApplication.translate("pychemqt", "undefined stequiometric") self.status = 0 return if self.kwargs["tipo"] == 0: if self.kwargs["conversion"] is None: self.msg = QApplication.translate("pychemqt", "undefined conversion") self.status = 3 elif self.kwargs["tipo"] == 1: if self.kwargs["keq"] is None: self.msg = QApplication.translate("pychemqt", "undefined equilibrium constants") self.status = 3 elif self.kwargs["tipo"] == 2: pass elif self.kwargs["tipo"] == 3: pass return True def calculo(self): self.componentes = self.kwargs["comp"] self.coef = self.kwargs["coef"] self.tipo = self.kwargs["tipo"] self.base = self.kwargs["base"] self.fase = self.kwargs["fase"] self.calor = self.kwargs["Hr"] self.formulas = self.kwargs["formula"] self.keq = self.kwargs["keq"] databank = sqlite3.connect(databank_name).cursor() databank.execute("select nombre, peso_molecular, formula, \ calor_formacion_gas from compuestos where id IN \ %s" % str(tuple(self.componentes))) nombre = [] peso_molecular = [] formula = [] calor_reaccion = 0 check_estequiometria = 0 for i, compuesto in enumerate(databank): nombre.append(compuesto[0]) peso_molecular.append(compuesto[1]) formula.append(compuesto[2]) calor_reaccion += compuesto[3]*self.coef[i] check_estequiometria += self.coef[i]*compuesto[1] self.nombre = nombre self.peso_molecular = peso_molecular self.formula = formula if self.calor: self.Hr = self.kwargs.get("Hr", 0) else: self.Hr = unidades.MolarEnthalpy(calor_reaccion/abs( self.coef[self.base]), "Jkmol") self.error = round(check_estequiometria, 1) self.state = self.error == 0 self.text = self._txt(self.formulas) def conversion(self, corriente, T): """Calculate reaction conversion corriente: Corriente instance for reaction T: Temperature of reaction""" if self.tipo == 0: # Material balance without equilibrium or kinetics 
considerations alfa = self.kwargs["conversion"] elif self.tipo == 1: # Chemical equilibrium without kinetics if isinstance(self.keq, list): A, B, C, D, E, F, G, H = self.keq keq = exp(A+B/T+C*log(T)+D*T+E*T**2+F*T**3+G*T**4+H*T**5) else: keq = self.keq def f(alfa): conc_out = [ (corriente.caudalunitariomolar[i]+alfa*self.coef[i]) / corriente.Q.m3h for i in range(len(self.componentes))] productorio = 1 for i in range(len(self.componentes)): productorio *= conc_out[i]**self.coef[i] return keq-productorio alfa = fsolve(f, 0.5) print alfa, f(alfa) avance = alfa*self.coef[self.base]*corriente.caudalunitariomolar[self.base] Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] / self.coef[self.base] for i in range(len(self.componentes))] minimo = min(Q_out) if minimo < 0: # The key component is not correct, redo the result indice = Q_out.index(minimo) avance = self.coef[indice]*corriente.caudalunitariomolar[indice] Q_out = [corriente.caudalunitariomolar[i]+avance*self.coef[i] / self.coef[indice] for i in range(len(self.componentes))] h = unidades.Power(self.Hr*self.coef[self.base] / self.coef[indice]*avance, "Jh") else: h = unidades.Power(self.Hr*avance, "Jh") print alfa, avance caudal = sum(Q_out) fraccion = [caudal_i/caudal for caudal_i in Q_out] return fraccion, h # def cinetica(self, tipo, Ko, Ei): # """Método que define la velocidad de reacción""" # # def _txt(self, nombre=False): """Function to get text representation for reaction""" if nombre: txt = self.nombre else: txt = self.formula reactivos = [] productos = []
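When kwargs["keq"] is a coefficient list, the conversion() method above evaluates a temperature-dependent equilibrium constant, keq(T) = exp(A + B/T + C*ln(T) + D*T + E*T^2 + F*T^3 + G*T^4 + H*T^5). A standalone sketch of that evaluation with purely illustrative coefficients:

from math import exp, log

def keq_of_T(coeffs, T):
    A, B, C, D, E, F, G, H = coeffs
    return exp(A + B/T + C*log(T) + D*T + E*T**2 + F*T**3 + G*T**4 + H*T**5)

# made-up coefficients, not data for any real reaction
print(keq_of_T([1.0, -500.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], T=350.0))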
nive/nive
nive/extensions/path.py
Python
gpl-3.0
8,830
0.008041
import unicodedata import re class PathExtension: """ Enables readable url path names instead of ids for object traversal. Names are stored as meta.pool_filename and generated from title by default. Automatic generation can be disabled by setting *meta.customfilename* to False for each object. Extensions like *.html* are not stored. Path matching works independent from extensions. """ maxlength = 55 # max path length containerNamespace = True # unique filenames for container or global extension = None def Init(self): if self.id == 0: # skip roots return self.ListenEvent("commit", "TitleToFilename") self._SetName() def TitleToFilename(self, **kw): """ Uses title for filename """ customfilename = self.data.get("customfilename", None) # might not exist if customfilename: self._SetName() return # create url compatible filename from title filename = self.EscapeFilename(self.meta.title) # make unique filename filename = self.UniqueFilename(filename) if self.AddExtension(filename) == self.meta.pool_filename: # no change return if filename: # update self.meta["pool_filename"] = self.AddExtension(filename) else: # reset filename self.meta["pool_filename"] = "" self._SetName() self.Signal("pathupdate", path=self.meta["pool_filename"]) def UniqueFilename(self, name): """ Converts name to valid path/url """ if name == "file": name = "file_" if self.containerNamespace: unitref = self.parent.id else: unitref = None cnt = 1 root = self.root while root.search.FilenameToID(self.AddExtension(name), unitref, parameter=dict(id=self.id), operators=dict(id="!=")) != 0: if cnt>1: name = name.rstrip("1234567890-") name = name+"-"+str(cnt) cnt += 1 return name def EscapeFilename(self, path): """ Converts name to valid path/url Path length between *self.maxlength-20* and *self.maxlength* chars. Tries to cut longer names at spaces. (based on django's slugify) """ path = unicodedata.normalize("NFKD", path).encode("ascii", "ignore") path = path.decode("utf-8") path = re.sub('[^\w\s-]', '', path).strip().lower() path = re.sub('[-\s]+', '_', path) # avoid ids as filenames try: int(path) path += "_n" except: pass # cut long filenames cutlen = 20 if len(path) <= self.maxlength: return path # cut at '_' pos = path[self.maxlength-cutlen:].find("_") if pos > cutlen: # no '_' found. cut at maxlength. return path[:self.maxlength] return path[:self.maxlength-cutlen+pos] def AddExtension(self, filename): if not self.extension: return filename return "%s.%s" % (filename, self.extension) # system functions ----------------------------------------------------------------- def __getitem__(self, id): """ Traversal lookup based on object.pool_filename and object.id. Trailing extensions are ignored if self.extension is None. `file` is a reserved name and used in the current object to map file downloads. """ if id == "file": raise KeyError(id) if self.extension is None: id = id.split(".") if len(id)>2: id = (".").join(id[:-1]) else: id = id[0] try: id = int(id) except ValueError: name = id id = 0 if name: id = self.root.search.FilenameToID(name, self.id) if not id: raise KeyError(id) obj = self.GetObj(id) if obj is None: raise KeyError(id) return obj def _SetName(self): self.__name__ = self.meta["pool_filename"] if not self.__name__: self.__name__ = str(self.id) class RootPathExtension(object): """ Extension for nive root objects to handle alternative url names """ extension = None # system functions -----------------------------------------------------------
------ def __getitem__(self, id): """ Traversal lookup based on object.pool_filename and object.id. Trailing extensions are ignored. `file` is a reserved name and used in the current object to map file downloads.
""" if id == "file": raise KeyError(id) if self.extension is None: id = id.split(".") if len(id)>2: id = (".").join(id[:-1]) else: id = id[0] try: id = int(id) except: name = id id = 0 if name: id = self.search.FilenameToID(name, self.id) if not id: raise KeyError(id) obj = self.GetObj(id) if not obj: raise KeyError(id) return obj class PersistentRootPath(object): """ Extension for nive root objects to handle alternative url names """ def Init(self): self.ListenEvent("commit", "UpdateRouting") self.ListenEvent("dataloaded", "UpdateRouting") self.UpdateRouting() def UpdateRouting(self, **kw): # check url name of root if self.meta.get("pool_filename"): name = self.meta.get("pool_filename") if name != self.__name__: # close cached root self.app._CloseRootObj(name=self.__name__) # update __name__ and hash self.__name__ = str(name) self.path = name # unique root id generated from name . negative integer. self.idhash = abs(hash(self.__name__))*-1 from nive.tool import Tool, ToolView from nive.definitions import ToolConf, FieldConf, ViewConf, IApplication tool_configuration = ToolConf( id = "rewriteFilename", context = "nive.extensions.path.RewriteFilenamesTool", name = "Rewrite pool_filename based on title", description = "Rewrites all or empty filenames based on form selection.", apply = (IApplication,), mimetype = "text/html", data = [ FieldConf(id="types", datatype="checkbox", default="", settings=dict(codelist="types"), name="Object types", description=""), FieldConf(id="testrun", datatype="bool", default=1, name="Testrun, no commits", description=""), FieldConf(id="resetall", datatype="string", default="", size=15, name="Reset all filenames", description="<b>Urls will change! Enter 'reset all'</b>"), FieldConf(id="tag", datatype="string", default="rewriteFilename", hidden=1) ], views = [ ViewConf(name="", view=ToolView, attr="form", permission="admin", context="nive.extensions.path.RewriteFilenamesTool") ] ) class RewriteFilenamesTool(Tool): def _Run(self, **values): parameter = dict() if values.get("resetall")!="reset all": parameter["pool_filename"] = "" if values.get("types"): tt = values.get("types") if not isinstance(tt, list): tt = [tt] parameter["pool_type"] = tt operators = dict(pool_type="IN", pool_filename="=") fields = ("id", "title", "pool_type", "pool_filename") root = self.app.root recs = root.search.Search(parameter, fields, max=10000, operators=operators, sort="id", ascending=0) if len(recs["items"]) == 0: return "<h2>None found!</h2>", False user = values["original"]["user"] testrun = values["testrun"] result = []
nijinashok/sos
sos/plugins/dracut.py
Python
gpl-2.0
862
0
# Copyright (C) 2016 Red Hat, Inc., Bryn M. Reeves <[email protected]> # This file is part of the sos project: https://github.com/sosreport/sos # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # version 2 of the GNU General Public License. # # See the LICENSE file in the source distribution for further information. from sos.plugins import Plugin, RedHatPlugin class Dracu
t(Plugin, RedHatPlugin): """ Dracut initramfs generator """ plugin_name = "dracut" packages = ("dracut",)
def setup(self): self.add_copy_spec([ "/etc/dracut.conf", "/etc/dracut.conf.d" ]) self.add_cmd_output([ "dracut --list-modules", "dracut --print-cmdline" ]) # vim: set et ts=4 sw=4 :
cloudify-cosmo/cloudify-gcp-plugin
cloudify_gcp/admin/__init__.py
Python
apache-2.0
1,945
0
# ####### # Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oauth2client import GOOGLE_TOKEN_URI from oauth2client.client import GoogleCredentials from .. import gcp from .. import constants class CloudResourcesBase(gcp.GoogleCloudApi): def __init__(self, config, logger, scope=constants.COMPUTE_SCOPE, discovery=constants.CLOUD
RESOURCES_DISCOVERY, api_version=constants.API_V1): super(CloudResourcesBase, self).__init__( config, logger, scope, discovery, api_version) def get_credentials(self, scope): # check # run: gcloud beta
auth application-default login # look to ~/.config/gcloud/application_default_credentials.json credentials = GoogleCredentials( access_token=None, client_id=self.auth['client_id'], client_secret=self.auth['client_secret'], refresh_token=self.auth['refresh_token'], token_expiry=None, token_uri=GOOGLE_TOKEN_URI, user_agent='Python client library' ) return credentials def get(self): raise NotImplementedError() def create(self): raise NotImplementedError() def delete(self): raise NotImplementedError()
anhstudios/swganh
data/scripts/templates/object/tangible/ship/components/armor/shared_arm_reward_alderaan_elite.py
Python
mit
494
0.044534
#### N
OTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/ship/components/armor/shared_arm_reward_alderaan_elite.iff" result.attribute_template_id = 8 result.stfName("space/space_item","armor_reward_alderaan
_elite") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
AmatanHead/ardrone-autopilot
nodes_opencv/frame.py
Python
mit
1,941
0.00103
#!/usr/bin/env python # # This code is a part of `ardrone_autopilot` project # which is distributed under the MIT license. # See `LICENSE` file for details. # """ This node is based on `base.py`. See there a documentation. Inputs ------ * in/image -- main picture stream. Outputs ------- * out/image -- result image. Parameters ---------- * ~show = False [bool] -- show the result instead of publishing it. * ~encoding = "bgr8" [str] -- video encoding used by bridge. """ import rospy import cv2 import tf from tf.transformations import quaternion_matrix import numpy as np import image_geometry import math from base import BaseStreamHandler class Show(BaseStreamHandler): def __init__(self, *args, **kwargs): self.tf = tf.TransformListener() self.camera_model = image_geometry.PinholeCameraModel() super(Show, self).__init__(*args, **
kwargs) def on_image(self, img): if self.info is None: return self.camera_model.fromCameraInfo(self.info) # self.camera_model.rectifyImage(img, img) self.tf.waitForTransform('ardrone/odom', 'ardrone/ardrone_base_frontcam', rospy.Time(0)
, rospy.Duration(3)) trans, rot = self.tf.lookupTransform('ardrone/odom', 'ardrone/ardrone_base_frontcam', rospy.Time(0)) rot_matrix = np.array(quaternion_matrix(rot)) for a in range(0, 360, 30): vector = np.array(np.array([0.1 * math.cos(a * math.pi / 180), 0.1 * math.sin(a * math.pi / 180), 0, 0])) point = vector.dot(rot_matrix) x, y = self.camera_model.project3dToPixel(point) cv2.circle(img, (int(x), int(y)), 5, (0, 0, 255), -1) return img if __name__ == "__main__": Show.launch_node()
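project3dToPixel() in the node above maps camera-frame 3D points onto image pixels. For a plain pinhole model that reduces to u = fx*x/z + cx and v = fy*y/z + cy; the intrinsics in this sketch are made-up values, not the AR.Drone's calibration:

import numpy as np

fx, fy, cx, cy = 700.0, 700.0, 320.0, 180.0     # illustrative intrinsics

def project3d_to_pixel(point_xyz):
    x, y, z = point_xyz
    return fx * x / z + cx, fy * y / z + cy

u, v = project3d_to_pixel(np.array([0.05, -0.02, 1.0]))   # point 1 m in front of camera
print(round(u, 1), round(v, 1))                            # 355.0 166.0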
xhochy/arrow
python/pyarrow/tests/test_compute.py
Python
apache-2.0
34,188
0
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime from functools import lru_cache import inspect import pickle import pytest import random import textwrap import numpy as np import pyarrow as pa import pyarrow.compute as pc all_array_types = [ ('bool', [True, False, False, True, True]), ('uint8', np.arange(5)), ('int8', np.arange(5)), ('uint16', np.arange(5)), ('int16', np.arange(5)), ('uint32', np.arange(5)), ('int32', np.arange(5)), ('uint64', np.arange(5, 10)), ('int64', np.arange(5, 10)), ('float', np.arange(0, 0.5, 0.1)), ('double', np.arange(0, 0.5, 0.1)), ('string', ['a', 'b', None, 'ddd', 'ee']), ('binary', [b'a', b'b', b'c', b'ddd', b'ee']), (pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']), (pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]), (pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]), (pa.struct([('a', pa.int8()), ('b', pa.int8())]), [ {'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]), ] exported_functions = [ func for (name, func) in sorted(pc.__dict__.items()) if hasattr(func, '__arrow_compute_function__')] exported_option_classes = [ cls for (name, cls) in sorted(pc.__dict__.items()) if (isinstance(cls, type) and cls is not pc.FunctionOptions and issubclass(cls, pc.FunctionOptions))] numerical_arrow_types = [ pa.int8(), pa.int16(), pa.int64(), pa.uint8(), pa.uint16(), pa.uint64(), pa.float32(), pa.float64() ] def test_exported_functions(): # Check that all exported concrete functions can be called with # the right number of arguments. # Note that unregistered functions (e.g. with a mismatching name) # will raise KeyError. functions = exported_functions assert len(functions) >= 10 for func in functions: args = [object()] * func.__arrow_compute_function__['arity'] with pytest.raises(TypeError, match="Got unexpected argument type " "<class 'object'> for compute function"): func(*args) def test_exported_option_classes(): classes = exported_option_classes assert len(classes) >= 10 for cls in classes: # Option classes must have an introspectable constructor signature, # and that signature should not have any *args or **kwargs. 
sig = inspect.signature(cls) for param in sig.parameters.values(): assert param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD) def test_list_functions(): assert len(pc.list_functions()) > 10 assert "add" in pc.list_functions() def _check_get_function(name, expected_func_cls, expected_ker_cls, min_num_kernels=1): func = pc.get_function(name) assert isinstance(func, expected_func_cls) n = func.num_kernels assert n >= min_num_kernels assert n == len(func.kernels) assert all(isinstance(ker, expected_ker_cls) for ker in func.kernels) def test_get_function_scalar(): _check_get_function("add", pc.ScalarFunction, pc.ScalarKernel, 8) def test_get_function_vector(): _check_get_function("unique", pc.VectorFunction, pc.VectorKernel, 8) def test_get_function_aggregate(): _check_get_function("mean", pc.ScalarAggregateFunction, pc.ScalarAggregateKernel, 8) def test_call_function_with_memory_pool(): arr = pa.array(["foo", "bar", "baz"]) indices = np.array([2, 2, 1]) result1 = arr.take(indices) result2 = pc.call_function('take', [arr, indices], memory_pool=pa.default_memory_pool()) expected = pa.array(["baz", "baz", "bar"]) assert result1.equals(expected) assert result2.equals(expected) result3 = pc.take(arr, indices, memory_pool=pa.default_memory_pool()) assert result3.equals(expected) def test_pickle_functions(): # Pick
le registered functions for name in pc.list_functions(): func = pc.get_function(name) reconstructed = pickle.loads(pickle.dumps(func)) assert type(reconstructed) is type(func) assert reconstructed.name == func.name assert reconstructed.arity == func.arity assert reconstructed.num_kernels == func.num_kernels def test_pickle_global_functions(): # Pickle global wrappers (manual or
automatic) of registered functions for name in pc.list_functions(): func = getattr(pc, name) reconstructed = pickle.loads(pickle.dumps(func)) assert reconstructed is func def test_function_attributes(): # Sanity check attributes of registered functions for name in pc.list_functions(): func = pc.get_function(name) assert isinstance(func, pc.Function) assert func.name == name kernels = func.kernels assert func.num_kernels == len(kernels) assert all(isinstance(ker, pc.Kernel) for ker in kernels) assert func.arity >= 1 # no varargs functions for now repr(func) for ker in kernels: repr(ker) def test_input_type_conversion(): # Automatic array conversion from Python arr = pc.add([1, 2], [4, None]) assert arr.to_pylist() == [5, None] # Automatic scalar conversion from Python arr = pc.add([1, 2], 4) assert arr.to_pylist() == [5, 6] # Other scalar type assert pc.equal(["foo", "bar", None], "foo").to_pylist() == [True, False, None] @pytest.mark.parametrize('arrow_type', numerical_arrow_types) def test_sum_array(arrow_type): arr = pa.array([1, 2, 3, 4], type=arrow_type) assert arr.sum().as_py() == 10 assert pc.sum(arr).as_py() == 10 arr = pa.array([], type=arrow_type) assert arr.sum().as_py() is None # noqa: E711 @pytest.mark.parametrize('arrow_type', numerical_arrow_types) def test_sum_chunked_array(arrow_type): arr = pa.chunked_array([pa.array([1, 2, 3, 4], type=arrow_type)]) assert pc.sum(arr).as_py() == 10 arr = pa.chunked_array([ pa.array([1, 2], type=arrow_type), pa.array([3, 4], type=arrow_type) ]) assert pc.sum(arr).as_py() == 10 arr = pa.chunked_array([ pa.array([1, 2], type=arrow_type), pa.array([], type=arrow_type), pa.array([3, 4], type=arrow_type) ]) assert pc.sum(arr).as_py() == 10 arr = pa.chunked_array((), type=arrow_type) assert arr.num_chunks == 0 assert pc.sum(arr).as_py() is None # noqa: E711 def test_mode_array(): # ARROW-9917 arr = pa.array([1, 1, 3, 4, 3, 5], type='int64') mode = pc.mode(arr) assert len(mode) == 1 assert mode[0].as_py() == {"mode": 1, "count": 2} mode = pc.mode(arr, 2) assert len(mode) == 2 assert mode[0].as_py() == {"mode": 1, "count": 2} assert mode[1].as_py() == {"mode": 3, "count": 2} arr = pa.array([], type='int64') assert len(pc.mode(arr)) == 0 def test_mode_chunked_array(): # ARROW-9917 arr = pa.chunked_array([pa.array([1, 1, 3, 4, 3, 5], type='int64')]) mode = pc.mode(arr) assert len(mode) == 1 assert mode[0].as_py() == {"mode": 1, "count": 2} mode = pc.mode(arr, 2) assert len(mode) == 2 assert mode[0].as_py() == {"mode": 1, "count": 2} assert mode[1].as_py() == {"mode": 3, "count": 2} arr = pa.chunked_array((), ty
mapbox/mapbox-sdk-py
tests/test_datasets.py
Python
mit
8,145
0.000859
import base64 import json import responses from mapbox.services.datasets import Datasets username = 'testuser' access_token = 'pk.{0}.test'.format( base64.b64encode(b'{"u":"testuser"}').decode('utf-8')) def test_class_attrs(): """Get expected class attr values""" serv = Datasets() assert serv.api_name == 'datasets' assert serv.api_version == 'v1' def test_datasets_service_properties(): """Get expected username and baseuri.""" datasets = Datasets(access_token=access_token) assert datasets.username == username assert datasets.baseuri == 'https://api.mapbox.com/datasets/v1' @responses.activate def test_datasets_list(): """Listing datasets works""" body = ''' [ { "owner": "testuser", "id": "ds1", "created": "2015-09-19", "modified": "2015-09-19" }, { "owner": "testuser", "id": "ds2", "created": "2015-09-19", "modified": "2015-09-19" } ] ''' responses.add( responses.GET, 'https://api.mapbox.com/datasets/v1/{0}?access_token={1}'.format( username, access_token), match_querystring=True, body=body, status=200, content_type='application/json') response = Datasets(access_token=access_token).list() assert response.status_code == 200 assert [item['id'] for item in response.json()] == ['ds1', 'ds2'] @responses.activate def test_datasets_create(): """Creating a named and described dataset works.""" def request_callback(request): payload = json.loads(request.body.decode()) resp_body = { 'owner': username, 'id': 'new', 'name': payload['name'], 'description': payload['description'], 'created': '2015-09-19', 'modified': '2015-09-19'} headers = {} return (200, headers, json.dumps(resp_body)) responses.add_callback( responses.POST, 'https://api.mapbox.com/datasets/v1/{0}?access_token={1}'.format( username, access_token), match_querystring=True, callback=request_callback) response = Datasets(access_token=access_token).create( name='things', description='a collection of things') assert response.status_code == 200 assert response.json()['name'] == 'things' assert response.json()['description'] == 'a collection of things' @responses.activate def test_dataset_read(): """Dataset name and description reading works.""" responses.add( responses.GET, 'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format( username, 'test', access_token), match_querystring=True, body=json.dumps( {'name': 'things', 'description': 'a collection of things'}), status=200, content_type='application/json') response = Datasets(access_token=access_token).read_dataset('test') assert response.status_code == 200 assert response.json()['name'] == 'things' assert response.json()['description'] == 'a collection of things' @responses.activate def test_dataset_update(): """Updating dataset name and description works.""" def request_callback(request): payload = json.loads(request.body.decode()) resp_body = { 'owner': username, 'id': 'foo', 'name': payload['name'], 'description': payload['description'], 'created': '2015-09-19', 'modified': '2015-09-19'} headers = {} return (200, headers, json.dumps(resp_body)) responses.add_callback( responses.PATCH, 'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format( username, 'foo', access_token), match_querystring=True, callback=request_callback) response = Datasets(access_token=access_token).update_dataset( 'foo', name='things', description='a collection of things') assert response.status_code == 200 assert response.json()['name'] == 'things' assert response.json()['description'] == 'a collection of things' @responses.activate def test_delete_dataset(): """Delete a dataset""" 
responses.add( responses.DELETE, 'https://api.mapbox.com/datasets/v1/{0}/{1}?access_token={2}'.format( username, 'test', access_token), match_querystring=True, status=204) response = Datasets(access_token=access_token).delete_dataset('test') assert response.status_code == 204 @responses.activate def test_da
taset_list_features(): """Features retrieval work""" responses.add( responses.GET, 'https://api.mapbox.com/datasets/v1/{0}/{1}/features?access_token={2}'.format( username, 'test', access_token), match_querystring=True, body=json.dumps({'type': 'FeatureCollection'}), status=200, content_type='application/json') response = Datasets(access_token=access_token).list_features('test') assert response.s
tatus_code == 200 assert response.json()['type'] == 'FeatureCollection' @responses.activate def test_dataset_list_features_reverse(): """Features retrieval in reverse works""" responses.add( responses.GET, 'https://api.mapbox.com/datasets/v1/{0}/{1}/features?access_token={2}&reverse=true'.format( username, 'test', access_token), match_querystring=True, body=json.dumps({'type': 'FeatureCollection'}), status=200, content_type='application/json') response = Datasets(access_token=access_token).list_features( 'test', reverse=True) assert response.status_code == 200 assert response.json()['type'] == 'FeatureCollection' @responses.activate def test_dataset_list_features_pagination(): """Features retrieval pagination works""" responses.add( responses.GET, 'https://api.mapbox.com/datasets/v1/{0}/{1}/features?access_token={2}&start=1&limit=1'.format( username, 'test', access_token), match_querystring=True, body=json.dumps({'type': 'FeatureCollection'}), status=200, content_type='application/json') response = Datasets(access_token=access_token).list_features( 'test', start=1, limit=1) assert response.status_code == 200 assert response.json()['type'] == 'FeatureCollection' # Tests of feature-scoped methods. @responses.activate def test_read_feature(): """Feature read works.""" responses.add( responses.GET, 'https://api.mapbox.com/datasets/v1/{0}/{1}/features/{2}?access_token={3}'.format( username, 'test', '1', access_token), match_querystring=True, body=json.dumps({'type': 'Feature', 'id': '1'}), status=200, content_type='application/json') response = Datasets(access_token=access_token).read_feature('test', '1') assert response.status_code == 200 assert response.json()['type'] == 'Feature' assert response.json()['id'] == '1' @responses.activate def test_update_feature(): """Feature update works.""" def request_callback(request): payload = json.loads(request.body.decode()) assert payload == {'type': 'Feature'} return (200, {}, "") responses.add_callback( responses.PUT, 'https://api.mapbox.com/datasets/v1/{0}/{1}/features/{2}?access_token={3}'.format( username, 'test', '1', access_token), match_querystring=True, callback=request_callback) response = Datasets(access_token=access_token).update_feature( 'test', '1', {'type': 'Feature'}) assert response.status_code == 200 @responses.activate def test_delete_feature(): """Deletes a feature.""" responses.add( responses.DELETE, 'https://api.mapbox.com/datasets/v1/{0}/{1}/features/{2}?access_token={3}'.format( username, 'test', '1', access_token), match_querystring=True, status=204) response = Datasets(access_token=access_token).delete_feature('test', '1') assert response.status_code == 204
neynt/tsundiary
tsundiary/jinja_env.py
Python
mit
1,603
0.00815
# encoding=utf-8 from tsundiary import app app.jinja_env.globals.update(theme_nicename = { 'classic': 'Classic Orange', 'tsun-chan': 'Classic Orange w/ Tsundiary-chan', 'minimal': 'Minimal Black/Grey', 'misato-tachibana': 'Misato Tachibana', 'rei-ayanami': 'Rei Ayanami', 'rei-ayanami-2': 'Rei Ayanami 2', 'saya': 'Saya', 'yuno': 'Yuno Gasai', 'hitagi': 'Hitagi Senjougahara', 'kyoko-sakura': 'Kyok
o Sakura', 'colorful': 'Based on favorite color' }) app.jinja_env.global
s.update(themes = ['classic', 'tsun-chan', 'minimal', 'misato-tachibana', 'rei-ayanami', 'rei-ayanami-2', 'saya', 'yuno', 'colorful']) app.jinja_env.globals.update(theme_creds = { 'tsun-chan': 'Artist: <span title="<3">bdgtard</span>', 'misato-tachibana': 'Misato Tachibana source: Nichijou OP1.', 'rei-ayanami': '<a href="http://megadud20.deviantart.com/art/Rei-Ayanami-Vector-214547575">Rei source</a>.', 'saya': u'<a href="http://e-shuushuu.net/image/34277/">Saya source</a>. Artist: 中央東口 (Chuuou Higashiguchi).', 'yuno': '<a href="http://xyanderegirl.deviantart.com/art/Yuno-Gasai-Render-293856645">Yuno source</a>.', 'kyoko-sakura': '<a href="http://3071527.deviantart.com/art/kyoko-sakura-376238110">Kyoko source</a>.' }) app.jinja_env.globals.update(theme_colors = [ ('Red', '0,100,100'), ('Orange', '35,100,100'), ('Yellow', '50,100,100'), ('Green', '120,100,80'), ('Cyan', '180,100,80'), ('Blue', '215,100,100'), ('Purple', '270,100,100'), ('Black', '0,0,0'), ('Grey', '0,0,70'), ('White', '0,0,100'), ])
vasily-v-ryabov/pywinauto
pywinauto/unittests/test_win32functions.py
Python
bsd-3-clause
5,560
0.00054
# GUI Application automation and testing library # Copyright (C) 2006-2018 Mark Mc Mahon and Contributors # https://github.com/pywinauto/pywinauto/graphs/contributors # http://pywinauto.readthedocs.io/en/latest/credits.html # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of pywinauto nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Tests for win32functions.py""" import unittest import sys sys.path.append(".") from pywinauto.win32structures import POINT # noqa: E402 from pywinauto.win32structures import RECT # noqa: E402 from pywinauto.win32functions import MakeLong, HiWord, LoWord # noqa: E402 class Win32FunctionsTestCases(unittest.TestCase): "Unit tests for the win32function methods" def testMakeLong(self): data = ( (0, (0, 0)), (1, (0, 1)), (0x10000, (1, 0)), (0xffff, (0, 0xffff)), (0xffff0000, (0xffff, 0)), (0xffffffff, (0xffff, 0xffff)), (0, (0x10000, 0x10000)), ) for result, (hi, lo) in data: self.assertEqual(result, MakeLong(hi, lo)) def testMakeLong_zero(self): "test that makelong(0,0)" self.assertEqual(0, MakeLong(0, 0)) def testMakeLong_lowone(self): "Make sure MakeLong() function works with low word == 1" self.
assertEqual(1, MakeLong(0, 1)) def testMakeLong_highone(self): "Make sure MakeLong() function works with high word == 1" self.assertEqual(0x10000, MakeLong(1, 0)) def testM
akeLong_highbig(self): "Make sure MakeLong() function works with big number in high word" self.assertEqual(0xffff0000, MakeLong(0xffff, 0)) def testMakeLong_lowbig(self): "Make sure MakeLong() function works with big number in low word" self.assertEqual(0xffff, MakeLong(0, 0xffff)) def testMakeLong_big(self): "Make sure MakeLong() function works with big numbers in 2 words" self.assertEqual(0xffffffff, MakeLong(0xffff, 0xffff)) def testLowWord_zero(self): self.assertEqual(0, LoWord(0)) def testLowWord_one(self): self.assertEqual(1, LoWord(1)) def testLowWord_big(self): self.assertEqual(1, LoWord(MakeLong(0xffff, 1))) def testLowWord_vbig(self): self.assertEqual(0xffff, LoWord(MakeLong(0xffff, 0xffff))) def testHiWord_zero(self): self.assertEqual(0, HiWord(0)) def testHiWord_one(self): self.assertEqual(0, HiWord(1)) def testHiWord_bigone(self): self.assertEqual(1, HiWord(0x10000)) def testHiWord_big(self): self.assertEqual(0xffff, HiWord(MakeLong(0xffff, 1))) def testHiWord_vbig(self): self.assertEqual(0xffff, HiWord(MakeLong(0xffff, 0xffff))) def testPOINTindexation(self): p = POINT(1, 2) self.assertEqual(p[0], p.x) self.assertEqual(p[1], p.y) self.assertEqual(p[-2], p.x) self.assertEqual(p[-1], p.y) self.assertRaises(IndexError, lambda: p[2]) self.assertRaises(IndexError, lambda: p[-3]) def testPOINTiteration(self): p = POINT(1, 2) self.assertEqual([1, 2], [i for i in p]) def testPOINTcomparison(self): """Test POINT comparison operations""" p0 = POINT(1, 2) p1 = POINT(0, 2) self.assertNotEqual(p0, p1) p1.x = p0.x self.assertEqual(p0, p1) # tuple comparison self.assertEqual(p0, (1, 2)) self.assertNotEqual(p0, (0, 2)) # wrong type comparison self.assertNotEqual(p0, 1) def test_RECT_hash(self): """Test RECT is hashable""" r0 = RECT(0) r1 = RECT(1) d = { "r0": r0, "r1": r1 } self.assertEqual(r0, d["r0"]) self.assertEqual(r1, d["r1"]) self.assertNotEqual(r0, r1) def test_RECT_repr(self): """Test RECT repr""" r0 = RECT(0) self.assertEqual(r0.__repr__(), "<RECT L0, T0, R0, B0>") if __name__ == "__main__": unittest.main()
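The assertions above fully determine the 16-bit packing behaviour being tested. A pure-Python sketch of functions that would satisfy them (an assumption for illustration; pywinauto's actual MakeLong/HiWord/LoWord live in win32functions and may be implemented differently):

def make_long(hi, lo):
    # Pack two 16-bit words into one 32-bit value. The masking explains why
    # the test data expects MakeLong(0x10000, 0x10000) == 0.
    return ((hi & 0xffff) << 16) | (lo & 0xffff)

def hi_word(value):
    return (value >> 16) & 0xffff

def lo_word(value):
    return value & 0xffff

assert make_long(0xffff, 0xffff) == 0xffffffff
assert hi_word(make_long(0xffff, 1)) == 0xffff
assert lo_word(make_long(0xffff, 1)) == 1
assert make_long(0x10000, 0x10000) == 0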
abbgrade/wok_hooks
wok_hooks/hook_distribute.py
Python
mit
9,715
0.001029
import logging import os from ftplib import FTP as FTPClient from paramiko import SFTPClient, Transport as SFTPTransport ALLOWED_BACKEND_TYPES = ['ftp', 'sftp'] DEFAULT_BACKEND_TYPE = 'ftp' from wok_hooks.misc import Configuration as _Configuration class Configuration(_Configuration): def __init__(self, path, **kwargs): _Configuration.__init__(self, path, **kwargs) if not 'type' in self or not self['type'] in ALLOWED_BACKEND_TYPES: self['type'] = DEFAULT_BACKEND_TYPE self.save() class Observable: def __init__(self, observer=None): self._observer = [] if observer: for item in observer: self.register_observer(item) def register_observer(self, observer): self._observer.append(observer) class Stateful(Observable): def __init__(self, observer=None): if not hasattr(self, '_state'): self._state = None Observable.__init__(self, observer) if self._state is None: raise NotImplementedError() @property def state(self): return self._state @state.setter def state(self, value): if value != self._state: self._state = value logging.info('%s is now %s' % (self, value)) self._raise_state_update() def _raise_state_update(self): for observer in self._observer: observer.on_state_update(self) class FileBackend(Stateful): STATE_DISCONNECTED = 'disconnected' STATE_CONNECTED = 'connected' class ConnectionException(Exception): pass def __init__(self, config, observer=None): self.config = config self._state = self.STATE_DISCONNECTED Stateful.__init__(self, observer) def file_create_folder(self, path): raise NotImplementedError() def put_file(self, path, file_handle): raise NotImplementedError() def get_metadata(self, path): raise NotImplementedError() def get_file_and_metadata(self, path): raise NotImplementedError() def get_root_path(self): raise NotImplementedError() def connect(self): raise NotImplementedError() def disconnect(self): raise NotImplementedError() class FTP(FileBackend): def __init__(self, config): FileBackend.__init__(self, config) self._init_config() self.session = None self._init_session() DEFAULT_CONFIG = { 'ftp_host': 'localhost', 'ftp_user': 'anonymous', 'ftp_password': '', 'ftp_output_path': ''} def _init_config(self): some_changes = False if 'type' in self.config: for option, value in FTP.DEFAULT_CONFIG.items(): if not option in self.config: self.config[option] = value some_changes = True logging.info('set default ftp config.') else: self.config['type'] = 'ftp' self.config.update(FTP.DEFAULT_CONFIG) some_changes = True logging.info('set default ftp config.') if some_changes: self.config.save() def _init_session(self): self.connect() def connect(self): self._authenticate() self.state = self.STATE_CONNECTED def _authenticate(self): self.session = FTPClient(self.config['ftp_host'], self.config['ftp_user'], self.config['ftp_password']) logging.info('FTP Authorization succeed') def disconnect(self): if self.session: self.session.quit() def file_create_folder(self, path): if self.state == self.STATE_CONNECTED: self.session.cwd('/') dirlist = path.split('/') while '' in dirlist: dirlist.remove('') previous = self.session.pwd() for dirname in dirlist: dir_contents = self.session.nlst(previous) if not dirname in dir_contents: self.session.mkd(dirname) self.session.cwd(dirname) previous += dirname + '/' elif self.state == self.STATE_DISCONNECTED: raise self.ConnectionException('FTP is %s' % self.state) else: raise NotImplementedError() def put_file(self, path, file_handle): if self.state == self.STATE_CONNECTED: dirpath = '/'.join(path.split('/')[:-1]) self.file_create_folder(dirpath) 
self.session.storbinary('STOR ' + path.split('/')[-1], file_handle) elif self.state == self.STATE_DISCONNECTED: raise self.ConnectionException('FTP is %s' % self.state) else: raise NotImplementedError() def get_root_path(self): raise NotImplementedError() class SFTP(FileBackend): def __init__(self, config): FileBackend.__init__(self, config) self._init_config() self.session = None self._init_session() DEFAULT_CONFIG = { 'sftp_host': 'localhost', 'sftp_port': 22, 'sftp_user': 'anonymous', 'sftp_password': '', 'output_path': ''} def _init_config(self): some_changes = False if 'type' in self.config: for option, value in SFTP.DEFAULT_CONFIG.items(): if not option in self.config: self.config[option] = value some_changes = True logging.info('set default sftp config.') else: self.config['type'] = 'sftp' self.config.update(SFTP.DEFAULT_CONFIG) some_changes = True logging.info('set default
sftp config.') if some_changes: self.config.save() # cast config types self.config['sftp_port'] = int(self.config['sftp_port']) def _init_session(self): self.connect() def connect(self): self._authenticate() self.state = sel
f.STATE_CONNECTED def _authenticate(self): self._transport = SFTPTransport((self.config['sftp_host'], self.config['sftp_port'])) self._transport.connect(username=self.config['sftp_user'], password=self.config['sftp_password']) self.session = SFTPClient.from_transport(self._transport) logging.info('SFTP Authorization succeed') def disconnect(self): self.session.close() self._transport.close() def file_create_folder(self, path): if self.state == self.STATE_CONNECTED: dirlist = path.split('/') current_dirlist = [''] missing_dirlist = [] current_dirlist.extend(dirlist[:]) while len(current_dirlist) > 0: current_path = '/'.join(current_dirlist) try: self.session.chdir(current_path) break except: missing_dirlist.append(current_dirlist.pop()) missing_dirlist.reverse() for dirname in missing_dirlist: dir_contents = self.session.listdir() if not dirname in dir_contents: self.session.mkdir(dirname) logging.info('Create remote directory %s' % self.session.getcwd() + '/' + dirname) self.session.chdir(dirname) elif self.state == self.STATE_DISCONNECTED: raise self.ConnectionException('SFTP is %s' % self.state) else: raise NotImplementedError() def put_file(self, path, file_handle): if self.state == self.STATE_CONNECTED: dirpath = '/'.join(path.split('/')[:-1]) self.file_create_folder(dirpath) try: self.session.putfo(fl=file_handle, remotepath='/' + path) logging.info('Create remote file %s' % '/' + path) except Exception as ex: logging.error(ex) elif self.state == self.STATE_DISCONNECT
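The FTP/SFTP backends above report connection changes through the small Observable/Stateful pair defined at the top of the record. A compact, self-contained sketch of how an observer would hook into that state property (the names LogObserver and DummyBackend are illustrative and not from the repo):

class LogObserver:
    def on_state_update(self, subject):
        # Called whenever the observed state actually changes.
        print('backend is now', subject.state)

class DummyBackend:
    """Stand-in with the same state-notification contract as FileBackend."""
    def __init__(self, observer=()):
        self._observer = list(observer)
        self._state = 'disconnected'

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        if value != self._state:
            self._state = value
            for obs in self._observer:
                obs.on_state_update(self)

backend = DummyBackend(observer=[LogObserver()])
backend.state = 'connected'   # prints: backend is now connected
backend.state = 'connected'   # unchanged, so no notification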
sernst/cauldron
cauldron/test/session/test_session_reloading.py
Python
mit
4,115
0
from datetime import datetime from email.mime import text as mime_text from unittest.mock import MagicMock from unittest.mock import Mock from unittest.mock import patch import cauldron as cd from cauldron.session import reloading from cauldron.test import support from cauldron.test.support import scaffolds from cauldron.test.support.messages import Message class TestSessionReloading(scaffolds.ResultsTest): """Test suite for the reloading module""" def test_watch_bad_argument(self): """Should not reload a module""" self.assertFalse( reloading.refresh(datetime, force=True), Message('Should not reload not a module') ) def test_watch_good_argument(self): """Should reload the specified package/subpackage""" self.assertTrue( reloading.refresh('datetime', force=True), Message('Should reload the datetime module') ) def test_watch_not_needed(self): """Don't reload modules that haven't changed.""" support.create_project(self, 'betty') project = cd.project.get_internal_project() project.current_step = project.steps[0] self.assertFalse( reloading.refresh(mime_text), Message('Expect no reload if the step has not been run before.') ) support.run_command('run') project.current_step = project.steps[0] self.assertFalse( reloading.refresh(mime_text), Message('Expect no reload if module has not changed recently.') ) def test_watch_recursive(self): """Should reload the email module.""" self.assertTrue( reloading.refresh('email', recursive=True, force=True), Message('Expected email module to be reloaded.') ) def test_get_module_name(self): """Should get the module name from the name of its spec.""" target = MagicMock() target.__spec__ = MagicMock() target.__spec__.name = 'hello' self.assertEqual('hello', reloading.get_module_name(target)) def test_get_module_name_alternate(self): """ Should get the module name from its dunder name if the spec name does not exist. """ target = Mock(['__name__']) target.__name__ = 'hello' self.assertEqual('hello', reloading.get_module_name(target)) @patch('cauldron.session.reloading.os.path') @patch('cauldron.session.reloading.importlib.reload') def test_do_reload_error(self, reload: MagicMock, os_path: MagicMock): """Should fail to import the specified module and so return False.""" target = MagicMock() target.__file__ = None target.__path__ = ['fake'] os_path.getmtime.return_value = 10 reload.side_effect = ImportError('FAKE') self.assertFalse(reloading.do_reload(target, 0)) self.assertEqual(1, reload.call_count) @patch('cauldron.session.reloading.os.path') @patch('cauldron.session.reloading.importlib.reload') def test_do_reload(self, reload: MagicMock, os_path: MagicMock): """Should import the specified module and return True.""" target = MagicMock() target.__file__ = 'fake' os_path.getmtime.return_value = 10 self.assertTrue(reloading.do_reload(target
, 0)) self.assertEqual(1, reload.call_count) @patch('cauldron.session.reloading.os.path') @patch('cauldron.session.reloading.importlib.reload') def test_do_reload_skip(self, reload: MagicMock, os_path: MagicMock): """ Should skip reloading the specified module because it hasn't been modified and return False. """ target = MagicMock() target.__file__ = 'fake' os_path.getmtime.return_value
= 0 self.assertFalse(reloading.do_reload(target, 10)) self.assertEqual(0, reload.call_count) def test_reload_children_module(self): """Should abort as False for a module that has no children.""" target = Mock() reloading.reload_children(target, 10)
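The mocked do_reload tests above compare a module file's mtime with the time of the previous run and only then call importlib.reload. A minimal sketch of that decision rule, assuming (as the tests suggest) that the stored timestamp is checked against os.path.getmtime:

import importlib
import os

def maybe_reload(module, last_run_timestamp):
    """Reload module only if its source file changed since the last run."""
    path = getattr(module, '__file__', None)
    if not path:
        return False                   # nothing to watch
    if os.path.getmtime(path) <= last_run_timestamp:
        return False                   # unchanged, skip (cf. test_do_reload_skip)
    try:
        importlib.reload(module)
    except ImportError:
        return False                   # cf. test_do_reload_error
    return True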
Xycl/plugin.image.mypicsdb
resources/lib/googlemaps.py
Python
gpl-2.0
6,409
0.021688
#!/usr/bin/python # -*- coding: utf8 -*- """ Copyright (C) 2012 Xycl This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import xbmc, xbmcgui import common from urllib2 import Request, urlopen from urllib import urlencode from os.path import join,isfile,basename import os from traceback import print_exc LABEL_TEXT = 100 BUTTON_CLOSE = 101 BUTTON_ZOOM_IN = 102 BUTTON_ZOOM_OUT = 103 GOOGLE_MAP = 200 CANCEL_DIALOG = ( 9, 10, 92, 216, 247, 257, 275, 61467, 61448, ) ACTION_SELECT_ITEM = 7 ACTION_MOUSE_START = 100 ACTION_TAB = 18 SELECT_ITEM = (ACTION_SELECT_ITEM, ACTION_MOUSE_START) ACTION_DOWN = [4] ACTION_UP = [3] class GoogleMap( xbmcgui.WindowXMLDialog ): def __init__( self, xml, cwd, default): xbmcgui.WindowXMLDialog.__init__(self) def onInit( self ): self.setup_all('') def onAction( self, action ): # Close if ( action.getId() in CANCEL_DIALOG or self.getFocusId() == BUTTON_CLOSE and action.getId() in SELECT_ITEM ): self.close() # Zoom in elif ( action.getId() in SELECT_ITEM and self.getFocusId() == BUTTON_ZOOM_IN or action in ACTION_UP): self.zoom('+') # Zoom out elif ( action.getId() in SELECT_ITEM and self.getFocusId() == BUTTON_ZOOM_OUT or action in ACTION_DOWN): self.zoom('-') def set_file(self, filename): self.filename = filename def set_place(self, place): self.place = place def set_datapath(self, datapath): self.datapath = datapath def set_pic(self, pic): pass def set_map(self, mapfile): self.getControl( GOOGLE_MAP ).setImage(mapfile) def setup_all( self, filtersettings = ""): self.getControl( LABEL_TEXT ).setLabel( common.getstring(30220) ) self.getControl( BUTTON_CLOSE ).setLabel( common.getstring(30224) ) self.getControl( BUTTON_ZOOM_IN ).setLabel( common.getstring(30225) ) self.getControl( BUTTON_ZOOM_OUT ).setLabel( common.getstring(30226) ) self.zoomlevel = 15 self.zoom_max = 21 self.zoom_min = 0 self.load_map() def zoom(self,way,step=1): if way=="+": self.zoomlevel = self.zoomlevel + step elif way=="-": self.zoomlevel = self.zoomlevel - step else: self.zoomlevel = step if self.zoomlevel > self.zoom_max: self.zoomlevel = self.zoom_max elif self.zoomlevel < self.zoom_min: self.zoomlevel = self.zoom_min self.load_map() def load_map(self): #google geolocalisation static_url = "http://maps.google.com/maps/api/staticmap?" param_dic = {#location parameters (http://gmaps-samples.googlecode.com/svn/trunk/geocoder/singlegeocode.html) "center":"", #(required if markers not present) "zoom":self.zoomlevel, # 0 to 21+ (req if no markers #map parameters "size":"640x640", #widthxheight (required) "format":"jpg", #"png8","png","png32","gif","jpg","jpg-baseline" (opt) "maptype":"hybrid", #"roadmap","satellite","hybrid","terrain" (opt) "language":"", #Feature Parameters: "markers" :"color:red|label:P|%s",#(opt) #markers=color:red|label:P|lyon|12%20rue%20madiraa|marseille|Lille #&markers=color:blue|label:P|Australie "path" : "", #(opt) "visible" : "", #(opt) #Reporting Parameters: "sensor" : "false" #is there a gps on system ? 
(req) } param_dic["markers"]=param_dic["markers"]%self.place request_headers = { 'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; fr; rv:1.9.2.10) Gecko/20100914 Firefox/3.6.10' } request = Request(static_url+urlencode(param_dic), None, request_headers) try: urlfile = urlopen(request) except: dialog = xbmcgui.Dialog() dialog.ok('XBMC Network Error', 'Google maps is not reachable') self.close() return extension = urlfile.info().getheader("Content-Type","").split("/")[1] filesize = int(urlfile.info().getheader("Content-Length","")) mappath = xbmc.translatePath(self.datapath) mapfile = join(self.datapath,basename(self.filename).split(".")[0]+"_maps%s."%self.zoomlevel+extension) mapfile = xbmc.translatePath(mapfile) # test existence of path if not os.path.exists(mappath): os.makedirs(mappath) label = self.getControl( LABEL_TEXT ) if not isfile(mapfile): #mapfile is not downloaded yet
, download it now... try: #f=open(unicode(mapfile, 'utf-8'),"wb") f=open(common.smart_unicode(mapfile), "wb") exc
ept: try: f=open(common.smart_utf8(mapfile), "wb") except: print_exc() #print "GEO Exception: "+mapfile for i in range(1+(filesize/10)): f.write(urlfile.read(10)) label.setLabel(common.getstring(30221)%(100*(float(i*10)/filesize)))#getting map... (%0.2f%%) urlfile.close() #pDialog.close() try: f.close() except: print_exc() self.set_pic(self.filename) self.set_map(mapfile) label.setLabel(common.getstring(30222)%int(100*(float(self.zoomlevel)/self.zoom_max)))#Zoom level %s
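load_map above builds a Google Static Maps request by url-encoding a parameter dict and appending it to the static map endpoint. The same construction in Python 3 syntax ('Lyon' is an illustrative marker value; the record is 2012-era Python 2, and newer versions of the API also expect a key parameter not shown here):

from urllib.parse import urlencode

STATIC_URL = 'http://maps.google.com/maps/api/staticmap?'
params = {
    'center': '',
    'zoom': 15,
    'size': '640x640',
    'format': 'jpg',
    'maptype': 'hybrid',
    'markers': 'color:red|label:P|%s' % 'Lyon',   # place name is illustrative
    'sensor': 'false',
}
print(STATIC_URL + urlencode(params))   # urlencode percent-escapes the '|' separators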
buztard/mxpy
examples/test-item-view.py
Python
lgpl-2.1
1,682
0.002973
from random import randint import gobject import clutter import mxpy as mx sort_set = False filter_set = False def sort_func(model, a, b, data): return int(a.to_hls()[0] - b.to_hls()[0]) def filter_func(model, iter, data): color = iter.get(0)[0] h = color.to_hls()[0] return (h > 90 and h < 180) def key_release_cb(stage, event, model): from clutter import keysyms global sort_set, filter_set if event.keyval == keysyms.s: if not sort_set: model.set_sort(0, sort_func, None) else: model.set_sort(-1, None, None) sort_set = not sort_set elif event.keyval == keysyms.f: if not filter_set: model.set_filter(filter_func) else: model.set_filter(None, None) filter_set = not filter_set if __name__ == '__main__': stage = clutter.Stage() stage.connect('destroy', clutter.main_quit) stage.set_color((255, 255, 255, 255)) stage.set_size(320, 240) color = clutter.Color(0x0, 0xf, 0xf, 0xf) scroll = mx.ScrollView() scroll.set_size(*stage.get_size()) stage.add(scroll) view = mx.ItemView() scroll.add(v
iew) model = clutter.ListModel(clutter.Col
or, "color", float, "size") for i in range(360): color = clutter.color_from_hls(randint(0, 255), 0.6, 0.6) color.alpha = 0xff model.append(0, color, 1, 32.0) view.set_model(model) view.set_item_type(clutter.Rectangle) view.add_attribute("color", 0) view.add_attribute("width", 1) view.add_attribute("height", 1) stage.connect('key-release-event', key_release_cb, model) stage.show() clutter.main()
ck1125/sikuli
sikuli-script/src/main/python/sikuli/VDict.py
Python
mit
3,120
0.030128
# Copyright 2010-2011, Sikuli.org # Released under the MIT License. from org.sikuli.script import VDictProxy import java.io.File ## # VDict implements a visual dictionary that has Python's conventional dict # interfaces. # # A visual dictionary is a data type for storing key-value pairs using # images as keys. Using a visual dictionary, a user can easily automate # the tasks of saving and retrieving arbitrary data objects by images. # The syntax of the visual dictionary data type is modeled after that of # the built-in Python dictionary data type. class VDict(VDictProxy): ## # the default similarity for fuzzy matching. The range of this is from # 0 to 1.0, where 0 matches everything and 1.0 does exactly matching. # <br/> # The default similarity is 0.7. _DEFAULT_SIMILARITY = 0.7 _DEFAULT_GET_ITEM_N = 0 ## # Constructs a new visual dictionary with the same mapping as the given dict. # def __init__(self, dict=None): self._keys = {} if dict: for k in dict.keys(): self[k] = dict[k] ## # Returns the number of keys in this visual dictionary. # def __len__(self): return self.size() ## # Maps the specified key to the specified item in this visual dictionary. # def __setitem__(self, key, item): self.insert(key, item) self._keys[key] = item ## # Tests if the specified object looks like a key in this visual dictionary # with the default similarity. # def __contains__(self, key): return len(self.get(key)) > 0 ## # Returns all values to which the specified key is fuzzily matched in # this visual dictionary with the default similarity. # <br/> # This is a wrapper for the {@link #VDict.get get} method. def __getitem__(self, key): return self.get(key) ## # Deletes the key and its corresponding value from this visual dictionary. # def __del
item__(self, key): self.erase(key) del self._keys[key] ## # Returns a list of the keys in this visual dictionary. # def keys(self): return self._keys.keys() ## # Returns the value to which the specified key is exactly matched in # this visual dictionary. # def get_exact(self, key): if key == None: return None return self.lookup(key) ## # Returns the values to which the specified key is fuzzily matched in # this visual di
ctionary with the given similarity and the given maximum # number of return items. # @param similarity the similarity for matching. # @param n maximum number of return items. # def get(self, key, similarity=_DEFAULT_SIMILARITY, n=_DEFAULT_GET_ITEM_N): if key == None: return None return self.lookup_similar_n(key, similarity, n) ## # Returns the value to which the specified key is best matched in # this visual dictionary with the given similarity. # @param similarity the similarity for matching. # def get1(self, key, similarity=_DEFAULT_SIMILARITY): if key == None: return None return self.lookup_similar(key, similarity)
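VDict above behaves like a dict keyed by screenshots. A short usage sketch based only on the methods defined in the record; it runs inside Sikuli's Jython environment where VDictProxy is on the classpath, not as stand-alone CPython, and 'button.png' plus the stored string are placeholders:

# Illustrative only: requires the Sikuli runtime described in the record.
vd = VDict()
vd['button.png'] = 'launch the app'      # insert, keyed by an image file
if 'button.png' in vd:                   # fuzzy lookup at the default 0.7 similarity
    values = vd.get('button.png', similarity=0.9)   # stricter fuzzy match
    exact = vd.get_exact('button.png')               # exact match only
del vd['button.png']                     # erase key and value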
bd-j/hmc
hmc.py
Python
gpl-2.0
15,392
0.001559
import numpy as np import matplotlib.pyplot as pl class BasicHMC(object): def __init__(self, model=None, verbose=True): """A basic HMC sampling object. :params model: An object with the following methods: * lnprob(theta) * lnprob_grad(theta) * (optional) check_constrained :params verbose: bool, print lots of junk? """ self.verbose = verbose self.model = model self.has_bounds = hasattr(self.model, 'check_constrained') self.set_mass_matrix() def lnprob(self, theta): return self.model.lnprob(theta) def lnprob_grad(self, theta): return self.model.lnprob_grad(theta) def sample(self, initial, iterations=1, epsilon=None, mass_matrix=None, length=10, sigma_length=0.0, store_trajectories=False): """Sample for `iterations` trajectories (i.e., compute that many trajectories, resampling the momenta at the end of each trajectory). :params initial: The initial position from which to start the sampling. ndarray of shape (`ndim`,) :param iterations: The number of trajectories to compute. Integer. :param epsilon: (optional, default: N
one) The stepsize for the leapfrog integrator. Scalar float or ndarray of shape (ndim,). If `None`, a scalar value will be crudely estimated. :param mass_matrix: (optional, default: None) "Masses" in each dimension used to rescale the momentum vectors in the HMC trajectories. Ideally this would be the inverse of the covariance matrix of the posteri
or PDF. If `None` all masses will be assumed 1. Otherwise can be ndarray of shape (ndim,) for a diagonal covariance matrix or (ndim, ndim), in which case it must be positive semi-definite. :param length: Number of leapfrog steps to take in each trajectory. Integer. :param sigma_length: (optional, default: 0.0) The dispersion in the length of each trajectory. If greater than zero, the length of each trajectory will be drawn from a gaussian with mean `length` and dispersion `sigma_length` :param store_trajectories: If `True`, store not just the endpoints of each trajectory but the steps along each trajectory in a `trajectories` attribute. """ self.ndim = len(initial) self.store_trajectories = store_trajectories # set some initial values self.set_mass_matrix(mass_matrix) if epsilon is None: epsilon = self.find_reasonable_stepsize(initial.copy()) print('using epsilon = {0}'.format(epsilon)) self.mu = np.log(10 * epsilon) # set up the output self.reset() self.chain = np.zeros([iterations, self.ndim]) self.lnp = np.zeros([iterations]) self.accepted = np.zeros([iterations]) if self.store_trajectories: self.trajectories = [] theta = initial.copy() self.traj_num = 0 # loop over trajectories lnp, grad = None, None # initial P and lnP are unknown for i in xrange(int(iterations)): ll = int(np.clip(np.round(np.random.normal(length, sigma_length)), 2, np.inf)) if self.verbose: print('eps={:3.8f}, L={:5.0f}'.format(epsilon, ll)) info = self.trajectory(theta, epsilon, ll, lnP0=lnp, grad0=grad) theta, lnp, grad, accepted = info self.lnp[i] = lnp self.chain[i, :] = theta self.accepted[i] = accepted self.traj_num += 1 return theta, lnp, grad def trajectory(self, theta0, epsilon, length, lnP0=None, grad0=None): """Compute one trajectory for a given starting location, epsilon, and length. The momenta in each direction are drawn from a gaussian before performing 'length' leapfrog steps. If the trajectories attribute exists, store the path of the trajectory. :param theta0: Starting position, ndarray of shape (ndim,) :param epsilon: Stepsize(s) to use for this trajectory. scalar float or ndarray of shape (ndim,) :param length: The length of this trajectory, integer. :param lnP0: optional The lnprob value of the initial position (can be used to save a call to lnprob) :param grad0: optional The gradients of the lnprob function at `theta0`, ndarray of shape (ndim,) :returns theta: The final position vector, which if the trajectory was not accepted will be equal to the initial position. ndarray of shape (ndim,) :returns lnP: The ln-probability at the final position, float. :returns grad: The gradient of the ln-probability at the final position, ndarray of shape (ndim,) :returns accepted: Whether the trajectory was accepted (1.0) or not (0.0) """ if self.store_trajectories: self.trajectories.append(np.zeros([length, self.ndim])) # --- Set up for the run ---- # save initial position theta = theta0.copy() # random initial momenta p0 = self.draw_momentum() # gradient in U at initial position, negative of gradient lnP if grad0 is None: grad0 = -self.lnprob_grad(theta0) if lnP0 is None: lnP0 = self.lnprob(theta0) # use copies of initial momenta and gradient p, grad = p0.copy(), grad0.copy() # --- Compute Trajectory --- # do 'length' leapfrog steps along the trajectory (and store?) 
for step in xrange(int(length)): theta, p, grad = self.leapfrog(theta, p, epsilon, grad, check_oob=self.has_bounds) if self.store_trajectories: self.trajectories[-1][step, :] = theta # ---- Accept/Reject --- # Odds ratio of the proposed move lnP = self.lnprob(theta) # change in potential = negative change in lnP dU = lnP0 - lnP # change in kinetic dK = self.kinetic_energy(p) - self.kinetic_energy(p0) # acceptance criterion alpha = np.exp(-dU - dK) if self.verbose: print('H={0}, dU={1}, dK={2}'.format(alpha, dU, dK)) # Accept or reject if np.random.uniform(0, 1) < alpha: accepted = 1.0 return theta, lnP, grad, accepted else: accepted = 0.0 return theta0, lnP0, grad0, accepted def leapfrog(self, q, p, epsilon, grad, check_oob=False): """Perform one leapfrog step, updating the momentum and position vectors. This uses one call to the model.lnprob_grad() function, which must be defined. It also performs an optional check on the value of the new position to make sure it satisfies any parameter constraints, for which the check_constrained method of model is called. """ # half step in p p -= 0.5 * epsilon * grad # full step in theta q += epsilon * self.velocity(p) # check for constraints on theta while check_oob: q, sign, check_oob = self.model.check_constrained(q) p *= sign # flip the momentum if necessary # compute new gradient in U, which is negative of gradient in lnP grad = -self.lnprob_grad(q) # another half step in p p -= 0.5 * epsilon * grad return q, p, grad def draw_momentum(self): if self.ndim_mass == 0: p = np.random.normal(0, 1, self.ndim) elif self.ndim_mass == 1: p = np.random.normal(0, np.sqrt(self.mass_matrix)) else: p = np.random.multivariate_normal(np.zeros(self.ndim), self.mass_matrix) return p def velocity(self, p): """Get the velocities given
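BasicHMC above only requires the model object to expose lnprob(theta) and lnprob_grad(theta), plus an optional check_constrained. A self-contained example of such a model for a unit Gaussian target; the commented call mirrors the sample() signature documented in the record but is hypothetical and not executed here:

import numpy as np

class GaussianModel:
    """Standard normal target: log-density and its gradient."""
    def lnprob(self, theta):
        return -0.5 * np.dot(theta, theta)

    def lnprob_grad(self, theta):
        return -theta

# Hypothetical usage against the BasicHMC class above:
# hmc = BasicHMC(model=GaussianModel(), verbose=False)
# theta, lnp, grad = hmc.sample(np.zeros(3), iterations=500,
#                               epsilon=0.2, length=20)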
fcecin/infinitum
contrib/devtools/symbol-check.py
Python
mit
6,197
0.011457
#!/usr/bin/python2 # Copyright (c) 2014 Wladimir J. van der Laan # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' A script to check that the (Linux) executables produced by gitian only contain allowed gcc, glibc and libstdc++ version symbols. This makes sure they are still compatible with the minimum supported Linux distribution versions. Example usage: find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py ''' from __future__ import division, print_function, unicode_literals import subprocess import re import sys import os # Debian 6.0.9 (Squeeze) has: # # - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B) # - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6) # - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6) # # Ubuntu 10.04.4 (Lucid Lynx) has: # # - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all) # - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all) # - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names) # # Taking the minimum of these as our target. # # According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to: # GCC 4.4.0: GCC_4.4.0 # GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3 # (glibc) GLIBC_2_11 # MAX_VERSIONS = { 'GCC': (4,4,0), 'CXXABI': (1,3,3), 'GLIBCXX': (3,4,13), 'GLIBC': (2,11) } # See here for a description of _IO_stdin_used: # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109 # Ignore symbols that are exported as part of every executable IGNORE_EXPORTS = { b'_edata', b'_end', b'_init', b'__bss_start', b'_fini', b'_IO_stdin_used' } READELF_CMD = os.getenv('READELF', '/usr/bin/readelf') CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt') # Allowed NEEDED libraries ALLOWED_LIBRARIES = { # infinitumd and infinitum-qt b'libgcc_s.so.1', # GCC base support b'libc.so.6', # C library b'libpthread.so.0', # threading b'libanl.so.1', # DNS resolve b'libm.so.6', # math library b'librt.so.1', # real-time (clock) b'ld-linux-x86-64.so.2', # 64-bit dynamic linker b'ld-linux.so.2', # 32-bit dynamic linker # infinitum-qt only b'libX11-xcb.so.1', # part of X11 b'libX11.so.6', # part of X11 b'libxcb.so.1', # part of X11 b'libfontconfig.so.1', # font support b'libfreetype.so.6', # font parsing b'libdl.so.2' # programming interface to dynamic linker } class CPPFilt(object): ''' Demangle C++ symbol names. Use a pipe to the 'c++filt' command. ''' def __init__(self): self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subpro
cess.PIPE) def __call__(self, mangled): self.proc.stdin.write(mangled + b'\n') self.proc.stdin.flush() return self.proc.stdout.readline().rstrip() def close(self): self.proc.stdin.close() self.proc.stdout.close() self.proc.wait() def read_symbols(executabl
e, imports=True): ''' Parse an ELF executable and return a list of (symbol,version) tuples for dynamic, imported symbols. ''' p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip())) syms = [] for line in stdout.split(b'\n'): line = line.split() if len(line)>7 and re.match(b'[0-9]+:$', line[0]): (sym, _, version) = line[7].partition(b'@') is_import = line[6] == b'UND' if version.startswith(b'@'): version = version[1:] if is_import == imports: syms.append((sym, version)) return syms def check_version(max_versions, version): if b'_' in version: (lib, _, ver) = version.rpartition(b'_') else: lib = version ver = '0' ver = tuple([int(x) for x in ver.split(b'.')]) if not lib in max_versions: return False return ver <= max_versions[lib] def read_libraries(filename): p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) (stdout, stderr) = p.communicate() if p.returncode: raise IOError('Error opening file') libraries = [] for line in stdout.split(b'\n'): tokens = line.split() if len(tokens)>2 and tokens[1] == b'(NEEDED)': match = re.match(b'^Shared library: \[(.*)\]$', b' '.join(tokens[2:])) if match: libraries.append(match.group(1)) else: raise ValueError('Unparseable (NEEDED) specification') return libraries if __name__ == '__main__': cppfilt = CPPFilt() retval = 0 for filename in sys.argv[1:]: # Check imported symbols for sym,version in read_symbols(filename, True): if version and not check_version(MAX_VERSIONS, version): print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym).decode('utf-8'), version.decode('utf-8'))) retval = 1 # Check exported symbols for sym,version in read_symbols(filename, False): if sym in IGNORE_EXPORTS: continue print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym).decode('utf-8'))) retval = 1 # Check dependency libraries for library_name in read_libraries(filename): if library_name not in ALLOWED_LIBRARIES: print('%s: NEEDED library %s is not allowed' % (filename, library_name.decode('utf-8'))) retval = 1 exit(retval)
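check_version above splits a tag such as GLIBC_2.11 on its last underscore and compares the dotted numeric part, as a tuple, against MAX_VERSIONS. A Python 3 restatement of that comparison with a few worked cases (the record itself operates on Python 2 byte strings):

MAX_VERSIONS = {'GCC': (4, 4, 0), 'CXXABI': (1, 3, 3),
                'GLIBCXX': (3, 4, 13), 'GLIBC': (2, 11)}

def check_version(max_versions, version):
    if '_' in version:
        lib, _, ver = version.rpartition('_')
    else:
        lib, ver = version, '0'
    parts = tuple(int(x) for x in ver.split('.'))
    return lib in max_versions and parts <= max_versions[lib]

print(check_version(MAX_VERSIONS, 'GLIBC_2.11'))     # True  -> allowed
print(check_version(MAX_VERSIONS, 'GLIBC_2.14'))     # False -> too new
print(check_version(MAX_VERSIONS, 'GLIBCXX_3.4.9'))  # True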
jaseg/python-prompt-toolkit
prompt_toolkit/keys.py
Python
bsd-3-clause
2,546
0.007855
from __future__ import unicode_literals __all__ = ( 'Key', 'Keys', ) class Key(object): def __init__(self, name): #: Descriptive way of writing keys in configuration files. e.g. <C-A> #: for ``Control-A``. self.name = name def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.name) class Keys(object): Escape = Key('<Escape>') ControlA = Key('<C-A>') ControlB = Key('<C-B>') ControlC = Key('<C-C>') ControlD = Key('<C-D>') ControlE = Key('<C-E>') ControlF = Key('<C-F>') ControlG = Key('<C-G>') ControlH = Key('<C-H>') ControlI = Key('<C-I>') # Tab ControlJ = Key('<C-J>') # Enter ControlK = Key('<C-K>') ControlL = Key('<C-L>') ControlM = Key('<C-M>') # Enter ControlN = Key('<C-N>') ControlO = Key('<C-O>') ControlP = Key('<C-P>') ControlQ = Key('<C-Q>') ControlR = Key('<C-R>') Control
S = Key('<C-S>') ControlT = Key('<C-T>') ControlU = Key('<C-U>') ControlV = Key('<C-V>') ControlW = Key('<C-W>') ControlX = Key('<C-X>') ControlY = Key('<C-Y>') ControlZ = Key('<C-Z>'
) ControlSpace = Key('<C-Space>') ControlBackslash = Key('<C-Backslash>') ControlSquareClose = Key('<C-SquareClose>') ControlCircumflex = Key('<C-Circumflex>') ControlUnderscore = Key('<C-Underscore>') ControlLeft = Key('<C-Left>') ControlRight = Key('<C-Right>') ControlUp = Key('<C-Up>') ControlDown = Key('<C-Down>') Up = Key('<Up>') Down = Key('<Down>') Right = Key('<Right>') Left = Key('<Left>') Home = Key('<Home>') End = Key('<End>') Delete = Key('<Delete>') ShiftDelete = Key('<ShiftDelete>') PageUp = Key('<PageUp>') PageDown = Key('<PageDown>') BackTab = Key('<BackTab>') # shift + tab Tab = ControlI Backspace = ControlH F1 = Key('<F1>') F2 = Key('<F2>') F3 = Key('<F3>') F4 = Key('<F4>') F5 = Key('<F5>') F6 = Key('<F6>') F7 = Key('<F7>') F8 = Key('<F8>') F9 = Key('<F9>') F10 = Key('<F10>') F11 = Key('<F11>') F12 = Key('<F12>') F13 = Key('<F13>') F14 = Key('<F14>') F15 = Key('<F15>') F16 = Key('<F16>') F17 = Key('<F17>') F18 = Key('<F18>') F19 = Key('<F19>') F20 = Key('<F20>') # Matches any key. Any = Key('<Any>') # Special CPRResponse = Key('<Cursor-Position-Response>')
meshy/django-conman
tests/routes/test_urls.py
Python
bsd-2-clause
1,649
0.000606
from django.core.urlresolvers import resolve, Resolver404 from django.test import TestCase from conman.routes import views class RouteRouterViewTest(TestCase): """Test the route_router view.""" def assert_url_uses_router(self, url): """Check a url resolves to the route_router view.""" resolved_view = resolve(url) self.assertEqual(resolved_view.func, views.route_router) def test_blank_url(self): """Blank urls should not resolve. This is actually a test of django, as urls must start with `/`. """ with self.assertRaises(Resolver404): self.assert_url_uses_router('') def test_double_slash_url(self): """Trailing slashes should trail something.""" with self.assertRaises(Resolver404): self.assert_url_uses_router('//') def test_root_url(self): """The root url is resolved using views.route_router.""" self.assert_url_uses_router('/') def t
est_child_url(self): """A child url is resolved using views.route_router.""" self.assert_url_uses_router('/slug/') def test_nested_child_url(sel
f): """A nested child url is resolved using views.route_router.""" self.assert_url_uses_router('/foo/bar/') def test_numerical_url(self): """A numeric url is resolved using views.route_router.""" self.assert_url_uses_router('/meanings/42/') def test_without_trailing_slash(self): """A url without a trailing slash is not resolved by views.route_router.""" with self.assertRaises(Resolver404): self.assert_url_uses_router('/fail')
levithomason/neo
apps/neo_graph_test/urls.py
Python
mit
388
0.002577
from django.conf.urls import patterns, include, url from django.contrib import admin admin.autodiscover() urlpatterns = patterns('',
url(r'^', 'apps.neo_graph_test.views.create_graph', name='create_graph'), url(r'^', include('apps.citizens.urls')), url(r'^admin/',
include(admin.site.urls)), )
utopianf/maobot_php
imgdl.py
Python
mit
7,849
0.010957
#!/usr/bin/python3 # -*- coding: utf-8 -*- import sys import re import os from urllib.request import urlretrieve from urllib.request import urlopen from urllib.request import build_opener, HTTPCookieProcessor from urllib.parse import urlencode, quote from http.cookiejar import CookieJar from configparser import SafeConfigParser from imghdr import what from bs4 import BeautifulSoup from PIL import Image import pymysql from subprocess import Popen, PIPE from mvdl import * from pixivpy3 import * dlDir = "./images/" dlDir_mov = "./mov/" thumbDir = "./images/thumbnail/" thumb_lDir = "./images/thumbnail_l/" def thumbnail(input_file, output_file): size = 150 img = Image.open(input_file) w,h = img.size l,t,r,b = 0,0,size,size new_w, new_h = size,size if w>=h: new_w = size * w // h l = (new_w - size) // 2 r = new_w - l else: new_h = size * h // w t = (new_h - size) // 2 b = new_h - t thu = img.resize((new_w, new_h), Image.ANTIALIAS) thu = thu.crop((l,t,r,b)) thu.save(thumbDir + output_file, quality=100, optimize=True) thu = img.resize((w*300//h, 300), Image.ANTIALIAS) thu.save(thumb_lDir + output_file, quality=100, optimize=True) def regImg(loc, orig, thum, type, mov=0): nick = "" channel = "" if len(sys.argv) == 4: nick = os.fsencode(sys.argv[2]).decode('utf-8') channel = os.fsencode(sys.argv[3]).decode('utf-8') conn = pymysql.connect(host='127.0.0.1',user='maobot', passwd='msc3824',db='maobot',charset='utf8') cur = conn.cursor() if mov == 0: statement = "INSERT INTO images (user,channel,loc,orig,thum,type) VALUES(%s, %s, %s, %s, %s, %s)" elif mov == 1: statement = "INSERT INTO movies (user,channel,loc,orig,thum,type) VALUES(%s, %s, %s, %s, %s, %s)" data = (nick, channel, loc, orig, thum, type) cur.execute(statement, data) cur.connection.commit() cur.close() conn.close() def readConfig(): config = SafeConfigParser() if os.path.exists('imgdl.ini'): config.read('imgdl.ini') else: print("No Configuration File.") sys.exit(2) try: nicouser = config.get('nicoseiga.jp', 'user') nicopass = config.get('nicoseiga.jp', 'pass') except Exception as e: return "error: could not read nico configuration." + e try: pixiuser = config.get('pixiv.net', 'user') pixipass = config.get('pixiv.net', 'pass') except Exception as e: return "error: could not read pixiv configuration." 
+ e return nicouser, nicopass, pixiuser, pixipass def main(): orig_url = sys.argv[1] html = urlopen(orig_url) nicouser, nicopass, pixiuser, pixipass = readConfig() bsObj = BeautifulSoup(html, "lxml") twi = re.compile('https:\/\/twitter.com\/[a-zA-Z0-9_]+\/status\/\d+') nic = re.compile('http:\/\/seiga.nicovideo.jp\/seiga\/[a-zA-Z0-9]+') pix1 = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?mode=medium\&illust_id=[0-9]+') pix2 = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?illust_id=[0-9]+\&mode=medium') pix_ = re.compile('https?:\/\/www.pixiv.net\/member_illust.php\?mode=manga_big\&illust_id=[0-9]+\&page=[0-9]+') nico_mov = re.compile('https?:\/\/www.nicovideo.jp\/watch\/[a-zA-Z0-9]+') yout_mov = re.compile('https:\/\/www.youtube.com\/watch\?v=[a-zA-Z0-9]+') image_format = ["jpg", "jpeg", "gif", "png"] if twi.match(orig_url): images = bsObj.find("div", {"class": "permalink-tweet-container"}).find("div", {"class": "AdaptiveMedia-container"}).findAll("div", {"class": "AdaptiveMedia-photoContainer"}) for item in images: imageLoc = item.find("img")["src"] urlretrieve(imageLoc , dlDir + "twi" + imageLoc[28:]) loc = dlDir+"twi"+imageLoc[28:] thumb = "thumb_twi" + imageLoc[28:] type = what(loc) thumbnail(loc, thumb) regImg(loc, orig_url, "./images/thumbnail/"+thumb, type) print(thumb_lDir+thumb) elif nic.match(orig_url): opener = build_opener(HTTPCookieProcessor(CookieJar())) post = { 'mail_tel': nicouser, 'password': nicopass } data = urlencode(post).encode("utf_8") response = opener.open('https://secure.nicovideo.jp/secure/login', data) response.close() image_id = orig_url[34:] with opener.open('http://seiga.nicovideo.jp/image/source?id=' + image_id) as response: bsObj = BeautifulSoup(response) imageLoc = bsObj.find("div", {"class": "illust_view_big"}).find("img")["src"] dlLoc = dlDir + "nic" + image_id urlretrieve('http://lohas.nicoseiga.jp' + imageLoc, dlLoc) type = what(dlLoc) loc = dlLoc + "." + type os.rename(dlLoc, loc) thumb = "thumb_nico"+image_id+"."+type print(thumb_lDir+thumb) thumbnail(loc, thumb) regImg(loc, orig_url, "./images/thumbnail/"+thumb, type) elif pix1.match(orig_url) or pix2.match(orig_url): imageLocs = [] image_id = re.search('\d+', orig_url).group() api = AppPixivAPI() api.login(pixiuser, pixipass) json_result = api.illust_detail(image_id, req_auth=True) illust = json_result.illust if "original" in illust.image_urls: imageLocs.append(illust.image_urls.original) elif "meta_pages" in illust and len(illust.meta_pages)!=0: for i in illust.meta_pages: imageLocs.append(i.image_urls.original) elif "meta_single_page" in illust: imageLocs.append(illust.meta_single_page.original_image_url) # print(imageLocs) for imageLoc in imageLocs: api.download(imageLoc, path=dlDir, name="pix" + imageLoc.split("/")[-1]) loc = dlDir + "pix" + imageLoc.split("/")[-1] type = what(loc) thumb
= "thumb_pix"+imageLoc.split("/")[-1] thumbnail(loc, thumb) regImg(loc, orig_url, "./images/thumbnail/"+thumb, type) print(thumb_lDir+thumb) elif pix_.match(orig_url): imageLocs = [] reg = re.compile("https?:\/\/www.pixiv.net\/member_illust.php\?mode=manga_big\&illust_id=(\d+)\&page=(\d+)") im
age_id = int(reg.match(orig_url).group(1)) page = int(reg.match(orig_url).group(2)) api = AppPixivAPI() api.login(pixiuser, pixipass) json_result = api.illust_detail(image_id, req_auth=True) imageLocs.append(json_result.illust.meta_pages[page].image_urls.original) for imageLoc in imageLocs: api.download(imageLoc, path=dlDir, name="pix" + imageLoc.split("/")[-1]) loc = dlDir + "pix" + imageLoc.split("/")[-1] type = what(loc) thumb = "thumb_pix"+imageLoc.split("/")[-1] thumbnail(loc, thumb) regImg(loc, orig_url, "./images/thumbnail/"+thumb, type) print(thumb_lDir+thumb) elif nico_mov.match(orig_url): proc = Popen(["./mvdl.py", orig_url], stdout=PIPE, stderr=PIPE) retcode = proc.poll() elif orig_url.split(".")[-1] in image_format: filename = "_".join(quote(orig_url).split("/")[-2:]) if len(filename) > 10: from datetime import datetime filename = datetime.now().strftime('%s') + filename[-10:] loc = dlDir + filename thumb = "thumb_"+filename urlretrieve(orig_url , loc) type = what(loc) if type == None: type = orig_url.split(".")[-1] thumbnail(loc, thumb) print(thumb_lDir+thumb) regImg(loc, orig_url, "./images/thumbnail/"+thumb, type) if __name__ == '__main__' : main()
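thumbnail() above scales the shorter edge of an image to 150 px and centre-crops the longer edge to a square, then writes a second 300 px-tall preview. A trimmed restatement of just the square-crop arithmetic (file names are placeholders; Image.LANCZOS is used here because recent Pillow releases dropped the Image.ANTIALIAS alias the record relies on):

from PIL import Image

def square_thumbnail(src, dst, size=150):
    img = Image.open(src)
    w, h = img.size
    if w >= h:                        # landscape: fit height, crop width
        new_w, new_h = size * w // h, size
        left = (new_w - size) // 2
        box = (left, 0, left + size, size)
    else:                             # portrait: fit width, crop height
        new_w, new_h = size, size * h // w
        top = (new_h - size) // 2
        box = (0, top, size, top + size)
    img.resize((new_w, new_h), Image.LANCZOS).crop(box).save(dst, quality=95)

# square_thumbnail('input.jpg', 'thumb.jpg')   # placeholder paths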
szecsi/Gears
GearsPy/Project/Components/Composition/Min.py
Python
gpl-2.0
828
0.03744
import Gears as gears from .. import * from ..Pif.Base import * class Min(Base) : def applyWithArgs( self, spass, functionName, *, pif1 : 'First operand. (Pif.*)' = Pif.Solid( color = 'white' ), pif2 : 'Second operand. (Pif.*)' = Pif.Solid( color = 'white' ) ) : stimulus = spass.getStimulus() pif1.apply(spass, functionName + '_op1') pif2.apply(spass, functionName +
'_op2') spass.setShaderFunction( name = functionName, src = self.glslEsc( ''' vec3 @<pattern>@ (vec2 x, float time){
return min( @<pattern>@_op1(x), @<pattern>@_op2(x) ); } ''').format( pattern=functionName ) )
igemsoftware2017/USTC-Software-2017
tests/core/plugins/bad_plugin/apps.py
Python
gpl-3.0
268
0
from biohub.core.plugins import PluginConfig class BadPluginConfig
(PluginConfig): name = 'tests.core.plugins.bad_plugin' title = 'My Plugin' author = 'hsfzxjy' description = 'This is my plugin.' def ready(self): raise ZeroDivi
sionError
jeff-alves/Tera
game/message/unused/S_PARTY_MEMBER_INTERVAL_POS_UPDATE.py
Python
mit
246
0.012195
from util.tipo
import tipo class S_PART
Y_MEMBER_INTERVAL_POS_UPDATE(object): def __init__(self, tracker, time, direction, opcode, data): print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
ACBL-Bridge/Bridge-Application
Home Files/LoginandSignupV10.py
Python
mit
17,362
0.013708
from tkinter import * import mysql.connector as mysql from MySQLdb import dbConnect from HomeOOP import * import datetime from PIL import Image, ImageTk class MainMenu(Frame): def __init__(self, parent): #The very first screen of the web app Frame.__init__(self, parent) w, h = parent.winfo_screenwidth(), parent.winfo_screenheight() #parent.overrideredirect(1) parent.geometry("%dx%d+0+0" % (w, h)) frame = Frame(parent, width=w, height=h).place(x=350, y=450) # frame.pack(expand=True) # canvas = Canvas(parent, width=w, height=h) # scale_width = w / 3900 # scale_height = h / 2613 web = "https://raw.githubusercontent.com/ACBL-Bridge/Bridge-Application/master/Login/" URL = "login_background_resized.jpg" u = urlopen(web + URL) raw_data = u.read() u.close() im = Image.open(BytesIO(raw_data)) bckgrd = ImageTk.PhotoImage(im) login_bckgrd = Label(frame, image=bckgrd) login_bckgrd.image = bckgrd login_bckgrd.place(x=0, y=0, relwidth=1, relheight=1) titleLabel = Label(frame, text="LET'S PLAY BRIDGE", fg="black", font='Arial 36') titleLabel.pack(side="top", pady=150) loginButton = Button(frame, text="Existing User", fg="blue", font="Arial 14", command=lambda: self.LoginScreen(parent)) loginButton.pack(side='top') signupButton = Button(frame, text="Sign up", fg="blue", font="Arial 14", command=self.SignupScreen) signupButton.pack(side="top") quitButton = Button(frame, text="Quit", font="Arial 14", command=self.SignupScreen) quitButton.pack(side="top") ####################################Login - GUI ########################### def LoginScreen(self,parent): global entry_user global entry_pass top = Toplevel(self) top.title("Log In - ABCL") w, h = top.winfo_screenwidth(), top.winfo_screenheight() top.overrideredirect(1) top.geometry("550x400+%d+%d" % (w/2-275, h/2-125)) #250 #top.configure(background = 'white') quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20) #entry_user = StringVar() #entry_pass = StringVar() # Frames to divide the window into three parts.. makes it easier to organize the widgets topFrame = Frame(top) topFrame.pack() middleFrame = Frame(top) middleFrame.pack(pady=50) bottomFrame = Frame(top) bottomFrame.pack(side=BOTTOM) # Widgets and which frame they are in #label = Label(topFrame, text="LET'S PLAY BRIDGE") userLabel = Label(middleFrame, text='Username:', font="Arial 14") passLabel = Label(middleFrame, text='Password:', font="Arial 14") entry_user = Entry(middleFrame) # For DB entry_pass = Entry(middleFrame, show ='*') # For DB b = Button(bottomFrame, text="Log In",fg ="blue", font ="Arial 14", command=lambda: get_Login_input(self, parent)) #Location of the Widgets in their frames #label.pack(side="top", fill="both", expand=True, padx=20, pady=20) userLabel.grid(row=10, column=0, sticky=W, padx=20) entry_user.grid(row=10, column=1, padx=20) passLabel.grid(row=11, column=0, sticky=W, padx=20) entry_pass.grid(row=11, column=1, padx=20) b.grid(row=12, columnspan=2) ###############################################DATABASE Check Login!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
def go_to_HomePage(user): root = Tk() app = Home(root,user) root.mainloop() def get_Login_input(self, parent): var = dbConnect() dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db) cur = dbconn.cursor() # Cursor object - required to execute all queries cur.execute("SELECT username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get())) rows = cur.fetchall() if rows: cur.execute("SELECT firstname, lastname, username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get())) for namerow in cur.fetchall(): # print all the first cell fn = namerow[0] #store firstname ln = namerow[1] #store lastname user = namerow[2] self.destroy() parent.destroy() go_to_HomePage(user) '''top = Toplevel(self) w, h = top.winfo_screenwidth(), top.winfo_screenheight() top.overrideredirect(1) top.geometry("%dx%d+0+0" % (w, h)) # Frames to divide the window into three parts.. makes it easier to organize the widgets topFrame = Frame(top) topFrame.pack() middleFrame = Frame(top) middleFrame.pack(pady=250) bottomFrame = Frame(top) bottomFrame.pack(side=BOTTOM) myProfileButton = Button(middleFrame, text="My Profile", fg="blue", font="
Arial 14", command=self.myProfileScreen) myProfileButton.pack() quitButton = Button(top, text="Log Out", font="Arial 14", command=top.destroy).pack(side="bottom", padx=20) #top.title(':D') #top.geometry('250x200') #get first name and last name of current player cur.execute("SELECT f
irstname, lastname FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get())) for namerow in cur.fetchall(): # print all the first cell fn = namerow[0] #store firstname ln = namerow[1] #store lastname rlb1 = Label(middleFrame, text='\nWelcome %s %s\n' % (fn, ln), font="Arial 14") rlb1.pack() rlb2 = Label(middleFrame, text='\nUserName: %s' % entry_user.get(), font="Arial 14") rlb2.pack() top.mainloop() self.destroy() parent.destroy() go_to_HomePage()''' else: r = Tk() r.title(':D') r.geometry('150x150') rlbl = Label(r, text='\n[!] Invalid Login') rlbl.pack() r.mainloop() dbconn.close() ########################################## SIGN UP SCREEN - GUI #################################################### def SignupScreen(self): global entry_fname global entry_lname global entry_user global entry_pass global entry_repass global entry_email global entry_ACBL global entry_disID top = Toplevel(self) w, h = top.winfo_screenwidth(), top.winfo_screenheight() top.overrideredirect(1) top.geometry("550x450+%d+%d" % (w / 2 - 275, h / 2 - 140)) # 250 #top.configure(background='white') quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20) #topFrame = Frame(top) #topFrame.pack() middleFrame = Frame(top) middleFrame.pack(pady=50) bottomFrame = Frame(top) bottomFrame.pack(side=BOTTOM) # Widgets and which frame they are in #label = Label(topFrame, text="LET'S PLAY BRIDGE") fnameLabel = Label(middleFrame,text = 'First Name:',font="Arial 14") lnameLabel = Label(middleFrame, text='Last Name:',font="Arial 14") userLabel = Label(middleFrame, text='Username:',font="Arial 14") passLabel = Label(middleFrame, text='Password:',font="Arial 14") repassLabel = Label(middleFrame, text='Re-Enter Password:',font="Arial 14") emailLabel = Label(middleFrame, text='Email(optional):',font="Arial 14") ACBLnumLabel = Label(middleFrame, text='ACBLnum(optional):',font="Arial
akrherz/iem
scripts/dbutil/rwis2archive.py
Python
mit
5,737
0
""" Copy RWIS data from iem database to its final resting home in 'rwis' The RWIS data is partitioned by UTC
timestamp Run at 0Z and 12Z, provided with a timestamp to process """ import d
atetime import sys import psycopg2.extras from pyiem.util import get_dbconn, utc def main(argv): """Go main""" iemdb = get_dbconn("iem") rwisdb = get_dbconn("rwis") ts = utc(int(argv[1]), int(argv[2]), int(argv[3])) ts2 = ts + datetime.timedelta(hours=24) rcursor = rwisdb.cursor() # Remove previous entries for this UTC date for suffix in ["", "_soil", "_traffic"]: rcursor.execute( f"DELETE from t{ts.year}{suffix} WHERE valid >= %s and valid < %s", (ts, ts2), ) rcursor.close() # Always delete stuff 3 or more days old from iemaccess icursor = iemdb.cursor() icursor.execute( "DELETE from rwis_traffic_data_log WHERE " "valid < ('TODAY'::date - '3 days'::interval)" ) icursor.execute( "DELETE from rwis_soil_data_log WHERE " "valid < ('TODAY'::date - '3 days'::interval)" ) icursor.close() # Get traffic obs from access icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor) icursor.execute( """SELECT l.nwsli as station, s.lane_id, d.* from rwis_traffic_data_log d, rwis_locations l, rwis_traffic_sensors s WHERE s.id = d.sensor_id and valid >= '%s' and valid < '%s' and s.location_id = l.id""" % (ts, ts2) ) rows = icursor.fetchall() if not rows: print("No RWIS traffic found between %s and %s" % (ts, ts2)) icursor.close() # Write to archive rcursor = rwisdb.cursor() rcursor.executemany( f"""INSERT into t{ts.year}_traffic (station, valid, lane_id, avg_speed, avg_headway, normal_vol, long_vol, occupancy) VALUES (%(station)s,%(valid)s, %(lane_id)s, %(avg_speed)s, %(avg_headway)s, %(normal_vol)s, %(long_vol)s, %(occupancy)s) """, rows, ) rcursor.close() # Get soil obs from access icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor) sql = """SELECT l.nwsli as station, d.valid, max(case when sensor_id = 1 then temp else null end) as tmpf_1in, max(case when sensor_id = 3 then temp else null end) as tmpf_3in, max(case when sensor_id = 6 then temp else null end) as tmpf_6in, max(case when sensor_id = 9 then temp else null end) as tmpf_9in, max(case when sensor_id = 12 then temp else null end) as tmpf_12in, max(case when sensor_id = 18 then temp else null end) as tmpf_18in, max(case when sensor_id = 24 then temp else null end) as tmpf_24in, max(case when sensor_id = 30 then temp else null end) as tmpf_30in, max(case when sensor_id = 36 then temp else null end) as tmpf_36in, max(case when sensor_id = 42 then temp else null end) as tmpf_42in, max(case when sensor_id = 48 then temp else null end) as tmpf_48in, max(case when sensor_id = 54 then temp else null end) as tmpf_54in, max(case when sensor_id = 60 then temp else null end) as tmpf_60in, max(case when sensor_id = 66 then temp else null end) as tmpf_66in, max(case when sensor_id = 72 then temp else null end) as tmpf_72in from rwis_soil_data_log d, rwis_locations l WHERE valid >= '%s' and valid < '%s' and d.location_id = l.id GROUP by station, valid""" % ( ts, ts2, ) icursor.execute(sql) rows = icursor.fetchall() if not rows: print("No RWIS soil obs found between %s and %s" % (ts, ts2)) icursor.close() # Write to RWIS Archive rcursor = rwisdb.cursor() rcursor.executemany( f"""INSERT into t{ts.year}_soil (station, valid, tmpf_1in, tmpf_3in, tmpf_6in, tmpf_9in, tmpf_12in, tmpf_18in, tmpf_24in, tmpf_30in, tmpf_36in, tmpf_42in, tmpf_48in, tmpf_54in, tmpf_60in, tmpf_66in, tmpf_72in) VALUES ( %(station)s,%(valid)s, %(tmpf_1in)s, %(tmpf_3in)s, %(tmpf_6in)s, %(tmpf_9in)s, %(tmpf_12in)s, %(tmpf_18in)s, %(tmpf_24in)s, %(tmpf_30in)s, %(tmpf_36in)s, %(tmpf_42in)s, %(tmpf_48in)s, %(tmpf_54in)s, %(tmpf_60in)s, %(tmpf_66in)s, %(tmpf_72in)s) """, rows, 
) rcursor.close() # Get regular obs from Access icursor = iemdb.cursor(cursor_factory=psycopg2.extras.DictCursor) # Since we store drct in the RWIS archive as NaN, we better make sure # we don't attempt to use these values as it will error out icursor.execute("update current_log set drct = null where drct = 'NaN'") sql = """SELECT c.*, t.id as station from current_log c, stations t WHERE valid >= '%s' and valid < '%s' and t.network ~* 'RWIS' and t.iemid = c.iemid""" % ( ts, ts2, ) icursor.execute(sql) rows = icursor.fetchall() if not rows: print("No RWIS obs found between %s and %s" % (ts, ts2)) icursor.close() # Write to RWIS Archive rcursor = rwisdb.cursor() rcursor.executemany( f"""INSERT into t{ts.year} (station, valid, tmpf, dwpf, drct, sknt, tfs0, tfs1, tfs2, tfs3, subf, gust, tfs0_text, tfs1_text, tfs2_text, tfs3_text, pcpn, vsby) VALUES (%(station)s, %(valid)s,%(tmpf)s,%(dwpf)s,%(drct)s,%(sknt)s,%(tsf0)s, %(tsf1)s,%(tsf2)s,%(tsf3)s,%(rwis_subf)s,%(gust)s,%(scond0)s, %(scond1)s,%(scond2)s,%(scond3)s,%(pday)s,%(vsby)s)""", rows, ) rcursor.close() rwisdb.commit() iemdb.commit() rwisdb.close() iemdb.close() if __name__ == "__main__": main(sys.argv)
urisimchoni/samba
third_party/waf/wafadmin/Scripting.py
Python
gpl-3.0
15,298
0.032684
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2005 (ita) "Module called for configuring, compiling and installing targets" import os, sys, shutil, traceback, datetime, inspect, errno import Utils, Configure, Build, Logs, Options, Environment, Task from Logs import error, warn, info from Constants import * g_gz = 'bz2' commands = [] def prepare_impl(t, cwd, ver, wafdir): Options.tooldir = [t] Options.launch_dir = cwd # some command-line options can be processed immediately if '--version' in sys.argv: opt_obj = Options.Handler() opt_obj.curdir = cwd opt_obj.parse_args() sys.exit(0) # now find the wscript file msg1 = 'Waf: Please run waf from a directory containing a file named "%s" or run distclean' % WSCRIPT_FILE # in theory projects can be configured in an autotool-like manner: # mkdir build && cd build && ../waf configure && ../waf build_dir_override = None candidate = None lst = os.listdir(cwd) search_for_candidate = True if WSCRIPT_FILE in lst: candidate = cwd elif 'configure' in sys.argv and not WSCRIPT_BUILD_FILE in lst: # autotool-like configuration calldir = os.path.abspath(os.path.dirname(sys.argv[0])) if WSCRIPT_FILE in os.listdir(calldir): candidate = calldir search_for_candidate = False else: error('arg[0] directory does not contain a wscript file') sys.exit(1) build_dir_override = cwd # climb up to find a script if it is not found while search_for_candidate: if len(cwd) <= 3: break # stop at / or c: dirlst = os.listdir(cwd) if WSCRIPT_FILE in dirlst: candidate = cwd if 'configure' in sys.argv and candidate: break if Options.lockfile in dirlst: env = Environment.Environment() try: env.load(os.path.join(cwd, Options.lockfile)) except: error('could not load %r' % Options.lockfile) try: os.stat(env['cwd']) except: candidate = cwd else: candidate = env['cwd'] break cwd = os.path.dirname(cwd) # climb up if not candidate: # check if the user only wanted to display the help if '-h' in sys.argv or '--help' in sys.argv: warn('No wscript file found: the help message may be incomplete') opt_obj = Options.Handler() opt_obj.curdir = cwd opt_obj.parse_args() else: error(msg1) sys.exit(0) # We have found wscript, but there is no guarantee that it is valid try: os.chdir(candidate) except OSError: raise Utils.WafError("the folder %r is unreadable" % candidate) # define the main module containing the functions init, shutdown, .. Utils.set_main_module(os.path.join(candidate, WSCRIPT_FILE)) if build_dir_override: d = getattr(Utils.g_module, BLDDIR, None) if d: # test if user has set the blddir in wscript. 
msg = ' Overriding build directory %s with %s' % (d, build_dir_override) warn(msg) Utils.g_module.blddir = build_dir_override # bind a few methods and classes by default def set_def(obj, name=''): n = name or obj.__name__ if not n in Utils.g_module.__dict__: setattr(Utils.g_module, n, obj) for k in [dist, distclean, distcheck, clean, install, uninstall]: set_def(k) set_def(Configure.ConfigurationContext, 'configure_context') for k in ['build', 'clean', 'install', 'uninstall']: set_def(Build.BuildContext, k + '_context') # now parse the options from the user wscript file opt_obj = Options.Handler(Utils.g_module) opt_obj.curdir = candidate try: f = Utils.g_module.set_options except AttributeError: pass else: opt_obj.sub_options(['']) opt_obj.parse_args() if not 'init' in Utils.g_module.__dict__: Utils.g_module.init = Utils.nada if not 'shutdown' in Utils.g_module.__dict__: Utils.g_module.shutdown = Utils.nada main() def prepare(t, cwd, ver, wafdir): if WAFVERSION != ver: msg = 'Version mismatch: waf %s <> wafadmin %s (wafdir %s)' % (ver, WAFVERSION, wafdir) print('\033[91mError: %s\033[0m' % msg) sys.exit(1) #""" try: prepare_impl(t, cwd, ver, wafdir) except Utils.WafError, e: error(str(e)) sys.exit(1) except KeyboardInterrupt: Utils.pprint('RED', 'Interrupted') sys.exit(68) """ import cProfile, pstats cProfile.runctx("import Scripting; Scripting.prepare_impl(t, cwd, ver, wafdir)", {}, {'t': t, 'cwd':cwd, 'ver':ver, 'wafdir':wafdir}, 'profi.txt') p = pstats.Stats('profi.txt') p.sort_stats('time').print_stats(45) #""" def main(): global commands commands = Options.arg_line[:] while commands: x = commands.pop(0) ini = datetime.datetime.now() if x == 'configure': fun = configure elif x == 'build': fun = build else: fun = getattr(Utils.g_module, x, None) if not fun: raise Utils.WscriptError('No such command %r' % x) ctx = getattr(Utils.g_module, x + '_context', Utils.Context)() if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']: # compatibility TODO remove in waf 1.6 try: fun(ctx) except TypeError: fun() else: fun(ctx) ela = '' if not Options.options.progress_bar: ela = ' (%s)' % Utils.get_elapsed_time(ini) if x != 'init' and x != 'shutdown': info('%r finished successfully%s' % (x, ela)) if not commands and x != 'shutdown': commands.append('shutdown') def configure(conf): src = getattr(Options.options, SRCDIR, None) if not src: src = getattr(Utils.g_module, SRCDIR, None) if not src: src = getattr(Utils.g_module, 'top', None) if not src: src = '.' incomplete_src = 1 src = os.path.abspath(src) bld = getattr(Options.options, BLDDIR, None) if not bld: bld = getattr(Utils.g_module, BLDDIR, None) if not bld: bld = getattr(Utils.g_module, 'out', None) if not bld: bld = 'build' incomplete_bld = 1 if bld == '.': raise Utils.WafError('Setting blddir="." 
may cause distclean problems') bld = os.path.abspath(bld) try: os.makedirs(bld) except OSError: pass # It is not possible to compile specific targets in the configuration # this may cause configuration errors if autoconfig is set targets = Options.options.compile_targets Options.options.compile_targets = None Options.is_install = False conf.srcdir = src conf.blddir = bld conf.post_init() if 'incomplete_src' in vars(): conf.check_message_1('Setting srcdir to') conf.check_message_2(src) if 'incomplete_bld' in vars(): conf.check_message_1('Setting blddir to') conf.check_message_2(bld) # calling to main wscript's configure() conf.sub_config(['']) conf.store() # this will write a configure lock so that subsequent builds will # consider the current path as the root directory (see prepare_impl). # to remove: use 'waf distclean' env = Environment.Environment() env[BLDDIR] = bld env[SRCDIR] = src env['argv'] = sys.argv env['commands'] = Options.commands env['options'] = Options.options.__dict__ # conf.hash & conf.files hold wscript files paths and hash # (used only by Configure.autoconfig) env['hash'] = conf.hash env['files'] = conf.files env['environ'] = dict(conf.environ) env['cwd'] = os.path.split(Utils.g_module.root_path)[0] if Utils.g_module.root_path != src: # in case the source dir is somewhere else env.store(os.path.join(src, Options.lockfile)) env.store(Options.lockfile) Options.o
ptions.compile_targets = targets def clean(bld): '''removes the build files''' try: proj = Environment.Environment(Options.lockfile) except IOError: raise Utils.WafError('Nothing to clean (project not configured)') bld.load_dirs(proj[SRCDIR], proj[BLDDIR]) bld.load_envs() bld.is_install = 0 # False # read the scripts - and set the path to the wscript path (useful for srcdir='/foo/bar') bld.add_subdirs([os.path.split(Utils.g_module.roo
t_path)[0]]) try: bld.clean() finally: bld.save() def check_configured(bld): if not Configure.autoconfig: return bld conf_cls = getattr(Utils.g_module, 'configure_context', Utils.Context) bld_cls = getattr(Utils.g_module, 'build_context', Utils.Context) def reconf(proj): back = (Options.commands, Options.options.__dict__, Logs.zones, Logs.verbose) Options.commands = proj['commands'] Options.options.__dict__ = proj['options'] conf = conf_cls() conf.environ = proj['environ'] configure(conf) (Options.commands, Options.options
seungkim11/election-2016
python_streaming/yesterday_dump.py
Python
apache-2.0
1,289
0.006206
import time
from pymongo import MongoClient
from datetime import datetime, timedelta
import json
from bson import Binary, Code
from bson.json_util import dumps

client = MongoClient('localhost', 27017)
db = client['election-2016']


def dumpData(yesterdayStr):
    collectionName = 't' + yesterdayStr
    cursor = db[collectionName].find()
    count = cursor.count()
    print(collectionName + ' found ' + str(count) + 'tweets')
    # dump only if data count is greater than 0
    if count > 0:
        file = open('out/' + yesterdayStr + '.json', 'w')
        file.write('[')
        i = 0
        for document in cursor:
            doc = dumps(document)
            file.write(doc)
            if (i != count - 1):
                file.write(',\n')
            else:
                file.write('\n]')
            i = i + 1
        print('data for ' + yesterdayStr + ' successfully dumped at ' + str(now))


# Run following code when the program starts
if __name__ == '__main__':
    currentDate = str(datetime.now().month) + '_' + str(datetime.now().day)
    # get now and yesterday strings
    now = datetime.now()
    yesterday = now - timedelta(days=1)
    yesterdayStr = str(yesterday.month) + '_' + str(yesterday.day)
    # update currentDate
    dumpData(yesterdayStr)
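A minimal usage sketch (not part of the original script): invoking dumpData for a fixed, hypothetical date string; the module name, the date, and the existing 'out/' directory are assumptions.

from datetime import datetime
import yesterday_dump

# dumpData() logs the module-level 'now' value, so set it before calling.
yesterday_dump.now = datetime.now()
# Writes out/11_8.json if the 't11_8' collection contains documents.
yesterday_dump.dumpData('11_8')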
publysher/rdflib-django
src/rdflib_django/testsettings.py
Python
mit
1,265
0.000791
""" Settings for testing the application. """ import os DEBUG = True DJANGO_RDFLIB_DEVELOP = True DB_PATH = os.path.
abspath(os.path.join(__file__, '..', '..', '..', 'rdflib_django.db')) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': DB_PATH, 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '',
} } SITE_ID = 1 STATIC_URL = '/static/' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'django.contrib.admindocs', 'rdflib_django', ) ROOT_URLCONF = 'rdflib_django.urls' LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' }, }, 'loggers': { '': { 'handlers': ['console'], 'propagate': True, 'level': 'INFO', }, } }
Kuniwak/vint
vint/ast/plugin/scope_plugin/redir_assignment_parser.py
Python
mit
1,250
0.0024
from vint.ast.traversing import traverse, register_traverser_extension
from vint.ast.parsing import Parser
from vint.ast.node_type import NodeType

REDIR_CONTENT = 'VINT:redir_content'


class RedirAssignmentParser(object):
    """ A class to make redir assignment parseable. """

    def process(self, ast):
        def enter_handler(node):
            node_type = NodeType(node['type'])

            if node_type is not NodeType.EXCMD:
                return

            is_redir_command = node['ea']['cmd'].get('name') == 'redir'
            if not is_redir_command:
                return

            redir_cmd_str = node['str']
            is_redir_assignment = '=>' in redir_cmd_str
            if not is_redir_assignment:
                return

            parser = Parser()
            redir_content_node = parser.parse_redir(node)
            node[REDIR_CONTENT] = redir_content_node

        traverse(ast, on_enter=enter_handler)
        return ast


def get_redir_content(node):
    return node.get(REDIR_CONTENT)


@register_traverser_extension
def traverse_redir_content(node, on_enter=None, on_leave=None):
    if REDIR_CONTENT not in node:
        return

    traverse(node[REDIR_CONTENT], on_enter=on_enter, on_leave=on_leave)
bmcinnes/VCU-VIP-Nanoinformatics
NERD/CRF/CRF.py
Python
gpl-3.0
5,075
0.00532
# Used for when precision or recall == 0 to supress warnings def warn(*args, **kwargs): pass import warnings warnings.warn = warn import numpy as np import sklearn_crfsuite from sklearn.metrics import make_scorer, confusion_matrix from sklearn_crfsuite import metrics from sklearn_crfsuite.utils import flatten from sklearn.model_selection import cross_validate, cross_val_predict, StratifiedKFold from collections import Counter from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from chemdataextractor.doc import Paragraph from Tools import TextTools stop_word_list = set(stopwords.words('english')) wordnet_lemmatizer = WordNetLemmatizer() chem_ents = [] def main(): train_docs = TextTools.loadNER("ASC") train_sents = [] for index, row in train_docs.iterrows(): for word in row['text']: train_sents.append(word) chem_ents = extract_chem_entities(train_sents) X = [sent2features(s,chem_ents) for s in train_sents] y = [sent2labels(s) for s in train_sents] crf = sklearn_crfsuite.CRF( algorithm='lbfgs', c1=0.1, c2=0.1, all_possible_transitions=True) crf.fit(X, y) # List of labels removing the non-entity classes labels = list(crf.classes_) labels.remove('O') NUMBER_OF_FOLDS = 5 scoreers = { "
f1_scores": make_scorer(metrics.flat_f1_score, average='weighted', labels=labels), "precision_scores": make_scorer(metrics.flat_precision_score, average='weighted', labels=labels), "recall_scores": make_scorer(metrics.flat_recall_score, average='weighted', labels=labels), } scores = cross_validate(crf, X, y, cv=NUMBER_OF_FOLDS, scoring=scoreers,
return_train_score=False, n_jobs=-1) f1_scores = scores['test_f1_scores'] precision_scores = scores['test_precision_scores'] recall_scores = scores['test_recall_scores'] for x in range(NUMBER_OF_FOLDS): print("Fold number: ", x) print("Precision: ", precision_scores[x]) print("Recall: ", recall_scores[x]) print("F1 Score: ", f1_scores[x]) print("\n") print("Averages Across Folds") print("Precision: ", np.average(np.array(precision_scores))) print("Recall: ", np.average(np.array(recall_scores))) print("F1 Score: ", np.average(np.array(f1_scores))) y_pred = cross_val_predict(crf, X, y, cv=NUMBER_OF_FOLDS) conf_mat = confusion_matrix(flatten(y), flatten(y_pred)) print("\nConfusion Matrix\n") print(" ".join(["NonEntity", "CoreComposition", "Precursor", "ReducingAgent", "Solvent", "Stabilizer"])) print(conf_mat) print("Top positive:") print_state_features(Counter(crf.state_features_).most_common(30)) print("\nTop negative:") print_state_features(Counter(crf.state_features_).most_common()[-30:]) def extract_chem_entities(sents): document_text = [[str(w[0]) for w in s] for s in sents] document_text = [" ".join(s) for s in document_text] document_text = " ".join(document_text) paragraph = Paragraph(document_text) chem_entities = paragraph.cems chem_entities = [c.text for c in chem_entities] return chem_entities def print_state_features(state_features): for (attr, label), weight in state_features: print("%0.6f %-8s %s" % (weight, label, attr)) def word2features(sent, word_position): SENTENCE_BEGGINING = 0 SENTENCE_END = len(sent) - 1 word = sent[word_position][0] pos = sent[word_position][1] features = featureize(word, pos) if word_position == SENTENCE_BEGGINING: features.append('BOS') if word_position > SENTENCE_BEGGINING: previous_word = sent[word_position-1][0] previous_pos = sent[word_position-1][1] features.extend(featureize(previous_word, previous_pos, relation="-1")) if word_position < SENTENCE_END: next_word = sent[word_position+1][0] next_pos = sent[word_position+1][1] features.extend(featureize(next_word, next_pos, relation="+1")) if word_position == SENTENCE_END: features.append('EOS') return features def featureize(word, postag, relation=""): suffix = word[-3:] prefix = word[:3] return [ relation + 'word.lower=' + word.lower(), relation + 'word.isupper=%s' % word.isupper(), relation + 'word.istitle=%s' % word.istitle(), relation + 'word.isdigit=%s' % word.isdigit(), relation + 'word.postag=%s' % postag, relation + 'word.prefix=%s' % prefix, relation + 'word.suffix=%s' % suffix, relation + 'word.lemma=%s' % wordnet_lemmatizer.lemmatize(word), relation + 'word.ischem=%s' % (word in chem_ents), relation + 'word.containsdigit=%s' % contains_digit(word), ] def sent2features(sent, chem_ents): return [word2features(sent, i) for i in range(len(sent))] def sent2labels(sent): return [label for token, postag, label in sent] def contains_digit(s): return any(i.isdigit() for i in s) if __name__ == "__main__": main()
possatti/memoo
converter.py
Python
mit
6,467
0.025472
'''Library containing the conversion routines for the different
machine types.

Author: Lucas Possatti
'''

import re
import collections


def mealy_to_moore(me):
    '''Converts the parameter 'me' (which must be a Mealy machine)
    into a Moore machine, which is returned.
    '''
    # Check that the machine received really is a Mealy machine.
    if me[0] != 'mealy':
        raise 'The mealy_to_moore method expected to receive a Mealy machine as input.'

    # Create the Moore machine.
    moo = ['moore']

    #!# # Look for the transitions arriving at each of the states, to
    #!# # check whether more than one transition targets a single state.
    #!# for state in me[3][1:]:
    #!#     state_trans_outputs = set()
    #!#     for trans in me[6][1:]:
    #!#         if state == trans[1]:
    #!#             pass

    # Initialize a dictionary with all the states as keys and an empty
    # collection for their values.
    state_outputs = collections.OrderedDict()
    for state in me[3][1:]:
        state_outputs[state] = []

    # Find the outputs generated by the transitions into each of the states.
    for trans in me[6][1:]:
        # Check that the destination state is in the 'state_outputs' dictionary.
        if trans[1] not in state_outputs:
            raise "Some transition state destination is not declared in the machine definition (states section). Malformed machine definition."
        # Add the output to the state's list, but only if it has not been added already.
        if trans[3] not in state_outputs[trans[1]]:
            state_outputs[trans[1]].append(trans[3])

    # Decide which will be the new states of the Moore machine.
    moore_states = []
    out_fn = []
    for state in state_outputs:
        # If the state has more than one output
        if len(state_outputs[state]) > 1:
            # Iterate over each of this state's outputs to generate the
            # new states that are needed, appending '*' for
            # each new state created.
            i = 0
            for output in state_outputs[state]:
                # Generate the name for the new state.
                new_state = state + '*'*i
                # Add the state to the new machine's state list
                moore_states.append(new_state)
                # Build the tuple for the output function (out-fn).
                out_fn.append([new_state, output])
                i += 1
        # If the state has a single output.
        elif len(state_outputs[state]) == 1:
            # Add the state to the new machine's state list
            moore_states.append(state)
            # Take this state's single output.
            output = state_outputs[state][0]
            # Build the tuple for the output function (out-fn).
            out_fn.append([state, output])
        # If the state has no output at all (for example, when there is
        # no transition arriving at it).
        else:
            # Add the state to the new machine's state list
            moore_states.append(state)
            # Build the tuple for the output function (out-fn); in this
            # case the state has no output.
            out_fn.append([state, []])

    # Generate the transitions needed for the Moore machine.
    moore_trans = []
    for trans in me[6][1:]:
        for new_state in moore_states:
            for fn in out_fn:
                #!#print(trans, ":", new_state, ":", fn, "=", re.match("^" + trans[1] + r"\**", new_state) and re.match("^" + trans[1] + r"\**", fn[0]) and trans[3] == fn[1])#!#
                # Use the data gathered so far to decide how the
                # transitions for the Moore machine should be created
                # and which of them should be kept.
                if re.match("^" + trans[1] + r"\**", new_state) and re.match("^" + trans[1] + r"\**", fn[0]) and trans[3] == fn[1]:
                    # Build the transition that will be added.
                    temp_trans = [trans[0], fn[0], trans[2]]
                    # Add the new transition, only if it has not already
                    # been added.
                    if temp_trans not in moore_trans:
                        moore_trans.append(temp_trans)

    # Determine the final states. In principle these are all the ones from
    # the Mealy machine, except that we also have to check whether the
    # states that were created (with '*') are final as well.
    moore_finals = []
    for final in me[5][1:]:
        for moo_state in moore_states:
            if re.match("^" + final + r"\**", moo_state):
                moore_finals.append(moo_state)

    moo.append(["symbols-in"] + me[1][1:])
    moo.append(["symbols-out"] + me[2][1:])
    moo.append(["states"] + moore_states)
    moo.append(["start"] + [me[4][1]])
    moo.append(["finals"] + moore_finals)
    moo.append(["trans"] + moore_trans)
    moo.append(["out-fn"] + out_fn)

    #!# print('\nDEBUG:')
    #!# print('me[0]', me[0])
    #!# print('me[1]', me[1])
    #!# print('me[2]', me[2])
    #!# print('me[3]', me[3])
    #!# print('me[4]', me[4])
    #!# print('me[5]', me[5])
    #!# print('me[6]', me[6])
    #!# print(':END DEBUG\n')

    return moo


def moore_to_mealy(moo):
    '''Converts the parameter 'moo' (which must be a Moore machine)
    into a Mealy machine, which is returned.
    '''
    # Check that the machine received really is a Moore machine.
    if moo[0] != 'moore':
        raise 'The moore_to_mealy method expected to receive a Moore machine as input.'

    # Create the Mealy machine.
    me = ['mealy']

    # Repeat the input and output symbols.
    me.append(['symbols-in'] + moo[1][1:])
    me.append(moo[2])

    # Repeat the states, but adding 'qe'.
    estados = [moo[3][0]] + ['qe'] + moo[3][1:]
    me.append(estados)

    # The initial state is 'qe'.
    me.append(['start', 'qe'])

    # The final states are the same.
    me.append(moo[5])

    # Translate the Moore machine's transitions and outputs to Mealy.
    mealy_trans = []
    moore_trans = moo[6][1:]
    moore_outfn = moo[7][1:]
    for trans in moore_trans:
        # Look up the output for that state change.
        mealy_trans_output = None
        for out in moore_outfn:
            if out[0] == trans[1]:
                mealy_trans_output = out[1]

        # Build the transition in the Mealy format.
        mealy_trans_stage = [trans[0], trans[1], trans[2], mealy_trans_output]

        # If the transition leaves the initial state, we additionally need
        # to add it as a transition of the 'qe' state
        if mealy_trans_stage[0] == moo[4][1]:
            mealy_trans.append(['qe'] + mealy_trans_stage[1:])

        # And add it to the Mealy machine's set of transitions.
        mealy_trans.append(mealy_trans_stage)

    # Put the Mealy transitions into the machine.
    me.append(['trans'] + mealy_trans)

    #!# print('DEBUG:')
    #!# print('moo[0]', moo[0])
    #!# print('moo[1]', moo[1])
    #!# print('moo[2]', moo[2])
    #!# print('moo[3]', moo[3])
    #!# print('moo[4]', moo[4])
    #!# print('moo[5]', moo[5])
    #!# print('moo[6]', moo[6])
    #!# print('moo[7]', moo[7][0:-1])
    #!# print(':END DEBUG')

    return me
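A small, hypothetical machine illustrating the list-based encoding both converters expect (header tag, symbol lists, states, start, finals, transitions); the symbols and states below are invented for illustration only.

mealy_machine = [
    'mealy',
    ['symbols-in', 'a', 'b'],
    ['symbols-out', '0', '1'],
    ['states', 'q0', 'q1'],
    ['start', 'q0'],
    ['finals', 'q1'],
    # Each transition is [source, destination, input symbol, output symbol].
    ['trans',
     ['q0', 'q1', 'a', '1'],
     ['q1', 'q0', 'b', '0']],
]

moore_machine = mealy_to_moore(mealy_machine)
mealy_again = moore_to_mealy(moore_machine)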
kipe/enocean
enocean/protocol/tests/test_temperature_sensors.py
Python
mit
1,616
0.005569
# -*- encoding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import

from enocean.protocol.eep import EEP

eep = EEP()
# profiles = eep.


def test_first_range():
    offset = -40
    values = range(0x01, 0x0C)
    for i in range(len(values)):
        minimum = float(i * 10 + offset)
        maximum = minimum + 40
        profile = eep.find_profile([], 0xA5, 0x02, values[i])
        assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
        assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)


def test_second_range():
    offset = -60
    values = range(0x10, 0x1C)
    for i in range(len(values)):
        minimum = float(i * 10 + offset)
        maximum = minimum + 80
        profile = eep.find_profile([], 0xA5, 0x02, values[i])
        assert minimum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
        assert maximum == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)


def test_rest():
    profile = eep.find_profile([], 0xA5, 0x02, 0x20)
    assert -10 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
    assert +41.2 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)

    profile = eep.find_profile([], 0xA5, 0x02, 0x30)
    assert -40 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('min').text)
    assert +62.3 == float(profile.find('value', {'shortcut': 'TMP'}).find('scale').find('max').text)
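The tests above encode how the A5-02 profile types map onto temperature windows; the sketch below spells out that arithmetic for the first range. It is an illustration, not part of the test suite.

def first_range_window(profile_type):
    # Types 0x01..0x0B cover 40-degree windows starting at -40,
    # shifted up by 10 degrees per type, mirroring test_first_range().
    index = profile_type - 0x01
    minimum = float(index * 10 - 40)
    return minimum, minimum + 40

assert first_range_window(0x01) == (-40.0, 0.0)
assert first_range_window(0x0B) == (60.0, 100.0)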
adamnovak/hgvm-graph-bakeoff-evalutations
scripts/computeVariantsDistances.py
Python
mit
67,700
0.005628
#!/usr/bin/env python2.7 """ Compare all sample graphs to baseline graphs (platvcf and g1kvcf). depends on callVariants.py output directory structure. Can do: 1)kmer set (jaccard and recall) 2)corg overlap """ import argparse, sys, os, os.path, random, subprocess, shutil, itertools, glob import doctest, re, json, collections, time, timeit, string, math, copy from collections import defaultdict from Bio.Phylo.TreeConstruction import _DistanceMatrix, DistanceTreeConstructor from Bio import Phylo import matplotlib matplotlib.use('Agg') import pylab import networkx as nx from collections import defaultdict from toil.job import Job from toillib import RealTimeLogger, robust_makedirs from callVariants import alignment_sample_tag, alignment_region_tag, alignment_graph_tag, run from callVariants import graph_path, sample_vg_path, g1k_vg_path, graph_path, sample_txt_path from evaluateVariantCalls import defaultdict_set from vcfQualStats import vcf_qual_stats, balance_tables def parse_args(args): parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) # Add the Toil options so the job store is the first argument Job.Runner.addToilOptions(parser) # General options parser.add_argument("in_gams", nargs="+", help="input alignment files") parser.add_argument("var_dir", type=str, help="output dir for callVariants.py") parser.add_argument("graph_dir", type=str, help="name of input graphs directory") parser.add_argument("comp_type", type=str, help="comparison type from {kmer,corg,vcf,sompy,happy,vcfeval}") parser.add_argument("comp_dir", type=str, help="directory to write comparison output") parser.add_argument("--kmer", type=int, default=27, help="kmer size for indexing") parser.add_argument("--edge_max", type=int, default=5, help="edge-max parameter for vg kmer index") parser.add_argument("--overwrite", action="store_true", default=False, help="overwrite existing files (indexes and comparison output)") parser.add_argument("--g1kvcf_path", type=str, default="data/g1kvcf", help="path to search for 1000 genomes vcf and sequences. expects " "these to be in <g1kvcf_path>BRCA1.vcf. etc. ") parser.add_argument("--platinum_path", type=str, default="data/platinum", help="path to search for platinum genomes vcf. expects " "these to be in <platinum_path>/<sample>/BRCA1.vcf. etc. ") parser.add_argument("--chrom_fa_path", type=str, default="data/g1kvcf/chrom.fa", help="fasta file with entire chromosome info for all regions") parser.add_argument("--happy_fa_path", type=str, default="data/g1kvcf/chrom2.fa", help="fasta file with chrXX names for chromosomes. todo- use for above") parser.add_argument("--gatk3_path", type=str, default="data/gatk3", help="path to search for gatk3 vcf. expects " " these to bein <gatk3_path>/<sample>/BRCA1.vcf etc.") parser.add_argument("--platypus_path", type=str, default="data/platypus", help="path to search for platypus vcf. expects " " these to bein <platypus_path>/<sample>/BRCA1.vcf etc.") parser.add_argument("--freebayes_path", type=str, default="data/freebayes", help="path to search for freebayes vcf. expects " " these to bein <freebayes_path>/<sample>/BRCA1.vcf etc.") parser.add_argument("--samtools_path", type=str, default="data/samtools", help="path to search for samtools vcf. 
expects " " these to bein <samtools_path>/<sample>/BRCA1.vcf etc.") parser.add_argument("--vg_cores", type=int, default=1, help="number of cores to give to vg commands (and hap.py)") parser.add_argument("--timeout", type=int, default=sys.maxint, help="timeout in seconds for long jobs (vg index and corg in this case)") parser.add_argument("--orig", action="store_true", help="do all vs all comparison of input graphs") parser.add_argument("--sample", action="store_true", help="do all vs all comparison of sample graphs") parser.add_argument("--orig_and_sample", action="store_true", help="do all vs all comparison of sample + input graphs") parser.add_argument("--ignore", action="append", default=[], help="keyword to ignore in vcf comparison") parser.add_argument("--normalize", action="store_true", default =False, help="run vt normalization on all input vcfs") parser.add_argument("--clip", type=str, default=None, help="clip vcf using specified bed file before call comparisons") parser.add_argument("--clip_fp", type=str, default=None, help="false positives outside region will be called unknown (hap.py or som.py)") parser.add_argument("--roc", action="store_true", default=False, help="generate happy rocs for gatk3 and platypus") parser.add_argument("--qpct", type=float, default=None, help="apply quality percentile filter for gatk and platypus and freebayes") parser.add_argument("--qgraph", action="store_true", default=False, help="apply quality percentile filter to graphs too") parser.add_argument("--baseline", type=str, default="platvcf", help="baseline to use (platvcf or g1kvcf) for vcf comparisons") parser.add_argument("--gt", action="store_true", help="take into account genotype information (sompy or vcfeval)") parser.add_argument("--new", action="store_true", help="use new caller (vg genotype)") parser.add_argument("--min_ll", type=float, default=None, help="apply likelihood filter to vg call vcfs") parser.add_argument("--filter_type", type=str, default="xaad", help="used for vcfFilter for curves (when not --new): {xaad, ad, ll, xl}") parser.add_argument("--dedupe", action="store_true", default=False, help="use --dedupe option in vcfFilterQuality.py") parser.add_argument("--vroc", action="store_true", default=False, help="use vcfevals roc logic (only gives total, not indel snp breakdown) and wont work with clipping") parser.add_argument("--cwd", default=os.getcwd(), help="set Toil job working directory") parser.add_argument("--combine_samples", type=str, default=None, help="comma-separated list of samples to combine into COMBINED sample") parser.add_argument("--combined_name", type=str, default="COMBINED", help="name of the combined sample to generate") parser.add_argument("--tp_baseline", action="store_true", default=False, help="use tp-baseline.vcf instead of tp.vcf from vcfeval output for precision and recall") args = args[1:] return parser.parse_args(args) def index_path(graph, options): """ get the path of the index given the graph """ return
graph + ".index" def compute_kmer_index(job, graph, options): """ run vg index (if necessary) and vg compare on the input vg indexes are just created in place, ie same dir as graph, so need to have write permission there """ # Move to the appropriate working dire
ctory from wherever Toil dropped us os.chdir(options.cwd) out_index_path = index_path(graph, options) do_index = options.overwrite or not os.path.exists(out_index_path) index_opts = "-s -k {} -t {}".format(options.kmer, options.vg_cores)
grepme/CMPUT410Lab01
virt_env/virt1/lib/python2.7/site-packages/PasteDeploy-1.5.2-py2.7.egg/paste/deploy/compat.py
Python
apache-2.0
961
0.007284
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""Python 2<->3 compatibility module"""
import sys


def print_(template, *args, **kwargs):
    template = str(template)
    if args:
        template = template % args
    elif kwargs:
        template = template % kwargs
    sys.stdout.writelines(template)

if sys.version_info < (3, 0):
    basestring = basestring
    from ConfigParser import ConfigParser
    from urllib import unquote
    iteritems = lambda d: d.iteritems()
    dictkeys = lambda d: d.keys()

    def reraise(t, e, tb):
        exec('raise t, e, tb', dict(t=t, e=e, tb=tb))
else:
    basestring = str
    from configparser import ConfigParser
    from urllib.parse import unquote
    iteritems = lambda d: d.items()
    dictkeys = lambda d: list(d.keys())

    def reraise(t, e, tb):
        raise e.with_traceback(tb)
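A short, hypothetical example of using the shims above from application code (the settings dict is made up):

import sys
from paste.deploy.compat import iteritems, print_, reraise

settings = {'debug': 'true', 'port': '8080'}
for key, value in iteritems(settings):  # same call on Python 2 and 3
    print_('%s=%s\n', key, value)

try:
    int('not-a-number')
except ValueError:
    t, e, tb = sys.exc_info()
    reraise(t, e, tb)  # re-raise preserving the original traceback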
Hackers-To-Engineers/ghdata-sprint1team-2
organizationHistory/pythonBlameHistoryTree.py
Python
mit
12,918
0.014476
#How to run this: #Python libraries needed to run this file: Flask, Git Python, SQLAlchemy #You will need to have Git installed, and it will need to be in your path. #For example, on Windows you should be able to run a command like 'git pull' from the #ordinary Windows command prompt and not just from Git Bash. #You will need a MySQL server with the MSR14 datasource or other GHTorrent database with the same schema. #Edit the line in this code that says db = sqlalchemy.create_engine to match your username:password@hostname:port/database. #This file is hardcoded to download the ghdata repository. #Since it is a preliminary example, each time it runs, #it deletes the local ghdata repo and re-downloads it (though this might not be a good option for the future). #Because of this: if you have a folder named ghdata whose contents you do not want deleted, #do not place this file in the same folder as your ghdata folder. #to run this, type "python pythonBlameHistoryTree.py" into the command prompt #You will see some output about running on 127.0.0.1:5000 in the command prompt #Open a web browser and navigate to 127.0.0.1:5000. #This page will load for quite a while. At least several minutes is expected. #You can see it is still running due to the testing output in the command prompt Outer loop: commit# Inner loop: commit# #When the testing output stops running you should see some output in
the browser tab. #the output shows the commit number and date, the total lines of code and other files (for example, the readme) #and the percentage written by each organization. #expected output for ghdata should show only the spdx-tools organization (Matt is a member) #Number of lines corresponds to
the lines written by Matt. #You can see that earlier commits are lower on the page, and chronologically later ones appear higher up. #An "error" I expect us to encounter when testing other repos: #The way my sql query works right now, a user can be a member of multiple organizations. #For a simple case of expected output problems: #User1 wrote the entire repository (100%) #User1 is a member of Microsoft and IBM #Microsoft wrote 100% of the repository. IBM also wrote 100% of the repository for a total of 200% #Other issues: #If a user does not have both an email and organization available in GHTorrent database, #the user will not be counted towards any organization. #Future changes planned for this file: #Code cleanup for better readability #Code commenting for each portion #Thorough testing for various potential cases we might encounter #Deciding for certain how to decide whether a user is a member of an organization #A better method of dealing with local repository rather than deleting each time and re-downloading #Not having the database password directly in the code #Look into improving code efficiency where possible for faster runtime from flask import Flask from git import * import sqlalchemy from sqlalchemy import text import shutil import os import stat import time app = Flask(__name__) @app.route("/") def pythonBlameHistory(): #path is the hardcoded folder for the last download of ghdata repo_path = './ghdata' #We must remove the old ghdata if we want to download a new copy. #In order to delete it, we must first change the permissions #To be writable for all files and directories. #Based on this: http://stackoverflow.com/questions/2853723/whats-the-python-way-for-recursively-setting-file-permissions if os.path.exists(repo_path): for root, directories, files in os.walk(repo_path): for directory in directories: os.chmod(os.path.join(root, directory), stat.S_IWRITE) for file in files: os.chmod(os.path.join(root, file), stat.S_IWRITE) os.chmod(repo_path, stat.S_IWRITE) #delete the old ghdata shutil.rmtree(repo_path) #connect to the database username:password@hostname:port/databasename db = sqlalchemy.create_engine('mysql+pymysql://root:password@localhost:3306/msr14') schema = sqlalchemy.MetaData() schema.reflect(bind=db) #Get the ghdata repository from GitHub repo = Repo.init('ghdata') origin = repo.create_remote('origin','https://github.com/OSSHealth/ghdata.git') origin.fetch() origin.pull(origin.refs[0].remote_head) #Dictionary to store results of sql queries #associating emails with organizations. #Without this dictionary, we would have to repeat #the same query over and over, which on my local machine #meant a runtime of over 24 hours (as opposed to several minutes using the dictionary) orgs_associated_with_user = {} #This dictionary keeps track of the lines written per organization for a single file. lines_per_organization_per_file = {} #This is the total number of lines in a single file total_lines_in_file = 0 #this is used later to hold percentage results for output percentage = 0 #This is the total number of lines in an entire repo total_lines_in_repo = 0 #This dictionary keeps track of the lines written per organization for the entire repo. lines_per_organization_entire_repo = {} #The output string will be displayed to the screen once everything is done running. outputString = "" #Outer loop: loop through each commit in the master branch. #This corresponds to the history of commits over time. 
for history_commit in repo.iter_commits('master'): #Since we want to see the change over time in repo percentage by organization, #clear the variables for total lines and organization lines for each new commit #we examine. lines_per_organization_entire_repo = {} total_lines_in_repo = 0 #Testing output: only purpose is to show you it's still running :) print("Outer loop: " + str(history_commit)) #Now loop through every file in the repo. #You cannot use the os library file/directory loop for this part. #(as was used above to change file permissions) #That is because some files do not exist in every commit. #You must loop through the commit tree, not the ghdata directory. for file_in_repo in history_commit.tree.traverse(): #For each file, we want to clear out the total lines and organization totals per file. #That's because we're starting over with a new file. lines_per_organization_per_file = {} total_lines_in_file = 0 #Files are of the blob type. This if statement prevents us from trying #to examine 'lines' in a directory. if file_in_repo.type == 'blob': #Now for each file, perform git blame. This will traverse #the lines in the file. #You can see there are now two variables of type commit: #history_commit and blame_commit (will improve variable naming in a future update) #history_commit is the commit with respect to the overall repo history. #blame_commit is the commit in which this line was most recently changed #as obtained through git blame. We use the "blame_commit" variable #to obtain the author of the commit for when the lines were last changed. for blame_commit, lines in repo.blame(history_commit, file_in_repo.path): #Git blame does not always return one line at a time. #Sometimes we are returned several lines committed by the same author. #In that case, we must count how many lines there are or our #total will not match the actual file. blameLineCount = 0 for line in lines: #increment lines to later attribute to an organization. blameLineCount += 1 #increment lines in the file as a whole total_lines_in_file += 1 #Testing output: only shows that things are still running. print("Inner loo
spdx/tools-python
spdx/parsers/__init__.py
Python
apache-2.0
577
0
# Copyright (c) 2014 Ahmed H. Ismail

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
safl/wtty
wtty/iod.py
Python
apache-2.0
5,729
0.002095
#!/usr/bin/env python # -*- coding: ascii -*- from subprocess import Popen, PIPE import threading import select import logging import fcntl import time import sys import os TTY_OPTS="-icrnl -onlcr -imaxbel -opost -isig -icanon -echo line 0 kill ^H min 100 time 2 brkint 115200" READERS = [] WRITERS = [] SELECT_TO = 0.1 def tty_set_opts(dev, opts): """Set tty options""" cmd = ["stty", "-F", dev] + opts.split(" ") prcs = Popen(cmd, stdout=PIPE,stderr=PIPE) out, err = prcs.communicate() if out: logging.info(out) if err: logging.error(err) return prcs.returncode class TTYWorker(threading.Thread): def __init__(self, dev, root): threading.Thread.__init__(self) self.tty = os.path.basename(dev) self.dev = dev self.root = root self.keep_running = True def stop(self): self.keep_running = False def run(self): raise Exception("Not implemented") class TTYReader(TTYWorker): """Reads tty output to file""" def run(self): tty_out_path = os.sep.join([self.root, "%s.log" % self.tty]) logging.info("tty_out_path(%s)" % tty_out_path) while self.keep_running: err = not os.path.exists(self.dev) if err: logging.error("dev(%s) does not exist" % self.dev) time.sleep(1) continue err = not os.path.exists(self.root) if err: logging.error("root(%s) does not exist" % self.root) time.sleep(1) continue err = tty_set_opts(self.dev, TTY_OPTS) if err: logging.error("failed stty err(%d)", err) time.sleep(1) continue try: with open(self.dev, "rb", 0) as dev_r, \ open(tty_out_path, "ab", 0) as tty_out: while self.keep_running and \ os.fstat(dev_r.fileno()).st_nlink and \ os.fstat(tty_out.fileno()).st_nlink: ready, _, _ = select.select( [dev_r.fileno()], [], [], SELECT_TO ) if not ready: continue logging.debug("dev_r.read(1)") payload = dev_r.read(1) logging.debug("dev_r.read(1) -- DONE") if payload is None: break logging.debug("tty_out.write") tty_out.write(payload) logging.debug("tty_out.write -- DONE") except: logging.error("error(%s)" % str(sys.exc_info())) class TTYWriter(TTYWorker): """Write commands to tty""" def run(self): tty_in_path = os.sep.join([self.root, "%s.in" % self.tty]) logging.info("tty_in(%s)" % tty_in_path) while self.keep_running: err = not os.path.exists(self.dev) if err: logging.error("dev(%s) does not exist" % self.dev) time.sleep(1) continue err = not os.path.exists(self.root) if err: logging.error("root(%s) does not exist" % self.root) time.sleep(1) continue err = not os.path.exists(tty_in_path) if err: logging.error("tty_in_path(%s) does not exist" % tty_in_path) time.sleep(1) continue err = tty_set_opts(self.dev, TTY_OPTS) if err: logging.error("failed stty err(%d)", err) time.sleep(1) continue try: with open(self.dev, "a", 0) as dev_w, \ open(tty_in_path, "r", 0) as tty_in: tty_in.seek(0, 2) while self.keep_running and \ os.fstat(dev_w.fileno()).st_nlink and \ os.fstat(tty_in.fileno()).st_nlink: ready, _, _ = select.select( [tty_in.fileno()], [], [], SELECT_TO ) if not ready: continue line = tty_in.readline() if not line: continue logging.debug("dev_w.write") dev_w.
write(line.strip()) logging.debug("dev_w.write -- DONE") time
.sleep(0.1) logging.debug("dev_w.write CR") dev_w.write('\r') logging.debug("dev_w.write CR -- DONE") except: logging.error("error(%s)" % str(sys.exc_info())) def main(cfg, state): """Entry point for wtty-iod""" logging.critical("Starting...") for tty in cfg["devices"]: READERS.append(TTYReader(tty, cfg["roots"]["reader"])) WRITERS.append(TTYWriter(tty, cfg["roots"]["writer"])) logging.info("Starting workers") for worker in READERS + WRITERS: worker.start() logging.critical("Working...") while (state["keep_running"]): time.sleep(0.1) logging.info("Stopping") for i, worker in enumerate(WRITERS + READERS): logging.debug("Stopping i(%d)" % i) worker.stop() logging.info("Joining") for i, worker in enumerate(WRITERS + READERS): logging.debug("Joining i(%d)" % i) worker.join() logging.critical("Stopped.")
romanoved/nanomsg-python
setup.py
Python
mit
2,912
0.003777
from __future__ import division, absolute_import, print_function,\ unicode_literals import os import sys try: from setuptools import setup except ImportError: from distutils.core import setup, Extension from distutils.core import Extension from distutils.errors import DistutilsError from distutils.command.build_ext import build_ext with open(os.path.join('nanomsg','version.py')) as f: exec(f.read()) class skippable_build_ext(build_ext): def run(self): try: build_ext.run(self) except Exception as e: print() print("=" * 79) print("WARNING : CPython API extension could not be built.") print() print("Exception was : %r" % (e,)) print() print( "If you need the extensions (they may be faster than " "alternative on some" ) print(" platforms) check you have a compiler configured with all" " the necessary") print(" headers and libraries.") print("=" * 79) print() try: import ctypes if sys.platform in ('win32', 'cygwin'): _lib = ctypes.windll.nanoconfig else: _lib = ctypes.cdll.LoadLibrary('libnanoconfig.so') except OSError: # Building without nanoconfig cpy_extension = Extension(str('_nanomsg_cpy'), sources=[str('_nanomsg_cpy/wrapper.c')], libraries=[str('nanomsg')], ) else: # Building with nanoco
nfig cpy_extension = Extension(str('_nanomsg_cpy'), define_macros=[('WITH_NANOCONFIG', '1')], sources=[str('_nanomsg_cpy/wrapper.c')], libraries=[str('nanomsg'), str('nanoconfig')], ) install_requires = [] try: import importlib except ImportError: i
nstall_requires.append('importlib') setup( name='nanomsg', version=__version__, packages=[str('nanomsg'), str('_nanomsg_ctypes'), str('nanomsg_wrappers')], ext_modules=[cpy_extension], cmdclass = {'build_ext': skippable_build_ext}, install_requires=install_requires, description='Python library for nanomsg.', classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", ], author='Tony Simpson', author_email='[email protected]', url='https://github.com/tonysimpson/nanomsg-python', keywords=['nanomsg', 'driver'], license='MIT', test_suite="tests", )
LamCiuLoeng/budget
budget/model/__init__.py
Python
mit
2,408
0.009551
# -*- coding: utf-8 -*-
"""The application's model objects"""

from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base

# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
maker = sessionmaker( autoflush = True, autocommit = False,
                      extension = ZopeTransactionExtension() )
DBSession = scoped_session( maker )

# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()

# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)

# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata

# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()

#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######


def init_model( engine ):
    """Call me before using any of the tables or classes in the model."""
    DBSession.configure( bind = engine )
    # If you are using reflection to introspect your database and create
    # table objects for you, your tables must be defined and mapped inside
    # the init_model function, so that the engine is available if you
    # use the model outside tg2, you need to make sure this is called before
    # you use the model.
    #
    # See the following example:
    # global t_reflected
    # t_reflected = Table("Reflected", metadata,
    #     autoload=True, autoload_with=engine)
    # mapper(Reflected, t_reflected)

# Import your model modules here.
from auth import User, Group, Permission
from logic import *
from sysutil import *
from fileutil import *
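A short sketch of the intended call order (the SQLite URL and module paths are assumptions based on the repository layout): bind the engine with init_model before issuing queries through DBSession.

from sqlalchemy import create_engine
from budget.model import DBSession, init_model
from budget.model.auth import User

engine = create_engine('sqlite:///budget-dev.db')  # placeholder URL
init_model(engine)

# Once configured, the scoped session can be used for queries.
users = DBSession.query(User).all()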
PyBossa/mnemosyne
mnemosyne/core.py
Python
agpl-3.0
1,728
0.001159
# -*- coding: utf8 -*-
# This file is part of Mnemosyne.
#
# Copyright (C) 2013 Daniel Lombraña González
#
# Mnemosyne is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mnemosyne is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Mnemosyne. If not, see <http://www.gnu.org/licenses/>.
"""
Package for creating the Flask application. This exports:
    - create_app a function that creates the Flask application
"""
from flask import Flask
from mnemosyne.frontend import frontend
from mnemosyne.model import db

try:
    import mnemosyne.settings as settings
except:
    print "Settings file is missing"


def create_app(db_name=None, testing=False):
    """
    Create the Flask app object after configuring it.

    Keyword arguments:
        db_name -- Database name
        testing -- Enable/Disable testing mode

    Return value:
        app -- Flask application object
    """
    try:
        app = Flask(__name__)
        app.config.from_object(settings)
    except:
        print "Settings file is missing, trying with env config..."
        app.config.from_envvar('MNEMOSYNE_SETTINGS', silent=False)
    if db_name:
        app.config['SQLALCHEMY_DATABASE_URI'] = db_name
    db.init_app(app)
    app.register_blueprint(frontend)
    return app
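A minimal sketch of wiring the factory into a development server; the SQLite URI is a placeholder and not a project default.

from mnemosyne.core import create_app

app = create_app(db_name='sqlite:////tmp/mnemosyne.db')  # example URI

if __name__ == '__main__':
    app.run(debug=True)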
diefenbach/django-lfs
lfs/criteria/utils.py
Python
bsd-3-clause
3,002
0.001666
from django.contrib.contenttypes.models import ContentType

from lfs.core.utils import import_symbol
from lfs.criteria.models import Criterion

import logging
logger = logging.getLogger(__name__)


# DEPRECATED 0.8
def is_valid(request, object, product=None):
    """
    Returns True if the given object is valid. This is calculated via the
    attached criteria.

    Passed object is an object which can have criteria. At the moment these
    are discounts, shipping/payment methods and shipping/payment prices.
    """
    logger.info("Deprecated: lfs.criteria.utils.is_valid: this function is deprecated. Please use the Criteria class instead.")
    for criterion_object in get_criteria(object):
        criterion_object.request = request
        criterion_object.product = product
        if criterion_object.is_valid() is False:
            return False
    return True


# DEPRECATED 0.8
def get_criteria(object):
    """
    Returns all criteria for given object.
    """
    logger.info("Deprecated: lfs.criteria.utils.get_criteria: this function is deprecated. Please use the Criteria class instead.")
    content_type = ContentType.objects.get_for_model(object)

    criteria = []
    for criterion in Criterion.objects.filter(content_id=object.id, content_type=content_type):
        criteria.append(criterion.get_content_object())
    return criteria


def get_first_valid(request, objects, product=None):
    """
    Returns the first valid object of given objects.

    Passed object is an object which can have criteria. At the moment these
    are discounts, shipping/payment methods and shipping/payment prices.
    """
    for object in objects:
        if object.is_valid(request, product):
            return object
    return None


# DEPRECATED 0.8
def save_criteria(request, object):
    """
    Saves the criteria for the given object. The criteria are passed via
    request body.
    """
    logger.info("Deprecated: lfs.criteria.utils.save_criteria: this function is deprecated. Please use the Criteria class instead.")
    # First we delete all existing criteria objects for the given object.
    for co in get_criteria(object):
        co.delete()

    # Then we add all passed criteria to the object.
    for key, model in request.POST.items():
        if key.startswith("type"):
            try:
                id = key.split("-")[1]
            except KeyError:
                continue

            # Get the values for the criterion
            operator = request.POST.get("operator-%s" % id)
            position = request.POST.get("position-%s" % id)

            criterion_class = import_symbol(model)
            criterion = criterion_class.objects.create(content=object, operator=operator, position=position)

            if criterion.get_value_type() == criterion.MULTIPLE_SELECT:
                value = request.POST.getlist("value-%s" % id)
            else:
                value = request.POST.get("value-%s" % id)

            criterion.update(value)
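An illustrative wrapper around get_first_valid (hypothetical names; any candidates implementing is_valid(request, product), such as shipping methods, would fit):

def first_valid_shipping_method(request, shipping_methods, product=None):
    # Each candidate is expected to implement is_valid(request, product),
    # as described in the docstring above.
    method = get_first_valid(request, shipping_methods, product)
    if method is None:
        logger.info("No shipping method matched its criteria")
    return method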
nhsengland/publish-o-matic
datasets/ccgois/load.py
Python
mit
3,287
0.003651
""" Load the CCGOIS datasets into a CKAN instance """ import dc import json import slugify import ffs def make_name_from_title(title): # For some reason, we're finding duplicate names name = slugify.slugify(title).lower()[:99] if not name.startswith('ccgois-'): name = u"ccgois-{}".format(name) return name def load_ccgois(datasets): for metadata in datasets: resources = [ dict( description=r['description'], name=r['name'], format=r['filetype'], url=r['url'] ) for r in metadata['resources'] ] print [r['name'] for r in metadata['resources']] metadata['title'] = u'CCGOIS - {}'.format(metadata['title']) metadata['name'] = make_name_from_title(metadata['title']) print u'Creating {}'.format(metadata['name'])
dc.Dataset.create_or_update( name=metadata['name'], title=metadata['title'], state='active', license_id='uk-ogl', notes=metadata['description'], origin='https://indicators.ic.nhs.uk/webview/', tags=dc.tags(*metadata['keyword(s)']), resources=resources, #frequency=[metadata['frequ
ency'], ], owner_org='hscic', extras=[ dict(key='frequency', value=metadata.get('frequency', '')), dict(key='coverage_start_date', value=metadata['coverage_start_date']), dict(key='coverage_end_date', value=metadata['coverage_end_date']), dict(key='domain', value=metadata['domain']), dict(key='origin', value='HSCIC'), dict(key='next_version_due', value=metadata['next version due']), dict(key='nhs_OF_indicators', value=metadata['nhs_of_indicators']), dict(key='HSCIC_unique_id', value=metadata['unique identifier']), dict(key='homepage', value=metadata['homepage']), dict(key='status', value=metadata['status']), dict(key='language', value=metadata['language']), dict(key='assurance_level', value=metadata['assurance_level']), dict(key='release_date', value=metadata['current version uploaded']) ] ) return def group_ccgois(datasets): for metadata in datasets: dataset_name = make_name_from_title(metadata['title']) try: dataset = dc.ckan.action.package_show(id=dataset_name) except: print "Failed to find dataset: {}".format(dataset_name) print "Can't add to group" continue if [g for g in dataset.get('groups', []) if g['name'] == 'ccgois']: print 'Already in group', g['name'] else: dc.ckan.action.member_create( id='ccgois', object=dataset_name, object_type='package', capacity='member' ) return def main(workspace): DATA_DIR = ffs.Path(workspace) datasets = json.load(open(DATA_DIR / 'ccgois_indicators.json')) dc.ensure_publisher('hscic') dc.ensure_group('ccgois') load_ccgois(datasets) group_ccgois(datasets)
SUSE/azure-sdk-for-python
azure-mgmt-notificationhubs/azure/mgmt/notificationhubs/models/namespace_create_or_update_parameters.py
Python
mit
4,958
0.001412
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .resource import Resource class NamespaceCreateOrUpdateParameters(Resource): """Parameters supplied to the CreateOrUpdate Namespace operation. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Resource Id :vartype id: str :ivar name: Resource name :vartype name: str :ivar type: Resource type :vartype type: str :param location: Resource location :type location: str :param tags: Resource tags :type tags: dict :param sku: The sku of the created namespace :type sku: :class:`Sku <azure.mgmt.notificationhubs.models.Sku>` :param namespace_create_or_update_parameters_name: The name of the namespace. :type namespace_create_or_update_parameters_name: str :param provisioning_state: Provisioning state of the Namespace. :type provisioning_state: str :param region: Specifies the targeted region in which the namespace should be created. It can be any of the following values: Australia EastAustralia SoutheastCentral USEast USEast US 2West USNorth Central USSouth Central USEast AsiaSoutheast AsiaBrazil SouthJapan EastJapan WestNorth EuropeWest Europe :type region: str :param status: Status of the namespace. It can be any of these values:1 = Created/Active2 = Creating3 = Suspended4 = Deleting :type status: str :param created_at: The time the namespace was created. :type created_at: datetime :param service_bus_endpoint: Endpoint you can use to perform NotificationHub operations. :type service_bus_endpoint: str :param subscription_id: The Id of the Azure subscription associated with the namespace. :type subscription_id: str :param scale_unit: ScaleUnit where the namespace gets created :type scale_unit: str :param enabled: Whether or not the namespace is currently enabled. :type enabled: bool :param critical: Whether or not the namespace is set as Critical. :type critical: bool :param namespace_type: The namespace type. 
Possible values include: 'Messaging', 'NotificationHub' :type namespace_type: str or :class:`NamespaceType <azure.mgmt.notificationhubs.models.NamespaceType>` """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'namespace_create_or_update_parameters_name': {'key': 'properties.name', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'region': {'key': 'properties.region', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, 'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, 'scale_unit': {'key': 'properties.scaleUnit', 'type': 'str'}, 'enabled': {'key': 'properties.enabled', 'type': 'bool'}, 'critical': {'key': 'properties.critical', 'type': 'bool'}, 'namespace_type': {'key': 'properties.namespaceType', 'type': 'NamespaceType'}, } def __init__(self, location, tags=None, sku=None, namespace_create_or_update_parameters_name=None, provisioning_state=None, region=None, status=None, created_at=
None, service_bus_endpoint=None, subscription_id=None, scale_unit=None, enabled=None, critical=None, namespace_type=None): super(NamespaceCreateOrUpdateParameters, self).__init__(location=location, tags=tags, sku=sku) self.namespace_create_or_update_parameters_name = namespace_create_or_update_parameters_name self.pr
ovisioning_state = provisioning_state self.region = region self.status = status self.created_at = created_at self.service_bus_endpoint = service_bus_endpoint self.subscription_id = subscription_id self.scale_unit = scale_unit self.enabled = enabled self.critical = critical self.namespace_type = namespace_type
fritzo/loom
loom/gridding.py
Python
bsd-3-clause
3,080
0.000649
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved. # Copyright (c) 2015, Google, Inc. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # - Neither the name of Salesforce.com nor the names of its contributors # may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import numpy def uniform(min_val, max_val, point_count): grid = numpy.array(range(point_count)) + 0.5 grid *= (max_val - min_val) / float(point_count) grid += min_val return grid def center_heavy(min_val, max_val, point_count): grid = uniform(-1, 1, point_count) grid = numpy.arcsin(grid) / numpy.pi + 0.5 grid *= max_val - min_val grid += min_val return grid def left_heavy(min_val, max_val, point_count): grid = uniform(0, 1, point_count) grid = grid ** 2 grid *= max_val - min_val grid += min_val return grid def right_heavy(min_val, max_val, point_count): grid = left_heavy(max_val, min_val, point_count) return grid[::-1].copy() def pitman_yor( min_alpha=0.1, max_alpha=100, min_d=0, max_d=0.5, alpha_count=20, d_count=10): ''' For d = 0, this degenerates to the CRP, where the expected number of tables is: E[table_count] = O(alpha log(customer_count)) ''' min_alpha = float(min_alpha) max_alpha = float(max_alpha) mi
n_d = float(min_d) max_d = float(max_d) lower_triangle = [ (x, y) for x in center_heavy(0, 1, alpha_count) for y in left_heavy(0,
1, d_count) if x + y < 1 ] alpha = lambda x: min_alpha * (max_alpha / min_alpha) ** x d = lambda y: min_d + (max_d - min_d) * y grid = [ {'alpha': alpha(x), 'd': d(y)} for (x, y) in lower_triangle ] return grid
isandlaTech/cohorte-demos
led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/pelix/shell/core.py
Python
apache-2.0
48,234
0
#!/usr/bin/env python # -- Content-Encoding: UTF-8 -- """ Pelix shell bundle. Provides the basic command parsing and execution support to make a Pelix shell. :author: Thomas Calmant :copyright: Copyright 2014, isandlaTech :license: Apache License 2.0 :version: 0.5.8 :status: Beta .. Copyright 2014 isandlaTech Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ # Module version __version_info__ = (0, 5, 8) __version__ = ".".join(str(x) for x in __version_info__) # Documentation strings format __docformat__ = "restructuredtext en" # ------------------------------------------------------------------------------ # Shell constants from . import SERVICE_SHELL, SERVICE_SHELL_COMMAND, \ SERVICE_SHELL_UTILS import pelix.shell.beans as beans # Pelix modules from pelix.utilities import to_str, to_bytes import pelix.constants as constants import pelix.framework as pelix # Standard library import collections import inspect import linecache import logging import os import shlex import string import sys import traceback import threading # ------------------------------------------------------------------------------ DEFAULT_NAMESPACE = "default" """ Default command name space: default """ _logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------ def _find_assignment(arg_token): """ Find the first non-escaped assignment in the given argument token. Returns -1 if no assignment was found. :param arg_token: The argument token :return: The index of the first assignment, or -1 """ idx = arg_token.find('=') while idx != -1: if idx != 0: if arg_token[idx - 1] != '\\': # No escape character return idx idx = arg_token.find('=', idx + 1) # No assignment found return -1 class _ArgTemplate(string.Template): """ Argument string template class """ idpattern = r'[_a-z\?][_a-z0-9\.]*' def _make_args(args_list, session, fw_props): """ Converts the given list of arguments into a list (args) and a dictionary (kwargs). All arguments with an assignment are put into kwargs, others in args. :param args_list: The list of arguments to be treated :param session: The current shell session :return: The (arg_token, kwargs) tuple. """ args = [] kwargs = {} for arg_token in args_list: idx = _find_assignment(arg_token) if idx != -1: # Assignment key = arg_token[:idx] value = arg_token[idx + 1:] kwargs[key] = value else: # Direct argument args.append(arg_token) # Prepare the dictionary of variables variables = collections.defaultdict(str) variables.update(fw_props) variables.update(session.variables) # Replace variables args = [_ArgTemplate(arg).safe_substitute(variables) for arg in args] kwargs = dict((key, _ArgTemplate(value).safe_substitute(variables)) for key, value in kwargs.items()) return args, kwargs def _split_ns_command(cmd_token): """ Extracts the name space and the command name of the given command token. 
:param cmd_token: The command token :return: The extracted (name space, command) tuple """ namespace = None cmd_split = cmd_token.split('.', 1) if len(cmd_split) == 1: # No name space given command = cmd_split[0] else: # Got a name space and a command namespace = cmd_split[0] command = cmd_split[1] if not namespace: # No name space given: given an empty one namespace = "" # Use lower case values only return namespace.lower(), command.lower() # ------------------------------------------------------------------------------ class ShellUtils(object): """ Utility methods for the shell """ def bundlestate_to_str(self, state): """ Converts a bundle state integer to a string """ states = { pelix.Bundle.INSTALLED: "INSTALLED", pelix.Bundle.ACTIVE: "ACTIVE", pelix.Bundle.RESOLVED: "RESOLVED", pelix.Bundle.STARTING: "STARTING", pelix.Bundle.STOPPING: "STOPPING", pelix.Bundle.UNINSTALLED: "UNINSTALLED" } return states.get(state, "Unknown state ({0})".format(state)) def make_table(self, headers, lines, prefix=None): """ Generates an ASCII table according to the given headers and lines :param headers: List of table headers (N-tuple) :param lines: List of table lines (N-tuples) :param prefix: Optional prefix for each line :return: The ASCII representation of the table :raise ValueError: Different number of columns between headers and lines """ # Normalize the prefix prefix = str(prefix or "") # Maximum lengths lengths = [len(title) for title in headers] # Store the number of columns (0-based) nb_columns = len(lengths) - 1 # Lines str_lines = [] for idx, line in enumerate(lines): # Recompute lengths str_line = [] str_lines.append(str_line) column = -1 try: for column, entry in enumerate(line): str_entry = str(entry) str_line.append(str_entry) if len(str_entry) > lengths[column]: lengths[column] = len(str_entry) except IndexError: # Line too small/big raise ValueError("Different sizes for header and lines " "(line {0})".format(idx + 1)) except (TypeError, AttributeError): # Invalid type of line raise ValueError("Invalid type of line: %s", type(line).__name__) else:
if column != nb_columns: # Check if all lines have the same number of columns
raise ValueError("Different sizes for header and lines " "(line {0})".format(idx + 1)) # Prepare the head (centered text) format_str = "{0}|".format(prefix) for column, length in enumerate(lengths): format_str += " {%d:^%d} |" % (column, length) head_str = format_str.format(*headers) # Prepare the separator, according the length of the headers string separator = '{0}{1}'.format(prefix, '-' * (len(head_str) - len(prefix))) idx = head_str.find('|') while idx != -1: separator = '+'.join((separator[:idx], separator[idx + 1:])) idx = head_str.find('|', idx + 1) # Prepare the output output = [separator, head_str, separator.replace('-', '=')] # Compute the lines format_str = format_str.replace('^', '<') for line in str_lines: output.append(format_str.format(*line)) output.append(separator) # Force the last end of line output.append("") # Join'em return '\n'.join(output) # ------------------------------------------------------------------------------ class Shell(object): """ A simple shell, based on shlex. Allows to use name spaces. """ def __init__(self, context, utilities): """ Sets up the shell :para
poppy-project/pypot
pypot/sensor/__init__.py
Python
gpl-3.0
117
0
from .de
pth import * from .camera import * from .contact import * from .imag
efeature import * from .arduino import *
pombredanne/https-git.fedorahosted.org-git-kobo
kobo/django/auth/krb5.py
Python
lgpl-2.1
1,587
0.00189
# -*- coding: utf-8 -*- """ # This is authentication backend for Django middleware. # In settings.py you need to set: MIDDLEWARE_CLASSES = ( ... 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.RemoteUserMiddleware', ... ) AUTHENTICATION_BACKENDS = ( 'kobo.django.auth.krb5.RemoteUserBackend', ) # Add login and logout adresses to urls.py: urlpatterns = patterns("", ... url(r'^auth/krb5login/$', django.views.generic.TemplateVi
ew.as_view(template = 'auth/krb5login.html'), url(r'^auth/logout/$', 'django.contrib.auth.views.logout', kwargs={"next_page": "/"}), ... ) # Set a httpd config to protect krb5login page with kerberos. # You need to have mo
d_auth_kerb installed to use kerberos auth. # Httpd config /etc/httpd/conf.d/<project>.conf should look like this: <Location "/"> SetHandler python-program PythonHandler django.core.handlers.modpython SetEnv DJANGO_SETTINGS_MODULE <project>.settings PythonDebug On </Location> <Location "/auth/krb5login"> AuthType Kerberos AuthName "<project> Kerberos Authentication" KrbMethodNegotiate on KrbMethodK5Passwd off KrbServiceName HTTP KrbAuthRealms EXAMPLE.COM Krb5Keytab /etc/httpd/conf/http.<hostname>.keytab KrbSaveCredentials off Require valid-user </Location> """ from django.contrib.auth.backends import RemoteUserBackend class Krb5RemoteUserBackend(RemoteUserBackend): def clean_username(self, username): # remove @REALM from username return username.split("@")[0]
GooeyComps/gooey-dist
setup.py
Python
mit
542
0.068266
from distutils.core import setup from setuptools import setup, find_packages setup( name = 'go
oeydist', packages = find_packages(), # this must be the same as the name above version = '0.2', description = 'Gooey Language', author =
'Gooey Comps', author_email = '[email protected]', url = 'https://github.com/GooeyComps/gooey-dist', # use the URL to the github repo download_url = 'https://github.com/GooeyComps/gooey-dist/tarball/0.2', # I'll explain this in a second keywords = ['gui'], # arbitrary keywords classifiers = [], )
mistermatti/plugz
plugz/__init__.py
Python
bsd-3-clause
317
0
# -*- coding: utf-
8 -*- from loading import load_plugins, register_plugin from plugz import PluginTypeBase from plugintypes import StandardPluginType __author__ = 'Matti Gruener' __email__ = '[email protected]' __version__ = '0.1.5' __A
LL__ = [load_plugins, register_plugin, StandardPluginType, PluginTypeBase]
pombredanne/PythonJS
regtests/go/list_comprehension.py
Python
bsd-3-clause
167
0.107784
''' go list comp
rehensions ''' def main(): a = []int(x for x in range(3)) TestError( len(a)==3 ) TestError( a[0]==0 ) TestError( a[1]==1 ) TestError
( a[2]==2 )
LennonChin/Django-Practices
MxOnline/apps/courses/migrations/0007_course_teacher.py
Python
apache-2.0
640
0.001563
# -*- coding: utf-8 -*- # Generated by Django 1
.9.8 on 2017-09-14 23:53 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('organization', '0004_teacher_image'), ('courses', '0006_auto_20170914_2345'), ] operations = [ migrations.AddField( model_name='course',
name='teacher', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organization.Teacher', verbose_name='\u8bb2\u5e08'), ), ]
shinsterneck/pdns
regression-tests.recursor-dnssec/test_RoutingTag.py
Python
gpl-2.0
11,767
0.003144
import dns import os import socket import struct import threading import time import clientsubnetoption import subprocess from recursortests import RecursorTest from twisted.internet.protocol import DatagramProtocol from twisted.internet import reactor emptyECSText = 'No ECS received' nameECS = 'ecs-echo.example.' nameECSInvalidScope = 'invalid-scope.ecs-echo.example.' ttlECS = 60 routingReactorRunning = False class RoutingTagTest(RecursorTest): _config_template_default = """ daemon=no trace=yes dont-query= ecs-add-for=0.0.0.0/0 local-address=127.0.0.1 packetcache-ttl=0 packetcache-servfail-ttl=0 max-cache-ttl=600 threads=1 loglevel=9 disable-syslog=yes """ def sendECSQuery(self, query, expected, expectedFirstTTL=None): res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NOERROR) self.assertRRsetInAnswer(res, expected) # this will break if you are not looking for the first RR, sorry! if expectedFirstTTL is not None: self.assertEqual(res.answer[0].ttl, expectedFirstTTL) else: expectedFirstTTL = res.answer[0].ttl # wait one second, check that the TTL has been # decreased indicating a cache hit time.sleep(1) res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NOERROR) self.assertRRsetInAnswer(res, expected) self.assertLess(res.answer[0].ttl, expectedFirstTTL) def checkECSQueryHit(self, query, expected): res = self.sendUDPQuery(query) self.assertRcodeEqual(res, dns.rcode.NOERROR) self.assertRRsetInAnswer(res, expected) # this will break if you are not looking for the first RR, sorry! self.assertLess(res.answer[0].ttl, ttlECS) def setRoutingTag(self, tag): # This value is picked up by the gettag() file = open('tagfile', 'w') if tag: file.write(tag) file.close(); @classmethod def startResponders(cls): global routingReactorRunning print("Launching responders..") address = cls._PREFIX + '.24' port = 53 if not routingReactorRunning: reactor.listenUDP(port, UDPRoutingResponder(), interface=address) routingReactorRunning = True if not reactor.running: cls._UDPResponder = threading.Thread(name='UDP Routing Responder', target=reactor.run, args=(False,)) cls._UDPResponder.setDaemon(True) cls._UDPResponder.start() @classmethod def setUpClass(cls): cls.setUpSockets() cls.startResponders() confdir = os.path.join('configs', cls._confdir) cls.createConfigDir(confdir) cls.generateRecursorConfig(confdir) cls.startRecursor(confdir, cls._recursorPort) print("Launching tests..") @classmethod def tearDownClass(cls): cls.tearDownRecursor() os.unlink('tagfile') class testRoutingTag(RoutingTagTest): _confdir = 'RoutingTag' _config_template = """ log-common-errors=yes use-incoming-edns-subnet=yes edns-subnet-whitelist=ecs-echo.example. 
forward-zones=ecs-echo.example=%s.24 """ % (os.environ['PREFIX']) _lua_dns_script_file = """ function gettag(remote, ednssubnet, localip, qname, qtype, ednsoptions, tcp, proxyProtocolValues) local rtag for line in io.lines('tagfile') do rtag = line break end return 0, nil, nil, nil, nil, nil, rtag end """ def testSendECS(self): # First send an ECS query with routingTag self.setRoutingTag('foo') expected1 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.2.0/24') ecso = clientsubnetoption.ClientSubnetOption('192.0.2.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expected1) # Now check a cache hit with the same routingTag (but no ECS) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.checkECSQueryHit(query, expected1) expected2 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '127.0.0.0/24') # And see if a different tag does *not* hit the first one self.setRoutingTag('bar') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And see if a *no* tag does *not* hit the first one expected3 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.3.0/24') self.setRoutingTag(None) ecso = clientsubnetoption.ClientSubnetOption('192.0.3.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expected3) # And see if an unknown tag from the same subnet does hit the last self.setRoutingTag('baz') ecso = clientsubnetoption.ClientSubnetOption('192.0.3.2', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.checkECSQueryHit(query, expected3) # And a no tag and no subnet query does hit the general case self.setRoutingTag(None) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And a unknown tag and no subnet query does hit the general case self.setRoutingTag('bag') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) return # remove this line to peek at cache rec_controlCmd = [os.environ['RECCONTROL'], '--config-dir=%s' % 'configs/' + self._confdir, 'dump-cache', 'x'] try: expected = b'dumped 7 records\n' ret = subprocess.check_output(rec_controlCmd, stderr=subprocess.STDOUT) self.assertEqual(ret, expected) except subprocess.CalledProcessError as e: print(e.output) raise class testRoutingTagFFI(RoutingTagTest): _confdir = 'RoutingTagFFI' _config_template = """ log-common-errors=yes use-incoming-edns-subnet=yes edns-subnet-whitelist=ecs-echo.example. forward-zones=ecs-echo.example=%s.24 """ % (os.environ['PREFIX']) _lua_dns_script_file = """ local ffi = require("ffi") ffi.cdef[[ typedef struct pdns_ffi_param pdns_ffi_param_t; const char* pdns_ffi_param_get_qname(pdns_ffi_param_t* ref); void pdns_ffi_param_set_routingtag(pdns_ffi_param_t* ref, const char* rtag); ]] function gettag_ffi(obj) for line in io.lines('tagfile') do local rtag = ffi.string(line) ffi.C.pdns_ffi_param_set_routingtag(obj, rtag) break end return 0 end """ def testSendECS(self): # First send an ECS query with routingTag self.setRoutingTag('foo') expected1 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.2.0/24') ecso = clientsubnetoption.ClientSubnetOption('192.0.2.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expecte
d1)
# Now check a cache hit with the same routingTag (but no ECS) query = dns.message.make_query(nameECS, 'TXT', 'IN') self.checkECSQueryHit(query, expected1) expected2 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '127.0.0.0/24') # And see if a different tag does *not* hit the first one self.setRoutingTag('bar') query = dns.message.make_query(nameECS, 'TXT', 'IN') self.sendECSQuery(query, expected2) # And see if a *no* tag does *not* hit the first one expected3 = dns.rrset.from_text(nameECS, ttlECS, dns.rdataclass.IN, 'TXT', '192.0.3.0/24') self.setRoutingTag(None) ecso = clientsubnetoption.ClientSubnetOption('192.0.3.1', 32) query = dns.message.make_query(nameECS, 'TXT', 'IN', use_edns=True, options=[ecso], payload=512) self.sendECSQuery(query, expected3) # And see if an unknown tag from the same subnet does hit the
jvrsantacruz/XlsxWriter
xlsxwriter/test/comparison/test_utf8_03.py
Python
bsd-2-clause
1,089
0
##################################################################
############# # _*_ coding: utf-8 # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, [email protected] # from __future__ import unicode_literals from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """
Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'utf8_03.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {} def test_create_file(self): """Test the creation of an XlsxWriter file with utf-8 strings.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet('Café') worksheet.write('A1', 'Café') workbook.close() self.assertExcelEqual()
freedomtan/tensorflow
tensorflow/python/training/ftrl.py
Python
apache-2.0
13,362
0.00232
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ftrl-proximal for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.training import optimizer from tensorflow.python.training import training_ops from tensorflow.python.util.tf_export import tf_export @tf_export(v1=["train.FtrlOptimizer"]) class FtrlOptimizer(optimizer.Optimizer): """Optimizer that implements the FTRL algorithm. This version has support for both online L2 (McMahan et al., 2013) and shrinkage-type L2, which is the addition of an L2 penalty to the loss function. References: Ad-click prediction: [McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200) ([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526)) """ def __init__(self, learning_rate, learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name="Ftrl", accum_name=None, linear_name=None, l2_shrinkage_regularization_strength=0.0, beta=None): r"""Construct a new FTRL optimizer. Args: learning_rate: A float value or a constant float `Tensor`. learning_rate_power: A float value, must be less or equal to zero. Controls how the learning rate decreases during training. Use zero for a fixed learning rate. See section 3.1 in (McMahan et al., 2013). initial_accumulator_value: The starting value for accumulators. Only zero or positive values are allowed. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If `True` use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to "Ftrl". accum_name: The suffix for the variable that keeps the gradient squared accumulator. If not present, defaults to name. linear_name: The suffix for the variable that keeps the linear gradient accumulator. If not present, defaults to name + "_1". l2_shrinkage_regularization_strength: A float value, must be greater than or equal to zero. This differs from L2 above in that the L2 above is a stabilization penalty, whereas this L2 shrinkage is a magnitude penalty. The FTRL formulation can be written as: w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where \hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss function w.r.t. the weights w. 
Specifically, in the absence of L1 regularization, it is equivalent to the following update rule: w_{t+1} = w_t - lr_t / (beta + 2*L2*lr_t) * g_t - 2*L2_shrinkage*lr_t / (beta + 2*L2*lr_t) * w_t where lr_t is the learning rate at t. When input is sparse shrinkage will only happen on the active weights. beta: A float value; corresponds to the beta parameter in the paper. Raises: ValueError: If one of the arguments is invalid. References: Ad-click prediction: [McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200) ([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526)) """ super(FtrlOptimizer, self).__init__(use_locking, name) if initial_accumulator_value < 0.0: raise ValueError( "initial_accumulator_value %f needs to be positive or zero" % initial_accumulator_value) if learning_rate_power > 0.0: raise ValueError("learning_rate_power %f needs to be negative or zero" % learning_rate_power) if l1_regularization_strength < 0.0: raise ValueError( "l1_regularization_strength %f needs to be positive or zero" % l1_regularization_strength) if l2_regularization_strength < 0.0: raise ValueError( "l2_regularization_strength %f needs to be positive or zero" % l2_regularization_strength) if l2_shrinkage_regularization_strength < 0.0: raise ValueError( "l2_shrinkage_regularization_strength %f needs to be positive" " or zero" % l2_shrinkage_regularization_strength) self._learning_rate = learning_rate self._learning_rate_power = learning_rate_power self._initial_accumulator_value = initial_accumulator_value self._l1_regularization_strength = l1_regularization_strength self._l2_regularization_strength = l2_regularization_strength self._beta = (0.0 if beta is None else beta) self._l2_shrinkage_regularization_strength = ( l2_shrinkage_regularization_strength) self._learning_rate_tensor = None self._learning_rate_power_tensor = None self._l1_regularization_strength_tensor = None self._adjusted_l2_regularization_strength_tensor = None self._l2_shrinkage_regularization_strength_tensor = None self._accum_name = accum_name self._linear_name = linear_name def _create_slots(self, var_list): # Create the "accum" and "linear" slots. for v in var_list: val = constant_op.constant( self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape()) self._get_or_make_slot(v, val, "accum", self._accum_name or self._name) self._zeros_slot(v, "linear", self._linear_n
ame or self._name) def _prepare(self): self._learning_rate_tensor = ops.convert_to_tensor( self._learning_rate, name="learning_rate") self._l1_regularization_strength_tensor = ops.convert_to_tensor( self._l1_regularization_strength, name="l1_regularization_strength") # L2 regu
larization strength with beta added in so that the underlying # TensorFlow ops do not need to include that parameter. self._adjusted_l2_regularization_strength_tensor = ops.convert_to_tensor( self._l2_regularization_strength + self._beta / (2. * math_ops.maximum(self._learning_rate, 1e-36)), name="adjusted_l2_regularization_strength") assert self._adjusted_l2_regularization_strength_tensor is not None self._beta_tensor = ops.convert_to_tensor(self._beta, name="beta") self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor( self._l2_shrinkage_regularization_strength, name="l2_shrinkage_regularization_strength") self._learning_rate_power_tensor = ops.convert_to_tensor( self._learning_rate_power, name="learning_rate_power") def _apply_dense(self, grad, var): accum = self.get_slot(var, "accum") linear = self.get_slot(var, "linear") if self._l2_shrinkage_regularization_strength <= 0.0: return training_ops.apply_ftrl( var, accum, linear, grad, math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype), math_ops.cast(self._l1_regularization_strength_tensor, var.dtype.base_dtype), mat
SHA2017-badge/micropython-esp32
esp32/modules/setup.py
Python
mit
894
0.004474
# File: setup.py # Version: 3 # Description: Setup for SHA2017 badge # License: MIT # Authors: Renze Nicolai
<[email protected]> # Thomas Roos <?> import ugfx, badge, appglue, dialogs, easydraw, time def asked_nickname(value): if value: badge.nvs_set_str("owner", "name", value) # Do the firstboot magic newState = 1 if badge.nvs_get_u8('badge', 'setup.state', 0) ==
0 else 3 badge.nvs_set_u8('badge', 'setup.state', newState) # Show the user that we are done easydraw.msg("Hi "+value+"!", 'Your nick has been stored to flash!') time.sleep(0.5) else: badge.nvs_set_u8('badge', 'setup.state', 2) # Skip the sponsors badge.nvs_set_u8('sponsors', 'shown', 1) appglue.home() ugfx.init() nickname = badge.nvs_get_str("owner", "name", "") dialogs.prompt_text("Nickname", nickname, cb=asked_nickname)
MADindustries/WhatManager2
WhatManager2/checks.py
Python
mit
2,864
0.003492
import os from home.models import ReplicaSet, WhatTorrent, WhatFulltext def run_checks(): errors = [] warnings = [] # Check WhatFulltext integrity def check_whatfulltext(): w_torrents = dict((w.id, w) for w in WhatTorrent.objects.defer('torrent_file').all()) w_fulltext = dict((w.id, w) for w in WhatFulltext.objects.all()) for id, w_t in w_torrents.items(): if id not in w_fulltext: errors.append(u'{0} does not have a matching fulltext entry.'.format(w_t)) elif not w_fulltext[id].match(w_t): errors.append(u'{0} does not match info with fulltext entry.'.format(w_t)) for id, w_f in w_fulltext.items(): if id not in w_torrents: errors.append(u'{0} does not have a matching whattorrent entry.'.format(w_f)) check_whatfulltext() for replica_set in ReplicaSet.objects.all(): m_torrents = {} for instance in replica_set.transinstance_set.all(): i_m_torrents = instance.get_m_torrents_by_hash() i_t_torrents = instance.get_t_torrents_by_hash(['id', 'hashString']) for hash, m_torrent in i_m_torrents.items(): # Check if this torrent is already in another instance if hash in m_torrents: warnings.append(u'{0} is already in another instance of ' u'the same replica set: {1}' .format(m_torrent, m_torrents[hash].instance)) # Check if the instance has the torrent if hash not in i_t_torrents: errors.append(u'{0} is in DB, but not in Transmission at instance {1}' .format(m_torrent, instance)) m_torrents[hash] = m_torrent # Check for the presence of metafiles if the instance is a master if replica_set.is_master: files_in_dir = os.listdir(m_torrent.path) if not any('.torrent' in f for
f in files_in_dir): errors.append(u'Missing .torrent file for {0} at {1}'
.format(m_torrent, instance)) if not any('ReleaseInfo2.txt' == f for f in files_in_dir): errors.append(u'Missing ReleaseInfo2.txt for {0} at {1}' .format(m_torrent, instance)) for hash, t_torrent in i_t_torrents.items(): # Check if the database has the torrent if hash not in i_m_torrents: errors.append(u'{0} is in Transmission, but not in DB at instance {1}' .format(t_torrent, instance)) return { 'errors': errors, 'warnings': warnings }
asposecells/Aspose_Cells_Cloud
Examples/Python/Examples/ClearCellFormattingInExcelWorksheet.py
Python
mit
1,485
0.010101
import asposecellscloud from asposecellscloud.CellsApi import CellsApi from asposecellscloud.CellsApi import ApiException import asposestoragecloud from asposestoragecloud.StorageApi import StorageApi apiKey = "XXXXX" #sepcify App Key appSid = "XXXXX" #sepcify App SID apiServer = "http://api.aspose.com/v1.1" data_folder = "../../data/" #Instantiate Aspose Storage API SDK storage_apiClient = asposestoragecloud.ApiClient.ApiClient(apiKey, appSid, True) storageApi = StorageApi(storage_apiClient) #Instantiate Aspose Cells API SDK api_client = asposecellscloud.ApiClient.ApiClient(apiKey, appSid, True) cellsApi = CellsApi(api_client); #set input file name filename = "Sample_Test_Book.xls"
sheetName = "Sheet1" range = "A1:A12" #upload fi
le to aspose cloud storage storageApi.PutCreate(Path=filename, file=data_folder + filename) try: #invoke Aspose.Cells Cloud SDK API to clear cells formatting in a worksheet response = cellsApi.PostClearFormats(name=filename, sheetName=sheetName, range=range) if response.Status == "OK": #download updated Workbook from storage server response = storageApi.GetDownload(Path=filename) outfilename = "c:/temp/" + filename with open(outfilename, 'wb') as f: for chunk in response.InputStream: f.write(chunk) except ApiException as ex: print "ApiException:" print "Code:" + str(ex.code) print "Message:" + ex.message
CivicKnowledge/censuslib
censuslib/__init__.py
Python
mit
3,138
0.013384
# Support for building census bundles in Ambry __version__ = 0.8 __author__ = '[email protected]' from .generator import * from .schema import * from .sources import * from .transforms import * import ambry.bundle class AcsBundle(ambry.bundle.Bundle, MakeTablesMixin, MakeSourcesMixin, JamValueMixin, JoinGeofileMixin): # Which of the first columns in the data tables to use. header_cols = [ # Column name, Description, width, datatype, column position #('FILEID','File Identification',6,'str' ), #('FILETYPE','File Type',6,'str'), ('STUSAB','State/U.S.-Abbreviation (USPS)',2,'str',2 ), ('CHARITER','Character Iteration',3,'str',3 ), ('SEQUENCE','Sequence Number',4,'int',4 ), ('LOGRECNO','Logical Record Number',7,'int',5 ) ] def init(self): from .util import year_release self.year, self.release = year_release(self) self.log("Building Census bundle, year {}, release {}".format(self.year, self.release)) def edit_pipeline(self, pipeline): """Change the SelectPartitionFromSource so it only writes a single partition""" from ambry
.etl import SelectPartitionFromSource # THe partition is named only after the table. def select_f(pipe, bundle, source, row): return
source.dest_table.name pipeline.select_partition = SelectPartitionFromSource(select_f) @CaptureException def _pre_download(self, gen_cls): """Override the ingestion process to download all of the input files at once. This resolves the contention for the files that would occurr if many generators are trying to download the same files all at once. """ from ambry_sources import download cache = self.library.download_cache source = self.source('b00001') # First; any one will do g = gen_cls(self, source) downloads = [] for spec1, spec2 in g.generate_source_specs(): downloads.append( (spec1.url, cache) ) # The two specs usually point to different files in the same zip archive, but I'm not sure # that is always true. if spec1.url != spec2.url: downloads.append((spec2.url, cache)) # Multi-processing downloads might improve the speed, although probably not by much. for url, cache in downloads: self.log("Pre-downloading: {}".format(url)) download(url, cache) class ACS2009Bundle(AcsBundle): pass class ACS2010Bundle(AcsBundle): @CaptureException def ingest(self, sources=None, tables=None, stage=None, force=False, update_tables=True): """Override the ingestion process to download all of the input files at once. This resolves the contention for the files that would occurr if many generators are trying to download the same files all at once. """ from.generator import ACS09TableRowGenerator self._pre_download(ACS09TableRowGenerator) return super(ACS2010Bundle, self).ingest(sources, tables, stage, force, update_tables)
TakeshiTseng/SDN-Work
mininet/bgp/topo.py
Python
mit
3,549
0.001972
#!/usr/bin/python from mininet.topo import Topo from mininet.net import Mininet from mininet.cli import CLI from mininet.log import setLogLevel, info, debug from mininet.node import Host, RemoteController, OVSSwitch # Must exist and be owned by quagga user (quagga:quagga by default on Ubuntu) QUAGGA_RUN_DIR = '/var/run/quagga' QCONFIG_DIR = 'configs' ZCONFIG_DIR = 'configs' class SdnIpHost(Host): def __init__(self, name, ip, route, *args, **kwargs): Host.__init__(self, name, ip=ip, *args, **kwargs) self.route = route def config(self, **kwargs): Host.config(self, **kwargs) debug("configuring route %s" % self.route) self.cmd('ip route add default via %s' % self.route) class Router(Host): def __init__(self, name, quaggaConfFile, zebraConfFile, intfDict, *args, **kwargs): Host.__init__(self, name, *args, **kwargs) self.quaggaConfFile = quaggaConfFile self.zebraConfFile = zebraConfFile self.intfDict = intfDict def config(self, **kwargs): Host.config(self, **kwargs) self.cmd('sysctl net.ipv4.ip_forward=1') for intf, attrs in self.intfDict.items(): self.cmd('ip addr flush dev %s' % intf) # setup mac address to specific interface if 'mac' in attrs: self.cmd('ip link set %s down' % intf) self.cmd('ip link set %s address %s' % (intf, attrs['mac'])) self.cmd('ip link set %s up ' % intf) # setup address to interfaces for addr in attrs['ipAddrs']: self.cmd('ip addr add %s dev %s' % (addr, intf)) sel
f.cmd('zebra -d -f %s -z %s/zebra%s.api -i %s/zebra%s.pid' % (self.zebraConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name)) self.cmd('bgpd -d -f %s -z %s/zebra%s.api -i %s/bgpd%s.pid' % (self.quaggaConfFile, QUAGGA_RUN_DIR, self.name, QUAGGA_RUN_DIR, self.name))
def terminate(self): self.cmd("ps ax | egrep 'bgpd%s.pid|zebra%s.pid' | awk '{print $1}' | xargs kill" % (self.name, self.name)) Host.terminate(self) class SdnIpTopo(Topo): def build(self): zebraConf = '{}/zebra.conf'.format(ZCONFIG_DIR) s1 = self.addSwitch('s1', dpid='0000000000000001', cls=OVSSwitch, failMode="standalone") # Quagga 1 bgpEth0 = { 'mac': '00:00:00:00:00:01', 'ipAddrs': [ '10.0.1.1/24', ] } bgpIntfs = { 'bgpq1-eth0': bgpEth0 } bgpq1 = self.addHost("bgpq1", cls=Router, quaggaConfFile='{}/quagga1.conf'.format(QCONFIG_DIR), zebraConfFile=zebraConf, intfDict=bgpIntfs) self.addLink(bgpq1, s1) # Quagga 2 bgpEth0 = { 'mac': '00:00:00:00:00:02', 'ipAddrs': [ '10.0.2.1/24', ] } bgpIntfs = { 'bgpq2-eth0': bgpEth0 } bgpq2 = self.addHost("bgpq2", cls=Router, quaggaConfFile='{}/quagga2.conf'.format(QCONFIG_DIR), zebraConfFile=zebraConf, intfDict=bgpIntfs) self.addLink(bgpq2, s1) topos = {'sdnip': SdnIpTopo} if __name__ == '__main__': setLogLevel('debug') topo = SdnIpTopo() net = Mininet(topo=topo, controller=RemoteController) net.start() CLI(net) net.stop() info("done\n")
huyphan/pyyawhois
test/record/parser/test_response_whois_nic_pw_status_available.py
Python
mit
2,000
0.003
# This file is autogenerated. Do not edit it manually. # If you want change the content of this file, edit # # spec/fixtures/responses/whois.nic.pw/status_available # # and regenerate the tests with the following script # # $ scripts/generate_tests.py # fr
om nose.tools import * from dateutil.parser import parse as time_parse import yawhois class TestWhoisNicPwStatusAvailable(object): def setUp(self): fixture_path = "spec/fixtures/responses/whois.nic.pw/status_available.txt"
host = "whois.nic.pw" part = yawhois.record.Part(open(fixture_path, "r").read(), host) self.record = yawhois.record.Record(None, [part]) def test_status(self): eq_(self.record.status, []) def test_available(self): eq_(self.record.available, True) def test_domain(self): eq_(self.record.domain, None) def test_nameservers(self): eq_(self.record.nameservers.__class__.__name__, 'list') eq_(self.record.nameservers, []) def test_admin_contacts(self): eq_(self.record.admin_contacts.__class__.__name__, 'list') eq_(self.record.admin_contacts, []) def test_registered(self): eq_(self.record.registered, False) def test_created_on(self): eq_(self.record.created_on, None) def test_registrar(self): eq_(self.record.registrar, None) def test_registrant_contacts(self): eq_(self.record.registrant_contacts.__class__.__name__, 'list') eq_(self.record.registrant_contacts, []) def test_technical_contacts(self): eq_(self.record.technical_contacts.__class__.__name__, 'list') eq_(self.record.technical_contacts, []) def test_updated_on(self): eq_(self.record.updated_on, None) def test_domain_id(self): eq_(self.record.domain_id, None) def test_expires_on(self): eq_(self.record.expires_on, None) def test_disclaimer(self): eq_(self.record.disclaimer, None)
kd0aij/matrixpilot_old
Tools/MAVLink/mavlink/pymavlink/generator/mavtemplate.py
Python
gpl-3.0
5,130
0.003119
#!/usr/bin/env python ''' simple templating system for mavlink generator Copyright Andrew Tridgell 2011 Released under GNU GPL version 3 or later ''' from mavparse import MAVParseError class MAVTemplate(object): '''simple templating system''' def __init__(self, start_var_token="${", end_var_token="}", start_rep_token="${{", end_rep_token="}}", trim_leading_lf=True, checkmissing=True): self.start_var_token = start_var_token self.end_var_token = end_var_token self.start_rep_token = start_rep_token self.end_rep_token = end_rep_token self.trim_leading_lf = trim_leading_lf self.checkmissing = checkmissing def find_end(self, text, start_token, end_token): '''find the of a token. Returns the offset in the string immediately after the matching end_token''' if not text.startswith(start_token): raise MAVParseError("invalid token start") offset = len(start_token) nesting = 1 while nesting > 0: idx1 = text[offset:].find(start_token) idx2 = text[offset:].find(end_token) if idx1 == -1 and idx2 == -1: raise MAVParseError("token nesting error") if idx1 == -1 or idx1 > idx2: offset += idx2 + len(end_token) nesting -= 1 else: offset += idx1 + len(start_token) nesting += 1 return offset def find_var_end(self, text): '''find the of a variable''' return self.find_end(text, self.start_var_token, self.end_var_token) def find_rep_end(self, text): '''find the of a repitition''' return self.find_end(text, self.start_rep_token, self.end_rep_token) def substitute(self, text, subvars={}, trim_leading_lf=None, checkmissing=None): '''substitute variables in a string''' if trim_leading_lf is None: trim_leading_lf = self.trim_leading_lf if checkmissing is None: chec
kmissing = self.checkmissing # handle repititions while True: subidx = text.find(self.start_rep_token) if subidx == -1: break endidx = self.find_rep_end(text[subidx:]) if endidx == -1: raise
MAVParseError("missing end macro in %s" % text[subidx:]) part1 = text[0:subidx] part2 = text[subidx+len(self.start_rep_token):subidx+(endidx-len(self.end_rep_token))] part3 = text[subidx+endidx:] a = part2.split(':') field_name = a[0] rest = ':'.join(a[1:]) v = getattr(subvars, field_name, None) if v is None: raise MAVParseError('unable to find field %s' % field_name) t1 = part1 for f in v: t1 += self.substitute(rest, f, trim_leading_lf=False, checkmissing=False) if len(v) != 0 and t1[-1] in ["\n", ","]: t1 = t1[:-1] t1 += part3 text = t1 if trim_leading_lf: if text[0] == '\n': text = text[1:] while True: idx = text.find(self.start_var_token) if idx == -1: return text endidx = text[idx:].find(self.end_var_token) if endidx == -1: raise MAVParseError('missing end of variable: %s' % text[idx:idx+10]) varname = text[idx+2:idx+endidx] if isinstance(subvars, dict): if not varname in subvars: if checkmissing: raise MAVParseError("unknown variable in '%s%s%s'" % ( self.start_var_token, varname, self.end_var_token)) return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars, trim_leading_lf=False, checkmissing=False) value = subvars[varname] else: value = getattr(subvars, varname, None) if value is None: if checkmissing: raise MAVParseError("unknown variable in '%s%s%s'" % ( self.start_var_token, varname, self.end_var_token)) return text[0:idx+endidx] + self.substitute(text[idx+endidx:], subvars, trim_leading_lf=False, checkmissing=False) text = text.replace("%s%s%s" % (self.start_var_token, varname, self.end_var_token), str(value)) return text def write(self, file, text, subvars={}, trim_leading_lf=True): '''write to a file with variable substitution''' file.write(self.substitute(text, subvars=subvars, trim_leading_lf=trim_leading_lf))
youtube/cobalt
third_party/v8/tools/testrunner/testproc/util.py
Python
bsd-3-clause
2,389
0.007535
#!/usr/bin/env python # Copyright 2020 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import heapq import os import platform import random import signal import subprocess # Base dir of the build products for Release and Debug. OUT_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out')) def list_processes_linux(): """Returns list of tuples (pid, command) of processes running in the same out directory as this checkout. """ if platform.system() != 'Linux': return [] try: cmd = 'pgrep -fa %s' % OUT_DIR output = subprocess.check_output(cmd, shell=True) or '' processes = [ (int(line.split()[0]), line[line.index(OUT_DIR):]) for line in output.splitlines() ] # Filter strange process with name as out dir. return [p for p in processes if p[1] != OUT_DIR] except: return [] def kill_processes_linux(): """Kill stray processes on the system that started in the same out directory. All swarming tasks share the same out directory location. """ if platform.system() != 'Linux': return for pid, cmd in list_processes_linux(): try: print('Attempting to kill %d - %s' % (pid, cmd)) os.kill(pid, signal.SIGKILL) except: pass class FixedSizeTopList(): """Utility collection for gathering a fixed number of elements with the biggest value for the given key. It employs a heap from which we pop the smallest element when the collection is 'full'. If you need a reversed behaviour (collect min values) just provide an inverse key.""" def __init__(self, size, key=None): self.size = size self.key = key or (lambda x: x) self.data = [] self.discriminator = 0 def add(self, elem): elem_k = self.key(elem) heapq.heappush(self.data, (elem_k, self.extra_key(), elem)) if len(self.data) > self.size: heapq.heappop(self.data) def extra_key(self): # Avoid key clash in tuples sent to the heap. # We want to avoid comparisons o
n the last element of the tuple # since those elements might not be comparable. self.discriminator += 1 return self.discriminator def as_list(self): original_data = [rec for (_, _, rec) in self.data]
return sorted(original_data, key=self.key, reverse=True)
thegooglecodearchive/marave
marave/plugins/rst2pdf.py
Python
gpl-2.0
1,268
0.014984
# -*- coding: utf-8 -*- from plugins import Plugin from PyQt4 import QtCore, QtGui import tempfile, codecs import os, subprocess class rst2pdf(Plugin): name='rst2pdf' shortcut='Ctrl+F8' description='Run through rst2pdf and preview' tmpf=None def run(self): print "Running rst2pdf" text=unicode(self.client.editor.toPlainText()) # Save to a named file if self.tmpf is None: self.tmpf=tempfile.NamedTemporaryFile(delete=False) self.tmpf.close() f=codecs.open(self.tmpf.name,'w','utf-8') f.write(text) f.close() # FIXME: unsafe # FIXME: show output of the process somewhere try: se
lf.client.notify('Running rst2pdf') subproc
ess.check_call('rst2pdf %s'%self.tmpf.name, shell=True) except subprocess.CalledProcessError: #FIXME: show error dialog return # Open with default PDF viewer # FIXME: would be awesome if we could know when this is still open # and not launch it again, since it refreshes automatically. self.client.notify('Launching PDF viewer') QtGui.QDesktopServices.openUrl(QtCore.QUrl('file://'+self.tmpf.name+'.pdf'))
iModels/mbuild
mbuild/formats/hoomd_simulation.py
Python
mit
16,626
0.00012
"""HOOMD simulation format.""" import itertools import operator import warnings from collections import namedtuple import numpy as np import parmed as pmd import mbuild as mb from mbuild.utils.conversion import RB_to_OPLS from mbuild.utils.io import import_ from mbuild.utils.sorting import natural_sort from .hoomd_snapshot import to_hoomdsnapshot gsd = import_("gsd") gsd.hoomd = import_("gsd.hoomd") hoomd = import_("hoomd") hoomd.md = import_("hoomd.md") hoomd.md.pair = import_("hoomd.md.pair") hoomd.md.special_pair = import_("hoomd.md.special_pair") hoomd.md.charge = import_("hoomd.md.charge") hoomd.md.bond = import_("hoomd.md.bond") hoomd.md.angle = import_("hoomd.md.angle") hoomd.md.dihedral = import_("hoomd.md.dihedral") hoomd.group = import_("hoomd.group") def create_hoomd_simulation( structure, ref_distance=1.0, ref_mass=1.0, ref_energy=1.0, r_cut=1.2, auto_scale=False, snapshot_kwargs={}, pppm_kwargs={"Nx": 8, "Ny": 8, "Nz": 8, "order": 4}, init_snap=None, restart=None, ): """Convert a parametrized pmd.Structure to hoomd.SimulationContext. Parameters ---------- structure : parmed.Structure ParmEd Structure object ref_distance : float, optional, default=1.0 Reference distance for conversion to reduced units ref_mass : float, optional, default=1.0 Reference mass for conversion to reduced units ref_energy : float, optional, default=1.0 Reference energy for conversion to reduced units r_cut : float, optional, default 1.2 Cutoff radius, in reduced units auto_scale : bool, optional, default=False Automatically use largest sigma value as ref_distance, largest mass value as ref_mass, and largest epsilon value as ref_energy snapshot_kwargs : dict Kwargs to pass to to_hoomdsnapshot pppm_kwargs : dict Kwargs to pass to hoomd's pppm function init_snap : hoomd.data.SnapshotParticleData, optional, default=None Initial snapshot to which to add the ParmEd structure object (useful for rigid bodies) restart : str, optional, default=None Path to the gsd file from which to restart the simulation. https://hoomd-blue.readthedocs.io/en/v2.9.4/restartable-jobs.html Note: It is assumed that the ParmEd structure and the system in restart.gsd contain the same types. The ParmEd structure is still used to initialize the forces, but restart.gsd is used to initialize the system state (e.g., particle positions, momenta, etc). Returns ------- hoomd_objects : list List of hoomd objects created during conversion ReferenceValues : namedtuple Values used in scaling Notes ----- While the hoomd objects are returned, the hoomd.SimulationContext is accessible via `hoomd.context.current`. If you pass a non-parametrized pmd.Structure, you will not have angle, dihedral, or force field information. You may be better off creating a hoomd.Snapshot. Reference units should be expected to convert parmed Structure units: angstroms, kcal/mol, and daltons """ if isinstance(structure, mb.Compound): raise ValueError( "You passed mb.Compound to create_hoomd_simulation, there will be " "no angles, dihedrals, or force field parameters. 
Please use " "hoomd_snapshot.to_hoomdsnapshot to create a hoomd.Snapshot, then " "create your own hoomd context and pass your hoomd.Snapshot to " "hoomd.init.read_snapshot()" ) elif not isinstance(structure, pmd.Structure): raise ValueError( "Please pass a parmed.Structure to create_hoomd_simulation" ) _check_hoomd_version() version_numbers = _check_hoomd_version() if float(version_numbers[0]) >= 3: warnings.warn( "Warning when using Hoomd 3, potential API change where the hoomd " "context is not updated upon creation of forces - utilize the " "returned `hoomd_objects`" ) hoomd_objects = [] # Potential adaptation for Hoomd v3 API if auto_scale: ref_mass = max([atom.mass for atom in structure.atoms]) pair_coeffs = list( set((a.type, a.epsilon, a.sigma) for a in structure.atoms) ) ref_energy = max(pair_coeffs, key=operator.itemgetter(1))[1] ref_distance = max(pair_coeffs, key=operator.itemgetter(2))[2] ReferenceValues = namedtuple("ref_values", ["distance", "mass", "energy"]) ref_values = ReferenceValues(ref_distance, ref_mass, ref_energy) if not hoomd.context.current: hoomd.context.initialize("") if restart is None: snapshot, _ = to_hoomdsnapshot( structure, ref_distance=ref_distance, ref_mass=ref_mass, ref_energy=ref_energy, **snapshot_kwargs, hoomd_snapshot=init_snap, ) hoomd_objects.append(snapshot) hoomd_system = hoomd.init.read_snapshot(snapshot) hoomd_objects.append(hoomd_system) else: with gsd.hoomd.open(restart) as f: snapshot = f[-1] hoomd_objects.append(snapshot) hoomd_system = hoomd.init.read_gsd(restart, restart=restart) hoomd_objects.append(hoomd_system) print("Simulation initialized from restart file") nl = hoomd.md.nlist.cell() nl.reset_exclusions(exclusions=["1-2", "1-3"]) hoomd_objects.append(nl) if structure.atoms[0].type != "": print("Processing LJ and QQ") lj = _init_hoomd_lj( structure, nl, r_cut=r_cut, ref_distance=ref_distance, ref_energy=ref_energy, ) qq = _init_hoomd_qq(structure, nl, r_cut=r_cut, **pppm_kwargs) hoomd_objects.append(lj) hoomd_objects.append(qq) if structure.adjusts: print("Processing 1-4 interactions, adjusting neighborlist exclusions") lj_14, qq_14 = _init_hoomd_14_pairs( structure, nl, ref_distance=ref_distance, ref_energy=ref_energy ) hoomd_objects.append(lj_14) hoomd_objects.append(qq_14) if structure.bond_types: print("Processing harmonic bonds") harmonic_bond = _init_hoomd_bonds( structure, ref_distance=ref_distance, ref_energy=ref_energy ) hoomd_objects.append(harmonic_bond) if structure.angle_types: print("Processing harmonic angles") harmonic_angle = _init_hoomd_angles(structure, ref_energy=ref_energy) hoomd_objects.append(harmonic_angle) if structure.dihedral_types: print("Processing periodic torsions") periodic_torsions = _init_hoomd_dihedrals( structure, ref_energy=ref_energy ) hoomd_objects.append(periodic_torsions) if structure.rb_torsion_types: print("Processing RB torsions") rb_torsions = _init_hoomd_rb_torsions(structure, ref_energy=ref_energy) hoomd_objects.append(rb_torsions) print("HOOMD SimulationContext updated from ParmEd Structure") return hoomd_objects, ref_values def _init_hoomd_lj(structure, nl, r_cut=1.2, ref_distance=1.0, ref_energy=1.0): """LJ parameters.""" # Identify the unique atom types before setting atom_type_params = {} for atom in structure.atoms: if atom.type not in atom_type_params: atom_type_params[atom.type] = atom.atom_type # Set the hoomd parameters for self-interactions lj = hoomd.md.pair.lj(r_cut, nl) for name, atom_type in atom_type_params.items(): lj.pair_coeff.set( name,
name, sigma=atom_type.sigma / ref_distance, epsilon=atom_type.epsilon / ref_energy, ) # Cross interactions, mixing rules, NBfixes all_atomtypes = sorted(atom_type_params.keys()) for a1, a2 in itertools.combinations_with_replacement(all_atomtypes, 2): nb_fix_
info = atom_type_params[a1].nbfix.get(a2, None)
        # nb_fix_info = (rmin, eps, rmin14, eps14)
        if nb_fix_info is None:
clione/django-kanban
src/kanban/wsgi.py
Python
mit
1,419
0.000705
""" WSGI config for kanban project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks # if running multiple sites in the same mod_wsgi process. To fix this, use # mod_wsgi daemon mode with each site in its own daemon process, or use # os.environ["DJANGO_SETTINGS_MODULE"] = "kanban.settings" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kanban.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application application = get_wsgi_application() # Apply WS
GI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorld
Application(application)
PyQwt/PyQwt5
qt3examples/ReallySimpleDemo.py
Python
gpl-2.0
2,005
0.015461
#!/usr/bin/env python

# The really simple Python version of Qwt-5.0.0/examples/simple

# for debugging, requires: python configure.py --trace ...
if False:
    import sip
    sip.settracemask(0x3f)

import sys
import qt
import Qwt5 as Qwt
from Qwt5.anynumpy import *


class SimplePlot(Qwt.QwtPlot):

    def __init__(self, *args):
        Qwt.QwtPlot.__init__(self, *args)
        # make a QwtPlot widget
        self.setTitle('ReallySimpleDemo.py')
        self.insertLegend(Qwt.QwtLegend(), Qwt.QwtPlot.RightLegend)
        # set axis titles
        self.setAxisTitle(Qwt.QwtPlot.xBottom, 'x -->')
        self.setAxisTitle(Qwt.QwtPlot.yLeft, 'y -->')
        # insert a few curves
        cSin = Qwt.QwtPlotCurve('y = sin(x)')
        cSin.setPen(qt.QPen(qt.Qt.red))
        cSin.attach(self)
        cCos = Qwt.QwtPlotCurve('y = cos(x)')
        cCos.setPen(qt.QPen(qt.Qt.blue))
        cCos.attach(self)
        # make a Numeric array for the horizontal data
        x = arange(0.0, 10.0, 0.1)
        # initialize the data
        cSin.setData(x, sin(x
))
        cCos.setData(x, cos(x))

        #
insert a horizontal marker at y = 0
        mY = Qwt.QwtPlotMarker()
        mY.setLabel(Qwt.QwtText('y = 0'))
        mY.setLabelAlignment(qt.Qt.AlignRight | qt.Qt.AlignTop)
        mY.setLineStyle(Qwt.QwtPlotMarker.HLine)
        mY.setYValue(0.0)
        mY.attach(self)
        # insert a vertical marker at x = 2 pi
        mX = Qwt.QwtPlotMarker()
        mX.setLabel(Qwt.QwtText('x = 2 pi'))
        mX.setLabelAlignment(qt.Qt.AlignRight | qt.Qt.AlignTop)
        mX.setLineStyle(Qwt.QwtPlotMarker.VLine)
        mX.setXValue(2*pi)
        mX.attach(self)
        # replot
        self.replot()

    # __init__()

# class Plot


def make():
    demo = SimplePlot()
    demo.resize(500, 300)
    demo.show()
    return demo

# make()


def main(args):
    app = qt.QApplication(args)
    demo = make()
    app.setMainWidget(demo)
    sys.exit(app.exec_loop())

# main()


# Admire
if __name__ == '__main__':
    main(sys.argv)

# Local Variables: ***
# mode: python ***
# End: ***
baroquebobcat/pants
tests/python/pants_test/init/repro_mixin.py
Python
apache-2.0
1,635
0.004893
# coding
=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).

from __future__ import (absolute_import, division, generators, nested_scopes, print_fu
nction,
                        unicode_literals, with_statement)

import os

from pants.util.dirutil import safe_mkdir_for


class ReproMixin(object):
  """ Additional helper methods for use in Repro tests"""

  def add_file(self, root, path, content):
    """Add a file with specified contents

    :param str root: Root directory for path.
    :param str path: Path relative to root.
    :param str content: Content to write to file.
    """
    fullpath = os.path.join(root, path)
    safe_mkdir_for(fullpath)
    with open(fullpath, 'w') as outfile:
      outfile.write(content)

  def assert_not_exists(self, root, path):
    """Assert a file at relpath doesn't exist

    :param str root: Root directory of path.
    :param str path: Path relative to tar.gz.
    :return: bool
    """
    fullpath = os.path.join(root, path)
    self.assertFalse(os.path.exists(fullpath))

  def assert_file(self, root, path, expected_content=None):
    """ Assert that a file exists with the content specified

    :param str root: Root directory of path.
    :param str path: Path relative to tar.gz.
    :param str expected_content: file contents.
    :return: bool
    """
    fullpath = os.path.join(root, path)
    self.assertTrue(os.path.isfile(fullpath))
    if expected_content:
      with open(fullpath, 'r') as infile:
        content = infile.read()
      self.assertEqual(expected_content, content)
UgCS/vsm-cpp-sdk
tools/mavgen/lib/genxmlif/xmliftest.py
Python
bsd-3-clause
670
0.022388
import genxmlif
from genxmlif.xmlifODict import odict

xmlIf = genxmlif.chooseXmlIf(genxmlif.XMLIF_ELEMENTTREE)
x
mlTree = xmlIf.createXmlTree(None, "testTree", {"rootAttr1":"RootAttr1"})
xmlRootNode = xmlTree.getRootNode()
myDict = odict( (("childTag1","123"), ("childTag2","123")) )
xmlRootNode.appendChild("childTag", myDict)
xmlRootNode.appendChild("childTag", {"childTag1":"123456", "childTag2":"123456"})
xmlRootNode.appendChild("childTag", {"childTag1":"123456789",
"childTag3":"1234", "childTag2":"123456789"}) xmlRootNode.appendChild("childTag", {"childTag1":"1", "childTag2":"1"}) print xmlTree.printTree(prettyPrint=1) print xmlTree print xmlTree.getRootNode()
azer/jsbuild
jsbuild/manifest.py
Python
mit
673
0.028232
from jsbuild.attrdict import AttrDict
from time import strftime

class Manifest(AttrDict):
  def __init__(self,*args,**kwargs):
    super(AttrDict, self).__init__(*args,**kwargs)
    self._buf
fer_ = None
    self._parent_ = None

    if not self.__contains__('_dict_'):
      self['_dict_'] = {}

    self['_dict_']['timestamp'] = int(strftime("%Y%m%d%H%M"))

  def __getitem__(self,name):
    item = super(Manifest,self).__getitem__(name)

    if isinstance(item,Manifest) and not item._parent_:
      item._parent_ = self
    elif isinstance(item,str):
      root = self
while root._parent_:
        root = root._parent_

      item = item%root._dict_

    return item
Javex/qllbot
tests/lib_tests/events.py
Python
bsd-2-clause
1,213
0.000824
import sys
import unittest

sys.path.append('../../')
import lib.event


class TestEvents(unittest.TestCase):
    def setUp(self):
        TestEvents.successful = False
        TestEvents.successful2 = False

    def test_subscribe(self):
        @lib.event.subscribe('test')
        def subscribe_test():
            TestEvents.successful = True

        lib.event.call('test')
        self.assertTrue(TestEvents.successful)

    def test_subscribe_with_params(self):
        @lib.event.subscribe('test2')
        de
f subscribe_test(successful=False):
            TestEvents.successful = successful

        lib.event.call('test2', {'successful':
True})
        self.assertTrue(TestEvents.successful)

    def test_subscribe_two_with_params(self):
        @lib.event.subscribe('test3')
        def subscribe_test(successful=False):
            TestEvents.successful = successful

        @lib.event.subscribe('test3')
        def subscribe_test2(successful=False):
            TestEvents.successful2 = successful

        lib.event.call('test3', {'successful': True})
        self.assertTrue(TestEvents.successful)
        self.assertTrue(TestEvents.successful2)


if __name__ == '__main__':
    unittest.main()
summychou/CSForOSS
CA/OSSQt_DataMasterRigster.py
Python
mit
2,535
0.008748
# -*- coding: utf-8 -*-
# import sqlite3 as sqlite
import sys
import uuid

from pysqlcipher3 import dbapi2 as sqlite


def main():
    print("***************** Welcome to OSS DataMaster-Rigster System *******************")
    print("* *")
    print("******************************************************************************")
    conn = sqlite.connect('DataMasterSystem.db')
    c = conn.cursor()
    c.execute("PRAGMA key='data_master_system'")  # decrypt the encrypted sqlite file
    try:
        c.execute('create table data_master_system (data_master_name text, password text, unique_id text)')
    except sqlite.OperationalError as e:
        pass
    unique_id = uuid.uuid1()
    data_masters = c.execute("select * from data_master_system").fetchall()
    if len(data_masters) != 0:
        data_master_name = input("[*] Input your data master name:\n")
        for col in data_masters:
            if data_master_name.strip() == col[0]:
                print("[!] Data Master Name has existed!")
                print("******************************************************************************")
                print("* *")
                print("*********************** Data Master Rigster Is Failed! ***********************")
                sys.exit(-1)
    else:
        data_master_name = input("[*] Input your data master name:\n")
    password = input("[*] Input your password:\n")
    repeat_password = input("[*] Input your password again:\n")
    if password.strip() != repeat_password.strip():
        print("[!] Password is not equal to RePassword!")
        print("******************************************************************************")
        print("*
*") print("*********************** Data Master Rigster Is Failed! ***********************") sys.exit(-1) c.execute('insert into data_master_system values ("{}", "{}", "{}")'.format(data_master_name, password, unique_id)) conn.commit() c.close() print("******************************************************************************") print("*
*") print("********************* Data Master Rigster Is Successful! *********************") if __name__ == '__main__': main()
dmlc/xgboost
tests/python-gpu/test_gpu_prediction.py
Python
apache-2.0
17,716
0.000903
import sys
import pytest
import numpy as np
import xgboost as xgb
from xgboost.compat import PANDAS_INSTALLED
from hypothesis import given, strategies, assume, settings

if PANDAS_INSTALLED:
    from hypothesis.extra.pandas import column, data_frames, range_indexes
else:
    def noop(*args, **kwargs):
        pass

    column, data_frames, range_indexes = noop, noop, noop

sys.path.append("tests/python")
import testing as tm
from test_predict import run_threaded_predict  # noqa
from test_predict import run_predict_leaf  # noqa

rng = np.random.RandomState(1994)

shap_parameter_strategy = strategies.fixed_dictionaries({
    'max_depth': strategies.integers(1, 11),
    'max_leaves': strategies.integers(0, 256),
    'num_parallel_tree': strategies.sampled_from([1, 10]),
}).filter(lambda x: x['max_depth'] > 0 or x['max_leaves'] > 0)

predict_parameter_strategy = strategies.fixed_dictionaries({
    'max_depth': strategies.integers(1, 8),
    'num_parallel_tree': strategies.sampled_from([1, 4]),
})


class TestGPUPredict:
    def test_predict(self):
        iterations = 10
        np.random.seed(1)
        test_num_rows = [10, 1000, 5000]
        test_num_cols = [10, 50, 500]
        # This test passes for tree_method=gpu_hist and tree_method=exact. but
        # for `hist` and `approx` the floating point error accumulates faster
        # and fails even tol is set to 1e-4. For `hist`, the mismatching rate
        # with 5000 rows is 0.04.
        for num_rows in test_num_rows:
            for num_cols in test_num_cols:
                dtrain = xgb.DMatrix(np.random.randn(num_rows, num_cols),
                                     label=[0, 1] * int(num_rows / 2))
                dval = xgb.DMatrix(np.random.randn(num_rows, num_cols),
                                   label=[0, 1] * int(num_rows / 2))
                dtest = xgb.DMatrix(np.random.randn(num_rows, num_cols),
                                    label=[0, 1] * int(num_rows / 2))
                watchlist = [(dtrain, 'train'), (dval, 'validation')]
                res = {}
                param = {
                    "objective": "binary:logistic",
                    "predictor": "gpu_predictor",
                    'eval_metric': 'logloss',
                    'tree_method': 'gpu_hist',
                    'max_depth': 1
                }
                bst = xgb.train(param, dtrain, iterations, evals=watchlist,
                                evals_result=res)
                assert self.non_increasing(res["train"]["logloss"])
                gpu_pred_train = bst.predict(dtrain, output_margin=True)
                gpu_pred_test = bst.predict(dtest, output_margin=True)
                gpu_pred_val = bst.predict(dval, output_margin=True)
                param["predictor"] = "cpu_predictor"
                bst_cpu = xgb.train(param, dtrain, iterations, evals=watchlist)
                cpu_pred_train = bst_cpu.predict(dtrain, output_margin=True)
                cpu_pred_test = bst_cpu.predict(dtest, output_margin=True)
                cpu_pred_val = bst_cpu.predict(dval, output_margin=True)
                np.testing.assert_allclose(cpu_pred_train, gpu_pred_train,
                                           rtol=1e-6)
                np.testing.assert_allclose(cpu_pred_val, gpu_pred_val,
                                           rtol=1e-6)
                np.testing.assert_allclose(cpu_pred_test, gpu_pred_test,
                                           rtol=1e-6)

    def non_increasing(self, L):
        return all((y - x) < 0.001 for x, y in zip(L, L[1:]))

    # Test case for a bug where multiple batch predictions made on a
    # test set produce incorrect results
    @pytest.mark.skipif(**tm.no_sklearn())
    def test_multi_predict(self):
        from sklearn.datasets import make_regression
        from sklearn.model_selection import train_test_split

        n = 1000
        X, y = make_regression(n, random_state=rng)
        X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                            random_state=123)
        dtrain = xgb.DMatrix(X_train, label=y_train)
        dtest = xgb.DMatrix(X_test)

        params = {}
        params["tree_method"] = "gpu_hist"
        params['predictor'] = "gpu_predictor"
        bst_gpu_predict = xgb.train(params, dtrain)
        params['predictor'] = "cpu_predictor"
        bst_cpu_predict = xgb.train(params, dtrain)

        predict0 = bst_gpu_predict.predict(dtest)
        predict1 = bst_gpu_predict.predict(dtest)
        cpu_predict = bst_cpu_predict.predict(dtest)

        assert np.allclose(predict0, predict1)
        assert np.allclose(predict0, cpu_predict)

    @pytest.mark.skipif(**tm.no_sklearn())
    def test_sklearn(self):
        m, n = 15000, 14
        tr_size = 2500
        X = np.random.rand(m, n)
        y = 200 * np.matmul(X, np.arange(-3, -3 + n))
        X_train, y_train = X[:tr_size, :], y[:tr_size]
        X_test, y_test = X[tr_size:, :], y[tr_size:]

        # First with cpu_predictor
        params = {'tree_method': 'gpu_hist',
                  'predictor': 'cpu_predictor',
                  'n_jobs': -1,
                  'seed': 123}
        m = xgb.XGBRegressor(**params).fit(X_train, y_train)
        cpu_train_score = m.score(X_train, y_train)
        cpu_test_score = m.score(X_test, y_test)

        # Now with gpu_predictor
        params['predictor'] = 'gpu_predictor'
        m = xgb.XGBRegressor(**params).fit(X_train, y_train)
        gpu_train_score = m.score(X_train, y_train)
        gpu_test_score = m.score(X_test, y_test)

        assert np.allclose(cpu_train_score, gpu_train_score)
        assert np.allclose(cpu_test_score, gpu_test_score)

    def run_inplace_base_margin(self, booster, dtrain, X, base_margin):
        import cupy as cp
        dtrain.set_info(base_margin=base_margin)
        from_inplace = booster.inplace_predict(data=X, base_margin=base_margin)
        from_dmatrix = booster.predict(dtrain)
        cp.testing.assert_allclose(from_inplace, from_dmatrix)

    @pytest.mark.skipif(**tm.no_cupy())
    def test_inplace_predict_cupy(self):
        import cupy as cp
        cp.cuda.runtime.setDevice(0)
        rows = 1000
        cols = 10
        missing = 11  # set to integer for testing
        cp_rng = cp.random.RandomState(1994)
        cp.random.set_random_state(cp_rng)
        X = cp.random.randn(rows, cols)
        missing_idx = [i for i in range(0, cols, 4)]
        X[:, missing_idx] = missing  # set to be missing
        y = cp.random.randn(rows)
        dtrain = xgb.DMatrix(X, y)

        booster = xgb.train({'tree_method': 'gpu_hist'}, dtrain, num_boost_round=10)
        test = xgb.DMatrix(X[:10, ...], missing=missing)
        predt_from_array = booster.inplace_predict(X[:10, ...], missing=missing)
        predt_from_dmatrix = booster.predict(test)

        cp.testing.assert_allclose(predt_from_array, predt_from_dmatrix)

        def predict_dense(x):
            inplace_predt = booster.inplace_predict(x)
            d = xgb.DMatrix(x)
            copied_predt = cp.array(booster.predict(d))
            return cp.all(copied_predt == inplace_predt)

        # Don't do this on Windows, see issue #5793
        if sys.platform.startswith("win"):
            pytest.skip(
                'Multi-threaded in-place prediction with cuPy is not working on Windows')
        for i in range(10):
            run_threaded_predict(X, rows, predict_dense)

        base_margin = cp_rng.randn(rows)
self.run_inplace_base_margin(booster, dtrain, X, base_margin)

        # Create a wide dataset
        X = cp_rng.randn(100, 10000)
        y = cp_rng.randn(100)
        missing_idx = [i for i in range(0, X.shape[1], 16)]
        X[:, missing_idx] = missing
        reg = xgb.XGBRegressor(tree_method="gpu_hist", n_estimators=8, missing=missing)
        reg.fit(X, y)
        gpu_predt = reg.pred
ict(X)
        reg.set_params(predictor="cpu_predictor")
        cpu_predt = reg.predict(X)
        np.testing.assert_allclose(gpu_predt, cpu_predt, atol=1e-6)

    @pytest.mark.skipif(**tm.no_cupy())
    @pytest.mark.skipif
eddiejessup/cellulist
docs/conf.py
Python
bsd-3-clause
8,421
0.005344
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cellulist documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerate
d file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get t
he project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import cellulist

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'cellulist'
copyright = u'2015, Elliot Marsden'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cellulist.__version__
# The full version, including alpha/beta/rc tags.
release = cellulist.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'cellulistdoc'


# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'cellulist.tex', u'cellulist Documentation',
     u'Elliot Marsden', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'cellulist', u'cellulist Documentation',
     [u'Elliot Marsden'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'cellulist', u'cellulist Documentation',
     u'Elliot Marsden', 'cellulist', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index
liqd/a4-meinberlin
tests/kiezkasse/conftest.py
Python
agpl-3.0
125
0
from pytest_fact
oryboy import register

from meinberlin.test.factories import kiezkasse

register(kiezkasse.ProposalFactor
y)
indico/indico
indico/modules/events/tracks/forms.py
Python
mit
2,016
0.001984
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.

from wtforms.fields import StringField
from wtforms.validators import DataRequired
from wtforms_sqlalchemy.fields import QuerySelectField

from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.tracks.models.groups import TrackGroup
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm, generated_data
from indico.web.forms.f
ields import IndicoMarkdownField


class TrackForm(IndicoForm):
    title = StringField(_('Title'), [DataRequired()])
    code = StringField(_('Code'))
    track_group = QuerySelectField(_('Track g
roup'), default='', allow_blank=True, get_label='title',
                                   description=_('Select a track group to which this track should belong'))
    default_session = QuerySelectField(_('Default session'), default='', allow_blank=True, get_label='title',
                                       description=_('Indico will preselect this session whenever an abstract is '
                                                     'accepted for the track'))
    description = IndicoMarkdownField(_('Description'), editor=True)

    def __init__(self, *args, **kwargs):
        event = kwargs.pop('event')
        super().__init__(*args, **kwargs)
        self.default_session.query = Session.query.with_parent(event)
        self.track_group.query = TrackGroup.query.with_parent(event)


class ProgramForm(IndicoForm):
    program = IndicoMarkdownField(_('Program'), editor=True, mathjax=True)

    @generated_data
    def program_render_mode(self):
        return RenderMode.markdown


class TrackGroupForm(IndicoForm):
    title = StringField(_('Title'), [DataRequired()])
    description = IndicoMarkdownField(_('Description'), editor=True)
rmfitzpatrick/ansible
lib/ansible/plugins/action/nxos.py
Python
gpl-3.0
4,638
0.001725
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import sys
import copy

from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils.network_common import load_provider
from ansible.module_utils.nxos import nxos_provider_spec

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()


class ActionModule(_ActionModule):

    def run(self, tmp=None, task_vars=None):
        provider = load_provider(nxos_provider_spec, self._task.args)
        transport = provider['transport'] or 'cli'

        if self._play_context.connection != 'local' and transport == 'cli':
            return dict(
                failed=True,
                msg='invalid connection specified, expected connection=local, '
                    'got %s' % self._play_context.connection
            )

        display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr)

        if transport == 'cli':
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'network_cli'
            pc.network_os = 'nxos'
            pc.remote_addr = provider['host'] or self._play_context.remote_addr
            pc.port = int(provider['port'] or self._play_context.port or 22)
            pc.remote_user = provider['username'] or self._play_context.connection_user
            pc.password = provider['password'] or self._play_context.password
            pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
            pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)

            display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            # make sure we are in the right cli context which should be
            # enable mode and not config module
            rc, out, err = connection.exec_command('prompt()')
            while str(out).strip().endswith(')#'):
                display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
                connection.exec_command('exit')
                rc, out, err = connection.exec_command('prompt()')

            task_vars['ansible_socket'] = socket_path

        else:
            provider['transport'] = 'nxapi'
            if provider.get('host') is None:
                provider['host'] = se
lf._play_context.remote_addr

            if provider.get('port') is None:
                if provider.get('use_ssl'):
                    provider['port'] = 443
                else:
                    provider['port'] = 80

            if provider.get('timeout') is None:
                provider['timeout'] = C.PERSISTENT_COMMAND_TIMEOUT

            if provider.get('username') is None:
                provider['username'] = self._play_context.connection_user

            if provider.get('pas
sword') is None:
                provider['password'] = self._play_context.password

            if provider.get('use_ssl') is None:
                provider['use_ssl'] = False

            if provider.get('validate_certs') is None:
                provider['validate_certs'] = True

            self._task.args['provider'] = provider

        # make sure a transport value is set in args
        self._task.args['transport'] = transport

        result = super(ActionModule, self).run(tmp, task_vars)
        return result
plotly/plotly.py
packages/python/plotly/plotly/validators/scatterternary/marker/gradient/_typesrc.py
Python
mit
454
0
import _plotly_utils.basevalidators


class TypesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    d
ef __init__(
        self, plotly_name="typesrc", parent_name="scatterternary.marker.gradient", **kwargs
    ):
        super(TypesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **k
wargs
        )
heroku/wal-e
setup.py
Python
bsd-3-clause
1,831
0.001092
#!/usr/bin/env python
import os.path
import sys

# Version file management scheme and graceful degradation for
# setuptools borrowed and adapted from GitPython.
try:
    from setuptools import setup, find_packages

    # Silence pyflakes
    assert setup
    assert find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

if sys.version_info < (2, 6):
    raise RuntimeError('Python versions < 2.6 are not supported.')


# Utility function to read the contents of short files.
def read(fname):
    with open(os.pat
h.join(os.path.dirname(__file__), fname)) as f:
        return f.read()

VERSION = read(os.path.join('wal_e', 'VERSION')).strip()

install_requires = [
    l for l in read('requirements.txt').split('\n')
    if l and not l.startswith('#')]

if sys.version_info < (2, 7):
    install_requires.appen
d('argparse>=0.8')

setup(
    name="wal-e",
    version=VERSION,
    packages=find_packages(),
    install_requires=install_requires,

    # metadata for upload to PyPI
    author="The WAL-E Contributors",
    author_email="[email protected]",
    maintainer="Daniel Farina",
    maintainer_email="[email protected]",
    description="Continuous Archiving for Postgres",
    long_description=read('README.rst'),
    classifiers=['Topic :: Database',
                 'Topic :: System :: Archiving',
                 'Topic :: System :: Recovery Tools'],
    platforms=['any'],
    license="BSD",
    keywords=("postgres postgresql database backup archive archiving s3 aws "
              "openstack swift wabs azure wal shipping"),
    url="https://github.com/wal-e/wal-e",

    # Include the VERSION file
    package_data={'wal_e': ['VERSION']},

    # install
    entry_points={'console_scripts': ['wal-e=wal_e.cmd:main']})